GitHub Repository: torvalds/linux
Path: blob/master/kernel/fork.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* linux/kernel/fork.c
4
*
5
* Copyright (C) 1991, 1992 Linus Torvalds
6
*/
7
8
/*
9
* 'fork.c' contains the help-routines for the 'fork' system call
10
* (see also entry.S and others).
11
* Fork is rather simple, once you get the hang of it, but the memory
12
* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
13
*/
14
15
#include <linux/anon_inodes.h>
16
#include <linux/slab.h>
17
#include <linux/sched/autogroup.h>
18
#include <linux/sched/mm.h>
19
#include <linux/sched/user.h>
20
#include <linux/sched/numa_balancing.h>
21
#include <linux/sched/stat.h>
22
#include <linux/sched/task.h>
23
#include <linux/sched/task_stack.h>
24
#include <linux/sched/cputime.h>
25
#include <linux/sched/ext.h>
26
#include <linux/seq_file.h>
27
#include <linux/rtmutex.h>
28
#include <linux/init.h>
29
#include <linux/unistd.h>
30
#include <linux/module.h>
31
#include <linux/vmalloc.h>
32
#include <linux/completion.h>
33
#include <linux/personality.h>
34
#include <linux/mempolicy.h>
35
#include <linux/sem.h>
36
#include <linux/file.h>
37
#include <linux/fdtable.h>
38
#include <linux/iocontext.h>
39
#include <linux/key.h>
40
#include <linux/kmsan.h>
41
#include <linux/binfmts.h>
42
#include <linux/mman.h>
43
#include <linux/mmu_notifier.h>
44
#include <linux/fs.h>
45
#include <linux/mm.h>
46
#include <linux/mm_inline.h>
47
#include <linux/memblock.h>
48
#include <linux/nsproxy.h>
49
#include <linux/capability.h>
50
#include <linux/cpu.h>
51
#include <linux/cgroup.h>
52
#include <linux/security.h>
53
#include <linux/hugetlb.h>
54
#include <linux/seccomp.h>
55
#include <linux/swap.h>
56
#include <linux/syscalls.h>
57
#include <linux/syscall_user_dispatch.h>
58
#include <linux/jiffies.h>
59
#include <linux/futex.h>
60
#include <linux/compat.h>
61
#include <linux/kthread.h>
62
#include <linux/task_io_accounting_ops.h>
63
#include <linux/rcupdate.h>
64
#include <linux/ptrace.h>
65
#include <linux/mount.h>
66
#include <linux/audit.h>
67
#include <linux/memcontrol.h>
68
#include <linux/ftrace.h>
69
#include <linux/proc_fs.h>
70
#include <linux/profile.h>
71
#include <linux/rmap.h>
72
#include <linux/ksm.h>
73
#include <linux/acct.h>
74
#include <linux/userfaultfd_k.h>
75
#include <linux/tsacct_kern.h>
76
#include <linux/cn_proc.h>
77
#include <linux/freezer.h>
78
#include <linux/delayacct.h>
79
#include <linux/taskstats_kern.h>
80
#include <linux/tty.h>
81
#include <linux/fs_struct.h>
82
#include <linux/magic.h>
83
#include <linux/perf_event.h>
84
#include <linux/posix-timers.h>
85
#include <linux/user-return-notifier.h>
86
#include <linux/oom.h>
87
#include <linux/khugepaged.h>
88
#include <linux/signalfd.h>
89
#include <linux/uprobes.h>
90
#include <linux/aio.h>
91
#include <linux/compiler.h>
92
#include <linux/sysctl.h>
93
#include <linux/kcov.h>
94
#include <linux/livepatch.h>
95
#include <linux/thread_info.h>
96
#include <linux/kstack_erase.h>
97
#include <linux/kasan.h>
98
#include <linux/scs.h>
99
#include <linux/io_uring.h>
100
#include <linux/bpf.h>
101
#include <linux/stackprotector.h>
102
#include <linux/user_events.h>
103
#include <linux/iommu.h>
104
#include <linux/rseq.h>
105
#include <uapi/linux/pidfd.h>
106
#include <linux/pidfs.h>
107
#include <linux/tick.h>
108
#include <linux/unwind_deferred.h>
109
110
#include <asm/pgalloc.h>
111
#include <linux/uaccess.h>
112
#include <asm/mmu_context.h>
113
#include <asm/cacheflush.h>
114
#include <asm/tlbflush.h>
115
116
/* For dup_mmap(). */
117
#include "../mm/internal.h"
118
119
#include <trace/events/sched.h>
120
121
#define CREATE_TRACE_POINTS
122
#include <trace/events/task.h>
123
124
#include <kunit/visibility.h>
125
126
/*
127
* Minimum number of threads to boot the kernel
128
*/
129
#define MIN_THREADS 20
130
131
/*
132
* Maximum number of threads
133
*/
134
#define MAX_THREADS FUTEX_TID_MASK
135
136
/*
137
* Counters protected by write_lock_irq(&tasklist_lock)
138
*/
139
unsigned long total_forks; /* Handle normal Linux uptimes. */
140
int nr_threads; /* The idle threads do not count.. */
141
142
static int max_threads; /* tunable limit on nr_threads */
143
144
#define NAMED_ARRAY_INDEX(x) [x] = __stringify(x)
145
146
static const char * const resident_page_types[] = {
147
NAMED_ARRAY_INDEX(MM_FILEPAGES),
148
NAMED_ARRAY_INDEX(MM_ANONPAGES),
149
NAMED_ARRAY_INDEX(MM_SWAPENTS),
150
NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
151
};
152
153
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
154
155
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
156
157
#ifdef CONFIG_PROVE_RCU
158
int lockdep_tasklist_lock_is_held(void)
159
{
160
return lockdep_is_held(&tasklist_lock);
161
}
162
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
163
#endif /* #ifdef CONFIG_PROVE_RCU */
164
165
int nr_processes(void)
166
{
167
int cpu;
168
int total = 0;
169
170
for_each_possible_cpu(cpu)
171
total += per_cpu(process_counts, cpu);
172
173
return total;
174
}
175
176
void __weak arch_release_task_struct(struct task_struct *tsk)
177
{
178
}
179
180
static struct kmem_cache *task_struct_cachep;
181
182
static inline struct task_struct *alloc_task_struct_node(int node)
183
{
184
return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
185
}
186
187
static inline void free_task_struct(struct task_struct *tsk)
188
{
189
kmem_cache_free(task_struct_cachep, tsk);
190
}
191
192
#ifdef CONFIG_VMAP_STACK
193
/*
194
* vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
195
* flush. Try to minimize the number of calls by caching stacks.
196
*/
197
#define NR_CACHED_STACKS 2
198
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
199
/*
200
* Allocated stacks are cached and later reused by new threads, so memcg
201
* accounting is performed by the code assigning/releasing stacks to tasks.
202
* We need zeroed memory without __GFP_ACCOUNT.
203
*/
204
#define GFP_VMAP_STACK (GFP_KERNEL | __GFP_ZERO)
205
206
struct vm_stack {
207
struct rcu_head rcu;
208
struct vm_struct *stack_vm_area;
209
};
210
211
static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
212
{
213
unsigned int i;
214
215
for (i = 0; i < NR_CACHED_STACKS; i++) {
216
struct vm_struct *tmp = NULL;
217
218
if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm_area))
219
return true;
220
}
221
return false;
222
}
223
224
static void thread_stack_free_rcu(struct rcu_head *rh)
225
{
226
struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
227
struct vm_struct *vm_area = vm_stack->stack_vm_area;
228
229
if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
230
return;
231
232
vfree(vm_area->addr);
233
}
234
235
static void thread_stack_delayed_free(struct task_struct *tsk)
236
{
237
struct vm_stack *vm_stack = tsk->stack;
238
239
vm_stack->stack_vm_area = tsk->stack_vm_area;
240
call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
241
}
242
243
static int free_vm_stack_cache(unsigned int cpu)
244
{
245
struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
246
int i;
247
248
for (i = 0; i < NR_CACHED_STACKS; i++) {
249
struct vm_struct *vm_area = cached_vm_stack_areas[i];
250
251
if (!vm_area)
252
continue;
253
254
vfree(vm_area->addr);
255
cached_vm_stack_areas[i] = NULL;
256
}
257
258
return 0;
259
}
260
261
static int memcg_charge_kernel_stack(struct vm_struct *vm_area)
262
{
263
int i;
264
int ret;
265
int nr_charged = 0;
266
267
BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);
268
269
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
270
ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);
271
if (ret)
272
goto err;
273
nr_charged++;
274
}
275
return 0;
276
err:
277
for (i = 0; i < nr_charged; i++)
278
memcg_kmem_uncharge_page(vm_area->pages[i], 0);
279
return ret;
280
}
281
282
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
283
{
284
struct vm_struct *vm_area;
285
void *stack;
286
int i;
287
288
for (i = 0; i < NR_CACHED_STACKS; i++) {
289
vm_area = this_cpu_xchg(cached_stacks[i], NULL);
290
if (!vm_area)
291
continue;
292
293
if (memcg_charge_kernel_stack(vm_area)) {
294
vfree(vm_area->addr);
295
return -ENOMEM;
296
}
297
298
/* Reset stack metadata. */
299
kasan_unpoison_range(vm_area->addr, THREAD_SIZE);
300
301
stack = kasan_reset_tag(vm_area->addr);
302
303
/* Clear stale pointers from reused stack. */
304
memset(stack, 0, THREAD_SIZE);
305
306
tsk->stack_vm_area = vm_area;
307
tsk->stack = stack;
308
return 0;
309
}
310
311
stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
312
GFP_VMAP_STACK,
313
node, __builtin_return_address(0));
314
if (!stack)
315
return -ENOMEM;
316
317
vm_area = find_vm_area(stack);
318
if (memcg_charge_kernel_stack(vm_area)) {
319
vfree(stack);
320
return -ENOMEM;
321
}
322
/*
323
* We can't call find_vm_area() in interrupt context, and
324
* free_thread_stack() can be called in interrupt context,
325
* so cache the vm_struct.
326
*/
327
tsk->stack_vm_area = vm_area;
328
stack = kasan_reset_tag(stack);
329
tsk->stack = stack;
330
return 0;
331
}
332
333
static void free_thread_stack(struct task_struct *tsk)
334
{
335
if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
336
thread_stack_delayed_free(tsk);
337
338
tsk->stack = NULL;
339
tsk->stack_vm_area = NULL;
340
}
341
342
#else /* !CONFIG_VMAP_STACK */
343
344
/*
345
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
346
* kmemcache based allocator.
347
*/
348
#if THREAD_SIZE >= PAGE_SIZE
349
350
static void thread_stack_free_rcu(struct rcu_head *rh)
351
{
352
__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
353
}
354
355
static void thread_stack_delayed_free(struct task_struct *tsk)
356
{
357
struct rcu_head *rh = tsk->stack;
358
359
call_rcu(rh, thread_stack_free_rcu);
360
}
361
362
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
363
{
364
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
365
THREAD_SIZE_ORDER);
366
367
if (likely(page)) {
368
tsk->stack = kasan_reset_tag(page_address(page));
369
return 0;
370
}
371
return -ENOMEM;
372
}
373
374
static void free_thread_stack(struct task_struct *tsk)
375
{
376
thread_stack_delayed_free(tsk);
377
tsk->stack = NULL;
378
}
379
380
#else /* !(THREAD_SIZE >= PAGE_SIZE) */
381
382
static struct kmem_cache *thread_stack_cache;
383
384
static void thread_stack_free_rcu(struct rcu_head *rh)
385
{
386
kmem_cache_free(thread_stack_cache, rh);
387
}
388
389
static void thread_stack_delayed_free(struct task_struct *tsk)
390
{
391
struct rcu_head *rh = tsk->stack;
392
393
call_rcu(rh, thread_stack_free_rcu);
394
}
395
396
static int alloc_thread_stack_node(struct task_struct *tsk, int node)
397
{
398
unsigned long *stack;
399
stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
400
stack = kasan_reset_tag(stack);
401
tsk->stack = stack;
402
return stack ? 0 : -ENOMEM;
403
}
404
405
static void free_thread_stack(struct task_struct *tsk)
406
{
407
thread_stack_delayed_free(tsk);
408
tsk->stack = NULL;
409
}
410
411
void thread_stack_cache_init(void)
412
{
413
thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
414
THREAD_SIZE, THREAD_SIZE, 0, 0,
415
THREAD_SIZE, NULL);
416
BUG_ON(thread_stack_cache == NULL);
417
}
418
419
#endif /* THREAD_SIZE >= PAGE_SIZE */
420
#endif /* CONFIG_VMAP_STACK */
421
422
/* SLAB cache for signal_struct structures (tsk->signal) */
423
static struct kmem_cache *signal_cachep;
424
425
/* SLAB cache for sighand_struct structures (tsk->sighand) */
426
struct kmem_cache *sighand_cachep;
427
428
/* SLAB cache for files_struct structures (tsk->files) */
429
struct kmem_cache *files_cachep;
430
431
/* SLAB cache for fs_struct structures (tsk->fs) */
432
struct kmem_cache *fs_cachep;
433
434
/* SLAB cache for mm_struct structures (tsk->mm) */
435
static struct kmem_cache *mm_cachep;
436
437
static void account_kernel_stack(struct task_struct *tsk, int account)
438
{
439
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
440
struct vm_struct *vm_area = task_stack_vm_area(tsk);
441
int i;
442
443
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
444
mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,
445
account * (PAGE_SIZE / 1024));
446
} else {
447
void *stack = task_stack_page(tsk);
448
449
/* All stack pages are in the same node. */
450
mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
451
account * (THREAD_SIZE / 1024));
452
}
453
}
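/*
 * Worked example (editor's illustration, assuming PAGE_SIZE = 4 KiB and
 * THREAD_SIZE = 16 KiB): the vmap-stack branch above charges each of the
 * four stack pages individually,
 *
 *	account * (PAGE_SIZE / 1024) = +/-4 KB per page,
 *
 * while the non-vmap branch charges the whole stack in one call,
 *
 *	account * (THREAD_SIZE / 1024) = +/-16 KB,
 *
 * so either way NR_KERNEL_STACK_KB moves by 16 KB per task created or
 * destroyed on such a configuration.
 */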
454
455
void exit_task_stack_account(struct task_struct *tsk)
456
{
457
account_kernel_stack(tsk, -1);
458
459
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
460
struct vm_struct *vm_area;
461
int i;
462
463
vm_area = task_stack_vm_area(tsk);
464
for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
465
memcg_kmem_uncharge_page(vm_area->pages[i], 0);
466
}
467
}
468
469
static void release_task_stack(struct task_struct *tsk)
470
{
471
if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
472
return; /* Better to leak the stack than to free prematurely */
473
474
free_thread_stack(tsk);
475
}
476
477
#ifdef CONFIG_THREAD_INFO_IN_TASK
478
void put_task_stack(struct task_struct *tsk)
479
{
480
if (refcount_dec_and_test(&tsk->stack_refcount))
481
release_task_stack(tsk);
482
}
483
#endif
484
485
void free_task(struct task_struct *tsk)
486
{
487
#ifdef CONFIG_SECCOMP
488
WARN_ON_ONCE(tsk->seccomp.filter);
489
#endif
490
release_user_cpus_ptr(tsk);
491
scs_release(tsk);
492
493
#ifndef CONFIG_THREAD_INFO_IN_TASK
494
/*
495
* The task is finally done with both the stack and thread_info,
496
* so free both.
497
*/
498
release_task_stack(tsk);
499
#else
500
/*
501
* If the task had a separate stack allocation, it should be gone
502
* by now.
503
*/
504
WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
505
#endif
506
rt_mutex_debug_task_free(tsk);
507
ftrace_graph_exit_task(tsk);
508
arch_release_task_struct(tsk);
509
if (tsk->flags & PF_KTHREAD)
510
free_kthread_struct(tsk);
511
bpf_task_storage_free(tsk);
512
free_task_struct(tsk);
513
}
514
EXPORT_SYMBOL(free_task);
515
516
void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
517
{
518
struct file *exe_file;
519
520
exe_file = get_mm_exe_file(oldmm);
521
RCU_INIT_POINTER(mm->exe_file, exe_file);
522
/*
523
* We depend on the oldmm having properly denied write access to the
524
* exe_file already.
525
*/
526
if (exe_file && exe_file_deny_write_access(exe_file))
527
pr_warn_once("exe_file_deny_write_access() failed in %s\n", __func__);
528
}
529
530
#ifdef CONFIG_MMU
531
static inline int mm_alloc_pgd(struct mm_struct *mm)
532
{
533
mm->pgd = pgd_alloc(mm);
534
if (unlikely(!mm->pgd))
535
return -ENOMEM;
536
return 0;
537
}
538
539
static inline void mm_free_pgd(struct mm_struct *mm)
540
{
541
pgd_free(mm, mm->pgd);
542
}
543
#else
544
#define mm_alloc_pgd(mm) (0)
545
#define mm_free_pgd(mm)
546
#endif /* CONFIG_MMU */
547
548
#ifdef CONFIG_MM_ID
549
static DEFINE_IDA(mm_ida);
550
551
static inline int mm_alloc_id(struct mm_struct *mm)
552
{
553
int ret;
554
555
ret = ida_alloc_range(&mm_ida, MM_ID_MIN, MM_ID_MAX, GFP_KERNEL);
556
if (ret < 0)
557
return ret;
558
mm->mm_id = ret;
559
return 0;
560
}
561
562
static inline void mm_free_id(struct mm_struct *mm)
563
{
564
const mm_id_t id = mm->mm_id;
565
566
mm->mm_id = MM_ID_DUMMY;
567
if (id == MM_ID_DUMMY)
568
return;
569
if (WARN_ON_ONCE(id < MM_ID_MIN || id > MM_ID_MAX))
570
return;
571
ida_free(&mm_ida, id);
572
}
573
#else /* !CONFIG_MM_ID */
574
static inline int mm_alloc_id(struct mm_struct *mm) { return 0; }
575
static inline void mm_free_id(struct mm_struct *mm) {}
576
#endif /* CONFIG_MM_ID */
577
578
static void check_mm(struct mm_struct *mm)
579
{
580
int i;
581
582
BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
583
"Please make sure 'struct resident_page_types[]' is updated as well");
584
585
for (i = 0; i < NR_MM_COUNTERS; i++) {
586
long x = percpu_counter_sum(&mm->rss_stat[i]);
587
588
if (unlikely(x)) {
589
pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld Comm:%s Pid:%d\n",
590
mm, resident_page_types[i], x,
591
current->comm,
592
task_pid_nr(current));
593
}
594
}
595
596
if (mm_pgtables_bytes(mm))
597
pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
598
mm_pgtables_bytes(mm));
599
600
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
601
VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
602
#endif
603
}
604
605
#define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
606
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
607
608
static void do_check_lazy_tlb(void *arg)
609
{
610
struct mm_struct *mm = arg;
611
612
WARN_ON_ONCE(current->active_mm == mm);
613
}
614
615
static void do_shoot_lazy_tlb(void *arg)
616
{
617
struct mm_struct *mm = arg;
618
619
if (current->active_mm == mm) {
620
WARN_ON_ONCE(current->mm);
621
current->active_mm = &init_mm;
622
switch_mm(mm, &init_mm, current);
623
}
624
}
625
626
static void cleanup_lazy_tlbs(struct mm_struct *mm)
627
{
628
if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
629
/*
630
* In this case, lazy tlb mms are refcounted and would not reach
631
* __mmdrop until all CPUs have switched away and mmdrop()ed.
632
*/
633
return;
634
}
635
636
/*
637
* Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it
638
* requires lazy mm users to switch to another mm when the refcount
639
* drops to zero, before the mm is freed. This requires IPIs here to
640
* switch kernel threads to init_mm.
641
*
642
* archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm
643
* switch with the final userspace teardown TLB flush which leaves the
644
* mm lazy on this CPU but no others, reducing the need for additional
645
* IPIs here. There are cases where a final IPI is still required here,
646
* such as the final mmdrop being performed on a different CPU than the
647
* one exiting, or kernel threads using the mm when userspace exits.
648
*
649
* IPI overheads have not been found to be expensive, but they could be
650
* reduced in a number of possible ways, for example (roughly
651
* increasing order of complexity):
652
* - The last lazy reference created by exit_mm() could instead switch
653
* to init_mm, however it's probable this will run on the same CPU
654
* immediately afterwards, so this may not reduce IPIs much.
655
* - A batch of mms requiring IPIs could be gathered and freed at once.
656
* - CPUs store active_mm where it can be remotely checked without a
657
* lock, to filter out false-positives in the cpumask.
658
* - After mm_users or mm_count reaches zero, switching away from the
659
* mm could clear mm_cpumask to reduce some IPIs, perhaps together
660
* with some batching or delaying of the final IPIs.
661
* - A delayed freeing and RCU-like quiescing sequence based on mm
662
* switching to avoid IPIs completely.
663
*/
664
on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
665
if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES))
666
on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
667
}
668
669
/*
670
* Called when the last reference to the mm
671
* is dropped: either by a lazy thread or by
672
* mmput. Free the page directory and the mm.
673
*/
674
void __mmdrop(struct mm_struct *mm)
675
{
676
BUG_ON(mm == &init_mm);
677
WARN_ON_ONCE(mm == current->mm);
678
679
/* Ensure no CPUs are using this as their lazy tlb mm */
680
cleanup_lazy_tlbs(mm);
681
682
WARN_ON_ONCE(mm == current->active_mm);
683
mm_free_pgd(mm);
684
mm_free_id(mm);
685
destroy_context(mm);
686
mmu_notifier_subscriptions_destroy(mm);
687
check_mm(mm);
688
put_user_ns(mm->user_ns);
689
mm_pasid_drop(mm);
690
mm_destroy_cid(mm);
691
percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
692
693
free_mm(mm);
694
}
695
EXPORT_SYMBOL_GPL(__mmdrop);
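/*
 * Illustrative sketch (not part of this file): __mmdrop() releases the
 * mm_count reference taken by mmgrab(), which pins only the mm_struct
 * itself, while mmget()/mmput() manage mm_users and keep the address
 * space (VMAs, page tables) alive as well. A typical "pin the struct,
 * then try to pin the address space" pattern, assuming the caller
 * already holds a valid mm pointer:
 *
 *	mmgrab(mm);			// mm_count++, mm_struct stays allocated
 *	...
 *	if (mmget_not_zero(mm)) {	// mm_users++ unless already torn down
 *		... walk VMAs under mmap_read_lock(mm) ...
 *		mmput(mm);
 *	}
 *	mmdrop(mm);			// may end up in __mmdrop() above
 */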
696
697
static void mmdrop_async_fn(struct work_struct *work)
698
{
699
struct mm_struct *mm;
700
701
mm = container_of(work, struct mm_struct, async_put_work);
702
__mmdrop(mm);
703
}
704
705
static void mmdrop_async(struct mm_struct *mm)
706
{
707
if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
708
INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
709
schedule_work(&mm->async_put_work);
710
}
711
}
712
713
static inline void free_signal_struct(struct signal_struct *sig)
714
{
715
taskstats_tgid_free(sig);
716
sched_autogroup_exit(sig);
717
/*
718
* __mmdrop is not safe to call from softirq context on x86 due to
719
* pgd_dtor so postpone it to the async context
720
*/
721
if (sig->oom_mm)
722
mmdrop_async(sig->oom_mm);
723
kmem_cache_free(signal_cachep, sig);
724
}
725
726
static inline void put_signal_struct(struct signal_struct *sig)
727
{
728
if (refcount_dec_and_test(&sig->sigcnt))
729
free_signal_struct(sig);
730
}
731
732
void __put_task_struct(struct task_struct *tsk)
733
{
734
WARN_ON(!tsk->exit_state);
735
WARN_ON(refcount_read(&tsk->usage));
736
WARN_ON(tsk == current);
737
738
unwind_task_free(tsk);
739
sched_ext_free(tsk);
740
io_uring_free(tsk);
741
cgroup_free(tsk);
742
task_numa_free(tsk, true);
743
security_task_free(tsk);
744
exit_creds(tsk);
745
delayacct_tsk_free(tsk);
746
put_signal_struct(tsk->signal);
747
sched_core_free(tsk);
748
free_task(tsk);
749
}
750
EXPORT_SYMBOL_GPL(__put_task_struct);
751
752
void __put_task_struct_rcu_cb(struct rcu_head *rhp)
753
{
754
struct task_struct *task = container_of(rhp, struct task_struct, rcu);
755
756
__put_task_struct(task);
757
}
758
EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
759
760
void __init __weak arch_task_cache_init(void) { }
761
762
/*
763
* set_max_threads
764
*/
765
static void __init set_max_threads(unsigned int max_threads_suggested)
766
{
767
u64 threads;
768
unsigned long nr_pages = memblock_estimated_nr_free_pages();
769
770
/*
771
* The number of threads shall be limited such that the thread
772
* structures may only consume a small part of the available memory.
773
*/
774
if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
775
threads = MAX_THREADS;
776
else
777
threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
778
(u64) THREAD_SIZE * 8UL);
779
780
if (threads > max_threads_suggested)
781
threads = max_threads_suggested;
782
783
max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
784
}
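/*
 * Worked example (editor's illustration, assuming 4 KiB pages and a
 * 16 KiB THREAD_SIZE): with about 4 GiB of usable memory, nr_pages is
 * roughly 1,048,576, so
 *
 *	threads = nr_pages * PAGE_SIZE / (THREAD_SIZE * 8)
 *	        = 4 GiB / 128 KiB = 32768,
 *
 * i.e. kernel stacks for max_threads tasks may consume at most about
 * 1/8 of memory (32768 * 16 KiB = 512 MiB). The result is then capped
 * by max_threads_suggested and clamped to [MIN_THREADS, MAX_THREADS].
 */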
785
786
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
787
/* Initialized by the architecture: */
788
int arch_task_struct_size __read_mostly;
789
#endif
790
791
static void __init task_struct_whitelist(unsigned long *offset, unsigned long *size)
792
{
793
/* Fetch thread_struct whitelist for the architecture. */
794
arch_thread_struct_whitelist(offset, size);
795
796
/*
797
* Handle zero-sized whitelist or empty thread_struct, otherwise
798
* adjust offset to position of thread_struct in task_struct.
799
*/
800
if (unlikely(*size == 0))
801
*offset = 0;
802
else
803
*offset += offsetof(struct task_struct, thread);
804
}
805
806
void __init fork_init(void)
807
{
808
int i;
809
#ifndef ARCH_MIN_TASKALIGN
810
#define ARCH_MIN_TASKALIGN 0
811
#endif
812
int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
813
unsigned long useroffset, usersize;
814
815
/* create a slab on which task_structs can be allocated */
816
task_struct_whitelist(&useroffset, &usersize);
817
task_struct_cachep = kmem_cache_create_usercopy("task_struct",
818
arch_task_struct_size, align,
819
SLAB_PANIC|SLAB_ACCOUNT,
820
useroffset, usersize, NULL);
821
822
/* do the arch specific task caches init */
823
arch_task_cache_init();
824
825
set_max_threads(MAX_THREADS);
826
827
init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
828
init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
829
init_task.signal->rlim[RLIMIT_SIGPENDING] =
830
init_task.signal->rlim[RLIMIT_NPROC];
831
832
for (i = 0; i < UCOUNT_COUNTS; i++)
833
init_user_ns.ucount_max[i] = max_threads/2;
834
835
set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY);
836
set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY);
837
set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
838
set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY);
839
840
#ifdef CONFIG_VMAP_STACK
841
cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
842
NULL, free_vm_stack_cache);
843
#endif
844
845
scs_init();
846
847
lockdep_init_task(&init_task);
848
uprobes_init();
849
}
850
851
int __weak arch_dup_task_struct(struct task_struct *dst,
852
struct task_struct *src)
853
{
854
*dst = *src;
855
return 0;
856
}
857
858
void set_task_stack_end_magic(struct task_struct *tsk)
859
{
860
unsigned long *stackend;
861
862
stackend = end_of_stack(tsk);
863
*stackend = STACK_END_MAGIC; /* for overflow detection */
864
}
865
866
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
867
{
868
struct task_struct *tsk;
869
int err;
870
871
if (node == NUMA_NO_NODE)
872
node = tsk_fork_get_node(orig);
873
tsk = alloc_task_struct_node(node);
874
if (!tsk)
875
return NULL;
876
877
err = arch_dup_task_struct(tsk, orig);
878
if (err)
879
goto free_tsk;
880
881
err = alloc_thread_stack_node(tsk, node);
882
if (err)
883
goto free_tsk;
884
885
#ifdef CONFIG_THREAD_INFO_IN_TASK
886
refcount_set(&tsk->stack_refcount, 1);
887
#endif
888
account_kernel_stack(tsk, 1);
889
890
err = scs_prepare(tsk, node);
891
if (err)
892
goto free_stack;
893
894
#ifdef CONFIG_SECCOMP
895
/*
896
* We must handle setting up seccomp filters once we're under
897
* the sighand lock in case orig has changed between now and
898
* then. Until then, filter must be NULL to avoid messing up
899
* the usage counts on the error path calling free_task.
900
*/
901
tsk->seccomp.filter = NULL;
902
#endif
903
904
setup_thread_stack(tsk, orig);
905
clear_user_return_notifier(tsk);
906
clear_tsk_need_resched(tsk);
907
set_task_stack_end_magic(tsk);
908
clear_syscall_work_syscall_user_dispatch(tsk);
909
910
#ifdef CONFIG_STACKPROTECTOR
911
tsk->stack_canary = get_random_canary();
912
#endif
913
if (orig->cpus_ptr == &orig->cpus_mask)
914
tsk->cpus_ptr = &tsk->cpus_mask;
915
dup_user_cpus_ptr(tsk, orig, node);
916
917
/*
918
* One for the user space visible state that goes away when reaped.
919
* One for the scheduler.
920
*/
921
refcount_set(&tsk->rcu_users, 2);
922
/* One for the rcu users */
923
refcount_set(&tsk->usage, 1);
924
#ifdef CONFIG_BLK_DEV_IO_TRACE
925
tsk->btrace_seq = 0;
926
#endif
927
tsk->splice_pipe = NULL;
928
tsk->task_frag.page = NULL;
929
tsk->wake_q.next = NULL;
930
tsk->worker_private = NULL;
931
932
kcov_task_init(tsk);
933
kmsan_task_create(tsk);
934
kmap_local_fork(tsk);
935
936
#ifdef CONFIG_FAULT_INJECTION
937
tsk->fail_nth = 0;
938
#endif
939
940
#ifdef CONFIG_BLK_CGROUP
941
tsk->throttle_disk = NULL;
942
tsk->use_memdelay = 0;
943
#endif
944
945
#ifdef CONFIG_ARCH_HAS_CPU_PASID
946
tsk->pasid_activated = 0;
947
#endif
948
949
#ifdef CONFIG_MEMCG
950
tsk->active_memcg = NULL;
951
#endif
952
953
#ifdef CONFIG_X86_BUS_LOCK_DETECT
954
tsk->reported_split_lock = 0;
955
#endif
956
957
#ifdef CONFIG_SCHED_MM_CID
958
tsk->mm_cid = -1;
959
tsk->last_mm_cid = -1;
960
tsk->mm_cid_active = 0;
961
tsk->migrate_from_cpu = -1;
962
#endif
963
return tsk;
964
965
free_stack:
966
exit_task_stack_account(tsk);
967
free_thread_stack(tsk);
968
free_tsk:
969
free_task_struct(tsk);
970
return NULL;
971
}
972
973
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
974
975
static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
976
977
static int __init coredump_filter_setup(char *s)
978
{
979
default_dump_filter =
980
(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
981
MMF_DUMP_FILTER_MASK;
982
return 1;
983
}
984
985
__setup("coredump_filter=", coredump_filter_setup);
986
987
#include <linux/init_task.h>
988
989
static void mm_init_aio(struct mm_struct *mm)
990
{
991
#ifdef CONFIG_AIO
992
spin_lock_init(&mm->ioctx_lock);
993
mm->ioctx_table = NULL;
994
#endif
995
}
996
997
static __always_inline void mm_clear_owner(struct mm_struct *mm,
998
struct task_struct *p)
999
{
1000
#ifdef CONFIG_MEMCG
1001
if (mm->owner == p)
1002
WRITE_ONCE(mm->owner, NULL);
1003
#endif
1004
}
1005
1006
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1007
{
1008
#ifdef CONFIG_MEMCG
1009
mm->owner = p;
1010
#endif
1011
}
1012
1013
static void mm_init_uprobes_state(struct mm_struct *mm)
1014
{
1015
#ifdef CONFIG_UPROBES
1016
mm->uprobes_state.xol_area = NULL;
1017
arch_uprobe_init_state(mm);
1018
#endif
1019
}
1020
1021
static void mmap_init_lock(struct mm_struct *mm)
1022
{
1023
init_rwsem(&mm->mmap_lock);
1024
mm_lock_seqcount_init(mm);
1025
#ifdef CONFIG_PER_VMA_LOCK
1026
rcuwait_init(&mm->vma_writer_wait);
1027
#endif
1028
}
1029
1030
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
1031
struct user_namespace *user_ns)
1032
{
1033
mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
1034
mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
1035
atomic_set(&mm->mm_users, 1);
1036
atomic_set(&mm->mm_count, 1);
1037
seqcount_init(&mm->write_protect_seq);
1038
mmap_init_lock(mm);
1039
INIT_LIST_HEAD(&mm->mmlist);
1040
mm_pgtables_bytes_init(mm);
1041
mm->map_count = 0;
1042
mm->locked_vm = 0;
1043
atomic64_set(&mm->pinned_vm, 0);
1044
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
1045
spin_lock_init(&mm->page_table_lock);
1046
spin_lock_init(&mm->arg_lock);
1047
mm_init_cpumask(mm);
1048
mm_init_aio(mm);
1049
mm_init_owner(mm, p);
1050
mm_pasid_init(mm);
1051
RCU_INIT_POINTER(mm->exe_file, NULL);
1052
mmu_notifier_subscriptions_init(mm);
1053
init_tlb_flush_pending(mm);
1054
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
1055
mm->pmd_huge_pte = NULL;
1056
#endif
1057
mm_init_uprobes_state(mm);
1058
hugetlb_count_init(mm);
1059
1060
mm_flags_clear_all(mm);
1061
if (current->mm) {
1062
unsigned long flags = __mm_flags_get_word(current->mm);
1063
1064
__mm_flags_set_word(mm, mmf_init_legacy_flags(flags));
1065
mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
1066
} else {
1067
__mm_flags_set_word(mm, default_dump_filter);
1068
mm->def_flags = 0;
1069
}
1070
1071
if (futex_mm_init(mm))
1072
goto fail_mm_init;
1073
1074
if (mm_alloc_pgd(mm))
1075
goto fail_nopgd;
1076
1077
if (mm_alloc_id(mm))
1078
goto fail_noid;
1079
1080
if (init_new_context(p, mm))
1081
goto fail_nocontext;
1082
1083
if (mm_alloc_cid(mm, p))
1084
goto fail_cid;
1085
1086
if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
1087
NR_MM_COUNTERS))
1088
goto fail_pcpu;
1089
1090
mm->user_ns = get_user_ns(user_ns);
1091
lru_gen_init_mm(mm);
1092
return mm;
1093
1094
fail_pcpu:
1095
mm_destroy_cid(mm);
1096
fail_cid:
1097
destroy_context(mm);
1098
fail_nocontext:
1099
mm_free_id(mm);
1100
fail_noid:
1101
mm_free_pgd(mm);
1102
fail_nopgd:
1103
futex_hash_free(mm);
1104
fail_mm_init:
1105
free_mm(mm);
1106
return NULL;
1107
}
1108
1109
/*
1110
* Allocate and initialize an mm_struct.
1111
*/
1112
struct mm_struct *mm_alloc(void)
1113
{
1114
struct mm_struct *mm;
1115
1116
mm = allocate_mm();
1117
if (!mm)
1118
return NULL;
1119
1120
memset(mm, 0, sizeof(*mm));
1121
return mm_init(mm, current, current_user_ns());
1122
}
1123
EXPORT_SYMBOL_IF_KUNIT(mm_alloc);
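/*
 * Usage sketch (illustrative, not from this file): mm_alloc() hands back
 * an mm with mm_users == 1 and mm_count == 1 (both set in mm_init()
 * above), so a caller that sets one up but never installs it is expected
 * to drop that single user reference with mmput():
 *
 *	struct mm_struct *mm = mm_alloc();
 *
 *	if (!mm)
 *		return -ENOMEM;
 *	...
 *	mmput(mm);	// error path: releases the sole mm_users reference
 */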
1124
1125
static inline void __mmput(struct mm_struct *mm)
1126
{
1127
VM_BUG_ON(atomic_read(&mm->mm_users));
1128
1129
uprobe_clear_state(mm);
1130
exit_aio(mm);
1131
ksm_exit(mm);
1132
khugepaged_exit(mm); /* must run before exit_mmap */
1133
exit_mmap(mm);
1134
mm_put_huge_zero_folio(mm);
1135
set_mm_exe_file(mm, NULL);
1136
if (!list_empty(&mm->mmlist)) {
1137
spin_lock(&mmlist_lock);
1138
list_del(&mm->mmlist);
1139
spin_unlock(&mmlist_lock);
1140
}
1141
if (mm->binfmt)
1142
module_put(mm->binfmt->module);
1143
lru_gen_del_mm(mm);
1144
futex_hash_free(mm);
1145
mmdrop(mm);
1146
}
1147
1148
/*
1149
* Decrement the use count and release all resources for an mm.
1150
*/
1151
void mmput(struct mm_struct *mm)
1152
{
1153
might_sleep();
1154
1155
if (atomic_dec_and_test(&mm->mm_users))
1156
__mmput(mm);
1157
}
1158
EXPORT_SYMBOL_GPL(mmput);
1159
1160
#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
1161
static void mmput_async_fn(struct work_struct *work)
1162
{
1163
struct mm_struct *mm = container_of(work, struct mm_struct,
1164
async_put_work);
1165
1166
__mmput(mm);
1167
}
1168
1169
void mmput_async(struct mm_struct *mm)
1170
{
1171
if (atomic_dec_and_test(&mm->mm_users)) {
1172
INIT_WORK(&mm->async_put_work, mmput_async_fn);
1173
schedule_work(&mm->async_put_work);
1174
}
1175
}
1176
EXPORT_SYMBOL_GPL(mmput_async);
1177
#endif
1178
1179
/**
1180
* set_mm_exe_file - change a reference to the mm's executable file
1181
* @mm: The mm to change.
1182
* @new_exe_file: The new file to use.
1183
*
1184
* This changes mm's executable file (shown as symlink /proc/[pid]/exe).
1185
*
1186
* Main users are mmput() and sys_execve(). Callers prevent concurrent
1187
* invocations: in mmput() nobody alive left, in execve it happens before
1188
* the new mm is made visible to anyone.
1189
*
1190
* Can only fail if new_exe_file != NULL.
1191
*/
1192
int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1193
{
1194
struct file *old_exe_file;
1195
1196
/*
1197
* It is safe to dereference the exe_file without RCU as
1198
* this function is only called if nobody else can access
1199
* this mm -- see comment above for justification.
1200
*/
1201
old_exe_file = rcu_dereference_raw(mm->exe_file);
1202
1203
if (new_exe_file) {
1204
/*
1205
* We expect the caller (i.e., sys_execve) to have already denied
1206
* write access, so this is unlikely to fail.
1207
*/
1208
if (unlikely(exe_file_deny_write_access(new_exe_file)))
1209
return -EACCES;
1210
get_file(new_exe_file);
1211
}
1212
rcu_assign_pointer(mm->exe_file, new_exe_file);
1213
if (old_exe_file) {
1214
exe_file_allow_write_access(old_exe_file);
1215
fput(old_exe_file);
1216
}
1217
return 0;
1218
}
1219
1220
/**
1221
* replace_mm_exe_file - replace a reference to the mm's executable file
1222
* @mm: The mm to change.
1223
* @new_exe_file: The new file to use.
1224
*
1225
* This changes mm's executable file (shown as symlink /proc/[pid]/exe).
1226
*
1227
* Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
1228
*/
1229
int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1230
{
1231
struct vm_area_struct *vma;
1232
struct file *old_exe_file;
1233
int ret = 0;
1234
1235
/* Forbid mm->exe_file change if old file still mapped. */
1236
old_exe_file = get_mm_exe_file(mm);
1237
if (old_exe_file) {
1238
VMA_ITERATOR(vmi, mm, 0);
1239
mmap_read_lock(mm);
1240
for_each_vma(vmi, vma) {
1241
if (!vma->vm_file)
1242
continue;
1243
if (path_equal(&vma->vm_file->f_path,
1244
&old_exe_file->f_path)) {
1245
ret = -EBUSY;
1246
break;
1247
}
1248
}
1249
mmap_read_unlock(mm);
1250
fput(old_exe_file);
1251
if (ret)
1252
return ret;
1253
}
1254
1255
ret = exe_file_deny_write_access(new_exe_file);
1256
if (ret)
1257
return -EACCES;
1258
get_file(new_exe_file);
1259
1260
/* set the new file */
1261
mmap_write_lock(mm);
1262
old_exe_file = rcu_dereference_raw(mm->exe_file);
1263
rcu_assign_pointer(mm->exe_file, new_exe_file);
1264
mmap_write_unlock(mm);
1265
1266
if (old_exe_file) {
1267
exe_file_allow_write_access(old_exe_file);
1268
fput(old_exe_file);
1269
}
1270
return 0;
1271
}
1272
1273
/**
1274
* get_mm_exe_file - acquire a reference to the mm's executable file
1275
* @mm: The mm of interest.
1276
*
1277
* Returns %NULL if mm has no associated executable file.
1278
* User must release file via fput().
1279
*/
1280
struct file *get_mm_exe_file(struct mm_struct *mm)
1281
{
1282
struct file *exe_file;
1283
1284
rcu_read_lock();
1285
exe_file = get_file_rcu(&mm->exe_file);
1286
rcu_read_unlock();
1287
return exe_file;
1288
}
1289
1290
/**
1291
* get_task_exe_file - acquire a reference to the task's executable file
1292
* @task: The task.
1293
*
1294
* Returns %NULL if task's mm (if any) has no associated executable file or
1295
* this is a kernel thread with borrowed mm (see the comment above get_task_mm).
1296
* User must release file via fput().
1297
*/
1298
struct file *get_task_exe_file(struct task_struct *task)
1299
{
1300
struct file *exe_file = NULL;
1301
struct mm_struct *mm;
1302
1303
if (task->flags & PF_KTHREAD)
1304
return NULL;
1305
1306
task_lock(task);
1307
mm = task->mm;
1308
if (mm)
1309
exe_file = get_mm_exe_file(mm);
1310
task_unlock(task);
1311
return exe_file;
1312
}
1313
1314
/**
1315
* get_task_mm - acquire a reference to the task's mm
1316
* @task: The task.
1317
*
1318
* Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
1319
* this kernel workthread has transiently adopted a user mm with use_mm,
1320
* to do its AIO) is not set and if so returns a reference to it, after
1321
* bumping up the use count. User must release the mm via mmput()
1322
* after use. Typically used by /proc and ptrace.
1323
*/
1324
struct mm_struct *get_task_mm(struct task_struct *task)
1325
{
1326
struct mm_struct *mm;
1327
1328
if (task->flags & PF_KTHREAD)
1329
return NULL;
1330
1331
task_lock(task);
1332
mm = task->mm;
1333
if (mm)
1334
mmget(mm);
1335
task_unlock(task);
1336
return mm;
1337
}
1338
EXPORT_SYMBOL_GPL(get_task_mm);
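/*
 * Typical caller pattern (illustrative sketch of the contract described
 * in the kerneldoc above; not code from this file), e.g. for a /proc
 * handler that already holds a task reference:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (!mm)
 *		return -ESRCH;
 *	if (mmap_read_lock_killable(mm)) {
 *		mmput(mm);
 *		return -EINTR;
 *	}
 *	... inspect the VMAs ...
 *	mmap_read_unlock(mm);
 *	mmput(mm);
 */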
1339
1340
static bool may_access_mm(struct mm_struct *mm, struct task_struct *task, unsigned int mode)
1341
{
1342
if (mm == current->mm)
1343
return true;
1344
if (ptrace_may_access(task, mode))
1345
return true;
1346
if ((mode & PTRACE_MODE_READ) && perfmon_capable())
1347
return true;
1348
return false;
1349
}
1350
1351
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1352
{
1353
struct mm_struct *mm;
1354
int err;
1355
1356
err = down_read_killable(&task->signal->exec_update_lock);
1357
if (err)
1358
return ERR_PTR(err);
1359
1360
mm = get_task_mm(task);
1361
if (!mm) {
1362
mm = ERR_PTR(-ESRCH);
1363
} else if (!may_access_mm(mm, task, mode)) {
1364
mmput(mm);
1365
mm = ERR_PTR(-EACCES);
1366
}
1367
up_read(&task->signal->exec_update_lock);
1368
1369
return mm;
1370
}
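/*
 * Usage sketch (illustrative, not from this file): unlike get_task_mm(),
 * mm_access() never returns NULL; it yields either a referenced mm or an
 * ERR_PTR(), so callers of the /proc/[pid]/mem variety typically do:
 *
 *	struct mm_struct *mm = mm_access(task, PTRACE_MODE_ATTACH_FSCREDS);
 *
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	... read or write the address space ...
 *	mmput(mm);
 */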
1371
1372
static void complete_vfork_done(struct task_struct *tsk)
1373
{
1374
struct completion *vfork;
1375
1376
task_lock(tsk);
1377
vfork = tsk->vfork_done;
1378
if (likely(vfork)) {
1379
tsk->vfork_done = NULL;
1380
complete(vfork);
1381
}
1382
task_unlock(tsk);
1383
}
1384
1385
static int wait_for_vfork_done(struct task_struct *child,
1386
struct completion *vfork)
1387
{
1388
unsigned int state = TASK_KILLABLE|TASK_FREEZABLE;
1389
int killed;
1390
1391
cgroup_enter_frozen();
1392
killed = wait_for_completion_state(vfork, state);
1393
cgroup_leave_frozen(false);
1394
1395
if (killed) {
1396
task_lock(child);
1397
child->vfork_done = NULL;
1398
task_unlock(child);
1399
}
1400
1401
put_task_struct(child);
1402
return killed;
1403
}
1404
1405
/* Please note the differences between mmput and mm_release.
1406
* mmput is called whenever we stop holding onto a mm_struct,
1407
* error success whatever.
1408
*
1409
* mm_release is called after a mm_struct has been removed
1410
* from the current process.
1411
*
1412
* This difference is important for error handling, when we
1413
* only half set up a mm_struct for a new process and need to restore
1414
* the old one. Because we mmput the new mm_struct before
1415
* restoring the old one. . .
1416
* Eric Biederman 10 January 1998
1417
*/
1418
static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
1419
{
1420
uprobe_free_utask(tsk);
1421
1422
/* Get rid of any cached register state */
1423
deactivate_mm(tsk, mm);
1424
1425
/*
1426
* Signal userspace if we're not exiting with a core dump
1427
* because we want to leave the value intact for debugging
1428
* purposes.
1429
*/
1430
if (tsk->clear_child_tid) {
1431
if (atomic_read(&mm->mm_users) > 1) {
1432
/*
1433
* We don't check the error code - if userspace has
1434
* not set up a proper pointer then tough luck.
1435
*/
1436
put_user(0, tsk->clear_child_tid);
1437
do_futex(tsk->clear_child_tid, FUTEX_WAKE,
1438
1, NULL, NULL, 0, 0);
1439
}
1440
tsk->clear_child_tid = NULL;
1441
}
1442
1443
/*
1444
* All done, finally we can wake up parent and return this mm to him.
1445
* Also kthread_stop() uses this completion for synchronization.
1446
*/
1447
if (tsk->vfork_done)
1448
complete_vfork_done(tsk);
1449
}
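/*
 * Userspace-side sketch (illustrative, not kernel code): the
 * clear_child_tid handling above is what lets a threading library join a
 * thread created with CLONE_CHILD_CLEARTID. The joiner waits on the TID
 * word that the kernel zeroes and futex-wakes in mm_release(), roughly:
 *
 *	pid_t tid = ...;	// TID slot registered via CLONE_CHILD_CLEARTID
 *
 *	while (tid != 0)	// becomes 0 once the thread has exited
 *		syscall(SYS_futex, &tid, FUTEX_WAIT, tid, NULL, NULL, 0);
 */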
1450
1451
void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1452
{
1453
futex_exit_release(tsk);
1454
mm_release(tsk, mm);
1455
}
1456
1457
void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1458
{
1459
futex_exec_release(tsk);
1460
mm_release(tsk, mm);
1461
}
1462
1463
/**
1464
* dup_mm() - duplicates an existing mm structure
1465
* @tsk: the task_struct with which the new mm will be associated.
1466
* @oldmm: the mm to duplicate.
1467
*
1468
* Allocates a new mm structure and duplicates the provided @oldmm structure
1469
* content into it.
1470
*
1471
* Return: the duplicated mm or NULL on failure.
1472
*/
1473
static struct mm_struct *dup_mm(struct task_struct *tsk,
1474
struct mm_struct *oldmm)
1475
{
1476
struct mm_struct *mm;
1477
int err;
1478
1479
mm = allocate_mm();
1480
if (!mm)
1481
goto fail_nomem;
1482
1483
memcpy(mm, oldmm, sizeof(*mm));
1484
1485
if (!mm_init(mm, tsk, mm->user_ns))
1486
goto fail_nomem;
1487
1488
uprobe_start_dup_mmap();
1489
err = dup_mmap(mm, oldmm);
1490
if (err)
1491
goto free_pt;
1492
uprobe_end_dup_mmap();
1493
1494
mm->hiwater_rss = get_mm_rss(mm);
1495
mm->hiwater_vm = mm->total_vm;
1496
1497
if (mm->binfmt && !try_module_get(mm->binfmt->module))
1498
goto free_pt;
1499
1500
return mm;
1501
1502
free_pt:
1503
/* don't put binfmt in mmput, we haven't got module yet */
1504
mm->binfmt = NULL;
1505
mm_init_owner(mm, NULL);
1506
mmput(mm);
1507
if (err)
1508
uprobe_end_dup_mmap();
1509
1510
fail_nomem:
1511
return NULL;
1512
}
1513
1514
static int copy_mm(u64 clone_flags, struct task_struct *tsk)
1515
{
1516
struct mm_struct *mm, *oldmm;
1517
1518
tsk->min_flt = tsk->maj_flt = 0;
1519
tsk->nvcsw = tsk->nivcsw = 0;
1520
#ifdef CONFIG_DETECT_HUNG_TASK
1521
tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1522
tsk->last_switch_time = 0;
1523
#endif
1524
1525
tsk->mm = NULL;
1526
tsk->active_mm = NULL;
1527
1528
/*
1529
* Are we cloning a kernel thread?
1530
*
1531
* We need to steal a active VM for that..
1532
*/
1533
oldmm = current->mm;
1534
if (!oldmm)
1535
return 0;
1536
1537
if (clone_flags & CLONE_VM) {
1538
mmget(oldmm);
1539
mm = oldmm;
1540
} else {
1541
mm = dup_mm(tsk, current->mm);
1542
if (!mm)
1543
return -ENOMEM;
1544
}
1545
1546
tsk->mm = mm;
1547
tsk->active_mm = mm;
1548
sched_mm_cid_fork(tsk);
1549
return 0;
1550
}
1551
1552
static int copy_fs(u64 clone_flags, struct task_struct *tsk)
1553
{
1554
struct fs_struct *fs = current->fs;
1555
if (clone_flags & CLONE_FS) {
1556
/* tsk->fs is already what we want */
1557
read_seqlock_excl(&fs->seq);
1558
/* "users" and "in_exec" locked for check_unsafe_exec() */
1559
if (fs->in_exec) {
1560
read_sequnlock_excl(&fs->seq);
1561
return -EAGAIN;
1562
}
1563
fs->users++;
1564
read_sequnlock_excl(&fs->seq);
1565
return 0;
1566
}
1567
tsk->fs = copy_fs_struct(fs);
1568
if (!tsk->fs)
1569
return -ENOMEM;
1570
return 0;
1571
}
1572
1573
static int copy_files(u64 clone_flags, struct task_struct *tsk,
1574
int no_files)
1575
{
1576
struct files_struct *oldf, *newf;
1577
1578
/*
1579
* A background process may not have any files ...
1580
*/
1581
oldf = current->files;
1582
if (!oldf)
1583
return 0;
1584
1585
if (no_files) {
1586
tsk->files = NULL;
1587
return 0;
1588
}
1589
1590
if (clone_flags & CLONE_FILES) {
1591
atomic_inc(&oldf->count);
1592
return 0;
1593
}
1594
1595
newf = dup_fd(oldf, NULL);
1596
if (IS_ERR(newf))
1597
return PTR_ERR(newf);
1598
1599
tsk->files = newf;
1600
return 0;
1601
}
1602
1603
static int copy_sighand(u64 clone_flags, struct task_struct *tsk)
1604
{
1605
struct sighand_struct *sig;
1606
1607
if (clone_flags & CLONE_SIGHAND) {
1608
refcount_inc(&current->sighand->count);
1609
return 0;
1610
}
1611
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1612
RCU_INIT_POINTER(tsk->sighand, sig);
1613
if (!sig)
1614
return -ENOMEM;
1615
1616
refcount_set(&sig->count, 1);
1617
spin_lock_irq(&current->sighand->siglock);
1618
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1619
spin_unlock_irq(&current->sighand->siglock);
1620
1621
/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
1622
if (clone_flags & CLONE_CLEAR_SIGHAND)
1623
flush_signal_handlers(tsk, 0);
1624
1625
return 0;
1626
}
1627
1628
void __cleanup_sighand(struct sighand_struct *sighand)
1629
{
1630
if (refcount_dec_and_test(&sighand->count)) {
1631
signalfd_cleanup(sighand);
1632
/*
1633
* sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1634
* without an RCU grace period, see __lock_task_sighand().
1635
*/
1636
kmem_cache_free(sighand_cachep, sighand);
1637
}
1638
}
1639
1640
/*
1641
* Initialize POSIX timer handling for a thread group.
1642
*/
1643
static void posix_cpu_timers_init_group(struct signal_struct *sig)
1644
{
1645
struct posix_cputimers *pct = &sig->posix_cputimers;
1646
unsigned long cpu_limit;
1647
1648
cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1649
posix_cputimers_group_init(pct, cpu_limit);
1650
}
1651
1652
static int copy_signal(u64 clone_flags, struct task_struct *tsk)
1653
{
1654
struct signal_struct *sig;
1655
1656
if (clone_flags & CLONE_THREAD)
1657
return 0;
1658
1659
sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1660
tsk->signal = sig;
1661
if (!sig)
1662
return -ENOMEM;
1663
1664
sig->nr_threads = 1;
1665
sig->quick_threads = 1;
1666
atomic_set(&sig->live, 1);
1667
refcount_set(&sig->sigcnt, 1);
1668
1669
/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1670
sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1671
tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1672
1673
init_waitqueue_head(&sig->wait_chldexit);
1674
sig->curr_target = tsk;
1675
init_sigpending(&sig->shared_pending);
1676
INIT_HLIST_HEAD(&sig->multiprocess);
1677
seqlock_init(&sig->stats_lock);
1678
prev_cputime_init(&sig->prev_cputime);
1679
1680
#ifdef CONFIG_POSIX_TIMERS
1681
INIT_HLIST_HEAD(&sig->posix_timers);
1682
INIT_HLIST_HEAD(&sig->ignored_posix_timers);
1683
hrtimer_setup(&sig->real_timer, it_real_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1684
#endif
1685
1686
task_lock(current->group_leader);
1687
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1688
task_unlock(current->group_leader);
1689
1690
posix_cpu_timers_init_group(sig);
1691
1692
tty_audit_fork(sig);
1693
sched_autogroup_fork(sig);
1694
1695
#ifdef CONFIG_CGROUPS
1696
init_rwsem(&sig->cgroup_threadgroup_rwsem);
1697
#endif
1698
1699
sig->oom_score_adj = current->signal->oom_score_adj;
1700
sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1701
1702
mutex_init(&sig->cred_guard_mutex);
1703
init_rwsem(&sig->exec_update_lock);
1704
1705
return 0;
1706
}
1707
1708
static void copy_seccomp(struct task_struct *p)
1709
{
1710
#ifdef CONFIG_SECCOMP
1711
/*
1712
* Must be called with sighand->lock held, which is common to
1713
* all threads in the group. Holding cred_guard_mutex is not
1714
* needed because this new task is not yet running and cannot
1715
* be racing exec.
1716
*/
1717
assert_spin_locked(&current->sighand->siglock);
1718
1719
/* Ref-count the new filter user, and assign it. */
1720
get_seccomp_filter(current);
1721
p->seccomp = current->seccomp;
1722
1723
/*
1724
* Explicitly enable no_new_privs here in case it got set
1725
* between the task_struct being duplicated and holding the
1726
* sighand lock. The seccomp state and nnp must be in sync.
1727
*/
1728
if (task_no_new_privs(current))
1729
task_set_no_new_privs(p);
1730
1731
/*
1732
* If the parent gained a seccomp mode after copying thread
1733
* flags but before we held the sighand lock, we have
1734
* to manually enable the seccomp thread flag here.
1735
*/
1736
if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1737
set_task_syscall_work(p, SECCOMP);
1738
#endif
1739
}
1740
1741
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1742
{
1743
current->clear_child_tid = tidptr;
1744
1745
return task_pid_vnr(current);
1746
}
1747
1748
static void rt_mutex_init_task(struct task_struct *p)
1749
{
1750
raw_spin_lock_init(&p->pi_lock);
1751
#ifdef CONFIG_RT_MUTEXES
1752
p->pi_waiters = RB_ROOT_CACHED;
1753
p->pi_top_task = NULL;
1754
p->pi_blocked_on = NULL;
1755
#endif
1756
}
1757
1758
static inline void init_task_pid_links(struct task_struct *task)
1759
{
1760
enum pid_type type;
1761
1762
for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
1763
INIT_HLIST_NODE(&task->pid_links[type]);
1764
}
1765
1766
static inline void
1767
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1768
{
1769
if (type == PIDTYPE_PID)
1770
task->thread_pid = pid;
1771
else
1772
task->signal->pids[type] = pid;
1773
}
1774
1775
static inline void rcu_copy_process(struct task_struct *p)
1776
{
1777
#ifdef CONFIG_PREEMPT_RCU
1778
p->rcu_read_lock_nesting = 0;
1779
p->rcu_read_unlock_special.s = 0;
1780
p->rcu_blocked_node = NULL;
1781
INIT_LIST_HEAD(&p->rcu_node_entry);
1782
#endif /* #ifdef CONFIG_PREEMPT_RCU */
1783
#ifdef CONFIG_TASKS_RCU
1784
p->rcu_tasks_holdout = false;
1785
INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
1786
p->rcu_tasks_idle_cpu = -1;
1787
INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
1788
#endif /* #ifdef CONFIG_TASKS_RCU */
1789
#ifdef CONFIG_TASKS_TRACE_RCU
1790
p->trc_reader_nesting = 0;
1791
p->trc_reader_special.s = 0;
1792
INIT_LIST_HEAD(&p->trc_holdout_list);
1793
INIT_LIST_HEAD(&p->trc_blkd_node);
1794
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
1795
}
1796
1797
/**
1798
* pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
1799
* @pid: the struct pid for which to create a pidfd
1800
* @flags: flags of the new @pidfd
1801
* @ret_file: return the new pidfs file
1802
*
1803
* Allocate a new file that stashes @pid and reserve a new pidfd number in the
1804
* caller's file descriptor table. The pidfd is reserved but not installed yet.
1805
*
1806
* The helper verifies that @pid is still in use, without PIDFD_THREAD the
1807
* task identified by @pid must be a thread-group leader.
1808
*
1809
* If this function returns successfully the caller is responsible to either
1810
* call fd_install() passing the returned pidfd and pidfd file as arguments in
1811
* order to install the pidfd into its file descriptor table or they must use
1812
* put_unused_fd() and fput() on the returned pidfd and pidfd file
1813
* respectively.
1814
*
1815
* This function is useful when a pidfd must already be reserved but there
1816
* might still be points of failure afterwards and the caller wants to ensure
1817
* that no pidfd is leaked into its file descriptor table.
1818
*
1819
* Return: On success, a reserved pidfd is returned from the function and a new
1820
* pidfd file is returned in the last argument to the function. On
1821
* error, a negative error code is returned from the function and the
1822
* last argument remains unchanged.
1823
*/
1824
int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file)
1825
{
1826
struct file *pidfs_file;
1827
1828
/*
1829
* PIDFD_STALE is only allowed to be passed if the caller knows
1830
* that @pid is already registered in pidfs and thus
1831
* PIDFD_INFO_EXIT information is guaranteed to be available.
1832
*/
1833
if (!(flags & PIDFD_STALE)) {
1834
/*
1835
* While holding the pidfd waitqueue lock removing the
1836
* task linkage for the thread-group leader pid
1837
* (PIDTYPE_TGID) isn't possible. Thus, if there's still
1838
* task linkage for PIDTYPE_PID not having thread-group
1839
* leader linkage for the pid means it wasn't a
1840
* thread-group leader in the first place.
1841
*/
1842
guard(spinlock_irq)(&pid->wait_pidfd.lock);
1843
1844
/* Task has already been reaped. */
1845
if (!pid_has_task(pid, PIDTYPE_PID))
1846
return -ESRCH;
1847
/*
1848
* If this struct pid isn't used as a thread-group
1849
* leader but the caller requested to create a
1850
* thread-group leader pidfd then report ENOENT.
1851
*/
1852
if (!(flags & PIDFD_THREAD) && !pid_has_task(pid, PIDTYPE_TGID))
1853
return -ENOENT;
1854
}
1855
1856
CLASS(get_unused_fd, pidfd)(O_CLOEXEC);
1857
if (pidfd < 0)
1858
return pidfd;
1859
1860
pidfs_file = pidfs_alloc_file(pid, flags | O_RDWR);
1861
if (IS_ERR(pidfs_file))
1862
return PTR_ERR(pidfs_file);
1863
1864
*ret_file = pidfs_file;
1865
return take_fd(pidfd);
1866
}
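/*
 * Caller-side sketch (illustrative; "later_setup_failed" and "err" are
 * placeholders) of the contract described in the kerneldoc above: the
 * reserved descriptor is either installed or fully rolled back, never
 * leaked into the caller's file descriptor table.
 *
 *	struct file *pidfd_file = NULL;
 *	int pidfd = pidfd_prepare(pid, 0, &pidfd_file);
 *
 *	if (pidfd < 0)
 *		return pidfd;
 *	if (later_setup_failed) {
 *		put_unused_fd(pidfd);		// drop the reserved fd
 *		fput(pidfd_file);		// and the pidfs file
 *		return err;
 *	}
 *	fd_install(pidfd, pidfd_file);		// success: publish the pidfd
 *	return pidfd;
 */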
1867
1868
static void __delayed_free_task(struct rcu_head *rhp)
1869
{
1870
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
1871
1872
free_task(tsk);
1873
}
1874
1875
static __always_inline void delayed_free_task(struct task_struct *tsk)
1876
{
1877
if (IS_ENABLED(CONFIG_MEMCG))
1878
call_rcu(&tsk->rcu, __delayed_free_task);
1879
else
1880
free_task(tsk);
1881
}
1882
1883
static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
1884
{
1885
/* Skip if kernel thread */
1886
if (!tsk->mm)
1887
return;
1888
1889
/* Skip if spawning a thread or using vfork */
1890
if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
1891
return;
1892
1893
/* We need to synchronize with __set_oom_adj */
1894
mutex_lock(&oom_adj_mutex);
1895
mm_flags_set(MMF_MULTIPROCESS, tsk->mm);
1896
/* Update the values in case they were changed after copy_signal */
1897
tsk->signal->oom_score_adj = current->signal->oom_score_adj;
1898
tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
1899
mutex_unlock(&oom_adj_mutex);
1900
}
1901
1902
#ifdef CONFIG_RV
1903
static void rv_task_fork(struct task_struct *p)
1904
{
1905
memset(&p->rv, 0, sizeof(p->rv));
1906
}
1907
#else
1908
#define rv_task_fork(p) do {} while (0)
1909
#endif
1910
1911
static bool need_futex_hash_allocate_default(u64 clone_flags)
1912
{
1913
if ((clone_flags & (CLONE_THREAD | CLONE_VM)) != (CLONE_THREAD | CLONE_VM))
1914
return false;
1915
return true;
1916
}
1917
1918
/*
1919
* This creates a new process as a copy of the old one,
1920
* but does not actually start it yet.
1921
*
1922
* It copies the registers, and all the appropriate
1923
* parts of the process environment (as per the clone
1924
* flags). The actual kick-off is left to the caller.
1925
*/
1926
__latent_entropy struct task_struct *copy_process(
1927
struct pid *pid,
1928
int trace,
1929
int node,
1930
struct kernel_clone_args *args)
1931
{
1932
int pidfd = -1, retval;
1933
struct task_struct *p;
1934
struct multiprocess_signals delayed;
1935
struct file *pidfile = NULL;
1936
const u64 clone_flags = args->flags;
1937
struct nsproxy *nsp = current->nsproxy;
1938
1939
/*
1940
* Don't allow sharing the root directory with processes in a different
1941
* namespace
1942
*/
1943
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1944
return ERR_PTR(-EINVAL);
1945
1946
if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1947
return ERR_PTR(-EINVAL);
1948
1949
/*
1950
* Thread groups must share signals as well, and detached threads
1951
* can only be started up within the thread group.
1952
*/
1953
if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1954
return ERR_PTR(-EINVAL);
1955
1956
/*
1957
* Shared signal handlers imply shared VM. By way of the above,
1958
* thread groups also imply shared VM. Blocking this case allows
1959
* for various simplifications in other code.
1960
*/
1961
if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1962
return ERR_PTR(-EINVAL);
1963
1964
/*
1965
* Siblings of global init remain as zombies on exit since they are
1966
* not reaped by their parent (swapper). To solve this and to avoid
1967
* multi-rooted process trees, prevent global and container-inits
1968
* from creating siblings.
1969
*/
1970
if ((clone_flags & CLONE_PARENT) &&
1971
current->signal->flags & SIGNAL_UNKILLABLE)
1972
return ERR_PTR(-EINVAL);
1973
1974
/*
1975
* If the new process will be in a different pid or user namespace
1976
* do not allow it to share a thread group with the forking task.
1977
*/
1978
if (clone_flags & CLONE_THREAD) {
1979
if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
1980
(task_active_pid_ns(current) != nsp->pid_ns_for_children))
1981
return ERR_PTR(-EINVAL);
1982
}
1983
1984
if (clone_flags & CLONE_PIDFD) {
1985
/*
1986
* - CLONE_DETACHED is blocked so that we can potentially
1987
* reuse it later for CLONE_PIDFD.
1988
*/
1989
if (clone_flags & CLONE_DETACHED)
1990
return ERR_PTR(-EINVAL);
1991
}
1992
1993
/*
1994
* Force any signals received before this point to be delivered
1995
* before the fork happens. Collect up signals sent to multiple
1996
* processes that happen during the fork and delay them so that
1997
* they appear to happen after the fork.
1998
*/
1999
sigemptyset(&delayed.signal);
2000
INIT_HLIST_NODE(&delayed.node);
2001
2002
spin_lock_irq(&current->sighand->siglock);
2003
if (!(clone_flags & CLONE_THREAD))
2004
hlist_add_head(&delayed.node, &current->signal->multiprocess);
2005
recalc_sigpending();
2006
spin_unlock_irq(&current->sighand->siglock);
2007
retval = -ERESTARTNOINTR;
2008
if (task_sigpending(current))
2009
goto fork_out;
2010
2011
retval = -ENOMEM;
2012
p = dup_task_struct(current, node);
2013
if (!p)
2014
goto fork_out;
2015
p->flags &= ~PF_KTHREAD;
2016
if (args->kthread)
2017
p->flags |= PF_KTHREAD;
2018
if (args->user_worker) {
2019
/*
2020
* Mark us as a user worker, and block any signal that isn't
2021
* fatal or STOP
2022
*/
2023
p->flags |= PF_USER_WORKER;
2024
siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
2025
}
2026
if (args->io_thread)
2027
p->flags |= PF_IO_WORKER;
2028
2029
if (args->name)
2030
strscpy_pad(p->comm, args->name, sizeof(p->comm));
2031
2032
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
2033
/*
2034
* Clear TID on mm_release()?
2035
*/
2036
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;
2037
2038
ftrace_graph_init_task(p);
2039
2040
rt_mutex_init_task(p);
2041
2042
lockdep_assert_irqs_enabled();
2043
#ifdef CONFIG_PROVE_LOCKING
2044
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
2045
#endif
2046
retval = copy_creds(p, clone_flags);
2047
if (retval < 0)
2048
goto bad_fork_free;
2049
2050
retval = -EAGAIN;
2051
if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
2052
if (p->real_cred->user != INIT_USER &&
2053
!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
2054
goto bad_fork_cleanup_count;
2055
}
2056
current->flags &= ~PF_NPROC_EXCEEDED;
2057
2058
/*
2059
* If multiple threads are within copy_process(), then this check
2060
* triggers too late. This doesn't hurt; the check is only there
2061
* to stop root fork bombs.
2062
*/
2063
retval = -EAGAIN;
2064
if (data_race(nr_threads >= max_threads))
2065
goto bad_fork_cleanup_count;
2066
2067
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
2068
p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
2069
p->flags |= PF_FORKNOEXEC;
2070
INIT_LIST_HEAD(&p->children);
2071
INIT_LIST_HEAD(&p->sibling);
2072
rcu_copy_process(p);
2073
p->vfork_done = NULL;
2074
spin_lock_init(&p->alloc_lock);
2075
2076
init_sigpending(&p->pending);
2077
2078
p->utime = p->stime = p->gtime = 0;
2079
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
2080
p->utimescaled = p->stimescaled = 0;
2081
#endif
2082
prev_cputime_init(&p->prev_cputime);
2083
2084
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2085
seqcount_init(&p->vtime.seqcount);
2086
p->vtime.starttime = 0;
2087
p->vtime.state = VTIME_INACTIVE;
2088
#endif
2089
2090
#ifdef CONFIG_IO_URING
2091
p->io_uring = NULL;
2092
#endif
2093
2094
p->default_timer_slack_ns = current->timer_slack_ns;
2095
2096
#ifdef CONFIG_PSI
2097
p->psi_flags = 0;
2098
#endif
2099
2100
task_io_accounting_init(&p->ioac);
2101
acct_clear_integrals(p);
2102
2103
posix_cputimers_init(&p->posix_cputimers);
2104
tick_dep_init_task(p);
2105
2106
p->io_context = NULL;
2107
audit_set_context(p, NULL);
2108
cgroup_fork(p);
2109
if (args->kthread) {
2110
if (!set_kthread_struct(p))
2111
goto bad_fork_cleanup_delayacct;
2112
}
2113
#ifdef CONFIG_NUMA
2114
p->mempolicy = mpol_dup(p->mempolicy);
2115
if (IS_ERR(p->mempolicy)) {
2116
retval = PTR_ERR(p->mempolicy);
2117
p->mempolicy = NULL;
2118
goto bad_fork_cleanup_delayacct;
2119
}
2120
#endif
2121
#ifdef CONFIG_CPUSETS
2122
p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
2123
seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
2124
#endif
2125
#ifdef CONFIG_TRACE_IRQFLAGS
2126
memset(&p->irqtrace, 0, sizeof(p->irqtrace));
2127
p->irqtrace.hardirq_disable_ip = _THIS_IP_;
2128
p->irqtrace.softirq_enable_ip = _THIS_IP_;
2129
p->softirqs_enabled = 1;
2130
p->softirq_context = 0;
2131
#endif
2132
2133
p->pagefault_disabled = 0;
2134
2135
lockdep_init_task(p);
2136
2137
p->blocked_on = NULL; /* not blocked yet */
2138
2139
#ifdef CONFIG_BCACHE
2140
p->sequential_io = 0;
2141
p->sequential_io_avg = 0;
2142
#endif
2143
#ifdef CONFIG_BPF_SYSCALL
2144
RCU_INIT_POINTER(p->bpf_storage, NULL);
2145
p->bpf_ctx = NULL;
2146
#endif
2147
2148
unwind_task_init(p);
2149
2150
/* Perform scheduler related setup. Assign this task to a CPU. */
2151
retval = sched_fork(clone_flags, p);
2152
if (retval)
2153
goto bad_fork_cleanup_policy;
2154
2155
retval = perf_event_init_task(p, clone_flags);
2156
if (retval)
2157
goto bad_fork_sched_cancel_fork;
2158
retval = audit_alloc(p);
2159
if (retval)
2160
goto bad_fork_cleanup_perf;
2161
/* copy all the process information */
2162
shm_init_task(p);
2163
retval = security_task_alloc(p, clone_flags);
2164
if (retval)
2165
goto bad_fork_cleanup_audit;
2166
retval = copy_semundo(clone_flags, p);
2167
if (retval)
2168
goto bad_fork_cleanup_security;
2169
retval = copy_files(clone_flags, p, args->no_files);
2170
if (retval)
2171
goto bad_fork_cleanup_semundo;
2172
retval = copy_fs(clone_flags, p);
2173
if (retval)
2174
goto bad_fork_cleanup_files;
2175
retval = copy_sighand(clone_flags, p);
2176
if (retval)
2177
goto bad_fork_cleanup_fs;
2178
retval = copy_signal(clone_flags, p);
2179
if (retval)
2180
goto bad_fork_cleanup_sighand;
2181
retval = copy_mm(clone_flags, p);
2182
if (retval)
2183
goto bad_fork_cleanup_signal;
2184
retval = copy_namespaces(clone_flags, p);
2185
if (retval)
2186
goto bad_fork_cleanup_mm;
2187
retval = copy_io(clone_flags, p);
2188
if (retval)
2189
goto bad_fork_cleanup_namespaces;
2190
retval = copy_thread(p, args);
2191
if (retval)
2192
goto bad_fork_cleanup_io;
2193
2194
stackleak_task_init(p);
2195
2196
if (pid != &init_struct_pid) {
2197
pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
2198
args->set_tid_size);
2199
if (IS_ERR(pid)) {
2200
retval = PTR_ERR(pid);
2201
goto bad_fork_cleanup_thread;
2202
}
2203
}
2204
2205
/*
2206
* This has to happen after we've potentially unshared the file
2207
* descriptor table (so that the pidfd doesn't leak into the child
2208
* if the fd table isn't shared).
2209
*/
2210
if (clone_flags & CLONE_PIDFD) {
2211
int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;
2212
2213
/*
2214
* Note that no task has been attached to @pid yet; indicate
2215
* that via CLONE_PIDFD.
2216
*/
2217
retval = pidfd_prepare(pid, flags | PIDFD_STALE, &pidfile);
2218
if (retval < 0)
2219
goto bad_fork_free_pid;
2220
pidfd = retval;
2221
2222
retval = put_user(pidfd, args->pidfd);
2223
if (retval)
2224
goto bad_fork_put_pidfd;
2225
}
2226
2227
#ifdef CONFIG_BLOCK
2228
p->plug = NULL;
2229
#endif
2230
futex_init_task(p);
2231
2232
/*
2233
* sigaltstack should be cleared when sharing the same VM
2234
*/
2235
if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
2236
sas_ss_reset(p);
2237
2238
/*
2239
* Syscall tracing and stepping should be turned off in the
2240
* child regardless of CLONE_PTRACE.
2241
*/
2242
user_disable_single_step(p);
2243
clear_task_syscall_work(p, SYSCALL_TRACE);
2244
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
2245
clear_task_syscall_work(p, SYSCALL_EMU);
2246
#endif
2247
clear_tsk_latency_tracing(p);
2248
2249
/* ok, now we should be set up. */
2250
p->pid = pid_nr(pid);
2251
if (clone_flags & CLONE_THREAD) {
2252
p->group_leader = current->group_leader;
2253
p->tgid = current->tgid;
2254
} else {
2255
p->group_leader = p;
2256
p->tgid = p->pid;
2257
}
2258
2259
p->nr_dirtied = 0;
2260
p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
2261
p->dirty_paused_when = 0;
2262
2263
p->pdeath_signal = 0;
2264
p->task_works = NULL;
2265
clear_posix_cputimers_work(p);
2266
2267
#ifdef CONFIG_KRETPROBES
2268
p->kretprobe_instances.first = NULL;
2269
#endif
2270
#ifdef CONFIG_RETHOOK
2271
p->rethooks.first = NULL;
2272
#endif
2273
2274
/*
2275
* Ensure that the cgroup subsystem policies allow the new process to be
2276
* forked. It should be noted that the new process's css_set can be changed
2277
* between here and cgroup_post_fork() if an organisation operation is in
2278
* progress.
2279
*/
2280
retval = cgroup_can_fork(p, args);
2281
if (retval)
2282
goto bad_fork_put_pidfd;
2283
2284
/*
2285
* Now that the cgroups are pinned, re-clone the parent cgroup and put
2286
* the new task on the correct runqueue. All this *before* the task
2287
* becomes visible.
2288
*
2289
* This isn't part of ->can_fork() because while the re-cloning is
2290
* cgroup specific, it unconditionally needs to place the task on a
2291
* runqueue.
2292
*/
2293
retval = sched_cgroup_fork(p, args);
2294
if (retval)
2295
goto bad_fork_cancel_cgroup;
2296
2297
/*
2298
* Allocate a default futex hash for the user process once the first
2299
* thread spawns.
2300
*/
2301
if (need_futex_hash_allocate_default(clone_flags)) {
2302
retval = futex_hash_allocate_default();
2303
if (retval)
2304
goto bad_fork_cancel_cgroup;
2305
/*
2306
* If we fail beyond this point we don't free the allocated
2307
* futex hash map. We assume that another thread will be created
2308
* and make use of it. The hash map will be freed once the main
2309
* thread terminates.
2310
*/
2311
}
2312
/*
2313
* From this point on we must avoid any synchronous user-space
2314
* communication until we take the tasklist-lock. In particular, we do
2315
* not want user-space to be able to predict the process start-time by
2316
* stalling fork(2) after we recorded the start_time but before it is
2317
* visible to the system.
2318
*/
2319
2320
p->start_time = ktime_get_ns();
2321
p->start_boottime = ktime_get_boottime_ns();
2322
2323
/*
2324
* Make it visible to the rest of the system, but don't wake it up yet.
2325
* Need the tasklist lock for parent etc. handling!
2326
*/
2327
write_lock_irq(&tasklist_lock);
2328
2329
/* CLONE_PARENT re-uses the old parent */
2330
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
2331
p->real_parent = current->real_parent;
2332
p->parent_exec_id = current->parent_exec_id;
2333
if (clone_flags & CLONE_THREAD)
2334
p->exit_signal = -1;
2335
else
2336
p->exit_signal = current->group_leader->exit_signal;
2337
} else {
2338
p->real_parent = current;
2339
p->parent_exec_id = current->self_exec_id;
2340
p->exit_signal = args->exit_signal;
2341
}
2342
2343
klp_copy_process(p);
2344
2345
sched_core_fork(p);
2346
2347
spin_lock(&current->sighand->siglock);
2348
2349
rv_task_fork(p);
2350
2351
rseq_fork(p, clone_flags);
2352
2353
/* Don't start children in a dying pid namespace */
2354
if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
2355
retval = -ENOMEM;
2356
goto bad_fork_core_free;
2357
}
2358
2359
/* Let kill terminate clone/fork in the middle */
2360
if (fatal_signal_pending(current)) {
2361
retval = -EINTR;
2362
goto bad_fork_core_free;
2363
}
2364
2365
/* No more failure paths after this point. */
2366
2367
/*
2368
* Copy seccomp details explicitly here, in case they were changed
2369
* before holding sighand lock.
2370
*/
2371
copy_seccomp(p);
2372
2373
init_task_pid_links(p);
2374
if (likely(p->pid)) {
2375
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
2376
2377
init_task_pid(p, PIDTYPE_PID, pid);
2378
if (thread_group_leader(p)) {
2379
init_task_pid(p, PIDTYPE_TGID, pid);
2380
init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
2381
init_task_pid(p, PIDTYPE_SID, task_session(current));
2382
2383
if (is_child_reaper(pid)) {
2384
ns_of_pid(pid)->child_reaper = p;
2385
p->signal->flags |= SIGNAL_UNKILLABLE;
2386
}
2387
p->signal->shared_pending.signal = delayed.signal;
2388
p->signal->tty = tty_kref_get(current->signal->tty);
2389
/*
2390
* Inherit the has_child_subreaper flag under the same
2391
* tasklist_lock that covers adding the child to the process
2392
* tree, for the propagate_has_child_subreaper optimization.
2393
*/
2394
p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
2395
p->real_parent->signal->is_child_subreaper;
2396
list_add_tail(&p->sibling, &p->real_parent->children);
2397
list_add_tail_rcu(&p->tasks, &init_task.tasks);
2398
attach_pid(p, PIDTYPE_TGID);
2399
attach_pid(p, PIDTYPE_PGID);
2400
attach_pid(p, PIDTYPE_SID);
2401
__this_cpu_inc(process_counts);
2402
} else {
2403
current->signal->nr_threads++;
2404
current->signal->quick_threads++;
2405
atomic_inc(&current->signal->live);
2406
refcount_inc(&current->signal->sigcnt);
2407
task_join_group_stop(p);
2408
list_add_tail_rcu(&p->thread_node,
2409
&p->signal->thread_head);
2410
}
2411
attach_pid(p, PIDTYPE_PID);
2412
nr_threads++;
2413
}
2414
total_forks++;
2415
hlist_del_init(&delayed.node);
2416
spin_unlock(&current->sighand->siglock);
2417
syscall_tracepoint_update(p);
2418
write_unlock_irq(&tasklist_lock);
2419
2420
if (pidfile)
2421
fd_install(pidfd, pidfile);
2422
2423
proc_fork_connector(p);
2424
sched_post_fork(p);
2425
cgroup_post_fork(p, args);
2426
perf_event_fork(p);
2427
2428
trace_task_newtask(p, clone_flags);
2429
uprobe_copy_process(p, clone_flags);
2430
user_events_fork(p, clone_flags);
2431
2432
copy_oom_score_adj(clone_flags, p);
2433
2434
return p;
2435
2436
bad_fork_core_free:
2437
sched_core_free(p);
2438
spin_unlock(&current->sighand->siglock);
2439
write_unlock_irq(&tasklist_lock);
2440
bad_fork_cancel_cgroup:
2441
cgroup_cancel_fork(p, args);
2442
bad_fork_put_pidfd:
2443
if (clone_flags & CLONE_PIDFD) {
2444
fput(pidfile);
2445
put_unused_fd(pidfd);
2446
}
2447
bad_fork_free_pid:
2448
if (pid != &init_struct_pid)
2449
free_pid(pid);
2450
bad_fork_cleanup_thread:
2451
exit_thread(p);
2452
bad_fork_cleanup_io:
2453
if (p->io_context)
2454
exit_io_context(p);
2455
bad_fork_cleanup_namespaces:
2456
exit_task_namespaces(p);
2457
bad_fork_cleanup_mm:
2458
if (p->mm) {
2459
mm_clear_owner(p->mm, p);
2460
mmput(p->mm);
2461
}
2462
bad_fork_cleanup_signal:
2463
if (!(clone_flags & CLONE_THREAD))
2464
free_signal_struct(p->signal);
2465
bad_fork_cleanup_sighand:
2466
__cleanup_sighand(p->sighand);
2467
bad_fork_cleanup_fs:
2468
exit_fs(p); /* blocking */
2469
bad_fork_cleanup_files:
2470
exit_files(p); /* blocking */
2471
bad_fork_cleanup_semundo:
2472
exit_sem(p);
2473
bad_fork_cleanup_security:
2474
security_task_free(p);
2475
bad_fork_cleanup_audit:
2476
audit_free(p);
2477
bad_fork_cleanup_perf:
2478
perf_event_free_task(p);
2479
bad_fork_sched_cancel_fork:
2480
sched_cancel_fork(p);
2481
bad_fork_cleanup_policy:
2482
lockdep_free_task(p);
2483
#ifdef CONFIG_NUMA
2484
mpol_put(p->mempolicy);
2485
#endif
2486
bad_fork_cleanup_delayacct:
2487
delayacct_tsk_free(p);
2488
bad_fork_cleanup_count:
2489
dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
2490
exit_creds(p);
2491
bad_fork_free:
2492
WRITE_ONCE(p->__state, TASK_DEAD);
2493
exit_task_stack_account(p);
2494
put_task_stack(p);
2495
delayed_free_task(p);
2496
fork_out:
2497
spin_lock_irq(&current->sighand->siglock);
2498
hlist_del_init(&delayed.node);
2499
spin_unlock_irq(&current->sighand->siglock);
2500
return ERR_PTR(retval);
2501
}
2502
2503
static inline void init_idle_pids(struct task_struct *idle)
2504
{
2505
enum pid_type type;
2506
2507
for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
2508
INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
2509
init_task_pid(idle, type, &init_struct_pid);
2510
}
2511
}
2512
2513
static int idle_dummy(void *dummy)
2514
{
2515
/* This function is never called */
2516
return 0;
2517
}
2518
2519
struct task_struct * __init fork_idle(int cpu)
2520
{
2521
struct task_struct *task;
2522
struct kernel_clone_args args = {
2523
.flags = CLONE_VM,
2524
.fn = &idle_dummy,
2525
.fn_arg = NULL,
2526
.kthread = 1,
2527
.idle = 1,
2528
};
2529
2530
task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
2531
if (!IS_ERR(task)) {
2532
init_idle_pids(task);
2533
init_idle(task, cpu);
2534
}
2535
2536
return task;
2537
}
2538
2539
/*
2540
* This is like kernel_clone(), but shaved down and tailored to just
2541
* creating io_uring workers. It returns a created task, or an error pointer.
2542
* The returned task is inactive, and the caller must fire it up through
2543
* wake_up_new_task(p). All signals are blocked in the created task.
2544
*/
2545
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
2546
{
2547
unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
2548
CLONE_IO|CLONE_VM|CLONE_UNTRACED;
2549
struct kernel_clone_args args = {
2550
.flags = flags,
2551
.fn = fn,
2552
.fn_arg = arg,
2553
.io_thread = 1,
2554
.user_worker = 1,
2555
};
2556
2557
return copy_process(NULL, 0, node, &args);
2558
}
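/*
 * Illustrative sketch only (not part of the kernel build): how a caller
 * might use create_io_thread(). The worker function, the helper name and
 * the error handling below are assumptions for illustration; the real
 * io_uring worker setup is more involved.
 *
 *	static int example_io_worker(void *data)
 *	{
 *		// ...process queued work until asked to stop...
 *		return 0;
 *	}
 *
 *	static struct task_struct *example_start_worker(void *data)
 *	{
 *		struct task_struct *tsk;
 *
 *		tsk = create_io_thread(example_io_worker, data, NUMA_NO_NODE);
 *		if (IS_ERR(tsk))
 *			return tsk;
 *		// The new task is inactive until explicitly woken:
 *		wake_up_new_task(tsk);
 *		return tsk;
 *	}
 */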
2559
2560
/*
2561
* Ok, this is the main fork-routine.
2562
*
2563
* It copies the process and, if successful, kick-starts
2564
* it and waits for it to finish using the VM if required.
2565
*
2566
* args->exit_signal is expected to be checked for sanity by the caller.
2567
*/
2568
pid_t kernel_clone(struct kernel_clone_args *args)
2569
{
2570
u64 clone_flags = args->flags;
2571
struct completion vfork;
2572
struct pid *pid;
2573
struct task_struct *p;
2574
int trace = 0;
2575
pid_t nr;
2576
2577
/*
2578
* For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
2579
* to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
2580
* mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
2581
* field in struct clone_args and it still doesn't make sense to have
2582
* them both point at the same memory location. Performing this check
2583
* here has the advantage that we don't need to have a separate helper
2584
* to check for legacy clone().
2585
*/
2586
if ((clone_flags & CLONE_PIDFD) &&
2587
(clone_flags & CLONE_PARENT_SETTID) &&
2588
(args->pidfd == args->parent_tid))
2589
return -EINVAL;
2590
2591
/*
2592
* Determine whether and which event to report to the ptracer. When
2593
* called from kernel_thread or when CLONE_UNTRACED is explicitly
2594
* requested, no event is reported; otherwise, report if the event
2595
* for the type of forking is enabled.
2596
*/
2597
if (!(clone_flags & CLONE_UNTRACED)) {
2598
if (clone_flags & CLONE_VFORK)
2599
trace = PTRACE_EVENT_VFORK;
2600
else if (args->exit_signal != SIGCHLD)
2601
trace = PTRACE_EVENT_CLONE;
2602
else
2603
trace = PTRACE_EVENT_FORK;
2604
2605
if (likely(!ptrace_event_enabled(current, trace)))
2606
trace = 0;
2607
}
2608
2609
p = copy_process(NULL, trace, NUMA_NO_NODE, args);
2610
add_latent_entropy();
2611
2612
if (IS_ERR(p))
2613
return PTR_ERR(p);
2614
2615
/*
2616
* Do this prior to waking up the new thread - the thread pointer
2617
* might get invalid after that point, if the thread exits quickly.
2618
*/
2619
trace_sched_process_fork(current, p);
2620
2621
pid = get_task_pid(p, PIDTYPE_PID);
2622
nr = pid_vnr(pid);
2623
2624
if (clone_flags & CLONE_PARENT_SETTID)
2625
put_user(nr, args->parent_tid);
2626
2627
if (clone_flags & CLONE_VFORK) {
2628
p->vfork_done = &vfork;
2629
init_completion(&vfork);
2630
get_task_struct(p);
2631
}
2632
2633
if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) {
2634
/* lock the task to synchronize with memcg migration */
2635
task_lock(p);
2636
lru_gen_add_mm(p->mm);
2637
task_unlock(p);
2638
}
2639
2640
wake_up_new_task(p);
2641
2642
/* forking complete and child started to run, tell ptracer */
2643
if (unlikely(trace))
2644
ptrace_event_pid(trace, pid);
2645
2646
if (clone_flags & CLONE_VFORK) {
2647
if (!wait_for_vfork_done(p, &vfork))
2648
ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
2649
}
2650
2651
put_pid(pid);
2652
return nr;
2653
}
2654
2655
/*
2656
* Create a kernel thread.
2657
*/
2658
pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
2659
unsigned long flags)
2660
{
2661
struct kernel_clone_args args = {
2662
.flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
2663
.exit_signal = (flags & CSIGNAL),
2664
.fn = fn,
2665
.fn_arg = arg,
2666
.name = name,
2667
.kthread = 1,
2668
};
2669
2670
return kernel_clone(&args);
2671
}
2672
2673
/*
2674
* Create a user mode thread.
2675
*/
2676
pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
2677
{
2678
struct kernel_clone_args args = {
2679
.flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
2680
.exit_signal = (flags & CSIGNAL),
2681
.fn = fn,
2682
.fn_arg = arg,
2683
};
2684
2685
return kernel_clone(&args);
2686
}
2687
2688
#ifdef __ARCH_WANT_SYS_FORK
2689
SYSCALL_DEFINE0(fork)
2690
{
2691
#ifdef CONFIG_MMU
2692
struct kernel_clone_args args = {
2693
.exit_signal = SIGCHLD,
2694
};
2695
2696
return kernel_clone(&args);
2697
#else
2698
/* not supported in nommu mode */
2699
return -EINVAL;
2700
#endif
2701
}
2702
#endif
2703
2704
#ifdef __ARCH_WANT_SYS_VFORK
2705
SYSCALL_DEFINE0(vfork)
2706
{
2707
struct kernel_clone_args args = {
2708
.flags = CLONE_VFORK | CLONE_VM,
2709
.exit_signal = SIGCHLD,
2710
};
2711
2712
return kernel_clone(&args);
2713
}
2714
#endif
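/*
 * Illustrative userspace sketch only (not part of the kernel build):
 * with CLONE_VFORK | CLONE_VM the parent blocks until the child calls
 * execve() or _exit(), so the child must do nothing else with the shared
 * address space. The helper below is hypothetical.
 *
 *	#include <unistd.h>
 *
 *	static void vfork_exec(char *const argv[], char *const envp[])
 *	{
 *		pid_t pid = vfork();
 *
 *		if (pid == 0) {
 *			execve(argv[0], argv, envp);
 *			_exit(127);	// reached only if execve() failed
 *		}
 *		// parent resumes here once the child has exec'd or exited
 *	}
 */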
2715
2716
#ifdef __ARCH_WANT_SYS_CLONE
2717
#ifdef CONFIG_CLONE_BACKWARDS
2718
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2719
int __user *, parent_tidptr,
2720
unsigned long, tls,
2721
int __user *, child_tidptr)
2722
#elif defined(CONFIG_CLONE_BACKWARDS2)
2723
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
2724
int __user *, parent_tidptr,
2725
int __user *, child_tidptr,
2726
unsigned long, tls)
2727
#elif defined(CONFIG_CLONE_BACKWARDS3)
2728
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
2729
int, stack_size,
2730
int __user *, parent_tidptr,
2731
int __user *, child_tidptr,
2732
unsigned long, tls)
2733
#else
2734
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2735
int __user *, parent_tidptr,
2736
int __user *, child_tidptr,
2737
unsigned long, tls)
2738
#endif
2739
{
2740
struct kernel_clone_args args = {
2741
.flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
2742
.pidfd = parent_tidptr,
2743
.child_tid = child_tidptr,
2744
.parent_tid = parent_tidptr,
2745
.exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
2746
.stack = newsp,
2747
.tls = tls,
2748
};
2749
2750
return kernel_clone(&args);
2751
}
2752
#endif
2753
2754
static noinline int copy_clone_args_from_user(struct kernel_clone_args *kargs,
2755
struct clone_args __user *uargs,
2756
size_t usize)
2757
{
2758
int err;
2759
struct clone_args args;
2760
pid_t *kset_tid = kargs->set_tid;
2761
2762
BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
2763
CLONE_ARGS_SIZE_VER0);
2764
BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
2765
CLONE_ARGS_SIZE_VER1);
2766
BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
2767
CLONE_ARGS_SIZE_VER2);
2768
BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);
2769
2770
if (unlikely(usize > PAGE_SIZE))
2771
return -E2BIG;
2772
if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
2773
return -EINVAL;
2774
2775
err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
2776
if (err)
2777
return err;
2778
2779
if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
2780
return -EINVAL;
2781
2782
if (unlikely(!args.set_tid && args.set_tid_size > 0))
2783
return -EINVAL;
2784
2785
if (unlikely(args.set_tid && args.set_tid_size == 0))
2786
return -EINVAL;
2787
2788
/*
2789
* Verify that the higher 32 bits of exit_signal are unset and that
2790
* it is a valid signal
2791
*/
2792
if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
2793
!valid_signal(args.exit_signal)))
2794
return -EINVAL;
2795
2796
if ((args.flags & CLONE_INTO_CGROUP) &&
2797
(args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
2798
return -EINVAL;
2799
2800
*kargs = (struct kernel_clone_args){
2801
.flags = args.flags,
2802
.pidfd = u64_to_user_ptr(args.pidfd),
2803
.child_tid = u64_to_user_ptr(args.child_tid),
2804
.parent_tid = u64_to_user_ptr(args.parent_tid),
2805
.exit_signal = args.exit_signal,
2806
.stack = args.stack,
2807
.stack_size = args.stack_size,
2808
.tls = args.tls,
2809
.set_tid_size = args.set_tid_size,
2810
.cgroup = args.cgroup,
2811
};
2812
2813
if (args.set_tid &&
2814
copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
2815
(kargs->set_tid_size * sizeof(pid_t))))
2816
return -EFAULT;
2817
2818
kargs->set_tid = kset_tid;
2819
2820
return 0;
2821
}
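/*
 * Illustrative userspace sketch only (not part of the kernel build) of
 * the size-based versioning handled above: a caller built against an
 * older UAPI may pass just the CLONE_ARGS_SIZE_VER0 prefix of struct
 * clone_args and copy_struct_from_user() zero-fills the rest. SYS_clone3
 * and the headers are standard Linux UAPI; the helper is hypothetical.
 *
 *	#include <linux/sched.h>	// struct clone_args, CLONE_ARGS_SIZE_VER0
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	static pid_t clone3_ver0_example(void)
 *	{
 *		struct clone_args args = {
 *			.exit_signal = SIGCHLD,	// plain fork()-like child
 *		};
 *
 *		// Only the VER0-sized prefix is copied by the kernel.
 *		return syscall(SYS_clone3, &args, CLONE_ARGS_SIZE_VER0);
 *	}
 */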
2822
2823
/**
2824
* clone3_stack_valid - check and prepare stack
2825
* @kargs: kernel clone args
2826
*
2827
* Verify that the stack arguments userspace gave us are sane.
2828
* In addition, set the stack direction for userspace since it's easy for us to
2829
* determine.
2830
*/
2831
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
2832
{
2833
if (kargs->stack == 0) {
2834
if (kargs->stack_size > 0)
2835
return false;
2836
} else {
2837
if (kargs->stack_size == 0)
2838
return false;
2839
2840
if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
2841
return false;
2842
2843
#if !defined(CONFIG_STACK_GROWSUP)
2844
kargs->stack += kargs->stack_size;
2845
#endif
2846
}
2847
2848
return true;
2849
}
2850
2851
static bool clone3_args_valid(struct kernel_clone_args *kargs)
2852
{
2853
/* Verify that no unknown flags are passed along. */
2854
if (kargs->flags &
2855
~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
2856
return false;
2857
2858
/*
2859
* - make the CLONE_DETACHED bit reusable for clone3
2860
* - make the CSIGNAL bits reusable for clone3
2861
*/
2862
if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
2863
return false;
2864
2865
if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
2866
(CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
2867
return false;
2868
2869
if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
2870
kargs->exit_signal)
2871
return false;
2872
2873
if (!clone3_stack_valid(kargs))
2874
return false;
2875
2876
return true;
2877
}
2878
2879
/**
2880
* sys_clone3 - create a new process with specific properties
2881
* @uargs: argument structure
2882
* @size: size of @uargs
2883
*
2884
* clone3() is the extensible successor to clone()/clone2().
2885
* It takes a struct as argument that is versioned by its size.
2886
*
2887
* Return: On success, a positive PID for the child process.
2888
* On error, a negative errno number.
2889
*/
2890
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
2891
{
2892
int err;
2893
2894
struct kernel_clone_args kargs;
2895
pid_t set_tid[MAX_PID_NS_LEVEL];
2896
2897
#ifdef __ARCH_BROKEN_SYS_CLONE3
2898
#warning clone3() entry point is missing, please fix
2899
return -ENOSYS;
2900
#endif
2901
2902
kargs.set_tid = set_tid;
2903
2904
err = copy_clone_args_from_user(&kargs, uargs, size);
2905
if (err)
2906
return err;
2907
2908
if (!clone3_args_valid(&kargs))
2909
return -EINVAL;
2910
2911
return kernel_clone(&kargs);
2912
}
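/*
 * Illustrative userspace sketch only (not part of the kernel build) of
 * calling clone3() with CLONE_PIDFD; glibc provides no wrapper, so the
 * raw syscall is used. The pidfd is written to the location named by
 * args.pidfd before the parent returns. The helper name and error
 * handling are assumptions for illustration.
 *
 *	#include <linux/sched.h>	// struct clone_args, CLONE_PIDFD
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *	#include <stdint.h>
 *
 *	static pid_t clone3_pidfd_example(int *pidfd_out)
 *	{
 *		struct clone_args args = {
 *			.flags	     = CLONE_PIDFD,
 *			.pidfd	     = (uint64_t)(uintptr_t)pidfd_out,
 *			.exit_signal = SIGCHLD,
 *		};
 *
 *		// 0 in the child, the child's PID in the parent,
 *		// -1 with errno set on failure.
 *		return syscall(SYS_clone3, &args, sizeof(args));
 *	}
 */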
2913
2914
void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
2915
{
2916
struct task_struct *leader, *parent, *child;
2917
int res;
2918
2919
read_lock(&tasklist_lock);
2920
leader = top = top->group_leader;
2921
down:
2922
for_each_thread(leader, parent) {
2923
list_for_each_entry(child, &parent->children, sibling) {
2924
res = visitor(child, data);
2925
if (res) {
2926
if (res < 0)
2927
goto out;
2928
leader = child;
2929
goto down;
2930
}
2931
up:
2932
;
2933
}
2934
}
2935
2936
if (leader != top) {
2937
child = leader;
2938
parent = child->real_parent;
2939
leader = parent->group_leader;
2940
goto up;
2941
}
2942
out:
2943
read_unlock(&tasklist_lock);
2944
}
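/*
 * Illustrative sketch only (not part of the kernel build) of a
 * proc_visitor callback for walk_process_tree(). Per the loop above,
 * returning 0 moves on to the next sibling, a positive value descends
 * into the visited child's own children, and a negative value aborts
 * the walk. The counting visitor below is hypothetical.
 *
 *	static int count_descendants(struct task_struct *p, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 1;	// keep descending into p's children
 *	}
 *
 *	// Usage:	unsigned int n = 0;
 *	//		walk_process_tree(tsk, count_descendants, &n);
 */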
2945
2946
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
2947
#define ARCH_MIN_MMSTRUCT_ALIGN 0
2948
#endif
2949
2950
static void sighand_ctor(void *data)
2951
{
2952
struct sighand_struct *sighand = data;
2953
2954
spin_lock_init(&sighand->siglock);
2955
init_waitqueue_head(&sighand->signalfd_wqh);
2956
}
2957
2958
void __init mm_cache_init(void)
2959
{
2960
unsigned int mm_size;
2961
2962
/*
2963
* The mm_cpumask is located at the end of mm_struct, and is
2964
* dynamically sized based on the maximum CPU number this system
2965
* can have, taking hotplug into account (nr_cpu_ids).
2966
*/
2967
mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
2968
2969
mm_cachep = kmem_cache_create_usercopy("mm_struct",
2970
mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
2971
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2972
offsetof(struct mm_struct, saved_auxv),
2973
sizeof_field(struct mm_struct, saved_auxv),
2974
NULL);
2975
}
2976
2977
void __init proc_caches_init(void)
2978
{
2979
sighand_cachep = kmem_cache_create("sighand_cache",
2980
sizeof(struct sighand_struct), 0,
2981
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
2982
SLAB_ACCOUNT, sighand_ctor);
2983
signal_cachep = kmem_cache_create("signal_cache",
2984
sizeof(struct signal_struct), 0,
2985
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2986
NULL);
2987
files_cachep = kmem_cache_create("files_cache",
2988
sizeof(struct files_struct), 0,
2989
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2990
NULL);
2991
fs_cachep = kmem_cache_create("fs_cache",
2992
sizeof(struct fs_struct), 0,
2993
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2994
NULL);
2995
mmap_init();
2996
nsproxy_cache_init();
2997
}
2998
2999
/*
3000
* Check constraints on flags passed to the unshare system call.
3001
*/
3002
static int check_unshare_flags(unsigned long unshare_flags)
3003
{
3004
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
3005
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
3006
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
3007
CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
3008
CLONE_NEWTIME))
3009
return -EINVAL;
3010
/*
3011
* Not implemented, but pretend it works if there is nothing
3012
* to unshare. Note that unsharing the address space or the
3013
* signal handlers also needs to unshare the signal queues (aka
3014
* CLONE_THREAD).
3015
*/
3016
if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
3017
if (!thread_group_empty(current))
3018
return -EINVAL;
3019
}
3020
if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
3021
if (refcount_read(&current->sighand->count) > 1)
3022
return -EINVAL;
3023
}
3024
if (unshare_flags & CLONE_VM) {
3025
if (!current_is_single_threaded())
3026
return -EINVAL;
3027
}
3028
3029
return 0;
3030
}
3031
3032
/*
3033
* Unshare the filesystem structure if it is being shared
3034
*/
3035
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
3036
{
3037
struct fs_struct *fs = current->fs;
3038
3039
if (!(unshare_flags & CLONE_FS) || !fs)
3040
return 0;
3041
3042
/* don't need the lock here; in the worst case we'll do a useless copy */
3043
if (fs->users == 1)
3044
return 0;
3045
3046
*new_fsp = copy_fs_struct(fs);
3047
if (!*new_fsp)
3048
return -ENOMEM;
3049
3050
return 0;
3051
}
3052
3053
/*
3054
* Unshare file descriptor table if it is being shared
3055
*/
3056
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
3057
{
3058
struct files_struct *fd = current->files;
3059
3060
if ((unshare_flags & CLONE_FILES) &&
3061
(fd && atomic_read(&fd->count) > 1)) {
3062
fd = dup_fd(fd, NULL);
3063
if (IS_ERR(fd))
3064
return PTR_ERR(fd);
3065
*new_fdp = fd;
3066
}
3067
3068
return 0;
3069
}
3070
3071
/*
3072
* unshare allows a process to 'unshare' part of the process
3073
* context which was originally shared using clone. copy_*
3074
* functions used by kernel_clone() cannot be used here directly
3075
* because they modify an inactive task_struct that is being
3076
* constructed. Here we are modifying the current, active,
3077
* task_struct.
3078
*/
3079
int ksys_unshare(unsigned long unshare_flags)
3080
{
3081
struct fs_struct *fs, *new_fs = NULL;
3082
struct files_struct *new_fd = NULL;
3083
struct cred *new_cred = NULL;
3084
struct nsproxy *new_nsproxy = NULL;
3085
int do_sysvsem = 0;
3086
int err;
3087
3088
/*
3089
* If unsharing a user namespace, must also unshare the thread group
3090
* and unshare the filesystem root and working directories.
3091
*/
3092
if (unshare_flags & CLONE_NEWUSER)
3093
unshare_flags |= CLONE_THREAD | CLONE_FS;
3094
/*
3095
* If unsharing vm, must also unshare signal handlers.
3096
*/
3097
if (unshare_flags & CLONE_VM)
3098
unshare_flags |= CLONE_SIGHAND;
3099
/*
3100
* If unsharing signal handlers, must also unshare the signal queues.
3101
*/
3102
if (unshare_flags & CLONE_SIGHAND)
3103
unshare_flags |= CLONE_THREAD;
3104
/*
3105
* If unsharing namespace, must also unshare filesystem information.
3106
*/
3107
if (unshare_flags & CLONE_NEWNS)
3108
unshare_flags |= CLONE_FS;
3109
3110
err = check_unshare_flags(unshare_flags);
3111
if (err)
3112
goto bad_unshare_out;
3113
/*
3114
* CLONE_NEWIPC must also detach from the undolist: after switching
3115
* to a new ipc namespace, the semaphore arrays from the old
3116
* namespace are unreachable.
3117
*/
3118
if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
3119
do_sysvsem = 1;
3120
err = unshare_fs(unshare_flags, &new_fs);
3121
if (err)
3122
goto bad_unshare_out;
3123
err = unshare_fd(unshare_flags, &new_fd);
3124
if (err)
3125
goto bad_unshare_cleanup_fs;
3126
err = unshare_userns(unshare_flags, &new_cred);
3127
if (err)
3128
goto bad_unshare_cleanup_fd;
3129
err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
3130
new_cred, new_fs);
3131
if (err)
3132
goto bad_unshare_cleanup_cred;
3133
3134
if (new_cred) {
3135
err = set_cred_ucounts(new_cred);
3136
if (err)
3137
goto bad_unshare_cleanup_cred;
3138
}
3139
3140
if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
3141
if (do_sysvsem) {
3142
/*
3143
* CLONE_SYSVSEM is equivalent to sys_exit().
3144
*/
3145
exit_sem(current);
3146
}
3147
if (unshare_flags & CLONE_NEWIPC) {
3148
/* Orphan segments in old ns (see sem above). */
3149
exit_shm(current);
3150
shm_init_task(current);
3151
}
3152
3153
if (new_nsproxy)
3154
switch_task_namespaces(current, new_nsproxy);
3155
3156
task_lock(current);
3157
3158
if (new_fs) {
3159
fs = current->fs;
3160
read_seqlock_excl(&fs->seq);
3161
current->fs = new_fs;
3162
if (--fs->users)
3163
new_fs = NULL;
3164
else
3165
new_fs = fs;
3166
read_sequnlock_excl(&fs->seq);
3167
}
3168
3169
if (new_fd)
3170
swap(current->files, new_fd);
3171
3172
task_unlock(current);
3173
3174
if (new_cred) {
3175
/* Install the new user namespace */
3176
commit_creds(new_cred);
3177
new_cred = NULL;
3178
}
3179
}
3180
3181
perf_event_namespaces(current);
3182
3183
bad_unshare_cleanup_cred:
3184
if (new_cred)
3185
put_cred(new_cred);
3186
bad_unshare_cleanup_fd:
3187
if (new_fd)
3188
put_files_struct(new_fd);
3189
3190
bad_unshare_cleanup_fs:
3191
if (new_fs)
3192
free_fs_struct(new_fs);
3193
3194
bad_unshare_out:
3195
return err;
3196
}
3197
3198
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
3199
{
3200
return ksys_unshare(unshare_flags);
3201
}
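/*
 * Illustrative userspace sketch only (not part of the kernel build) of
 * the flag implications applied in ksys_unshare() above: requesting a
 * new mount namespace implicitly unshares fs_struct as well, so later
 * mount() or chdir() calls no longer affect the original namespace.
 * The helper name is hypothetical; unshare(2) itself is standard.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	static int enter_private_mount_ns(void)
 *	{
 *		// Requires CAP_SYS_ADMIN (for example inside a user namespace).
 *		if (unshare(CLONE_NEWNS) == -1) {
 *			perror("unshare(CLONE_NEWNS)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */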
3202
3203
/*
3204
* Helper to unshare the files of the current task.
3205
* We don't want to expose copy_files internals to
3206
* the exec layer of the kernel.
3207
*/
3208
3209
int unshare_files(void)
3210
{
3211
struct task_struct *task = current;
3212
struct files_struct *old, *copy = NULL;
3213
int error;
3214
3215
error = unshare_fd(CLONE_FILES, &copy);
3216
if (error || !copy)
3217
return error;
3218
3219
old = task->files;
3220
task_lock(task);
3221
task->files = copy;
3222
task_unlock(task);
3223
put_files_struct(old);
3224
return 0;
3225
}
3226
3227
static int sysctl_max_threads(const struct ctl_table *table, int write,
3228
void *buffer, size_t *lenp, loff_t *ppos)
3229
{
3230
struct ctl_table t;
3231
int ret;
3232
int threads = max_threads;
3233
int min = 1;
3234
int max = MAX_THREADS;
3235
3236
t = *table;
3237
t.data = &threads;
3238
t.extra1 = &min;
3239
t.extra2 = &max;
3240
3241
ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3242
if (ret || !write)
3243
return ret;
3244
3245
max_threads = threads;
3246
3247
return 0;
3248
}
3249
3250
static const struct ctl_table fork_sysctl_table[] = {
3251
{
3252
.procname = "threads-max",
3253
.data = NULL,
3254
.maxlen = sizeof(int),
3255
.mode = 0644,
3256
.proc_handler = sysctl_max_threads,
3257
},
3258
};
3259
3260
static int __init init_fork_sysctl(void)
3261
{
3262
register_sysctl_init("kernel", fork_sysctl_table);
3263
return 0;
3264
}
3265
3266
subsys_initcall(init_fork_sysctl);
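/*
 * Illustrative userspace sketch only (not part of the kernel build):
 * the table above exposes the thread limit as
 * /proc/sys/kernel/threads-max, clamped to [1, MAX_THREADS] by
 * sysctl_max_threads(). Reading it is an ordinary procfs read; the
 * helper below is hypothetical.
 *
 *	#include <stdio.h>
 *
 *	static long read_threads_max(void)
 *	{
 *		long val = -1;
 *		FILE *f = fopen("/proc/sys/kernel/threads-max", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (fscanf(f, "%ld", &val) != 1)
 *			val = -1;
 *		fclose(f);
 *		return val;
 *	}
 */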
3267
3268