2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/fork.c
@@ -42,7 +42,6 @@
 #include <linux/mmu_notifier.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
-#include <linux/kprobes.h>
 #include <linux/vmacache.h>
 #include <linux/nsproxy.h>
 #include <linux/capability.h>
@@ -295,7 +294,7 @@
 			return;
 		}
 
-		vfree(tsk->stack);
+		vfree_atomic(tsk->stack);
 		return;
 	}
 #endif
@@ -449,6 +448,9 @@
 
 void free_task(struct task_struct *tsk)
 {
+#ifdef CONFIG_SECCOMP
+	WARN_ON_ONCE(tsk->seccomp.filter);
+#endif
 	cpufreq_task_times_exit(tsk);
 	scs_release(tsk);
 
@@ -724,19 +726,6 @@
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
-#ifdef CONFIG_PREEMPT_RT
-/*
- * RCU callback for delayed mm drop. Not strictly rcu, but we don't
- * want another facility to make this work.
- */
-void __mmdrop_delayed(struct rcu_head *rhp)
-{
-	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
-
-	__mmdrop(mm);
-}
-#endif
-
 static void mmdrop_async_fn(struct work_struct *work)
 {
 	struct mm_struct *mm;
@@ -778,15 +767,6 @@
 	WARN_ON(refcount_read(&tsk->usage));
 	WARN_ON(tsk == current);
 
-	/*
-	 * Remove function-return probe instances associated with this
-	 * task and put them back on the free list.
-	 */
-	kprobe_flush_task(tsk);
-
-	/* Task is done with its stack. */
-	put_task_stack(tsk);
-
 	io_uring_free(tsk);
 	cgroup_free(tsk);
 	task_numa_free(tsk, true);
@@ -799,6 +779,14 @@
 	free_task(tsk);
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);
+
+void __put_task_struct_rcu_cb(struct rcu_head *rhp)
+{
+	struct task_struct *task = container_of(rhp, struct task_struct, rcu);
+
+	__put_task_struct(task);
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
 
 void __init __weak arch_task_cache_init(void) { }
 
@@ -984,13 +972,11 @@
 	tsk->splice_pipe = NULL;
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
-	tsk->wake_q_sleeper.next = NULL;
 	tsk->pf_io_worker = NULL;
 
 	account_kernel_stack(tsk, 1);
 
 	kcov_task_init(tsk);
-	kmap_local_fork(tsk);
 
 #ifdef CONFIG_FAULT_INJECTION
 	tsk->fail_nth = 0;
@@ -2084,7 +2070,6 @@
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
-	p->sigqueue_cache = NULL;
 
 	p->utime = p->stime = p->gtime = 0;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -2333,12 +2318,6 @@
 
 	spin_lock(&current->sighand->siglock);
 
-	/*
-	 * Copy seccomp details explicitly here, in case they were changed
-	 * before holding sighand lock.
-	 */
-	copy_seccomp(p);
-
 	rseq_fork(p, clone_flags);
 
 	/* Don't start children in a dying pid namespace */
@@ -2352,6 +2331,14 @@
 		retval = -EINTR;
 		goto bad_fork_cancel_cgroup;
 	}
+
+	/* No more failure paths after this point. */
+
+	/*
+	 * Copy seccomp details explicitly here, in case they were changed
+	 * before holding sighand lock.
+	 */
+	copy_seccomp(p);
 
 	init_task_pid_links(p);
 	if (likely(p->pid)) {
@@ -2502,11 +2489,6 @@
 	}
 
 	return task;
-}
-
-struct mm_struct *copy_init_mm(void)
-{
-	return dup_mm(NULL, &init_mm);
 }
 
 /*
@@ -2813,7 +2795,7 @@
 	 * - make the CLONE_DETACHED bit reuseable for clone3
 	 * - make the CSIGNAL bits reuseable for clone3
 	 */
-	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
+	if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
 		return false;
 
 	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
@@ -2905,10 +2887,27 @@
 	init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
 {
 	unsigned int mm_size;
 
+	/*
+	 * The mm_cpumask is located at the end of mm_struct, and is
+	 * dynamically sized based on the maximum CPU number this system
+	 * can have, taking hotplug into account (nr_cpu_ids).
+	 */
+	mm_size = sizeof(struct mm_struct) + cpumask_size();
+
+	mm_cachep = kmem_cache_create_usercopy("mm_struct",
+			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+			offsetof(struct mm_struct, saved_auxv),
+			sizeof_field(struct mm_struct, saved_auxv),
+			NULL);
+}
+
+void __init proc_caches_init(void)
+{
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
@@ -2926,19 +2925,6 @@
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
 			NULL);
 
-	/*
-	 * The mm_cpumask is located at the end of mm_struct, and is
-	 * dynamically sized based on the maximum CPU number this system
-	 * can have, taking hotplug into account (nr_cpu_ids).
-	 */
-	mm_size = sizeof(struct mm_struct) + cpumask_size();
-
-	mm_cachep = kmem_cache_create_usercopy("mm_struct",
-			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
-			offsetof(struct mm_struct, saved_auxv),
-			sizeof_field(struct mm_struct, saved_auxv),
-			NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
 	mmap_init();
 	nsproxy_cache_init();