@@ -42,7 +42,6 @@
 #include <linux/mmu_notifier.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
-#include <linux/kprobes.h>
 #include <linux/vmacache.h>
 #include <linux/nsproxy.h>
 #include <linux/capability.h>
@@ -295,7 +294,7 @@
                         return;
                 }
 
-                vfree(tsk->stack);
+                vfree_atomic(tsk->stack);
                 return;
         }
 #endif
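The vfree() to vfree_atomic() switch is worth a note: vfree() may sleep (it can purge lazy vmap areas), while vfree_atomic() only queues the area for deferred freeing and is safe where sleeping is forbidden. A minimal sketch of the calling convention, with a hypothetical buffer and function name:

#include <linux/vmalloc.h>

static void *scratch;   /* assume: scratch = vmalloc(...) earlier */

/* Hypothetical caller that may run with preemption disabled. */
static void drop_scratch_nosleep(void)
{
        vfree_atomic(scratch);  /* defers the actual free; never sleeps */
        scratch = NULL;
}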
@@ -449,6 +448,9 @@
 
 void free_task(struct task_struct *tsk)
 {
+#ifdef CONFIG_SECCOMP
+        WARN_ON_ONCE(tsk->seccomp.filter);
+#endif
         cpufreq_task_times_exit(tsk);
         scs_release(tsk);
 
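The new WARN_ON_ONCE() pairs with the copy_seccomp() reordering later in this diff: seccomp state is normally dropped before the final task free (upstream does this via seccomp_filter_release() on the exit path), so once a child can only gain a filter after copy_process()'s last failure point, any task reaching free_task() with a live filter indicates a leaked reference. A simplified sketch of the normal teardown order, assuming the upstream release hook:

/*
 * Simplified: by the time the last reference drops and free_task()
 * runs, the filter must already be gone.
 */
void exit_path_sketch(struct task_struct *p)
{
        seccomp_filter_release(p);      /* detach and put p->seccomp.filter */
        /* ... exit notification; final put_task_struct() -> free_task() ... */
}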
@@ -724,19 +726,6 @@
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
-#ifdef CONFIG_PREEMPT_RT
-/*
- * RCU callback for delayed mm drop. Not strictly rcu, but we don't
- * want another facility to make this work.
- */
-void __mmdrop_delayed(struct rcu_head *rhp)
-{
-        struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
-
-        __mmdrop(mm);
-}
-#endif
-
 static void mmdrop_async_fn(struct work_struct *work)
 {
         struct mm_struct *mm;
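Deleting __mmdrop_delayed() from fork.c does not retire the pattern; upstream carries the same RT deferral as mmdrop_sched() in <linux/sched/mm.h>, which is presumably where this block migrated. For context, a sketch of that pattern (assuming mm_struct still carries the delayed_drop rcu_head under PREEMPT_RT):

/*
 * On PREEMPT_RT the final __mmdrop() is too heavy for a
 * non-preemptible section, so the last reference is dropped
 * through call_rcu() instead of freeing inline.
 */
static void mmdrop_sched_sketch(struct mm_struct *mm)
{
        if (atomic_dec_and_test(&mm->mm_count))
                call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}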
@@ -778,15 +767,6 @@
         WARN_ON(refcount_read(&tsk->usage));
         WARN_ON(tsk == current);
 
-        /*
-         * Remove function-return probe instances associated with this
-         * task and put them back on the free list.
-         */
-        kprobe_flush_task(tsk);
-
-        /* Task is done with its stack. */
-        put_task_stack(tsk);
-
         io_uring_free(tsk);
         cgroup_free(tsk);
         task_numa_free(tsk, true);
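kprobe_flush_task() and put_task_stack() leave __put_task_struct() here; their work now happens earlier in the teardown, elsewhere in this series (not visible in this hunk). The refcounted stack API itself is unchanged; for reference, the usual pinning pattern around a remote stack walk (function name hypothetical):

#include <linux/sched/task_stack.h>

static void walk_remote_stack(struct task_struct *t)
{
        void *stack = try_get_task_stack(t);    /* pin the stack */

        if (!stack)
                return;         /* task exited; stack already freed */
        /* ... inspect the stack here ... */
        put_task_stack(t);      /* unpin; the last put may free it */
}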
@@ -799,6 +779,14 @@
         free_task(tsk);
 }
 EXPORT_SYMBOL_GPL(__put_task_struct);
+
+void __put_task_struct_rcu_cb(struct rcu_head *rhp)
+{
+        struct task_struct *task = container_of(rhp, struct task_struct, rcu);
+
+        __put_task_struct(task);
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
 
 void __init __weak arch_task_cache_init(void) { }
 
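The newly exported __put_task_struct_rcu_cb() exists so that a final put from a non-preemptible context can be deferred: __put_task_struct() ends in free_task(), which may take sleeping locks on PREEMPT_RT. A sketch of the intended caller-side wiring (this mirrors how put_task_struct() is arranged in the scheduler headers upstream; treat the details as an assumption here):

static inline void put_task_struct_sketch(struct task_struct *t)
{
        if (!refcount_dec_and_test(&t->usage))
                return;

        if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
                /* teardown may sleep: defer to a preemptible context */
                call_rcu(&t->rcu, __put_task_struct_rcu_cb);
        else
                __put_task_struct(t);
}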
@@ -984,13 +972,11 @@
         tsk->splice_pipe = NULL;
         tsk->task_frag.page = NULL;
         tsk->wake_q.next = NULL;
-        tsk->wake_q_sleeper.next = NULL;
         tsk->pf_io_worker = NULL;
 
         account_kernel_stack(tsk, 1);
 
         kcov_task_init(tsk);
-        kmap_local_fork(tsk);
 
 #ifdef CONFIG_FAULT_INJECTION
         tsk->fail_nth = 0;
@@ -2084,7 +2070,6 @@
         spin_lock_init(&p->alloc_lock);
 
         init_sigpending(&p->pending);
-        p->sigqueue_cache = NULL;
 
         p->utime = p->stime = p->gtime = 0;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -2333,12 +2318,6 @@
 
         spin_lock(&current->sighand->siglock);
 
-        /*
-         * Copy seccomp details explicitly here, in case they were changed
-         * before holding sighand lock.
-         */
-        copy_seccomp(p);
-
         rseq_fork(p, clone_flags);
 
         /* Don't start children in a dying pid namespace */
@@ -2352,6 +2331,14 @@
                 retval = -EINTR;
                 goto bad_fork_cancel_cgroup;
         }
+
+        /* No more failure paths after this point. */
+
+        /*
+         * Copy seccomp details explicitly here, in case they were changed
+         * before holding sighand lock.
+         */
+        copy_seccomp(p);
 
         init_task_pid_links(p);
         if (likely(p->pid)) {
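The reordering fixes a leak window rather than just tidying up: copy_seccomp() gives the child a reference on the parent's filter, but the bad_fork_* unwind frees the child through free_task(), which never performs the seccomp release that release_task() does. With the copy moved past the last failure path, a failed fork can no longer strand a filter reference, and the WARN_ON_ONCE() added to free_task() above can enforce that. The old hazard, sketched (simplified control flow, labels as in copy_process()):

        /* old order (leaky): the child already holds a filter ref */
        copy_seccomp(p);
        if (fatal_signal_pending(current)) {
                retval = -EINTR;
                goto bad_fork_cancel_cgroup;    /* free_task(p): ref never dropped */
        }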
@@ -2502,11 +2489,6 @@
         }
 
         return task;
-}
-
-struct mm_struct *copy_init_mm(void)
-{
-        return dup_mm(NULL, &init_mm);
 }
 
 /*
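copy_init_mm() was a thin wrapper (a dup_mm() of init_mm with no task); with it gone, its known user, x86 text poking, can presumably get by with a bare mm_alloc(), since nothing from init_mm actually needed duplicating. A sketch of that assumed replacement at the former call site:

static int __init poking_init_sketch(void)
{
        struct mm_struct *poking_mm = mm_alloc();       /* fresh, empty mm */

        if (!poking_mm)
                return -ENOMEM;
        /* ... set up the temporary text-poke mapping in poking_mm ... */
        return 0;
}

Note this depends on mm_cachep being available early in boot, which the mm_cache_init() split at the end of this diff provides.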
@@ -2813,7 +2795,7 @@
          * - make the CLONE_DETACHED bit reuseable for clone3
          * - make the CSIGNAL bits reuseable for clone3
          */
-        if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
+        if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
                 return false;
 
         if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
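The masking here is what lets clone3() accept CLONE_NEWTIME at all. Worked through with the uapi values:

/*
 * From <uapi/linux/sched.h>:
 *   CSIGNAL       = 0x000000ff   legacy clone() exit-signal bits
 *   CLONE_NEWTIME = 0x00000080   deliberately placed inside CSIGNAL,
 *                                because clone3() passes the exit
 *                                signal in a separate field
 *
 *   CSIGNAL & ~CLONE_NEWTIME = 0x0000007f
 *
 * Only bits 0-6 are still rejected, so a clone3() caller may now
 * request a time namespace:
 */
struct clone_args args = {
        .flags       = CLONE_NEWTIME,   /* rejected before this change */
        .exit_signal = SIGCHLD,         /* lives here, not in ->flags */
};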
@@ -2905,10 +2887,27 @@
         init_waitqueue_head(&sighand->signalfd_wqh);
 }
 
-void __init proc_caches_init(void)
+void __init mm_cache_init(void)
 {
         unsigned int mm_size;
 
+        /*
+         * The mm_cpumask is located at the end of mm_struct, and is
+         * dynamically sized based on the maximum CPU number this system
+         * can have, taking hotplug into account (nr_cpu_ids).
+         */
+        mm_size = sizeof(struct mm_struct) + cpumask_size();
+
+        mm_cachep = kmem_cache_create_usercopy("mm_struct",
+                        mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
+                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+                        offsetof(struct mm_struct, saved_auxv),
+                        sizeof_field(struct mm_struct, saved_auxv),
+                        NULL);
+}
+
+void __init proc_caches_init(void)
+{
         sighand_cachep = kmem_cache_create("sighand_cache",
                         sizeof(struct sighand_struct), 0,
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
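Splitting mm_cachep creation out of proc_caches_init() only pays off together with a matching call-site change in early boot (not part of this file; the caller below is assumed): mm_cache_init() presumably runs from mm_init() in init/main.c, well before proc_caches_init(), so that an early mm_alloc() finds the cache ready.

/* Assumed caller-side ordering in init/main.c: */
static void __init mm_init_excerpt(void)
{
        /* ... page allocator, vmalloc, etc. ... */
        mm_cache_init();        /* mm_cachep usable from here on */
}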
@@ -2926,19 +2925,6 @@
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                         NULL);
 
-        /*
-         * The mm_cpumask is located at the end of mm_struct, and is
-         * dynamically sized based on the maximum CPU number this system
-         * can have, taking hotplug into account (nr_cpu_ids).
-         */
-        mm_size = sizeof(struct mm_struct) + cpumask_size();
-
-        mm_cachep = kmem_cache_create_usercopy("mm_struct",
-                        mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
-                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
-                        offsetof(struct mm_struct, saved_auxv),
-                        sizeof_field(struct mm_struct, saved_auxv),
-                        NULL);
         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
         mmap_init();
         nsproxy_cache_init();
---|