@@ -8,10 +8,33 @@
  */
 
 #include <linux/sched.h>
+#include <linux/uaccess.h>
 
 struct task_struct;
 struct rusage;
 union thread_union;
+struct css_set;
+
+/* All the bits taken by the old clone syscall. */
+#define CLONE_LEGACY_FLAGS 0xffffffffULL
+
+struct kernel_clone_args {
+	u64 flags;
+	int __user *pidfd;
+	int __user *child_tid;
+	int __user *parent_tid;
+	int exit_signal;
+	unsigned long stack;
+	unsigned long stack_size;
+	unsigned long tls;
+	pid_t *set_tid;
+	/* Number of elements in *set_tid */
+	size_t set_tid_size;
+	int cgroup;
+	int io_thread;
+	struct cgroup *cgrp;
+	struct css_set *cset;
+};
 
 /*
  * This serializes "schedule()" and also protects
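For orientation: struct kernel_clone_args is the kernel-internal argument block (in mainline it lives in include/linux/sched/task.h) that replaces the long positional argument lists of the old fork/clone helpers. A minimal sketch of a caller, modeled on kernel_thread() from kernel/fork.c of the same era; for kernel threads the stack/stack_size fields are repurposed to carry the thread function and its argument, CSIGNAL and lower_32_bits() come from other kernel headers, and kernel_clone() is declared further down in this diff.

```c
/* Sketch modeled on kernel/fork.c:kernel_thread(); not part of this header. */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		/* For kernel threads, stack/stack_size smuggle fn/arg. */
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return kernel_clone(&args);
}
```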
@@ -33,32 +56,23 @@
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
 
+extern void mm_cache_init(void);
 extern void proc_caches_init(void);
 
 extern void fork_init(void);
 
 extern void release_task(struct task_struct * p);
 
-#ifdef CONFIG_HAVE_COPY_THREAD_TLS
-extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
-			struct task_struct *, unsigned long);
-#else
 extern int copy_thread(unsigned long, unsigned long, unsigned long,
-			struct task_struct *);
+			struct task_struct *, unsigned long);
 
-/* Architectures that haven't opted into copy_thread_tls get the tls argument
- * via pt_regs, so ignore the tls argument passed via C. */
-static inline int copy_thread_tls(
-		unsigned long clone_flags, unsigned long sp, unsigned long arg,
-		struct task_struct *p, unsigned long tls)
-{
-	return copy_thread(clone_flags, sp, arg, p);
-}
-#endif
 extern void flush_thread(void);
 
 #ifdef CONFIG_HAVE_EXIT_THREAD
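The sched_cgroup_fork()/sched_post_fork() pair splits scheduler setup so that the cgroup-dependent part runs only after cgroup_can_fork() has pinned the child's destination (the cgrp/cset fields above). A best-effort, heavily abridged sketch of where the hooks sit; copy_process() in kernel/fork.c is the real thing, and everything here except the four hook calls is simplified away:

```c
/* Hypothetical condensation of the fork path; not the literal function. */
static int sketch_of_copy_process(struct task_struct *p, u64 clone_flags,
				  struct kernel_clone_args *args)
{
	int retval;

	retval = sched_fork(clone_flags, p);	/* cgroup-independent init */
	if (retval)
		return retval;

	retval = cgroup_can_fork(p, args);	/* pins args->cgrp / args->cset */
	if (retval)
		return retval;

	sched_cgroup_fork(p, args);	/* may now read the child's cgroup */

	/* ... pid allocation, making the child visible ... */

	sched_post_fork(p);		/* final scheduler fixups */
	cgroup_post_fork(p, args);	/* commits the cgroup membership */
	return 0;
}
```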
@@ -71,13 +85,14 @@
 extern void do_group_exit(int);
 
 extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
 
-extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
-extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
+extern pid_t kernel_clone(struct kernel_clone_args *kargs);
+struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
 struct task_struct *fork_idle(int);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+int kernel_wait(pid_t pid, int *stat);
 
 extern void free_task(struct task_struct *tsk);
 
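kernel_clone() subsumes both _do_fork() and do_fork(), taking the argument struct instead of positional arguments, and kernel_wait() is a kernel-internal waitpid-style helper (used, for instance, by the usermode-helper code). A hypothetical sketch combining it with kernel_thread(), which itself now wraps kernel_clone(); helper_fn and run_helper are invented names for the sketch:

```c
/* Hypothetical helper task; returns its exit code. */
static int helper_fn(void *data)
{
	/* ... do some work ... */
	return 0;
}

/* Spawn the helper and reap it synchronously. */
static int run_helper(void *data)
{
	int status = 0;
	pid_t pid;

	/* SIGCHLD as the exit signal makes the child reapable below. */
	pid = kernel_thread(helper_fn, data, SIGCHLD);
	if (pid < 0)
		return pid;

	/* Blocks until the child exits; status receives the wait status. */
	if (kernel_wait(pid, &status) < 0)
		return -ECHILD;

	return status;
}
```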
@@ -88,26 +103,53 @@
 #define sched_exec() {}
 #endif
 
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-
-#ifdef CONFIG_PREEMPT_RT_BASE
-extern void __put_task_struct_cb(struct rcu_head *rhp);
-
-static inline void put_task_struct(struct task_struct *t)
+static inline struct task_struct *get_task_struct(struct task_struct *t)
 {
-	if (atomic_dec_and_test(&t->usage))
-		call_rcu(&t->put_rcu, __put_task_struct_cb);
+	refcount_inc(&t->usage);
+	return t;
 }
-#else
+
 extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
 
 static inline void put_task_struct(struct task_struct *t)
 {
-	if (atomic_dec_and_test(&t->usage))
+	if (!refcount_dec_and_test(&t->usage))
+		return;
+
+	/*
+	 * under PREEMPT_RT, we can't call put_task_struct
+	 * in atomic context because it will indirectly
+	 * acquire sleeping locks.
+	 *
+	 * call_rcu() will schedule delayed_put_task_struct_rcu()
+	 * to be called in process context.
+	 *
+	 * __put_task_struct() is called when
+	 * refcount_dec_and_test(&t->usage) succeeds.
+	 *
+	 * This means that it can't "conflict" with
+	 * put_task_struct_rcu_user() which abuses ->rcu the same
+	 * way; rcu_users has a reference so task->usage can't be
+	 * zero after rcu_users 1 -> 0 transition.
+	 *
+	 * delayed_free_task() also uses ->rcu, but it is only called
+	 * when it fails to fork a process. Therefore, there is no
+	 * way it can conflict with put_task_struct().
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
+		call_rcu(&t->rcu, __put_task_struct_rcu_cb);
+	else
 		__put_task_struct(t);
 }
-#endif
-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+
+static inline void put_task_struct_many(struct task_struct *t, int nr)
+{
+	if (refcount_sub_and_test(nr, &t->usage))
+		__put_task_struct(t);
+}
+
+void put_task_struct_rcu_user(struct task_struct *task);
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 extern int arch_task_struct_size __read_mostly;
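Summing up the refcounting rework in the last hunk: ->usage becomes a refcount_t rather than an atomic_t, get_task_struct() now returns its argument so it composes with assignment, and put_task_struct() is safe even from atomic context on PREEMPT_RT, deferring the heavyweight free through call_rcu() when it cannot sleep. The classic usage pattern, as a sketch: find_task_by_vpid() is the standard kernel pid lookup, and the RCU read lock protects only the lookup, so a real reference must be taken before dropping it.

```c
/* Sketch of the usual pin-a-task pattern with the reworked helpers. */
static struct task_struct *pin_task_by_vpid(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);	/* pointer valid only under RCU */
	if (p)
		get_task_struct(p);	/* refcount_inc(&p->usage) */
	rcu_read_unlock();

	/* Caller drops the reference with put_task_struct(p),
	 * which is now legal even in atomic context on RT. */
	return p;
}
```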
---|