| .. | .. |
|---|
| 61 | 61 | extern void sched_dead(struct task_struct *p); |
|---|
| 62 | 62 | |
|---|
| 63 | 63 | void __noreturn do_task_dead(void); |
|---|
| 64 | +void __noreturn make_task_dead(int signr); |
|---|
| 64 | 65 | |
|---|
| 66 | +extern void mm_cache_init(void); |
|---|
| 65 | 67 | extern void proc_caches_init(void); |
|---|
| 66 | 68 | |
|---|
| 67 | 69 | extern void fork_init(void); |
|---|
| .. | .. |
|---|
| 88 | 90 | extern pid_t kernel_clone(struct kernel_clone_args *kargs); |
|---|
| 89 | 91 | struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); |
|---|
| 90 | 92 | struct task_struct *fork_idle(int); |
|---|
| 91 | | -struct mm_struct *copy_init_mm(void); |
|---|
| 92 | 93 | extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); |
|---|
| 93 | 94 | extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); |
|---|
| 94 | 95 | int kernel_wait(pid_t pid, int *stat); |
|---|
| .. | .. |
|---|
| 109 | 110 | } |
|---|
| 110 | 111 | |
|---|
| 111 | 112 | extern void __put_task_struct(struct task_struct *t); |
|---|
| 113 | +extern void __put_task_struct_rcu_cb(struct rcu_head *rhp); |
|---|
| 112 | 114 | |
|---|
| 113 | 115 | static inline void put_task_struct(struct task_struct *t) |
|---|
| 114 | 116 | { |
|---|
| 115 | | - if (refcount_dec_and_test(&t->usage)) |
|---|
| 117 | + if (!refcount_dec_and_test(&t->usage)) |
|---|
| 118 | + return; |
|---|
| 119 | + |
|---|
| 120 | + /* |
|---|
| 121 | + * under PREEMPT_RT, we can't call put_task_struct |
|---|
| 122 | + * in atomic context because it will indirectly |
|---|
| 123 | + * acquire sleeping locks. |
|---|
| 124 | + * |
|---|
| 125 | + * call_rcu() will schedule __put_task_struct_rcu_cb() |
|---|
| 126 | + * to be called in process context. |
|---|
| 127 | + * |
|---|
| 128 | + * __put_task_struct() is called when |
|---|
| 129 | + * refcount_dec_and_test(&t->usage) succeeds. |
|---|
| 130 | + * |
|---|
| 131 | + * This means that it can't "conflict" with |
|---|
| 132 | + * put_task_struct_rcu_user() which abuses ->rcu the same |
|---|
| 133 | + * way; rcu_users has a reference so task->usage can't be |
|---|
| 134 | + * zero after rcu_users 1 -> 0 transition. |
|---|
| 135 | + * |
|---|
| 136 | + * delayed_free_task() also uses ->rcu, but it is only called |
|---|
| 137 | + * when it fails to fork a process. Therefore, there is no |
|---|
| 138 | + * way it can conflict with put_task_struct(). |
|---|
| 139 | + */ |
|---|
| 140 | + if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible()) |
|---|
| 141 | + call_rcu(&t->rcu, __put_task_struct_rcu_cb); |
|---|
| 142 | + else |
|---|
| 116 | 143 | __put_task_struct(t); |
|---|
| 117 | 144 | } |
|---|
| 118 | 145 | |
|---|