2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/include/linux/sched/task.h
@@ -8,10 +8,33 @@
  */
 
 #include <linux/sched.h>
+#include <linux/uaccess.h>
 
 struct task_struct;
 struct rusage;
 union thread_union;
+struct css_set;
+
+/* All the bits taken by the old clone syscall. */
+#define CLONE_LEGACY_FLAGS 0xffffffffULL
+
+struct kernel_clone_args {
+	u64 flags;
+	int __user *pidfd;
+	int __user *child_tid;
+	int __user *parent_tid;
+	int exit_signal;
+	unsigned long stack;
+	unsigned long stack_size;
+	unsigned long tls;
+	pid_t *set_tid;
+	/* Number of elements in *set_tid */
+	size_t set_tid_size;
+	int cgroup;
+	int io_thread;
+	struct cgroup *cgrp;
+	struct css_set *cset;
+};
 
 /*
  * This serializes "schedule()" and also protects
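
Everything the legacy clone() path passed positionally now travels in this one struct. As a usage sketch (the helper name is hypothetical; the field packing mirrors how kernel_thread() builds its arguments in kernel/fork.c during this era, where the entry function and its argument ride in .stack/.stack_size for kernel threads):

/* Hypothetical caller: spawn a kernel worker via the struct-based API. */
static pid_t spawn_worker(int (*fn)(void *), void *arg)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VM | CLONE_UNTRACED,	/* share mm, hide from ptrace */
		.exit_signal	= SIGCHLD,			/* was the CSIGNAL part of the flags word */
		.stack		= (unsigned long)fn,		/* kernel threads: entry point */
		.stack_size	= (unsigned long)arg,		/* kernel threads: argument */
	};

	return kernel_clone(&args);
}

Unused fields are simply left zeroed by the designated initializer, which is what lets new fields (set_tid, cgroup, io_thread, ...) be added without touching existing callers.
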
@@ -33,32 +56,23 @@
 extern void init_idle(struct task_struct *idle, int cpu);
 
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
 
+extern void mm_cache_init(void);
 extern void proc_caches_init(void);
 
 extern void fork_init(void);
 
 extern void release_task(struct task_struct * p);
 
-#ifdef CONFIG_HAVE_COPY_THREAD_TLS
-extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
-		struct task_struct *, unsigned long);
-#else
 extern int copy_thread(unsigned long, unsigned long, unsigned long,
-		struct task_struct *);
+		struct task_struct *, unsigned long);
 
-/* Architectures that haven't opted into copy_thread_tls get the tls argument
- * via pt_regs, so ignore the tls argument passed via C. */
-static inline int copy_thread_tls(
-	unsigned long clone_flags, unsigned long sp, unsigned long arg,
-	struct task_struct *p, unsigned long tls)
-{
-	return copy_thread(clone_flags, sp, arg, p);
-}
-#endif
 extern void flush_thread(void);
 
 #ifdef CONFIG_HAVE_EXIT_THREAD
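
With the CONFIG_HAVE_COPY_THREAD_TLS split retired, the five-argument form is the only one left: tls arrives as a plain C argument instead of being fished out of pt_regs. Each architecture now provides something of this shape (illustrative stub only; a real implementation sets up the child's thread state):

/* Illustrative shape of a per-architecture copy_thread(). */
int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	/*
	 * Initialize p->thread and the child's register frame here;
	 * tls is now passed explicitly rather than read from pt_regs.
	 */
	return 0;
}
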
@@ -71,13 +85,14 @@
 extern void do_group_exit(int);
 
 extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
 
-extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
-extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
+extern pid_t kernel_clone(struct kernel_clone_args *kargs);
+struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
 struct task_struct *fork_idle(int);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+int kernel_wait(pid_t pid, int *stat);
 
 extern void free_task(struct task_struct *tsk);
 
@@ -88,17 +103,53 @@
 #define sched_exec() {}
 #endif
 
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+static inline struct task_struct *get_task_struct(struct task_struct *t)
+{
+	refcount_inc(&t->usage);
+	return t;
+}
 
 extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
 
 static inline void put_task_struct(struct task_struct *t)
 {
-	if (atomic_dec_and_test(&t->usage))
+	if (!refcount_dec_and_test(&t->usage))
+		return;
+
+	/*
+	 * under PREEMPT_RT, we can't call put_task_struct
+	 * in atomic context because it will indirectly
+	 * acquire sleeping locks.
+	 *
+	 * call_rcu() will schedule delayed_put_task_struct_rcu()
+	 * to be called in process context.
+	 *
+	 * __put_task_struct() is called when
+	 * refcount_dec_and_test(&t->usage) succeeds.
+	 *
+	 * This means that it can't "conflict" with
+	 * put_task_struct_rcu_user() which abuses ->rcu the same
+	 * way; rcu_users has a reference so task->usage can't be
+	 * zero after rcu_users 1 -> 0 transition.
+	 *
+	 * delayed_free_task() also uses ->rcu, but it is only called
+	 * when it fails to fork a process. Therefore, there is no
+	 * way it can conflict with put_task_struct().
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !preemptible())
+		call_rcu(&t->rcu, __put_task_struct_rcu_cb);
+	else
 		__put_task_struct(t);
 }
 
-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+static inline void put_task_struct_many(struct task_struct *t, int nr)
+{
+	if (refcount_sub_and_test(nr, &t->usage))
+		__put_task_struct(t);
+}
+
+void put_task_struct_rcu_user(struct task_struct *task);
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 extern int arch_task_struct_size __read_mostly;
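
Two practical consequences of the reworked refcount helpers: get_task_struct() now returns its argument, so taking a reference can be chained into an assignment, and put_task_struct() becomes safe from non-preemptible context on PREEMPT_RT because the final free is pushed through call_rcu(). A minimal sketch using only what this header declares (the caller and its comments are assumptions for illustration):

/* Sketch: hold a task across a blocking section, then release it. */
static void pin_and_release(struct task_struct *tsk)
{
	struct task_struct *t = get_task_struct(tsk);	/* refcount_inc() and returns tsk */

	/* ... safe to use t here; the held reference keeps it alive ... */

	/*
	 * Even if this ran in atomic context on PREEMPT_RT,
	 * put_task_struct() would defer __put_task_struct() via
	 * call_rcu(&t->rcu, __put_task_struct_rcu_cb) rather than
	 * acquire sleeping locks here.
	 */
	put_task_struct(t);
}
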