2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/kernel/exit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/kernel/exit.c
  *
@@ -62,11 +63,59 @@
 #include <linux/random.h>
 #include <linux/rcuwait.h>
 #include <linux/compat.h>
+#include <linux/io_uring.h>
+#include <linux/sysfs.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
-#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <trace/hooks/mm.h>
+
+/*
+ * The default value should be high enough to not crash a system that randomly
+ * crashes its kernel from time to time, but low enough to at least not permit
+ * overflowing 32-bit refcounts or the ldsem writer count.
+ */
+static unsigned int oops_limit = 10000;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_exit_table[] = {
+	{
+		.procname	= "oops_limit",
+		.data		= &oops_limit,
+		.maxlen		= sizeof(oops_limit),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec,
+	},
+	{ }
+};
+
+static __init int kernel_exit_sysctls_init(void)
+{
+	register_sysctl_init("kernel", kern_exit_table);
+	return 0;
+}
+late_initcall(kernel_exit_sysctls_init);
+#endif
+
+static atomic_t oops_count = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SYSFS
+static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
+			       char *page)
+{
+	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
+}
+
+static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
+
+static __init int kernel_exit_sysfs_init(void)
+{
+	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
+	return 0;
+}
+late_initcall(kernel_exit_sysfs_init);
+#endif
 
 static void __unhash_process(struct task_struct *p, bool group_dead)
 {
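
The kernel.oops_limit sysctl and the /sys/kernel/oops_count attribute added above are both plain text files, so the new knobs can be checked from userspace with ordinary reads. A minimal sketch (paths as created by this hunk; error handling kept short):

#include <stdio.h>

int main(void)
{
	unsigned int limit = 0;
	int count = 0;
	FILE *f;

	f = fopen("/proc/sys/kernel/oops_limit", "r");
	if (f) {
		fscanf(f, "%u", &limit);
		fclose(f);
	}
	f = fopen("/sys/kernel/oops_count", "r");
	if (f) {
		fscanf(f, "%d", &count);
		fclose(f);
	}
	printf("oopsed %d of %u allowed times\n", count, limit);
	return 0;
}

Setting kernel.oops_limit to 0 disables the panic entirely, per the "&& limit" check in make_task_dead() later in this patch.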
@@ -93,7 +142,7 @@
 	struct signal_struct *sig = tsk->signal;
 	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
-	struct tty_struct *uninitialized_var(tty);
+	struct tty_struct *tty;
 	u64 utime, stime;
 
 	sighand = rcu_dereference_check(tsk->sighand,
@@ -102,17 +151,8 @@
 
 #ifdef CONFIG_POSIX_TIMERS
 	posix_cpu_timers_exit(tsk);
-	if (group_dead) {
+	if (group_dead)
 		posix_cpu_timers_exit_group(tsk);
-	} else {
-		/*
-		 * This can only happen if the caller is de_thread().
-		 * FIXME: this is the temporary hack, we should teach
-		 * posix-cpu-timers to handle this case correctly.
-		 */
-		if (unlikely(has_group_leader_pid(tsk)))
-			posix_cpu_timers_exit_group(tsk);
-	}
 #endif
 
 	if (group_dead) {
@@ -181,10 +221,16 @@
 	put_task_struct(tsk);
 }
 
+void put_task_struct_rcu_user(struct task_struct *task)
+{
+	if (refcount_dec_and_test(&task->rcu_users))
+		call_rcu(&task->rcu, delayed_put_task_struct);
+}
 
 void release_task(struct task_struct *p)
 {
 	struct task_struct *leader;
+	struct pid *thread_pid;
 	int zap_leader;
 repeat:
 	/* don't need to get the RCU readlock here - the process is dead and
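
put_task_struct_rcu_user() introduces a second refcount, task->rcu_users, whose final drop frees the task_struct only after an RCU grace period. That guarantee is what lets the task_rcu_dereference() retry dance be deleted wholesale in the hunks below: a task pointer reached under rcu_read_lock() can simply be used. A hedged kernel-side sketch (the slot name and helper are illustrative, not part of this patch):

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Sketch: read a field through an RCU-published task pointer. With
 * rcu_users, the task_struct cannot be freed before rcu_read_unlock(),
 * so no revalidation against ->sighand is needed any more. */
static pid_t peek_task_pid(struct task_struct __rcu **slot)
{
	struct task_struct *task;
	pid_t pid = -1;

	rcu_read_lock();
	task = rcu_dereference(*slot);
	if (task)
		pid = task->pid;
	rcu_read_unlock();

	return pid;
}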
@@ -193,11 +239,11 @@
 	atomic_dec(&__task_cred(p)->user->processes);
 	rcu_read_unlock();
 
-	proc_flush_task(p);
 	cgroup_release(p);
 
 	write_lock_irq(&tasklist_lock);
 	ptrace_release_task(p);
+	thread_pid = get_pid(p->thread_pid);
 	__exit_signal(p);
 
 	/*
@@ -220,79 +266,20 @@
 	}
 
 	write_unlock_irq(&tasklist_lock);
+	seccomp_filter_release(p);
+	proc_flush_pid(thread_pid);
+	put_pid(thread_pid);
 	release_thread(p);
-	call_rcu(&p->rcu, delayed_put_task_struct);
+	put_task_struct_rcu_user(p);
 
 	p = leader;
 	if (unlikely(zap_leader))
 		goto repeat;
 }
 
-/*
- * Note that if this function returns a valid task_struct pointer (!NULL)
- * task->usage must remain >0 for the duration of the RCU critical section.
- */
-struct task_struct *task_rcu_dereference(struct task_struct **ptask)
+int rcuwait_wake_up(struct rcuwait *w)
 {
-	struct sighand_struct *sighand;
-	struct task_struct *task;
-
-	/*
-	 * We need to verify that release_task() was not called and thus
-	 * delayed_put_task_struct() can't run and drop the last reference
-	 * before rcu_read_unlock(). We check task->sighand != NULL,
-	 * but we can read the already freed and reused memory.
-	 */
-retry:
-	task = rcu_dereference(*ptask);
-	if (!task)
-		return NULL;
-
-	probe_kernel_address(&task->sighand, sighand);
-
-	/*
-	 * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
-	 * was already freed we can not miss the preceding update of this
-	 * pointer.
-	 */
-	smp_rmb();
-	if (unlikely(task != READ_ONCE(*ptask)))
-		goto retry;
-
-	/*
-	 * We've re-checked that "task == *ptask", now we have two different
-	 * cases:
-	 *
-	 * 1. This is actually the same task/task_struct. In this case
-	 *    sighand != NULL tells us it is still alive.
-	 *
-	 * 2. This is another task which got the same memory for task_struct.
-	 *    We can't know this of course, and we can not trust
-	 *    sighand != NULL.
-	 *
-	 *    In this case we actually return a random value, but this is
-	 *    correct.
-	 *
-	 *    If we return NULL - we can pretend that we actually noticed that
-	 *    *ptask was updated when the previous task has exited. Or pretend
-	 *    that probe_slab_address(&sighand) reads NULL.
-	 *
-	 *    If we return the new task (because sighand is not NULL for any
-	 *    reason) - this is fine too. This (new) task can't go away before
-	 *    another gp pass.
-	 *
-	 *    And note: We could even eliminate the false positive if re-read
-	 *    task->sighand once again to avoid the falsely NULL. But this case
-	 *    is very unlikely so we don't care.
-	 */
-	if (!sighand)
-		return NULL;
-
-	return task;
-}
-
-void rcuwait_wake_up(struct rcuwait *w)
-{
+	int ret = 0;
 	struct task_struct *task;
 
 	rcu_read_lock();
@@ -300,7 +287,7 @@
 	/*
 	 * Order condition vs @task, such that everything prior to the load
 	 * of @task is visible. This is the condition as to why the user called
-	 * rcuwait_trywake() in the first place. Pairs with set_current_state()
+	 * rcuwait_wake() in the first place. Pairs with set_current_state()
 	 * barrier (A) in rcuwait_wait_event().
 	 *
 	 * WAIT                WAKE
@@ -310,15 +297,14 @@
 	 */
 	smp_mb(); /* (B) */
 
-	/*
-	 * Avoid using task_rcu_dereference() magic as long as we are careful,
-	 * see comment in rcuwait_wait_event() regarding ->exit_state.
-	 */
 	task = rcu_dereference(w->task);
 	if (task)
-		wake_up_process(task);
+		ret = wake_up_process(task);
 	rcu_read_unlock();
+
+	return ret;
 }
+EXPORT_SYMBOL_GPL(rcuwait_wake_up);
 
 /*
  * Determine if a process group is "orphaned", according to the POSIX
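
rcuwait_wake_up() now returns wake_up_process()'s result, so callers can tell whether a sleeper was actually woken, and the export makes it usable from modules. A hedged sketch of the waiter/waker pairing (names invented; assumes the three-argument rcuwait_wait_event() of the same kernel generation, and rcuwait_init() having run at setup):

#include <linux/rcuwait.h>
#include <linux/sched.h>

static struct rcuwait example_wait;	/* rcuwait_init(&example_wait) at setup */
static bool example_done;

/* Waiter side: sleep until the flag is set. */
static void example_wait_for_done(void)
{
	rcuwait_wait_event(&example_wait, READ_ONCE(example_done),
			   TASK_UNINTERRUPTIBLE);
}

/* Waker side: the new int return reports whether a sleeper was woken. */
static bool example_signal_done(void)
{
	WRITE_ONCE(example_done, true);
	return rcuwait_wake_up(&example_wait) != 0;
}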
@@ -422,7 +408,7 @@
 	 * freed task structure.
 	 */
 	if (atomic_read(&mm->mm_users) <= 1) {
-		mm->owner = NULL;
+		WRITE_ONCE(mm->owner, NULL);
 		return;
 	}
 
@@ -462,7 +448,7 @@
 	 * most likely racing with swapoff (try_to_unuse()) or /proc or
 	 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
 	 */
-	mm->owner = NULL;
+	WRITE_ONCE(mm->owner, NULL);
 	return;
 
 assign_new_owner:
@@ -483,7 +469,7 @@
 		put_task_struct(c);
 		goto retry;
 	}
-	mm->owner = c;
+	WRITE_ONCE(mm->owner, c);
 	task_unlock(c);
 	put_task_struct(c);
 }
@@ -504,17 +490,17 @@
 	sync_mm_rss(mm);
 	/*
 	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_sem around checking core_state
+	 * We must hold mmap_lock around checking core_state
 	 * and clearing tsk->mm.  The core-inducing thread
 	 * will increment ->nr_threads for each thread in the
 	 * group with ->mm != NULL.
 	 */
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	core_state = mm->core_state;
 	if (core_state) {
 		struct core_thread self;
 
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 
 		self.task = current;
 		if (self.task->flags & PF_SIGNALED)
@@ -535,17 +521,18 @@
 			freezable_schedule();
 		}
 		__set_current_state(TASK_RUNNING);
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
 	task_lock(current);
 	current->mm = NULL;
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	enter_lazy_tlb(mm, current);
 	task_unlock(current);
 	mm_update_next_owner(mm);
+	trace_android_vh_exit_mm(mm);
 	mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
 		exit_oom_victim();
@@ -683,8 +670,8 @@
 	reaper = find_new_reaper(father, reaper);
 	list_for_each_entry(p, &father->children, sibling) {
 		for_each_thread(p, t) {
-			t->real_parent = reaper;
-			BUG_ON((!t->ptrace) != (t->parent == father));
+			RCU_INIT_POINTER(t->real_parent, reaper);
+			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
 			if (likely(!t->ptrace))
 				t->parent = t->real_parent;
 			if (t->pdeath_signal)
@@ -732,9 +719,10 @@
 		autoreap = true;
 	}
 
-	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
-	if (tsk->exit_state == EXIT_DEAD)
+	if (autoreap) {
+		tsk->exit_state = EXIT_DEAD;
 		list_add(&tsk->ptrace_entry, &dead);
+	}
 
 	/* mt-exec, de_thread() is waiting for group leader */
 	if (unlikely(tsk->signal->notify_count < 0))
@@ -797,7 +785,7 @@
 	 * mm_release()->clear_child_tid() from writing to a user-controlled
 	 * kernel address.
 	 */
-	set_fs(USER_DS);
+	force_uaccess_begin();
 
 	if (unlikely(in_atomic())) {
 		pr_info("note: %s[%d] exited with preempt_count %d\n",
@@ -824,6 +812,7 @@
 		schedule();
 	}
 
+	io_uring_files_cancel();
 	exit_signals(tsk);	/* sets PF_EXITING */
 
 	/* sync mm's RSS info before statistics gathering */
@@ -842,7 +831,7 @@
 
 #ifdef CONFIG_POSIX_TIMERS
 	hrtimer_cancel(&tsk->signal->real_timer);
-	exit_itimers(tsk->signal);
+	exit_itimers(tsk);
 #endif
 	if (tsk->mm)
 		setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
@@ -922,6 +911,31 @@
 	do_task_dead();
 }
 EXPORT_SYMBOL_GPL(do_exit);
+
+void __noreturn make_task_dead(int signr)
+{
+	/*
+	 * Take the task off the cpu after something catastrophic has
+	 * happened.
+	 */
+	unsigned int limit;
+
+	/*
+	 * Every time the system oopses, if the oops happens while a reference
+	 * to an object was held, the reference leaks.
+	 * If the oops doesn't also leak memory, repeated oopsing can cause
+	 * reference counters to wrap around (if they're not using refcount_t).
+	 * This means that repeated oopsing can make unexploitable-looking bugs
+	 * exploitable through repeated oopsing.
+	 * To make sure this can't happen, place an upper bound on how often the
+	 * kernel may oops without panic().
+	 */
+	limit = READ_ONCE(oops_limit);
+	if (atomic_inc_return(&oops_count) >= limit && limit)
+		panic("Oopsed too often (kernel.oops_limit is %d)", limit);
+
+	do_exit(signr);
+}
 
 void complete_and_exit(struct completion *comp, long code)
 {
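
make_task_dead() becomes the termination point for paths where the kernel, not the task, decided to die (oops, BUG()); arch trap handlers that used to call do_exit() directly are the intended callers. A hedged sketch of such a call site (arch specifics elided; real die() signatures vary per architecture):

/* Sketch of an arch oops path: after reporting the oops, take the
 * faulting task off the CPU. make_task_dead() also bumps oops_count
 * and enforces kernel.oops_limit before falling through to do_exit(). */
void __noreturn die(const char *str, struct pt_regs *regs, int signr)
{
	/* ... console/oops reporting elided ... */
	make_task_dead(signr);
}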
@@ -1482,7 +1496,7 @@
 void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
 {
 	__wake_up_sync_key(&parent->signal->wait_chldexit,
-			   TASK_INTERRUPTIBLE, 1, p);
+			   TASK_INTERRUPTIBLE, p);
 }
 
 static long do_wait(struct wait_opts *wo)
@@ -1504,7 +1518,7 @@
 	 */
 	wo->notask_error = -ECHILD;
 	if ((wo->wo_type < PIDTYPE_MAX) &&
-	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
+	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
 		goto notask;
 
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -1546,6 +1560,7 @@
 	struct pid *pid = NULL;
 	enum pid_type type;
 	long ret;
+	unsigned int f_flags = 0;
 
 	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
 			__WNOTHREAD|__WCLONE|__WALL))
@@ -1561,25 +1576,44 @@
 		type = PIDTYPE_PID;
 		if (upid <= 0)
 			return -EINVAL;
+
+		pid = find_get_pid(upid);
 		break;
 	case P_PGID:
 		type = PIDTYPE_PGID;
-		if (upid <= 0)
+		if (upid < 0)
 			return -EINVAL;
+
+		if (upid)
+			pid = find_get_pid(upid);
+		else
+			pid = get_task_pid(current, PIDTYPE_PGID);
+		break;
+	case P_PIDFD:
+		type = PIDTYPE_PID;
+		if (upid < 0)
+			return -EINVAL;
+
+		pid = pidfd_get_pid(upid, &f_flags);
+		if (IS_ERR(pid))
+			return PTR_ERR(pid);
+
 		break;
 	default:
 		return -EINVAL;
 	}
-
-	if (type < PIDTYPE_MAX)
-		pid = find_get_pid(upid);
 
 	wo.wo_type	= type;
 	wo.wo_pid	= pid;
 	wo.wo_flags	= options;
 	wo.wo_info	= infop;
 	wo.wo_rusage	= ru;
+	if (f_flags & O_NONBLOCK)
+		wo.wo_flags |= WNOHANG;
+
 	ret = do_wait(&wo);
+	if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
+		ret = -EAGAIN;
 
 	put_pid(pid);
 	return ret;
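
With the P_PIDFD case wired up, waitid(2) can reap a child identified by a pidfd. A hedged userspace sketch (P_PIDFD is defined locally in case the libc headers predate it; pidfd_open() is called via syscall(2)):

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

#ifndef P_PIDFD
#define P_PIDFD 3
#endif

int main(void)
{
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);

	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	siginfo_t info = { 0 };
	if (waitid(P_PIDFD, pidfd, &info, WEXITED) < 0) {
		perror("waitid");
		return 1;
	}
	printf("child %d exited with status %d\n", info.si_pid, info.si_status);
	close(pidfd);
	return 0;
}

Note also the O_NONBLOCK handling above: a pidfd opened non-blocking makes the wait behave as if WNOHANG were set, with -EAGAIN returned when there is nothing to reap yet.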
@@ -1602,7 +1636,7 @@
 	if (!infop)
 		return err;
 
-	if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
+	if (!user_write_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;
 
 	unsafe_put_user(signo, &infop->si_signo, Efault);
@@ -1611,10 +1645,10 @@
 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
 	unsafe_put_user(info.status, &infop->si_status, Efault);
-	user_access_end();
+	user_write_access_end();
 	return err;
 Efault:
-	user_access_end();
+	user_write_access_end();
 	return -EFAULT;
 }
 
@@ -1658,6 +1692,22 @@
 	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
 		ret = -EFAULT;
 
+	return ret;
+}
+
+int kernel_wait(pid_t pid, int *stat)
+{
+	struct wait_opts wo = {
+		.wo_type	= PIDTYPE_PID,
+		.wo_pid		= find_get_pid(pid),
+		.wo_flags	= WEXITED,
+	};
+	int ret;
+
+	ret = do_wait(&wo);
+	if (ret > 0 && wo.wo_stat)
+		*stat = wo.wo_stat;
+	put_pid(wo.wo_pid);
 	return ret;
 }
 
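
kernel_wait() gives in-kernel callers a blocking way to reap a child of current by pid, without the user-pointer plumbing of the syscall paths above. A hedged sketch of a caller (the helper name is invented; kernel_wait() returns the reaped pid or a negative errno, and fills *stat with the usual wait(2) status encoding; the include is an assumption about where the prototype lands):

#include <linux/sched/task.h>

static int example_reap_child(pid_t pid)
{
	int status = 0;
	int ret = kernel_wait(pid, &status);

	if (ret < 0)
		return ret;	/* e.g. -ECHILD: no such child to reap */
	return status;
}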
@@ -1729,7 +1779,7 @@
 	if (!infop)
 		return err;
 
-	if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
+	if (!user_write_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;
 
 	unsafe_put_user(signo, &infop->si_signo, Efault);
@@ -1738,14 +1788,38 @@
 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
 	unsafe_put_user(info.status, &infop->si_status, Efault);
-	user_access_end();
+	user_write_access_end();
 	return err;
 Efault:
-	user_access_end();
+	user_write_access_end();
 	return -EFAULT;
 }
 #endif
 
+/**
+ * thread_group_exited - check that a thread group has exited
+ * @pid: tgid of thread group to be checked.
+ *
+ * Test if the thread group represented by tgid has exited (all
+ * threads are zombies, dead or completely gone).
+ *
+ * Return: true if the thread group has exited. false otherwise.
+ */
+bool thread_group_exited(struct pid *pid)
+{
+	struct task_struct *task;
+	bool exited;
+
+	rcu_read_lock();
+	task = pid_task(pid, PIDTYPE_PID);
+	exited = !task ||
+		(READ_ONCE(task->exit_state) && thread_group_empty(task));
+	rcu_read_unlock();
+
+	return exited;
+}
+EXPORT_SYMBOL(thread_group_exited);
+
 __weak void abort(void)
 {
 	BUG();
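
thread_group_exited() is exported so code outside the core exit path can check a tgid for full exit; pidfd_poll() performs the same test. A hedged sketch of a caller that politely polls a held struct pid until the whole group is gone (loop and names are illustrative only):

#include <linux/jiffies.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

static void example_wait_group_exit(struct pid *tgid)
{
	while (!thread_group_exited(tgid))
		schedule_timeout_interruptible(HZ / 10);
}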