.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
1 | 2 | /* |
2 | 3 | * linux/kernel/exit.c |
3 | 4 | * |
.. | .. |
62 | 63 | #include <linux/random.h> |
63 | 64 | #include <linux/rcuwait.h> |
64 | 65 | #include <linux/compat.h> |
| 66 | +#include <linux/io_uring.h> |
| 67 | +#include <linux/sysfs.h> |
65 | 68 | |
66 | 69 | #include <linux/uaccess.h> |
67 | 70 | #include <asm/unistd.h> |
68 | | -#include <asm/pgtable.h> |
69 | 71 | #include <asm/mmu_context.h> |
| 72 | +#include <trace/hooks/mm.h> |
| 73 | + |
| 74 | +/* |
| 75 | + * The default value should be high enough to not crash a system that randomly |
| 76 | + * crashes its kernel from time to time, but low enough to at least not permit |
| 77 | + * overflowing 32-bit refcounts or the ldsem writer count. |
| 78 | + */ |
| 79 | +static unsigned int oops_limit = 10000; |
| 80 | + |
| 81 | +#ifdef CONFIG_SYSCTL |
| 82 | +static struct ctl_table kern_exit_table[] = { |
| 83 | + { |
| 84 | + .procname = "oops_limit", |
| 85 | + .data = &oops_limit, |
| 86 | + .maxlen = sizeof(oops_limit), |
| 87 | + .mode = 0644, |
| 88 | + .proc_handler = proc_douintvec, |
| 89 | + }, |
| 90 | + { } |
| 91 | +}; |
| 92 | + |
| 93 | +static __init int kernel_exit_sysctls_init(void) |
| 94 | +{ |
| 95 | + register_sysctl_init("kernel", kern_exit_table); |
| 96 | + return 0; |
| 97 | +} |
| 98 | +late_initcall(kernel_exit_sysctls_init); |
| 99 | +#endif |
| 100 | + |
| 101 | +static atomic_t oops_count = ATOMIC_INIT(0); |
| 102 | + |
| 103 | +#ifdef CONFIG_SYSFS |
| 104 | +static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr, |
| 105 | + char *page) |
| 106 | +{ |
| 107 | + return sysfs_emit(page, "%d\n", atomic_read(&oops_count)); |
| 108 | +} |
| 109 | + |
| 110 | +static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count); |
| 111 | + |
| 112 | +static __init int kernel_exit_sysfs_init(void) |
| 113 | +{ |
| 114 | + sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL); |
| 115 | + return 0; |
| 116 | +} |
| 117 | +late_initcall(kernel_exit_sysfs_init); |
| 118 | +#endif |
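The two initcalls above publish the new oops accounting to userspace: register_sysctl_init("kernel", ...) makes the writable limit visible as /proc/sys/kernel/oops_limit, and the kobj_attribute added to kernel_kobj shows up as the read-only /sys/kernel/oops_count. A minimal userspace sketch that reads both (paths as implied by the code above; not part of the patch):

```c
/*
 * Userspace sketch: read the current oops count and limit.
 * Assumes the paths created by the code above:
 *   /sys/kernel/oops_count        (read-only counter)
 *   /proc/sys/kernel/oops_limit   (read-write; 0 disables the panic)
 */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f;

	f = fopen("/sys/kernel/oops_count", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("oops_count: %s", buf);
	if (f)
		fclose(f);

	f = fopen("/proc/sys/kernel/oops_limit", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("oops_limit: %s", buf);
	if (f)
		fclose(f);
	return 0;
}
```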
70 | 119 | |
71 | 120 | static void __unhash_process(struct task_struct *p, bool group_dead) |
72 | 121 | { |
.. | .. |
93 | 142 | struct signal_struct *sig = tsk->signal; |
94 | 143 | bool group_dead = thread_group_leader(tsk); |
95 | 144 | struct sighand_struct *sighand; |
96 | | - struct tty_struct *uninitialized_var(tty); |
| 145 | + struct tty_struct *tty; |
97 | 146 | u64 utime, stime; |
98 | 147 | |
99 | 148 | sighand = rcu_dereference_check(tsk->sighand, |
.. | .. |
102 | 151 | |
103 | 152 | #ifdef CONFIG_POSIX_TIMERS |
104 | 153 | posix_cpu_timers_exit(tsk); |
105 | | - if (group_dead) { |
| 154 | + if (group_dead) |
106 | 155 | posix_cpu_timers_exit_group(tsk); |
107 | | - } else { |
108 | | - /* |
109 | | - * This can only happen if the caller is de_thread(). |
110 | | - * FIXME: this is the temporary hack, we should teach |
111 | | - * posix-cpu-timers to handle this case correctly. |
112 | | - */ |
113 | | - if (unlikely(has_group_leader_pid(tsk))) |
114 | | - posix_cpu_timers_exit_group(tsk); |
115 | | - } |
116 | 156 | #endif |
117 | 157 | |
118 | 158 | if (group_dead) { |
.. | .. |
181 | 221 | put_task_struct(tsk); |
182 | 222 | } |
183 | 223 | |
| 224 | +void put_task_struct_rcu_user(struct task_struct *task) |
| 225 | +{ |
| 226 | + if (refcount_dec_and_test(&task->rcu_users)) |
| 227 | + call_rcu(&task->rcu, delayed_put_task_struct); |
| 228 | +} |
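put_task_struct_rcu_user() drops one of the task's rcu_users references; when the last one goes, the final put is deferred by an RCU grace period via delayed_put_task_struct(). The practical consequence is that a task_struct found under rcu_read_lock() stays valid long enough to take a real reference, which is what lets task_rcu_dereference() be removed below. A minimal sketch of such a reader, assuming a task pointer that is published and retired through this helper (the slot name is hypothetical):

```c
/*
 * Sketch: safely pin a task reached through an RCU-visible pointer.
 * Assumption: whoever clears the pointer drops its reference via
 * put_task_struct_rcu_user(), as release_task() now does, so the
 * task_struct cannot be freed before the grace period ends.
 */
static struct task_struct *pin_task(struct task_struct __rcu **slot)
{
	struct task_struct *t;

	rcu_read_lock();
	t = rcu_dereference(*slot);
	if (t)
		get_task_struct(t);	/* valid: the free is RCU-deferred */
	rcu_read_unlock();

	return t;			/* caller does put_task_struct() when done */
}
```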
184 | 229 | |
185 | 230 | void release_task(struct task_struct *p) |
186 | 231 | { |
187 | 232 | struct task_struct *leader; |
| 233 | + struct pid *thread_pid; |
188 | 234 | int zap_leader; |
189 | 235 | repeat: |
190 | 236 | /* don't need to get the RCU readlock here - the process is dead and |
.. | .. |
193 | 239 | atomic_dec(&__task_cred(p)->user->processes); |
194 | 240 | rcu_read_unlock(); |
195 | 241 | |
196 | | - proc_flush_task(p); |
197 | 242 | cgroup_release(p); |
198 | 243 | |
199 | 244 | write_lock_irq(&tasklist_lock); |
200 | 245 | ptrace_release_task(p); |
| 246 | + thread_pid = get_pid(p->thread_pid); |
201 | 247 | __exit_signal(p); |
202 | 248 | |
203 | 249 | /* |
.. | .. |
220 | 266 | } |
221 | 267 | |
222 | 268 | write_unlock_irq(&tasklist_lock); |
| 269 | + seccomp_filter_release(p); |
| 270 | + proc_flush_pid(thread_pid); |
| 271 | + put_pid(thread_pid); |
223 | 272 | release_thread(p); |
224 | | - call_rcu(&p->rcu, delayed_put_task_struct); |
| 273 | + put_task_struct_rcu_user(p); |
225 | 274 | |
226 | 275 | p = leader; |
227 | 276 | if (unlikely(zap_leader)) |
228 | 277 | goto repeat; |
229 | 278 | } |
230 | 279 | |
231 | | -/* |
232 | | - * Note that if this function returns a valid task_struct pointer (!NULL) |
233 | | - * task->usage must remain >0 for the duration of the RCU critical section. |
234 | | - */ |
235 | | -struct task_struct *task_rcu_dereference(struct task_struct **ptask) |
| 280 | +int rcuwait_wake_up(struct rcuwait *w) |
236 | 281 | { |
237 | | - struct sighand_struct *sighand; |
238 | | - struct task_struct *task; |
239 | | - |
240 | | - /* |
241 | | - * We need to verify that release_task() was not called and thus |
242 | | - * delayed_put_task_struct() can't run and drop the last reference |
243 | | - * before rcu_read_unlock(). We check task->sighand != NULL, |
244 | | - * but we can read the already freed and reused memory. |
245 | | - */ |
246 | | -retry: |
247 | | - task = rcu_dereference(*ptask); |
248 | | - if (!task) |
249 | | - return NULL; |
250 | | - |
251 | | - probe_kernel_address(&task->sighand, sighand); |
252 | | - |
253 | | - /* |
254 | | - * Pairs with atomic_dec_and_test() in put_task_struct(). If this task |
255 | | - * was already freed we can not miss the preceding update of this |
256 | | - * pointer. |
257 | | - */ |
258 | | - smp_rmb(); |
259 | | - if (unlikely(task != READ_ONCE(*ptask))) |
260 | | - goto retry; |
261 | | - |
262 | | - /* |
263 | | - * We've re-checked that "task == *ptask", now we have two different |
264 | | - * cases: |
265 | | - * |
266 | | - * 1. This is actually the same task/task_struct. In this case |
267 | | - * sighand != NULL tells us it is still alive. |
268 | | - * |
269 | | - * 2. This is another task which got the same memory for task_struct. |
270 | | - * We can't know this of course, and we can not trust |
271 | | - * sighand != NULL. |
272 | | - * |
273 | | - * In this case we actually return a random value, but this is |
274 | | - * correct. |
275 | | - * |
276 | | - * If we return NULL - we can pretend that we actually noticed that |
277 | | - * *ptask was updated when the previous task has exited. Or pretend |
278 | | - * that probe_slab_address(&sighand) reads NULL. |
279 | | - * |
280 | | - * If we return the new task (because sighand is not NULL for any |
281 | | - * reason) - this is fine too. This (new) task can't go away before |
282 | | - * another gp pass. |
283 | | - * |
284 | | - * And note: We could even eliminate the false positive if re-read |
285 | | - * task->sighand once again to avoid the falsely NULL. But this case |
286 | | - * is very unlikely so we don't care. |
287 | | - */ |
288 | | - if (!sighand) |
289 | | - return NULL; |
290 | | - |
291 | | - return task; |
292 | | -} |
293 | | - |
294 | | -void rcuwait_wake_up(struct rcuwait *w) |
295 | | -{ |
| 282 | + int ret = 0; |
296 | 283 | struct task_struct *task; |
297 | 284 | |
298 | 285 | rcu_read_lock(); |
.. | .. |
300 | 287 | /* |
301 | 288 | * Order condition vs @task, such that everything prior to the load |
302 | 289 | * of @task is visible. This is the condition as to why the user called |
303 | | - * rcuwait_trywake() in the first place. Pairs with set_current_state() |
| 290 | + * rcuwait_wake() in the first place. Pairs with set_current_state() |
304 | 291 | * barrier (A) in rcuwait_wait_event(). |
305 | 292 | * |
306 | 293 | * WAIT WAKE |
.. | .. |
310 | 297 | */ |
311 | 298 | smp_mb(); /* (B) */ |
312 | 299 | |
313 | | - /* |
314 | | - * Avoid using task_rcu_dereference() magic as long as we are careful, |
315 | | - * see comment in rcuwait_wait_event() regarding ->exit_state. |
316 | | - */ |
317 | 300 | task = rcu_dereference(w->task); |
318 | 301 | if (task) |
319 | | - wake_up_process(task); |
| 302 | + ret = wake_up_process(task); |
320 | 303 | rcu_read_unlock(); |
| 304 | + |
| 305 | + return ret; |
321 | 306 | } |
| 307 | +EXPORT_SYMBOL_GPL(rcuwait_wake_up); |
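rcuwait_wake_up() now reports whether a task was actually woken and is exported for module use. The WAIT/WAKE pairing described in the comment plays out roughly as below; a minimal sketch, assuming the rcuwait_init()/rcuwait_wait_event(w, cond, state)/rcuwait_wake_up() forms provided by the <linux/rcuwait.h> already included in this file:

```c
/* Sketch of a single-waiter handshake built on rcuwait (API forms assumed
 * as noted above; rcuwait supports at most one waiter at a time). */
static struct rcuwait waiter;	/* rcuwait_init(&waiter) must run beforehand */
static bool done;

static void wait_side(void)
{
	/* Sleeps until done is observed true; the set_current_state()
	 * barrier (A) referenced in the comment lives inside this macro. */
	rcuwait_wait_event(&waiter, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
}

static void wake_side(void)
{
	WRITE_ONCE(done, true);		/* publish the condition first */
	if (rcuwait_wake_up(&waiter))	/* barrier (B), then the wakeup */
		pr_debug("a waiter was actually woken\n");
}
```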
322 | 308 | |
323 | 309 | /* |
324 | 310 | * Determine if a process group is "orphaned", according to the POSIX |
.. | .. |
422 | 408 | * freed task structure. |
423 | 409 | */ |
424 | 410 | if (atomic_read(&mm->mm_users) <= 1) { |
425 | | - mm->owner = NULL; |
| 411 | + WRITE_ONCE(mm->owner, NULL); |
426 | 412 | return; |
427 | 413 | } |
428 | 414 | |
.. | .. |
462 | 448 | * most likely racing with swapoff (try_to_unuse()) or /proc or |
463 | 449 | * ptrace or page migration (get_task_mm()). Mark owner as NULL. |
464 | 450 | */ |
465 | | - mm->owner = NULL; |
| 451 | + WRITE_ONCE(mm->owner, NULL); |
466 | 452 | return; |
467 | 453 | |
468 | 454 | assign_new_owner: |
.. | .. |
483 | 469 | put_task_struct(c); |
484 | 470 | goto retry; |
485 | 471 | } |
486 | | - mm->owner = c; |
| 472 | + WRITE_ONCE(mm->owner, c); |
487 | 473 | task_unlock(c); |
488 | 474 | put_task_struct(c); |
489 | 475 | } |
.. | .. |
504 | 490 | sync_mm_rss(mm); |
505 | 491 | /* |
506 | 492 | * Serialize with any possible pending coredump. |
507 | | - * We must hold mmap_sem around checking core_state |
| 493 | + * We must hold mmap_lock around checking core_state |
508 | 494 | * and clearing tsk->mm. The core-inducing thread |
509 | 495 | * will increment ->nr_threads for each thread in the |
510 | 496 | * group with ->mm != NULL. |
511 | 497 | */ |
512 | | - down_read(&mm->mmap_sem); |
| 498 | + mmap_read_lock(mm); |
513 | 499 | core_state = mm->core_state; |
514 | 500 | if (core_state) { |
515 | 501 | struct core_thread self; |
516 | 502 | |
517 | | - up_read(&mm->mmap_sem); |
| 503 | + mmap_read_unlock(mm); |
518 | 504 | |
519 | 505 | self.task = current; |
520 | 506 | if (self.task->flags & PF_SIGNALED) |
.. | .. |
535 | 521 | freezable_schedule(); |
536 | 522 | } |
537 | 523 | __set_current_state(TASK_RUNNING); |
538 | | - down_read(&mm->mmap_sem); |
| 524 | + mmap_read_lock(mm); |
539 | 525 | } |
540 | 526 | mmgrab(mm); |
541 | 527 | BUG_ON(mm != current->active_mm); |
542 | 528 | /* more a memory barrier than a real lock */ |
543 | 529 | task_lock(current); |
544 | 530 | current->mm = NULL; |
545 | | - up_read(&mm->mmap_sem); |
| 531 | + mmap_read_unlock(mm); |
546 | 532 | enter_lazy_tlb(mm, current); |
547 | 533 | task_unlock(current); |
548 | 534 | mm_update_next_owner(mm); |
| 535 | + trace_android_vh_exit_mm(mm); |
549 | 536 | mmput(mm); |
550 | 537 | if (test_thread_flag(TIF_MEMDIE)) |
551 | 538 | exit_oom_victim(); |
.. | .. |
683 | 670 | reaper = find_new_reaper(father, reaper); |
684 | 671 | list_for_each_entry(p, &father->children, sibling) { |
685 | 672 | for_each_thread(p, t) { |
686 | | - t->real_parent = reaper; |
687 | | - BUG_ON((!t->ptrace) != (t->parent == father)); |
| 673 | + RCU_INIT_POINTER(t->real_parent, reaper); |
| 674 | + BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father)); |
688 | 675 | if (likely(!t->ptrace)) |
689 | 676 | t->parent = t->real_parent; |
690 | 677 | if (t->pdeath_signal) |
.. | .. |
732 | 719 | autoreap = true; |
733 | 720 | } |
734 | 721 | |
735 | | - tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE; |
736 | | - if (tsk->exit_state == EXIT_DEAD) |
| 722 | + if (autoreap) { |
| 723 | + tsk->exit_state = EXIT_DEAD; |
737 | 724 | list_add(&tsk->ptrace_entry, &dead); |
| 725 | + } |
738 | 726 | |
739 | 727 | /* mt-exec, de_thread() is waiting for group leader */ |
740 | 728 | if (unlikely(tsk->signal->notify_count < 0)) |
.. | .. |
797 | 785 | * mm_release()->clear_child_tid() from writing to a user-controlled |
798 | 786 | * kernel address. |
799 | 787 | */ |
800 | | - set_fs(USER_DS); |
| 788 | + force_uaccess_begin(); |
801 | 789 | |
802 | 790 | if (unlikely(in_atomic())) { |
803 | 791 | pr_info("note: %s[%d] exited with preempt_count %d\n", |
.. | .. |
824 | 812 | schedule(); |
825 | 813 | } |
826 | 814 | |
| 815 | + io_uring_files_cancel(); |
827 | 816 | exit_signals(tsk); /* sets PF_EXITING */ |
828 | 817 | |
829 | 818 | /* sync mm's RSS info before statistics gathering */ |
.. | .. |
842 | 831 | |
843 | 832 | #ifdef CONFIG_POSIX_TIMERS |
844 | 833 | hrtimer_cancel(&tsk->signal->real_timer); |
845 | | - exit_itimers(tsk->signal); |
| 834 | + exit_itimers(tsk); |
846 | 835 | #endif |
847 | 836 | if (tsk->mm) |
848 | 837 | setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); |
.. | .. |
922 | 911 | do_task_dead(); |
923 | 912 | } |
924 | 913 | EXPORT_SYMBOL_GPL(do_exit); |
| 914 | + |
| 915 | +void __noreturn make_task_dead(int signr) |
| 916 | +{ |
| 917 | + /* |
| 918 | + * Take the task off the cpu after something catastrophic has |
| 919 | + * happened. |
| 920 | + */ |
| 921 | + unsigned int limit; |
| 922 | + |
| 923 | + /* |
| 924 | + * Every time the system oopses, if the oops happens while a reference |
| 925 | + * to an object was held, the reference leaks. |
| 926 | + * If the oops doesn't also leak memory, repeated oopsing can cause |
| 927 | + * reference counters to wrap around (if they're not using refcount_t). |
| 928 | + * This means that repeated oopsing can make unexploitable-looking bugs |
| 929 | + * exploitable through repeated oopsing. |
| 930 | + * To make sure this can't happen, place an upper bound on how often the |
| 931 | + * kernel may oops without panic(). |
| 932 | + */ |
| 933 | + limit = READ_ONCE(oops_limit); |
| 934 | + if (atomic_inc_return(&oops_count) >= limit && limit) |
| 935 | + panic("Oopsed too often (kernel.oops_limit is %d)", limit); |
| 936 | + |
| 937 | + do_exit(signr); |
| 938 | +} |
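make_task_dead() is the termination path intended for fatal exception handlers: architectures call it instead of do_exit() after an oops or BUG(), so the counter above is always bumped. Because the check reads ">= limit && limit", setting kernel.oops_limit to 0 disables the panic entirely; with the default of 10000, the 10000th oops triggers panic(). A hedged sketch of a hypothetical arch die() tail feeding into it (illustration only, not code from this patch):

```c
/*
 * Hypothetical architecture oops-handler tail: after reporting the fault,
 * leave the faulting task via make_task_dead() so that oops_count and the
 * oops_limit check above are applied.
 */
static void __noreturn example_arch_die(int signr)
{
	console_verbose();
	/* ... print registers, stack trace, taint state ... */
	make_task_dead(signr);	/* never returns; may panic() past oops_limit */
}
```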
925 | 939 | |
926 | 940 | void complete_and_exit(struct completion *comp, long code) |
927 | 941 | { |
.. | .. |
1482 | 1496 | void __wake_up_parent(struct task_struct *p, struct task_struct *parent) |
1483 | 1497 | { |
1484 | 1498 | __wake_up_sync_key(&parent->signal->wait_chldexit, |
1485 | | - TASK_INTERRUPTIBLE, 1, p); |
| 1499 | + TASK_INTERRUPTIBLE, p); |
1486 | 1500 | } |
1487 | 1501 | |
1488 | 1502 | static long do_wait(struct wait_opts *wo) |
.. | .. |
1504 | 1518 | */ |
1505 | 1519 | wo->notask_error = -ECHILD; |
1506 | 1520 | if ((wo->wo_type < PIDTYPE_MAX) && |
1507 | | - (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) |
| 1521 | + (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type))) |
1508 | 1522 | goto notask; |
1509 | 1523 | |
1510 | 1524 | set_current_state(TASK_INTERRUPTIBLE); |
.. | .. |
1546 | 1560 | struct pid *pid = NULL; |
1547 | 1561 | enum pid_type type; |
1548 | 1562 | long ret; |
| 1563 | + unsigned int f_flags = 0; |
1549 | 1564 | |
1550 | 1565 | if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED| |
1551 | 1566 | __WNOTHREAD|__WCLONE|__WALL)) |
.. | .. |
1561 | 1576 | type = PIDTYPE_PID; |
1562 | 1577 | if (upid <= 0) |
1563 | 1578 | return -EINVAL; |
| 1579 | + |
| 1580 | + pid = find_get_pid(upid); |
1564 | 1581 | break; |
1565 | 1582 | case P_PGID: |
1566 | 1583 | type = PIDTYPE_PGID; |
1567 | | - if (upid <= 0) |
| 1584 | + if (upid < 0) |
1568 | 1585 | return -EINVAL; |
| 1586 | + |
| 1587 | + if (upid) |
| 1588 | + pid = find_get_pid(upid); |
| 1589 | + else |
| 1590 | + pid = get_task_pid(current, PIDTYPE_PGID); |
| 1591 | + break; |
| 1592 | + case P_PIDFD: |
| 1593 | + type = PIDTYPE_PID; |
| 1594 | + if (upid < 0) |
| 1595 | + return -EINVAL; |
| 1596 | + |
| 1597 | + pid = pidfd_get_pid(upid, &f_flags); |
| 1598 | + if (IS_ERR(pid)) |
| 1599 | + return PTR_ERR(pid); |
| 1600 | + |
1569 | 1601 | break; |
1570 | 1602 | default: |
1571 | 1603 | return -EINVAL; |
1572 | 1604 | } |
1573 | | - |
1574 | | - if (type < PIDTYPE_MAX) |
1575 | | - pid = find_get_pid(upid); |
1576 | 1605 | |
1577 | 1606 | wo.wo_type = type; |
1578 | 1607 | wo.wo_pid = pid; |
1579 | 1608 | wo.wo_flags = options; |
1580 | 1609 | wo.wo_info = infop; |
1581 | 1610 | wo.wo_rusage = ru; |
| 1611 | + if (f_flags & O_NONBLOCK) |
| 1612 | + wo.wo_flags |= WNOHANG; |
| 1613 | + |
1582 | 1614 | ret = do_wait(&wo); |
| 1615 | + if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK)) |
| 1616 | + ret = -EAGAIN; |
1583 | 1617 | |
1584 | 1618 | put_pid(pid); |
1585 | 1619 | return ret; |
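The new P_PIDFD case lets waitid() identify the child by a pidfd, and a pidfd opened with O_NONBLOCK (or PIDFD_NONBLOCK) turns the call into a WNOHANG-style poll that fails with EAGAIN while the child is still running. A userspace sketch, assuming headers on the build host that define P_PIDFD:

```c
/* Userspace sketch: reap a child through its pidfd.
 * Assumes <sys/wait.h> (or <linux/wait.h>) exposes P_PIDFD on this system. */
#include <sys/wait.h>
#include <stdio.h>

int wait_via_pidfd(int pidfd)
{
	siginfo_t info = { 0 };

	/* Blocks unless the pidfd was opened non-blocking, in which case the
	 * kernel behaves as if WNOHANG were set and waitid() fails with
	 * errno == EAGAIN while the child is still alive. */
	if (waitid(P_PIDFD, pidfd, &info, WEXITED) < 0) {
		perror("waitid");
		return -1;
	}
	printf("pid %d exited, status %d\n", info.si_pid, info.si_status);
	return 0;
}
```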
.. | .. |
1602 | 1636 | if (!infop) |
1603 | 1637 | return err; |
1604 | 1638 | |
1605 | | - if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop))) |
| 1639 | + if (!user_write_access_begin(infop, sizeof(*infop))) |
1606 | 1640 | return -EFAULT; |
1607 | 1641 | |
1608 | 1642 | unsafe_put_user(signo, &infop->si_signo, Efault); |
.. | .. |
1611 | 1645 | unsafe_put_user(info.pid, &infop->si_pid, Efault); |
1612 | 1646 | unsafe_put_user(info.uid, &infop->si_uid, Efault); |
1613 | 1647 | unsafe_put_user(info.status, &infop->si_status, Efault); |
1614 | | - user_access_end(); |
| 1648 | + user_write_access_end(); |
1615 | 1649 | return err; |
1616 | 1650 | Efault: |
1617 | | - user_access_end(); |
| 1651 | + user_write_access_end(); |
1618 | 1652 | return -EFAULT; |
1619 | 1653 | } |
1620 | 1654 | |
.. | .. |
1658 | 1692 | if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr)) |
1659 | 1693 | ret = -EFAULT; |
1660 | 1694 | |
| 1695 | + return ret; |
| 1696 | +} |
| 1697 | + |
| 1698 | +int kernel_wait(pid_t pid, int *stat) |
| 1699 | +{ |
| 1700 | + struct wait_opts wo = { |
| 1701 | + .wo_type = PIDTYPE_PID, |
| 1702 | + .wo_pid = find_get_pid(pid), |
| 1703 | + .wo_flags = WEXITED, |
| 1704 | + }; |
| 1705 | + int ret; |
| 1706 | + |
| 1707 | + ret = do_wait(&wo); |
| 1708 | + if (ret > 0 && wo.wo_stat) |
| 1709 | + *stat = wo.wo_stat; |
| 1710 | + put_pid(wo.wo_pid); |
1661 | 1711 | return ret; |
1662 | 1712 | } |
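kernel_wait() gives in-kernel callers such as the usermode-helper machinery a simple way to reap a child they forked, without going through the syscall wrappers. A hedged sketch of a caller; helper_pid is a placeholder name, not one from this patch:

```c
/* Sketch: an in-kernel parent (e.g. a kthread that spawned a usermode
 * helper) reaping its child. kernel_wait() returns the reaped pid (> 0)
 * on success and fills in the raw wait status. */
static void reap_helper(pid_t helper_pid)
{
	int status = 0;
	int ret;

	ret = kernel_wait(helper_pid, &status);
	if (ret > 0)
		pr_info("child %d exited, raw wait status 0x%x\n", ret, status);
}
```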
1663 | 1713 | |
.. | .. |
1729 | 1779 | if (!infop) |
1730 | 1780 | return err; |
1731 | 1781 | |
1732 | | - if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop))) |
| 1782 | + if (!user_write_access_begin(infop, sizeof(*infop))) |
1733 | 1783 | return -EFAULT; |
1734 | 1784 | |
1735 | 1785 | unsafe_put_user(signo, &infop->si_signo, Efault); |
.. | .. |
1738 | 1788 | unsafe_put_user(info.pid, &infop->si_pid, Efault); |
1739 | 1789 | unsafe_put_user(info.uid, &infop->si_uid, Efault); |
1740 | 1790 | unsafe_put_user(info.status, &infop->si_status, Efault); |
1741 | | - user_access_end(); |
| 1791 | + user_write_access_end(); |
1742 | 1792 | return err; |
1743 | 1793 | Efault: |
1744 | | - user_access_end(); |
| 1794 | + user_write_access_end(); |
1745 | 1795 | return -EFAULT; |
1746 | 1796 | } |
1747 | 1797 | #endif |
1748 | 1798 | |
| 1799 | +/** |
| 1800 | + * thread_group_exited - check that a thread group has exited |
| 1801 | + * @pid: tgid of thread group to be checked. |
| 1802 | + * |
| 1803 | + * Test if the thread group represented by tgid has exited (all |
| 1804 | + * threads are zombies, dead or completely gone). |
| 1805 | + * |
| 1806 | + * Return: true if the thread group has exited. false otherwise. |
| 1807 | + */ |
| 1808 | +bool thread_group_exited(struct pid *pid) |
| 1809 | +{ |
| 1810 | + struct task_struct *task; |
| 1811 | + bool exited; |
| 1812 | + |
| 1813 | + rcu_read_lock(); |
| 1814 | + task = pid_task(pid, PIDTYPE_PID); |
| 1815 | + exited = !task || |
| 1816 | + (READ_ONCE(task->exit_state) && thread_group_empty(task)); |
| 1817 | + rcu_read_unlock(); |
| 1818 | + |
| 1819 | + return exited; |
| 1820 | +} |
| 1821 | +EXPORT_SYMBOL(thread_group_exited); |
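thread_group_exited() is exported so code that caches a struct pid reference (pidfd pollers, io_uring and similar users) can cheaply ask whether the whole thread group is gone. A hedged sketch of such a caller; the context structure and field names are hypothetical:

```c
/* Sketch: periodic liveness check from code that took a reference on the
 * owning thread group's pid at setup time (names are placeholders). */
struct my_ctx {
	struct pid *owner_pid;	/* reference taken with get_pid() at setup */
};

static bool owner_is_gone(struct my_ctx *ctx)
{
	/* true once every thread in the owning group is a zombie, dead,
	 * or already reaped */
	return thread_group_exited(ctx->owner_pid);
}
```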
| 1822 | + |
1749 | 1823 | __weak void abort(void) |
1750 | 1824 | { |
1751 | 1825 | BUG(); |