```diff
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  linux/kernel/exit.c
  *
@@ ... @@
 #include <linux/random.h>
 #include <linux/rcuwait.h>
 #include <linux/compat.h>
+#include <linux/io_uring.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
-#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <trace/hooks/mm.h>
 
 static void __unhash_process(struct task_struct *p, bool group_dead)
 {
@@ ... @@
 	struct signal_struct *sig = tsk->signal;
 	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
-	struct tty_struct *uninitialized_var(tty);
+	struct tty_struct *tty;
 	u64 utime, stime;
 
 	sighand = rcu_dereference_check(tsk->sighand,
@@ ... @@
 
 #ifdef CONFIG_POSIX_TIMERS
 	posix_cpu_timers_exit(tsk);
-	if (group_dead) {
+	if (group_dead)
 		posix_cpu_timers_exit_group(tsk);
-	} else {
-		/*
-		 * This can only happen if the caller is de_thread().
-		 * FIXME: this is the temporary hack, we should teach
-		 * posix-cpu-timers to handle this case correctly.
-		 */
-		if (unlikely(has_group_leader_pid(tsk)))
-			posix_cpu_timers_exit_group(tsk);
-	}
 #endif
 
 	if (group_dead) {
@@ ... @@
 	put_task_struct(tsk);
 }
 
+void put_task_struct_rcu_user(struct task_struct *task)
+{
+	if (refcount_dec_and_test(&task->rcu_users))
+		call_rcu(&task->rcu, delayed_put_task_struct);
+}
 
 void release_task(struct task_struct *p)
 {
 	struct task_struct *leader;
+	struct pid *thread_pid;
 	int zap_leader;
 repeat:
 	/* don't need to get the RCU readlock here - the process is dead and
@@ ... @@
 	atomic_dec(&__task_cred(p)->user->processes);
 	rcu_read_unlock();
 
-	proc_flush_task(p);
 	cgroup_release(p);
 
 	write_lock_irq(&tasklist_lock);
 	ptrace_release_task(p);
+	thread_pid = get_pid(p->thread_pid);
 	__exit_signal(p);
 
 	/*
@@ ... @@
 	}
 
 	write_unlock_irq(&tasklist_lock);
+	seccomp_filter_release(p);
+	proc_flush_pid(thread_pid);
+	put_pid(thread_pid);
 	release_thread(p);
-	call_rcu(&p->rcu, delayed_put_task_struct);
+	put_task_struct_rcu_user(p);
 
 	p = leader;
 	if (unlikely(zap_leader))
 		goto repeat;
 }
```
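Taken together, the release_task() hunks switch task reaping to the `rcu_users` scheme: `thread_pid` is pinned before `__exit_signal()` detaches it, so `proc_flush_pid()` can still find the proc entries afterwards, and the final put goes through the new `put_task_struct_rcu_user()`. That helper is what lets RCU readers drop the old `task_rcu_dereference()` retry dance removed just below. A minimal reader-side sketch, with a hypothetical `struct foo` container that is not part of this diff:

```c
struct foo {
	struct task_struct __rcu *owner;	/* hypothetical published pointer */
};

/*
 * Sketch: an RCU reader using a published task pointer. The task_struct
 * cannot be freed before rcu_read_unlock(), because the publisher drops
 * its reference with put_task_struct_rcu_user(), which defers the final
 * free by an RCU grace period.
 */
static void poke_owner(struct foo *foo)
{
	struct task_struct *task;

	rcu_read_lock();
	task = rcu_dereference(foo->owner);
	if (task)
		wake_up_process(task);	/* safe for the whole read section */
	rcu_read_unlock();
}
```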
|---|
```diff
 
-/*
- * Note that if this function returns a valid task_struct pointer (!NULL)
- * task->usage must remain >0 for the duration of the RCU critical section.
- */
-struct task_struct *task_rcu_dereference(struct task_struct **ptask)
+int rcuwait_wake_up(struct rcuwait *w)
 {
-	struct sighand_struct *sighand;
-	struct task_struct *task;
-
-	/*
-	 * We need to verify that release_task() was not called and thus
-	 * delayed_put_task_struct() can't run and drop the last reference
-	 * before rcu_read_unlock(). We check task->sighand != NULL,
-	 * but we can read the already freed and reused memory.
-	 */
-retry:
-	task = rcu_dereference(*ptask);
-	if (!task)
-		return NULL;
-
-	probe_kernel_address(&task->sighand, sighand);
-
-	/*
-	 * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
-	 * was already freed we can not miss the preceding update of this
-	 * pointer.
-	 */
-	smp_rmb();
-	if (unlikely(task != READ_ONCE(*ptask)))
-		goto retry;
-
-	/*
-	 * We've re-checked that "task == *ptask", now we have two different
-	 * cases:
-	 *
-	 * 1. This is actually the same task/task_struct. In this case
-	 *    sighand != NULL tells us it is still alive.
-	 *
-	 * 2. This is another task which got the same memory for task_struct.
-	 *    We can't know this of course, and we can not trust
-	 *    sighand != NULL.
-	 *
-	 *    In this case we actually return a random value, but this is
-	 *    correct.
-	 *
-	 *    If we return NULL - we can pretend that we actually noticed that
-	 *    *ptask was updated when the previous task has exited. Or pretend
-	 *    that probe_slab_address(&sighand) reads NULL.
-	 *
-	 *    If we return the new task (because sighand is not NULL for any
-	 *    reason) - this is fine too. This (new) task can't go away before
-	 *    another gp pass.
-	 *
-	 *    And note: We could even eliminate the false positive if re-read
-	 *    task->sighand once again to avoid the falsely NULL. But this case
-	 *    is very unlikely so we don't care.
-	 */
-	if (!sighand)
-		return NULL;
-
-	return task;
-}
-
-void rcuwait_wake_up(struct rcuwait *w)
-{
+	int ret = 0;
 	struct task_struct *task;
 
 	rcu_read_lock();
@@ ... @@
 	/*
 	 * Order condition vs @task, such that everything prior to the load
 	 * of @task is visible. This is the condition as to why the user called
-	 * rcuwait_trywake() in the first place. Pairs with set_current_state()
+	 * rcuwait_wake() in the first place. Pairs with set_current_state()
 	 * barrier (A) in rcuwait_wait_event().
 	 *
 	 * WAIT                WAKE
@@ ... @@
 	 */
 	smp_mb(); /* (B) */
 
-	/*
-	 * Avoid using task_rcu_dereference() magic as long as we are careful,
-	 * see comment in rcuwait_wait_event() regarding ->exit_state.
-	 */
 	task = rcu_dereference(w->task);
 	if (task)
-		wake_up_process(task);
+		ret = wake_up_process(task);
 	rcu_read_unlock();
+
+	return ret;
 }
+EXPORT_SYMBOL_GPL(rcuwait_wake_up);
 
 /*
  * Determine if a process group is "orphaned", according to the POSIX
```
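`rcuwait_wake_up()` now reports whether a sleeping task was actually woken and is exported for modular users. A sketch of the wait/wake pairing, assuming the three-argument `rcuwait_wait_event()` of this kernel generation and a hypothetical `done` flag (in real code the rcuwait would be initialized before the waker can possibly run):

```c
#include <linux/rcuwait.h>
#include <linux/sched.h>

static struct rcuwait my_wait;	/* rcuwait_init(&my_wait) done at setup */
static bool done;

/* Waiter: sleeps until 'done' is observed true. */
static void waiter(void)
{
	rcuwait_wait_event(&my_wait, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
}

/*
 * Waker: publish the condition, then wake. The smp_mb() inside
 * rcuwait_wake_up() orders the store to 'done' against its load of
 * w->task, pairing with barrier (A) in rcuwait_wait_event().
 */
static void waker(void)
{
	WRITE_ONCE(done, true);
	if (rcuwait_wake_up(&my_wait))
		pr_debug("woke the waiter\n");
}
```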
|---|
```diff
 	 * freed task structure.
 	 */
 	if (atomic_read(&mm->mm_users) <= 1) {
-		mm->owner = NULL;
+		WRITE_ONCE(mm->owner, NULL);
 		return;
 	}
 
@@ ... @@
 	 * most likely racing with swapoff (try_to_unuse()) or /proc or
 	 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
 	 */
-	mm->owner = NULL;
+	WRITE_ONCE(mm->owner, NULL);
 	return;
 
 assign_new_owner:
@@ ... @@
 		put_task_struct(c);
 		goto retry;
 	}
-	mm->owner = c;
+	WRITE_ONCE(mm->owner, c);
 	task_unlock(c);
 	put_task_struct(c);
 }
```
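The plain `mm->owner` stores become `WRITE_ONCE()` so the compiler cannot tear or replay them under lockless readers. The reader side, for reference: in-tree readers such as the memcg code go through `rcu_dereference()` under `rcu_read_lock()`. This snippet is illustrative and assumes CONFIG_MEMCG, which is what provides `mm->owner`:

```c
/* Illustrative lockless reader pairing with the WRITE_ONCE() above. */
struct task_struct *owner;

rcu_read_lock();
owner = rcu_dereference(mm->owner);	/* includes READ_ONCE() semantics */
if (owner)
	pr_info("mm owned by %s[%d]\n", owner->comm, task_pid_nr(owner));
rcu_read_unlock();
```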
|---|
```diff
 	sync_mm_rss(mm);
 	/*
 	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_sem around checking core_state
+	 * We must hold mmap_lock around checking core_state
 	 * and clearing tsk->mm.  The core-inducing thread
 	 * will increment ->nr_threads for each thread in the
 	 * group with ->mm != NULL.
 	 */
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	core_state = mm->core_state;
 	if (core_state) {
 		struct core_thread self;
 
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
 
 		self.task = current;
 		if (self.task->flags & PF_SIGNALED)
@@ ... @@
 			freezable_schedule();
 		}
 		__set_current_state(TASK_RUNNING);
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
 	task_lock(current);
 	current->mm = NULL;
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	enter_lazy_tlb(mm, current);
 	task_unlock(current);
 	mm_update_next_owner(mm);
+	trace_android_vh_exit_mm(mm);
 	mmput(mm);
 	if (test_thread_flag(TIF_MEMDIE))
 		exit_oom_victim();
```
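The `mmap_sem` calls are part of the tree-wide conversion to the mmap_lock wrapper API; the locking rules are unchanged, and the wrappers merely hide the underlying rwsem. The converted read-side idiom, sketched:

```c
#include <linux/mmap_lock.h>

/* Sketch: the converted read-side idiom used in exit_mm() above. */
static void inspect_mm(struct mm_struct *mm)
{
	mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem); */
	/* ... read mm->core_state, walk the VMAs, ... */
	mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem); */
}
```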
|---|
```diff
 	reaper = find_new_reaper(father, reaper);
 	list_for_each_entry(p, &father->children, sibling) {
 		for_each_thread(p, t) {
-			t->real_parent = reaper;
-			BUG_ON((!t->ptrace) != (t->parent == father));
+			RCU_INIT_POINTER(t->real_parent, reaper);
+			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
 			if (likely(!t->ptrace))
 				t->parent = t->real_parent;
 			if (t->pdeath_signal)
@@ ... @@
 		autoreap = true;
 	}
 
-	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
-	if (tsk->exit_state == EXIT_DEAD)
+	if (autoreap) {
+		tsk->exit_state = EXIT_DEAD;
 		list_add(&tsk->ptrace_entry, &dead);
+	}
 
 	/* mt-exec, de_thread() is waiting for group leader */
 	if (unlikely(tsk->signal->notify_count < 0))
@@ ... @@
 	 * mm_release()->clear_child_tid() from writing to a user-controlled
 	 * kernel address.
 	 */
-	set_fs(USER_DS);
+	force_uaccess_begin();
 
 	if (unlikely(in_atomic())) {
 		pr_info("note: %s[%d] exited with preempt_count %d\n",
@@ ... @@
 		schedule();
 	}
 
+	io_uring_files_cancel();
 	exit_signals(tsk);	/* sets PF_EXITING */
 
 	/* sync mm's RSS info before statistics gathering */
@@ ... @@
 
 #ifdef CONFIG_POSIX_TIMERS
 	hrtimer_cancel(&tsk->signal->real_timer);
-	exit_itimers(tsk->signal);
+	exit_itimers(tsk);
 #endif
 	if (tsk->mm)
 		setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
@@ ... @@
 void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
 {
 	__wake_up_sync_key(&parent->signal->wait_chldexit,
-			   TASK_INTERRUPTIBLE, 1, p);
+			   TASK_INTERRUPTIBLE, p);
 }
 
 static long do_wait(struct wait_opts *wo)
@@ ... @@
 	 */
 	wo->notask_error = -ECHILD;
 	if ((wo->wo_type < PIDTYPE_MAX) &&
-	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
+	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
 		goto notask;
 
 	set_current_state(TASK_INTERRUPTIBLE);
```
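`pid_has_task()` replaces the open-coded peek at the pid's task list. It is essentially the following `<linux/pid.h>` helper, reproduced here for reference:

```c
/* Essentially the <linux/pid.h> helper this hunk switches to. */
static inline bool pid_has_task(struct pid *pid, enum pid_type type)
{
	return !hlist_empty(&pid->tasks[type]);
}
```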
|---|
```diff
 	struct pid *pid = NULL;
 	enum pid_type type;
 	long ret;
+	unsigned int f_flags = 0;
 
 	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
 			__WNOTHREAD|__WCLONE|__WALL))
@@ ... @@
 		type = PIDTYPE_PID;
 		if (upid <= 0)
 			return -EINVAL;
+
+		pid = find_get_pid(upid);
 		break;
 	case P_PGID:
 		type = PIDTYPE_PGID;
-		if (upid <= 0)
+		if (upid < 0)
 			return -EINVAL;
+
+		if (upid)
+			pid = find_get_pid(upid);
+		else
+			pid = get_task_pid(current, PIDTYPE_PGID);
+		break;
+	case P_PIDFD:
+		type = PIDTYPE_PID;
+		if (upid < 0)
+			return -EINVAL;
+
+		pid = pidfd_get_pid(upid, &f_flags);
+		if (IS_ERR(pid))
+			return PTR_ERR(pid);
+
 		break;
 	default:
 		return -EINVAL;
 	}
-
-	if (type < PIDTYPE_MAX)
-		pid = find_get_pid(upid);
 
 	wo.wo_type	= type;
 	wo.wo_pid	= pid;
 	wo.wo_flags	= options;
 	wo.wo_info	= infop;
 	wo.wo_rusage	= ru;
+	if (f_flags & O_NONBLOCK)
+		wo.wo_flags |= WNOHANG;
+
 	ret = do_wait(&wo);
+	if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
+		ret = -EAGAIN;
 
 	put_pid(pid);
 	return ret;
```
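Two user-visible changes land in this hunk: `waitid()` gains `P_PIDFD` to wait through a pidfd, and a pidfd opened with `O_NONBLOCK` turns the call into a `WNOHANG`-style wait that fails with `-EAGAIN` when the child is not yet reapable. A userspace sketch, assuming headers that define `P_PIDFD` and a pidfd obtained from `pidfd_open(2)` or `clone(CLONE_PIDFD)`:

```c
/* Userspace sketch: reap a child through its pidfd. */
#include <errno.h>
#include <stdio.h>
#include <sys/wait.h>

static int reap_pidfd(int pidfd)
{
	siginfo_t info = { 0 };

	if (waitid(P_PIDFD, pidfd, &info, WEXITED) < 0) {
		if (errno == EAGAIN)	/* O_NONBLOCK pidfd, child not done */
			return 0;
		return -1;
	}
	printf("pid %d exited, status %d\n", info.si_pid, info.si_status);
	return 1;
}
```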
|---|
```diff
 	if (!infop)
 		return err;
 
-	if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
+	if (!user_write_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;
 
 	unsafe_put_user(signo, &infop->si_signo, Efault);
@@ ... @@
 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
 	unsafe_put_user(info.status, &infop->si_status, Efault);
-	user_access_end();
+	user_write_access_end();
 	return err;
 Efault:
-	user_access_end();
+	user_write_access_end();
 	return -EFAULT;
 }
 
@@ ... @@
 	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
 		ret = -EFAULT;
 
+	return ret;
+}
+
+int kernel_wait(pid_t pid, int *stat)
+{
+	struct wait_opts wo = {
+		.wo_type	= PIDTYPE_PID,
+		.wo_pid		= find_get_pid(pid),
+		.wo_flags	= WEXITED,
+	};
+	int ret;
+
+	ret = do_wait(&wo);
+	if (ret > 0 && wo.wo_stat)
+		*stat = wo.wo_stat;
+	put_pid(wo.wo_pid);
 	return ret;
 }
```
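`kernel_wait()` gives in-kernel callers a simple way to reap a child by PID; the usermode-helper code is the motivating in-tree user. A sketch of a call site, assuming `pid` names a child of `current`, as `do_wait()` requires:

```c
/* Sketch: reap an exited child from kernel context. */
static void reap_child(pid_t pid)
{
	int status;
	int ret = kernel_wait(pid, &status);

	if (ret > 0)		/* child reaped; 'status' is its wait status */
		pr_debug("child %d exited, status %#x\n", pid, status);
	else if (ret == -ECHILD)
		pr_debug("no such child to reap\n");
}
```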
|---|
```diff
 	if (!infop)
 		return err;
 
-	if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
+	if (!user_write_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;
 
 	unsafe_put_user(signo, &infop->si_signo, Efault);
@@ ... @@
 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
 	unsafe_put_user(info.status, &infop->si_status, Efault);
-	user_access_end();
+	user_write_access_end();
 	return err;
 Efault:
-	user_access_end();
+	user_write_access_end();
 	return -EFAULT;
 }
 #endif
 
+/**
+ * thread_group_exited - check that a thread group has exited
+ * @pid: tgid of thread group to be checked.
+ *
+ * Test if the thread group represented by tgid has exited (all
+ * threads are zombies, dead or completely gone).
+ *
+ * Return: true if the thread group has exited. false otherwise.
+ */
+bool thread_group_exited(struct pid *pid)
+{
+	struct task_struct *task;
+	bool exited;
+
+	rcu_read_lock();
+	task = pid_task(pid, PIDTYPE_PID);
+	exited = !task ||
+		(READ_ONCE(task->exit_state) && thread_group_empty(task));
+	rcu_read_unlock();
+
+	return exited;
+}
+EXPORT_SYMBOL(thread_group_exited);
+
```
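`thread_group_exited()` is added and exported for pidfd support: `pidfd_poll()` reports a pidfd readable exactly when this predicate turns true. Its usage pattern, sketched:

```c
#include <linux/poll.h>

/* Sketch mirroring the pidfd_poll() usage pattern: a pidfd becomes
 * readable once the whole thread group is gone. */
static __poll_t pidfd_ready(struct pid *pid)
{
	__poll_t mask = 0;

	if (thread_group_exited(pid))
		mask |= EPOLLIN;
	return mask;
}
```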
|---|
```diff
 __weak void abort(void)
 {
 	BUG();
```
|---|