| .. | .. |
| 8 | 8 | #include <linux/sched/jobctl.h> |
| 9 | 9 | #include <linux/sched/task.h> |
| 10 | 10 | #include <linux/cred.h> |
| 11 | +#include <linux/refcount.h> |
| 12 | +#include <linux/posix-timers.h> |
| 13 | +#include <linux/mm_types.h> |
| 14 | +#include <asm/ptrace.h> |
| 11 | 15 | #include <linux/android_kabi.h> |
| 12 | 16 | |
| 13 | 17 | /* |
| .. | .. |
| 15 | 19 | */ |
| 16 | 20 | |
| 17 | 21 | struct sighand_struct { |
| 18 | | - atomic_t count; |
| 19 | | - struct k_sigaction action[_NSIG]; |
| 20 | 22 | spinlock_t siglock; |
| 23 | + refcount_t count; |
| 21 | 24 | wait_queue_head_t signalfd_wqh; |
| 25 | + struct k_sigaction action[_NSIG]; |
| 22 | 26 | }; |
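The sighand_struct count moves from atomic_t to refcount_t, which saturates and WARNs on overflow/underflow instead of silently wrapping, and the fields are reordered so the hot siglock sits first. A minimal sketch of the get/put pattern the new type enforces — the helper names and the passed-in slab cache are assumptions for illustration (the real lifecycle lives in kernel/fork.c):

```c
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

/* Hypothetical helpers showing the refcount_t lifecycle. */
static void sighand_get(struct sighand_struct *sig)
{
	refcount_inc(&sig->count);		/* WARNs once and saturates on overflow */
}

static void sighand_put(struct sighand_struct *sig, struct kmem_cache *cachep)
{
	if (refcount_dec_and_test(&sig->count))	/* true only for the final reference */
		kmem_cache_free(cachep, sig);
}
```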
| 23 | 27 | |
| 24 | 28 | /* |
| .. | .. |
| 56 | 60 | /** |
| 57 | 61 | * struct thread_group_cputimer - thread group interval timer counts |
| 58 | 62 | * @cputime_atomic: atomic thread group interval timers. |
| 59 | | - * @running: true when there are timers running and |
| 60 | | - * @cputime_atomic receives updates. |
| 61 | | - * @checking_timer: true when a thread in the group is in the |
| 62 | | - * process of checking for thread group timers. |
| 63 | 63 | * |
| 64 | 64 | * This structure contains the version of task_cputime, above, that is |
| 65 | 65 | * used for thread group CPU timer calculations. |
| 66 | 66 | */ |
| 67 | 67 | struct thread_group_cputimer { |
| 68 | 68 | struct task_cputime_atomic cputime_atomic; |
| 69 | | - bool running; |
| 70 | | - bool checking_timer; |
| 71 | 69 | }; |
| 72 | 70 | |
| 73 | 71 | struct multiprocess_signals { |
| .. | .. |
| 83 | 81 | * the locking of signal_struct. |
| 84 | 82 | */ |
| 85 | 83 | struct signal_struct { |
| 86 | | - atomic_t sigcnt; |
| 84 | + refcount_t sigcnt; |
| 87 | 85 | atomic_t live; |
| 88 | 86 | int nr_threads; |
| 89 | 87 | struct list_head thread_head; |
| .. | .. |
| 148 | 146 | */ |
| 149 | 147 | struct thread_group_cputimer cputimer; |
| 150 | 148 | |
| 151 | | - /* Earliest-expiration cache. */ |
| 152 | | - struct task_cputime cputime_expires; |
| 153 | | - |
| 154 | | - struct list_head cpu_timers[3]; |
| 155 | | - |
| 156 | 149 | #endif |
| 150 | + /* Empty if CONFIG_POSIX_TIMERS=n */ |
| 151 | + struct posix_cputimers posix_cputimers; |
| 157 | 152 | |
| 158 | 153 | /* PID/PID hash table linkage. */ |
| 159 | 154 | struct pid *pids[PIDTYPE_MAX]; |
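The earliest-expiration cache and the three cpu_timers lists collapse into one struct posix_cputimers, which compiles to an empty struct when CONFIG_POSIX_TIMERS=n, so the field can live outside the #ifdef. A hedged sketch of the fork-time setup this implies — posix_cputimers_init() is the initializer from <linux/posix-timers.h>, and the surrounding helper is invented:

```c
#include <linux/posix-timers.h>
#include <linux/sched/signal.h>

/* Illustrative only: roughly the setup copy_signal() now needs. */
static void sketch_init_signal_timers(struct signal_struct *sig)
{
	/* Compiles to nothing when CONFIG_POSIX_TIMERS=n; no #ifdef needed. */
	posix_cputimers_init(&sig->posix_cputimers);
}
```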
| .. | .. |
| 232 | 227 | |
| 233 | 228 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
| 234 | 229 | * credential calculations |
| 235 | | - * (notably. ptrace) */ |
| 230 | + * (notably, ptrace) |
| 231 | + * Deprecated: do not use in new code. |
| 232 | + * Use exec_update_lock instead. |
| 233 | + */ |
| 234 | + struct rw_semaphore exec_update_lock; /* Held while task_struct is |
| 235 | + * being updated during exec, |
| 236 | + * and may have inconsistent |
| 237 | + * permissions. |
| 238 | + */ |
| 239 | + |
| 236 | 240 | ANDROID_KABI_RESERVE(1); |
| 237 | 241 | ANDROID_KABI_RESERVE(2); |
| 238 | 242 | ANDROID_KABI_RESERVE(3); |
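exec_update_lock is an ordinary rw_semaphore: execve() takes it for writing across the window where task_struct credentials are inconsistent, and readers that examine another task's exec-time state take it for reading. A hedged sketch of the reader side (the inspection body is a placeholder; down_read_killable()/up_read() are the stock rwsem API):

```c
#include <linux/rwsem.h>
#include <linux/sched/signal.h>

/* Illustrative reader: observe a task's exec-time state consistently. */
static int sketch_inspect_task(struct task_struct *task)
{
	int err;

	/* Killable, so a long-running exec cannot wedge us unkillably. */
	err = down_read_killable(&task->signal->exec_update_lock);
	if (err)
		return err;

	/* ... read task->mm / credentials here: exec holds the lock for
	 * writing while it swaps them, so they cannot change under us. */

	up_read(&task->signal->exec_update_lock);
	return 0;
}
```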
| .. | .. |
| 275 | 279 | extern void flush_signals(struct task_struct *); |
| 276 | 280 | extern void ignore_signals(struct task_struct *); |
| 277 | 281 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
| 278 | | -extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
| 282 | +extern int dequeue_signal(struct task_struct *task, |
| 283 | + sigset_t *mask, kernel_siginfo_t *info); |
| 279 | 284 | |
| 280 | | -static inline int kernel_dequeue_signal(siginfo_t *info) |
| 285 | +static inline int kernel_dequeue_signal(void) |
| 281 | 286 | { |
| 282 | | - struct task_struct *tsk = current; |
| 283 | | - siginfo_t __info; |
| 287 | + struct task_struct *task = current; |
| 288 | + kernel_siginfo_t __info; |
| 284 | 289 | int ret; |
| 285 | 290 | |
| 286 | | - spin_lock_irq(&tsk->sighand->siglock); |
| 287 | | - ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); |
| 288 | | - spin_unlock_irq(&tsk->sighand->siglock); |
| 291 | + spin_lock_irq(&task->sighand->siglock); |
| 292 | + ret = dequeue_signal(task, &task->blocked, &__info); |
| 293 | + spin_unlock_irq(&task->sighand->siglock); |
| 289 | 294 | |
| 290 | 295 | return ret; |
| 291 | 296 | } |
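kernel_dequeue_signal() drops its siginfo argument: kernel threads only ever cared about the returned signal number, so the info is always dequeued into a local kernel_siginfo_t. A hedged sketch of the typical caller, a kthread that opted in to SIGKILL with allow_signal() (the loop body is illustrative):

```c
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>

/* Illustrative kthread that consumes the signals it allowed. */
static int sketch_kthread(void *unused)
{
	allow_signal(SIGKILL);		/* kthreads ignore signals by default */

	while (!kthread_should_stop()) {
		if (signal_pending(current) &&
		    kernel_dequeue_signal() == SIGKILL)	/* returns signr or 0 */
			break;
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
```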
| .. | .. |
| 310 | 315 | # define ___ARCH_SI_IA64(_a1, _a2, _a3) |
| 311 | 316 | #endif |
| 312 | 317 | |
| 313 | | -int force_sig_fault(int sig, int code, void __user *addr |
| 318 | +int force_sig_fault_to_task(int sig, int code, void __user *addr |
| 314 | 319 | ___ARCH_SI_TRAPNO(int trapno) |
| 315 | 320 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) |
| 316 | 321 | , struct task_struct *t); |
| 322 | +int force_sig_fault(int sig, int code, void __user *addr |
| 323 | + ___ARCH_SI_TRAPNO(int trapno) |
| 324 | + ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); |
| 317 | 325 | int send_sig_fault(int sig, int code, void __user *addr |
| 318 | 326 | ___ARCH_SI_TRAPNO(int trapno) |
| 319 | 327 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) |
| 320 | 328 | , struct task_struct *t); |
| 321 | 329 | |
| 322 | | -int force_sig_mceerr(int code, void __user *, short, struct task_struct *); |
| 330 | +int force_sig_mceerr(int code, void __user *, short); |
| 323 | 331 | int send_sig_mceerr(int code, void __user *, short, struct task_struct *); |
| 324 | 332 | |
| 325 | 333 | int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); |
| .. | .. |
| 327 | 335 | |
| 328 | 336 | int force_sig_ptrace_errno_trap(int errno, void __user *addr); |
| 329 | 337 | |
| 330 | | -extern int send_sig_info(int, struct siginfo *, struct task_struct *); |
| 331 | | -extern void force_sigsegv(int sig, struct task_struct *p); |
| 332 | | -extern int force_sig_info(int, struct siginfo *, struct task_struct *); |
| 333 | | -extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); |
| 334 | | -extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); |
| 335 | | -extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, |
| 338 | +extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); |
| 339 | +extern void force_sigsegv(int sig); |
| 340 | +extern int force_sig_info(struct kernel_siginfo *); |
| 341 | +extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp); |
| 342 | +extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid); |
| 343 | +extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *, |
| 336 | 344 | const struct cred *); |
| 337 | 345 | extern int kill_pgrp(struct pid *pid, int sig, int priv); |
| 338 | 346 | extern int kill_pid(struct pid *pid, int sig, int priv); |
| 339 | 347 | extern __must_check bool do_notify_parent(struct task_struct *, int); |
| 340 | 348 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); |
| 341 | | -extern void force_sig(int, struct task_struct *); |
| 349 | +extern void force_sig(int); |
| 342 | 350 | extern int send_sig(int, struct task_struct *, int); |
| 343 | 351 | extern int zap_other_threads(struct task_struct *p); |
| 344 | 352 | extern struct sigqueue *sigqueue_alloc(void); |
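The force_sig*() family loses its task argument: forced signals are synchronous, fault-style events that only make sense delivered to current, so force_sig(), force_sig_info(), force_sig_mceerr(), and force_sigsegv() now imply current, while force_sig_fault_to_task() keeps an explicit target for the few callers that need one. A hedged sketch of a fault path on the new API (the helper name is invented; arches with ___ARCH_SI_TRAPNO pass extra arguments):

```c
#include <linux/sched/signal.h>
#include <linux/signal.h>

/* Illustrative: report a bad user access from a fault handler. */
static void sketch_report_user_fault(unsigned long address)
{
	/* No task argument: always targets current. */
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
}
```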
| .. | .. |
| 352 | 360 | return -ERESTARTNOINTR; |
| 353 | 361 | } |
| 354 | 362 | |
| 355 | | -static inline int signal_pending(struct task_struct *p) |
| 363 | +static inline int task_sigpending(struct task_struct *p) |
| 356 | 364 | { |
| 357 | 365 | return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); |
| 366 | +} |
| 367 | + |
| 368 | +static inline int signal_pending(struct task_struct *p) |
| 369 | +{ |
| 370 | + /* |
| 371 | + * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same |
| 372 | + * behavior in terms of ensuring that we break out of wait loops |
| 373 | + * so that notify signal callbacks can be processed. |
| 374 | + */ |
| 375 | + if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL))) |
| 376 | + return 1; |
| 377 | + return task_sigpending(p); |
| 358 | 378 | } |
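Splitting task_sigpending() out of signal_pending() means TIF_NOTIFY_SIGNAL (task_work notifications) breaks wait loops just like a signal, while callers that specifically need a queued signal can still test task_sigpending(). Existing interruptible-wait code keeps working unchanged; a hedged sketch of the canonical pattern (done_condition is a hypothetical predicate):

```c
#include <linux/sched/signal.h>
#include <linux/errno.h>

/* Illustrative interruptible wait loop. */
static int sketch_wait_for_thing(bool (*done_condition)(void))
{
	while (!done_condition()) {
		if (signal_pending(current))	/* real signal or TIF_NOTIFY_SIGNAL */
			return -ERESTARTSYS;
		schedule_timeout_interruptible(HZ / 10);
	}
	return 0;
}
```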
| 359 | 379 | |
| 360 | 380 | static inline int __fatal_signal_pending(struct task_struct *p) |
| .. | .. |
| 364 | 384 | |
| 365 | 385 | static inline int fatal_signal_pending(struct task_struct *p) |
| 366 | 386 | { |
| 367 | | - return signal_pending(p) && __fatal_signal_pending(p); |
| 387 | + return task_sigpending(p) && __fatal_signal_pending(p); |
| 368 | 388 | } |
| 369 | 389 | |
| 370 | 390 | static inline int signal_pending_state(long state, struct task_struct *p) |
| .. | .. |
| 375 | 395 | return 0; |
| 376 | 396 | |
| 377 | 397 | return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); |
| 398 | +} |
| 399 | + |
| 400 | +/* |
| 401 | + * This should only be used in fault handlers to decide whether we |
| 402 | + * should stop the current fault routine to handle the signals |
| 403 | + * instead, especially in the case where we have been interrupted by |
| 404 | + * a VM_FAULT_RETRY. |
| 405 | + */ |
| 406 | +static inline bool fault_signal_pending(vm_fault_t fault_flags, |
| 407 | + struct pt_regs *regs) |
| 408 | +{ |
| 409 | + return unlikely((fault_flags & VM_FAULT_RETRY) && |
| 410 | + (fatal_signal_pending(current) || |
| 411 | + (user_mode(regs) && signal_pending(current)))); |
| 378 | 412 | } |
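fault_signal_pending() centralizes the bail-out check that arch page-fault handlers used to open-code: on a VM_FAULT_RETRY, a fatal signal always aborts the fault, and a non-fatal one aborts only user-mode faults (user space simply re-faults after the signal). A hedged sketch of the arch-side loop (names are illustrative, and handle_mm_fault()'s exact signature varies across kernel versions):

```c
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <asm/ptrace.h>

/* Illustrative fragment of an arch do_page_fault(). */
static void sketch_do_page_fault(struct pt_regs *regs,
				 struct vm_area_struct *vma,
				 unsigned long address, unsigned int flags)
{
	vm_fault_t fault = handle_mm_fault(vma, address, flags);

	if (fault_signal_pending(fault, regs))
		return;		/* let signal delivery win; re-fault if needed */

	/* ... handle VM_FAULT_ERROR, retry with FAULT_FLAG_TRIED, ... */
}
```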
| 379 | 413 | |
| 380 | 414 | /* |
| .. | .. |
| 420 | 454 | static inline void set_restore_sigmask(void) |
| 421 | 455 | { |
| 422 | 456 | set_thread_flag(TIF_RESTORE_SIGMASK); |
| 423 | | - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
| 424 | 457 | } |
| 425 | 458 | |
| 426 | | -static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) |
| 459 | +static inline void clear_tsk_restore_sigmask(struct task_struct *task) |
| 427 | 460 | { |
| 428 | | - clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); |
| 461 | + clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); |
| 429 | 462 | } |
| 430 | 463 | |
| 431 | 464 | static inline void clear_restore_sigmask(void) |
| 432 | 465 | { |
| 433 | 466 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
| 434 | 467 | } |
| 435 | | -static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) |
| 468 | +static inline bool test_tsk_restore_sigmask(struct task_struct *task) |
| 436 | 469 | { |
| 437 | | - return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); |
| 470 | + return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK); |
| 438 | 471 | } |
| 439 | 472 | static inline bool test_restore_sigmask(void) |
| 440 | 473 | { |
| .. | .. |
| 451 | 484 | static inline void set_restore_sigmask(void) |
| 452 | 485 | { |
| 453 | 486 | current->restore_sigmask = true; |
| 454 | | - WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
| 455 | 487 | } |
| 456 | | -static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) |
| 488 | +static inline void clear_tsk_restore_sigmask(struct task_struct *task) |
| 457 | 489 | { |
| 458 | | - tsk->restore_sigmask = false; |
| 490 | + task->restore_sigmask = false; |
| 459 | 491 | } |
| 460 | 492 | static inline void clear_restore_sigmask(void) |
| 461 | 493 | { |
| .. | .. |
| 465 | 497 | { |
| 466 | 498 | return current->restore_sigmask; |
| 467 | 499 | } |
| 468 | | -static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) |
| 500 | +static inline bool test_tsk_restore_sigmask(struct task_struct *task) |
| 469 | 501 | { |
| 470 | | - return tsk->restore_sigmask; |
| 502 | + return task->restore_sigmask; |
| 471 | 503 | } |
| 472 | 504 | static inline bool test_and_clear_restore_sigmask(void) |
| 473 | 505 | { |
| .. | .. |
| 484 | 516 | __set_current_blocked(&current->saved_sigmask); |
| 485 | 517 | } |
| 486 | 518 | |
| 519 | +extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize); |
| 520 | + |
| 521 | +static inline void restore_saved_sigmask_unless(bool interrupted) |
| 522 | +{ |
| 523 | + if (interrupted) |
| 524 | + WARN_ON(!signal_pending(current)); |
| 525 | + else |
| 526 | + restore_saved_sigmask(); |
| 527 | +} |
| 528 | + |
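set_user_sigmask() and restore_saved_sigmask_unless() are the two halves of the ppoll/pselect sigmask protocol: install the caller's temporary mask (saving the old one), do the interruptible work, then restore the saved mask unless a signal interrupted the call, in which case signal delivery restores it from ->saved_sigmask after the handler runs. A hedged sketch of the caller pattern (the work callback is a placeholder, and the exact error codes the real syscalls test differ):

```c
#include <linux/sched/signal.h>
#include <linux/errno.h>

/* Illustrative ppoll-style wrapper. */
static long sketch_ppoll(const sigset_t __user *umask, size_t sigsetsize,
			 long (*do_interruptible_work)(void))
{
	long ret = set_user_sigmask(umask, sigsetsize);	/* saves old mask */

	if (ret)
		return ret;

	ret = do_interruptible_work();

	/* If interrupted, keep the temporary mask: the signal frame code
	 * restores ->saved_sigmask once the handler has run. */
	restore_saved_sigmask_unless(ret == -EINTR);
	return ret;
}
```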
| 487 | 529 | static inline sigset_t *sigmask_to_save(void) |
| 488 | 530 | { |
| 489 | 531 | sigset_t *res = &current->blocked; |
| .. | .. |
| 498 | 540 | } |
| 499 | 541 | |
| 500 | 542 | /* These can be the second arg to send_sig_info/send_group_sig_info. */ |
| 501 | | -#define SEND_SIG_NOINFO ((struct siginfo *) 0) |
| 502 | | -#define SEND_SIG_PRIV ((struct siginfo *) 1) |
| 503 | | -#define SEND_SIG_FORCED ((struct siginfo *) 2) |
| 543 | +#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0) |
| 544 | +#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1) |
| 545 | + |
| 546 | +static inline int __on_sig_stack(unsigned long sp) |
| 547 | +{ |
| 548 | +#ifdef CONFIG_STACK_GROWSUP |
| 549 | + return sp >= current->sas_ss_sp && |
| 550 | + sp - current->sas_ss_sp < current->sas_ss_size; |
| 551 | +#else |
| 552 | + return sp > current->sas_ss_sp && |
| 553 | + sp - current->sas_ss_sp <= current->sas_ss_size; |
| 554 | +#endif |
| 555 | +} |
| 504 | 556 | |
| 505 | 557 | /* |
| 506 | 558 | * True if we are on the alternate signal stack. |
| .. | .. |
| 519 | 571 | if (current->sas_ss_flags & SS_AUTODISARM) |
| 520 | 572 | return 0; |
| 521 | 573 | |
| 522 | | -#ifdef CONFIG_STACK_GROWSUP |
| 523 | | - return sp >= current->sas_ss_sp && |
| 524 | | - sp - current->sas_ss_sp < current->sas_ss_size; |
| 525 | | -#else |
| 526 | | - return sp > current->sas_ss_sp && |
| 527 | | - sp - current->sas_ss_sp <= current->sas_ss_size; |
| 528 | | -#endif |
| 574 | + return __on_sig_stack(sp); |
| 529 | 575 | } |
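Factoring the geometry test into __on_sig_stack() lets callers that must ignore SS_AUTODISARM reuse the bounds check, while on_sig_stack() keeps the flag handling. The usual consumer is signal-frame setup, along the lines of the generic sigsp() helper; a hedged sketch for the common stack-grows-down case (the function name is invented):

```c
#include <linux/sched/signal.h>
#include <linux/signal.h>

/* Illustrative: choose the stack pointer for a new signal frame. */
static unsigned long sketch_sigsp(unsigned long sp, struct ksignal *ksig)
{
	/* Switch to the alternate stack only if the handler asked for it
	 * and we are not already running on it (grows-down layout). */
	if ((ksig->ka.sa.sa_flags & SA_ONSTACK) && sas_ss_flags(sp) == 0)
		sp = current->sas_ss_sp + current->sas_ss_size;
	return sp;
}
```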
| 530 | 576 | |
| 531 | 577 | static inline int sas_ss_flags(unsigned long sp) |
| .. | .. |
| 622 | 668 | return task->signal->pids[PIDTYPE_SID]; |
| 623 | 669 | } |
| 624 | 670 | |
| 625 | | -static inline int get_nr_threads(struct task_struct *tsk) |
| 671 | +static inline int get_nr_threads(struct task_struct *task) |
| 626 | 672 | { |
| 627 | | - return tsk->signal->nr_threads; |
| 673 | + return task->signal->nr_threads; |
| 628 | 674 | } |
| 629 | 675 | |
| 630 | 676 | static inline bool thread_group_leader(struct task_struct *p) |
| 631 | 677 | { |
| 632 | 678 | return p->exit_signal >= 0; |
| 633 | | -} |
| 634 | | - |
| 635 | | -/* Do to the insanities of de_thread it is possible for a process |
| 636 | | - * to have the pid of the thread group leader without actually being |
| 637 | | - * the thread group leader. For iteration through the pids in proc |
| 638 | | - * all we care about is that we have a task with the appropriate |
| 639 | | - * pid, we don't actually care if we have the right task. |
| 640 | | - */ |
| 641 | | -static inline bool has_group_leader_pid(struct task_struct *p) |
| 642 | | -{ |
| 643 | | - return task_pid(p) == task_tgid(p); |
| 644 | 679 | } |
| 645 | 680 | |
| 646 | 681 | static inline |
| .. | .. |
| 663 | 698 | #define delay_group_leader(p) \ |
| 664 | 699 | (thread_group_leader(p) && !thread_group_empty(p)) |
| 665 | 700 | |
| 666 | | -extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
| 701 | +extern bool thread_group_exited(struct pid *pid); |
| 702 | + |
| 703 | +extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, |
| 667 | 704 | unsigned long *flags); |
| 668 | 705 | |
| 669 | | -static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk, |
| 706 | +static inline struct sighand_struct *lock_task_sighand(struct task_struct *task, |
| 670 | 707 | unsigned long *flags) |
| 671 | 708 | { |
| 672 | 709 | struct sighand_struct *ret; |
| 673 | 710 | |
| 674 | | - ret = __lock_task_sighand(tsk, flags); |
| 675 | | - (void)__cond_lock(&tsk->sighand->siglock, ret); |
| 711 | + ret = __lock_task_sighand(task, flags); |
| 712 | + (void)__cond_lock(&task->sighand->siglock, ret); |
| 676 | 713 | return ret; |
| 677 | 714 | } |
| 678 | 715 | |
| 679 | | -static inline void unlock_task_sighand(struct task_struct *tsk, |
| 716 | +static inline void unlock_task_sighand(struct task_struct *task, |
| 680 | 717 | unsigned long *flags) |
| 681 | 718 | { |
| 682 | | - spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); |
| 719 | + spin_unlock_irqrestore(&task->sighand->siglock, *flags); |
| 683 | 720 | } |
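lock_task_sighand() returns NULL when the task has already exited and its sighand has been detached, so callers must always test the result before touching signal state; on success the sighand stays pinned until unlock_task_sighand(). A sketch of the canonical pattern (the function name is illustrative):

```c
#include <linux/sched/signal.h>

/* Illustrative: safely operate on another task's signal state. */
static void sketch_touch_signal_state(struct task_struct *task)
{
	unsigned long flags;

	if (!lock_task_sighand(task, &flags))
		return;		/* task already exited; nothing to do */

	/* ... task->sighand and task->signal are stable here ... */

	unlock_task_sighand(task, &flags);
}
```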
| 684 | 721 | |
| 685 | | -static inline unsigned long task_rlimit(const struct task_struct *tsk, |
| 722 | +static inline unsigned long task_rlimit(const struct task_struct *task, |
| 686 | 723 | unsigned int limit) |
| 687 | 724 | { |
| 688 | | - return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); |
| 725 | + return READ_ONCE(task->signal->rlim[limit].rlim_cur); |
| 689 | 726 | } |
| 690 | 727 | |
| 691 | | -static inline unsigned long task_rlimit_max(const struct task_struct *tsk, |
| 728 | +static inline unsigned long task_rlimit_max(const struct task_struct *task, |
| 692 | 729 | unsigned int limit) |
| 693 | 730 | { |
| 694 | | - return READ_ONCE(tsk->signal->rlim[limit].rlim_max); |
| 731 | + return READ_ONCE(task->signal->rlim[limit].rlim_max); |
| 695 | 732 | } |
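task_rlimit() and task_rlimit_max() wrap the loads in READ_ONCE() because prlimit() can change a task's rlimits concurrently; each caller gets one coherent snapshot rather than a torn or compiler-cached value. A small usage sketch (the helper is invented):

```c
#include <linux/sched/signal.h>
#include <linux/resource.h>

/* Illustrative: check a size against the caller's current stack limit. */
static bool sketch_within_stack_limit(unsigned long size)
{
	return size <= task_rlimit(current, RLIMIT_STACK);
}
```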
| 696 | 733 | |
| 697 | 734 | static inline unsigned long rlimit(unsigned int limit) |
|---|