@@ -20,6 +20,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/cputime.h>
 #include <linux/file.h>
+#include <linux/sched/rt.h>
 #include <linux/fs.h>
 #include <linux/proc_fs.h>
 #include <linux/tty.h>
@@ -402,13 +403,30 @@
 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 }
 
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	struct sigqueue *q = t->sigqueue_cache;
+
+	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
+		return NULL;
+	return q;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
+		return 0;
+	return 1;
+}
+
 /*
  * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
 static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
+		    int override_rlimit, int fromslab)
 {
 	struct sigqueue *q = NULL;
 	struct user_struct *user;
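Note on the two helpers added above: together they form a lock-free, single-slot per-task cache. get_task_cache() snapshots t->sigqueue_cache and claims it with cmpxchg(), so if two contexts race, only one of them walks away with the entry; put_task_cache() installs into the slot only when it is empty and, mind the inverted convention, returns 0 on success and 1 when the slot was already taken. A minimal userspace sketch of the same idea, using C11 atomics in place of the kernel's cmpxchg(); all names here are illustrative, not from the patch:

#include <stdatomic.h>
#include <stddef.h>

struct entry;				/* opaque cached object */

struct task {
	_Atomic(struct entry *) cache;	/* single slot, NULL = empty */
};

/* Claim the cached entry, if any. The compare-exchange guarantees that
 * two racing callers cannot both obtain the same entry. */
static struct entry *cache_get(struct task *t)
{
	struct entry *e = atomic_load(&t->cache);

	if (!e || !atomic_compare_exchange_strong(&t->cache, &e, NULL))
		return NULL;
	return e;
}

/* Park an entry in an empty slot. Returns 0 on success, 1 if the slot
 * was occupied, mirroring put_task_cache() above. */
static int cache_put(struct task *t, struct entry *e)
{
	struct entry *expected = NULL;

	return atomic_compare_exchange_strong(&t->cache, &expected, e) ? 0 : 1;
}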
@@ -430,7 +448,10 @@
 	rcu_read_unlock();
 
 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		q = kmem_cache_alloc(sigqueue_cachep, flags);
+		if (!fromslab)
+			q = get_task_cache(t);
+		if (!q)
+			q = kmem_cache_alloc(sigqueue_cachep, flags);
 	} else {
 		print_dropped_signal(sig);
 	}
@@ -447,6 +468,13 @@
 	return q;
 }
 
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
+		 int override_rlimit)
+{
+	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
+}
+
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
@@ -454,6 +482,21 @@
 	if (atomic_dec_and_test(&q->user->sigpending))
 		free_uid(q->user);
 	kmem_cache_free(sigqueue_cachep, q);
+}
+
+static void sigqueue_free_current(struct sigqueue *q)
+{
+	struct user_struct *up;
+
+	if (q->flags & SIGQUEUE_PREALLOC)
+		return;
+
+	up = q->user;
+	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
+		if (atomic_dec_and_test(&up->sigpending))
+			free_uid(up);
+	} else
+		__sigqueue_free(q);
 }
 
 void flush_sigqueue(struct sigpending *queue)
@@ -466,6 +509,21 @@
 		list_del_init(&q->list);
 		__sigqueue_free(q);
 	}
+}
+
+/*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+void flush_task_sigqueue(struct task_struct *tsk)
+{
+	struct sigqueue *q;
+
+	flush_sigqueue(&tsk->pending);
+
+	q = get_task_cache(tsk);
+	if (q)
+		kmem_cache_free(sigqueue_cachep, q);
 }
 
 /*
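Taken together, the previous three hunks wire the cache into the three lifecycle points: __sigqueue_do_alloc() tries the slot before falling back to the slab, sigqueue_free_current() parks the entry only for RT-priority tasks (everyone else frees immediately, so non-RT tasks never populate a cache), and flush_task_sigqueue() drains the slot once at task exit. A rough userspace model of those three paths, reusing cache_get()/cache_put() from the sketch above; again, all names are hypothetical:

#include <stdbool.h>
#include <stdlib.h>

struct entry { int payload; };		/* completes the opaque type above */

/* Models __sigqueue_do_alloc(): the slot first, then the allocator;
 * "fromslab" callers bypass the slot entirely. */
static struct entry *entry_alloc(struct task *t, bool fromslab)
{
	struct entry *e = NULL;

	if (!fromslab)
		e = cache_get(t);
	if (!e)
		e = malloc(sizeof(*e));
	return e;
}

/* Models sigqueue_free_current(): only an "RT" task (standing in for
 * rt_prio(current->normal_prio)) parks the entry, and only if the slot
 * is free; otherwise release it. */
static void entry_free_current(struct task *t, bool task_is_rt,
			       struct entry *e)
{
	if (task_is_rt && cache_put(t, e) == 0)
		return;			/* parked for reuse */
	free(e);
}

/* Models flush_task_sigqueue(): pop the slot once at teardown. */
static void task_teardown(struct task *t)
{
	free(cache_get(t));		/* free(NULL) is a no-op */
}

One simplification to keep in mind: the kernel's sigqueue_free_current() still drops the per-user sigpending accounting even when the entry is parked; the model above has no accounting at all.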
@@ -591,7 +649,7 @@
 				(info->si_code == SI_TIMER) &&
 				(info->si_sys_private);
 
-		__sigqueue_free(first);
+		sigqueue_free_current(first);
 	} else {
 		/*
 		 * Ok, it wasn't in the queue. This must be
@@ -627,6 +685,8 @@
 {
 	bool resched_timer = false;
 	int signr;
+
+	WARN_ON_ONCE(tsk != current);
 
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
@@ -1287,8 +1347,8 @@
  * We don't want to have recursive SIGSEGV's etc, for example,
  * that is why we also clear SIGNAL_UNKILLABLE.
  */
-int
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+static int
+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	unsigned long int flags;
 	int ret, blocked, ignored;
@@ -1315,6 +1375,39 @@
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
 	return ret;
+}
+
+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+/*
+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
+ * since it cannot enable preemption, and the signal code's spin_locks
+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
+ * send the signal on exit of the trap.
+ */
+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+	if (in_atomic()) {
+		if (WARN_ON_ONCE(t != current))
+			return 0;
+		if (WARN_ON_ONCE(t->forced_info.si_signo))
+			return 0;
+
+		if (is_si_special(info)) {
+			WARN_ON_ONCE(info != SEND_SIG_PRIV);
+			t->forced_info.si_signo = sig;
+			t->forced_info.si_errno = 0;
+			t->forced_info.si_code = SI_KERNEL;
+			t->forced_info.si_pid = 0;
+			t->forced_info.si_uid = 0;
+		} else {
+			t->forced_info = *info;
+		}
+
+		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+		return 0;
+	}
+#endif
+	return do_force_sig_info(sig, info, t);
 }
 
 /*
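The in_atomic() branch above is the heart of this hunk: on PREEMPT_RT, sighand->siglock becomes a sleeping lock, so a signal forced from trap context cannot be delivered on the spot. The patch instead stashes the siginfo in t->forced_info, sets TIF_NOTIFY_RESUME, and lets the architecture's exit-from-trap path perform the real delivery; WARN_ON_ONCE(t->forced_info.si_signo) guards the single stash slot against being overwritten. A compact userspace analogue of this stash-and-flag deferral, single-threaded for clarity and with made-up names throughout:

#include <stdbool.h>
#include <stdio.h>

struct payload {
	int signo;
	int code;
};

struct ctx {
	bool notify_resume;	/* models TIF_NOTIFY_RESUME */
	struct payload forced;	/* models t->forced_info; signo 0 = empty */
};

static bool in_atomic_context;	/* models in_atomic() */

/* Stand-in for the locked delivery done by do_force_sig_info(). */
static void deliver_now(struct ctx *c, const struct payload *p)
{
	(void)c;		/* unused in this stub */
	printf("delivering signal %d\n", p->signo);
}

static void force_deliver(struct ctx *c, const struct payload *p)
{
	if (in_atomic_context) {
		if (c->forced.signo)	/* slot busy: models the WARN_ON_ONCE */
			return;
		c->forced = *p;		/* stash ... */
		c->notify_resume = true; /* ... and flag the safe point */
		return;
	}
	deliver_now(c, p);
}

/* Runs at the next safe point, like the exit from the trap. */
static void resume_notify(struct ctx *c)
{
	if (!c->notify_resume)
		return;
	c->notify_resume = false;
	deliver_now(c, &c->forced);
	c->forced.signo = 0;		/* slot is free again */
}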
@@ -1733,7 +1826,8 @@
  */
 struct sigqueue *sigqueue_alloc(void)
 {
-	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
+	/* Preallocated sigqueue objects always come from the slab cache! */
+	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
 
 	if (q)
 		q->flags |= SIGQUEUE_PREALLOC;
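The fromslab=1 here is the other half of the SIGQUEUE_PREALLOC check in sigqueue_free_current() further up: a preallocated sigqueue outlives the allocating call and is eventually released with kmem_cache_free(), so it must never be satisfied from, nor parked in, the per-task slot. In the userspace model above, the same invariant reads as entry_alloc(t, true) paired with a plain free(), never with entry_free_current().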
@@ -2114,18 +2208,8 @@
 		if (gstop_done && ptrace_reparented(current))
 			do_notify_parent_cldstop(current, false, why);
 
-		/*
-		 * Don't want to allow preemption here, because
-		 * sys_ptrace() needs this task to be inactive.
-		 *
-		 * XXX: implement read_unlock_no_resched().
-		 */
-		preempt_disable();
 		read_unlock(&tasklist_lock);
-		cgroup_enter_frozen();
-		preempt_enable_no_resched();
 		freezable_schedule();
-		cgroup_leave_frozen(true);
 	} else {
 		/*
 		 * By the time we got the lock, our tracer went away.