```diff
@@ -20,7 +20,6 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/cputime.h>
-#include <linux/sched/rt.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/proc_fs.h>
```
```diff
@@ -408,30 +407,13 @@
 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 }
 
-static inline struct sigqueue *get_task_cache(struct task_struct *t)
-{
-	struct sigqueue *q = t->sigqueue_cache;
-
-	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
-		return NULL;
-	return q;
-}
-
-static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-{
-	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
-		return 0;
-	return 1;
-}
-
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
  *   appropriate lock must be held to stop the target task from exiting
  */
 static struct sigqueue *
-__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
-		    int override_rlimit, int fromslab)
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
 {
 	struct sigqueue *q = NULL;
 	struct user_struct *user;
```
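Context for the removal above: get_task_cache() and put_task_cache() implemented a single-slot, lock-free cache built on cmpxchg(). A take succeeds only if the slot still holds the value we read; a put succeeds only into an empty slot. A minimal user-space sketch of the same pattern in C11 atomics (`struct item` and the function names are illustrative stand-ins, not kernel API):

```c
#include <stdatomic.h>
#include <stddef.h>

struct item { int payload; };			/* stand-in for struct sigqueue */

static _Atomic(struct item *) cache_slot;	/* one slot; NULL means empty */

/* Take whatever is cached; NULL if the slot was empty or we lost a race. */
static struct item *cache_get(void)
{
	struct item *q = atomic_load(&cache_slot);

	if (q && atomic_compare_exchange_strong(&cache_slot, &q, NULL))
		return q;
	return NULL;
}

/* Park q in the slot; 0 on success, 1 if the slot was already occupied. */
static int cache_put(struct item *q)
{
	struct item *expected = NULL;

	return atomic_compare_exchange_strong(&cache_slot, &expected, q) ? 0 : 1;
}
```

In the kernel version the slot hangs off task_struct (t->sigqueue_cache), so each task has its own cache and no further synchronization is needed.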
```diff
@@ -453,10 +435,7 @@
 	rcu_read_unlock();
 
 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		if (!fromslab)
-			q = get_task_cache(t);
-		if (!q)
-			q = kmem_cache_alloc(sigqueue_cachep, flags);
+		q = kmem_cache_alloc(sigqueue_cachep, flags);
 	} else {
 		print_dropped_signal(sig);
 	}
```
```diff
@@ -473,13 +452,6 @@
 	return q;
 }
 
-static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
-		 int override_rlimit)
-{
-	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
-}
-
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
```
```diff
@@ -487,21 +459,6 @@
 	if (atomic_dec_and_test(&q->user->sigpending))
 		free_uid(q->user);
 	kmem_cache_free(sigqueue_cachep, q);
-}
-
-static void sigqueue_free_current(struct sigqueue *q)
-{
-	struct user_struct *up;
-
-	if (q->flags & SIGQUEUE_PREALLOC)
-		return;
-
-	up = q->user;
-	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
-		if (atomic_dec_and_test(&up->sigpending))
-			free_uid(up);
-	} else
-		__sigqueue_free(q);
 }
 
 void flush_sigqueue(struct sigpending *queue)
```
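Note the rt_prio(current->normal_prio) test in the deleted sigqueue_free_current(): only realtime tasks recycled the sigqueue into the per-task slot (keeping the RLIMIT_SIGPENDING accounting alive while it sat there); everyone else took the normal __sigqueue_free() path. With this gone, nothing left in the file uses the predicate, which is why the first hunk drops <linux/sched/rt.h>. For reference, rt_prio() is essentially the following (modulo branch annotations):

```c
/* From <linux/sched/rt.h>: kernel priorities below MAX_RT_PRIO
 * (100 in mainline) belong to the realtime scheduling classes. */
static inline int rt_prio(int prio)
{
	if (prio < MAX_RT_PRIO)
		return 1;
	return 0;
}
```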
```diff
@@ -514,21 +471,6 @@
 		list_del_init(&q->list);
 		__sigqueue_free(q);
 	}
-}
-
-/*
- * Called from __exit_signal. Flush tsk->pending and
- * tsk->sigqueue_cache
- */
-void flush_task_sigqueue(struct task_struct *tsk)
-{
-	struct sigqueue *q;
-
-	flush_sigqueue(&tsk->pending);
-
-	q = get_task_cache(tsk);
-	if (q)
-		kmem_cache_free(sigqueue_cachep, q);
-}
 
 /*
```
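flush_task_sigqueue()'s only caller was __exit_signal(); the matching hunk in kernel/exit.c (not shown in this section) presumably reverts to the direct call, along the lines of:

```diff
-	flush_task_sigqueue(tsk);
+	flush_sigqueue(&tsk->pending);
```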
```diff
@@ -655,7 +597,7 @@
 			(info->si_code == SI_TIMER) &&
 			(info->si_sys_private);
 
-		sigqueue_free_current(first);
+		__sigqueue_free(first);
 	} else {
 		/*
 		 * Ok, it wasn't in the queue. This must be
```
```diff
@@ -691,8 +633,6 @@
 {
 	bool resched_timer = false;
 	int signr;
-
-	WARN_ON_ONCE(tsk != current);
 
 	/* We only dequeue private signals from ourselves, we don't let
 	 * signalfd steal them
```
```diff
@@ -1377,34 +1317,6 @@
 	struct k_sigaction *action;
 	int sig = info->si_signo;
 
-	/*
-	 * On some archs, PREEMPT_RT has to delay sending a signal from a trap
-	 * since it can not enable preemption, and the signal code's spin_locks
-	 * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
-	 * send the signal on exit of the trap.
-	 */
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
-	if (in_atomic()) {
-		struct task_struct *t = current;
-
-		if (WARN_ON_ONCE(t->forced_info.si_signo))
-			return 0;
-
-		if (is_si_special(info)) {
-			WARN_ON_ONCE(info != SEND_SIG_PRIV);
-			t->forced_info.si_signo = info->si_signo;
-			t->forced_info.si_errno = 0;
-			t->forced_info.si_code = SI_KERNEL;
-			t->forced_info.si_pid = 0;
-			t->forced_info.si_uid = 0;
-		} else {
-			t->forced_info = *info;
-		}
-
-		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-		return 0;
-	}
-#endif
 	spin_lock_irqsave(&t->sighand->siglock, flags);
 	action = &t->sighand->action[sig-1];
 	ignored = action->sa.sa_handler == SIG_IGN;
```
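The deleted block is the producer half of the RT "delayed signal" mechanism: when a trap handler cannot take the siglock (a sleeping lock under PREEMPT_RT) from atomic context, it stashes the siginfo in task_struct::forced_info and sets TIF_NOTIFY_RESUME. The consumer half lives in the arch exit-to-user path; in RT trees it looks roughly like this (a sketch, not mainline code, and the helper name is hypothetical):

```c
/* Sketch: replay a signal stashed by the now-removed block, from the
 * TIF_NOTIFY_RESUME work on the way back to user space (RT trees only). */
static void do_delayed_signal(void)
{
	if (unlikely(current->forced_info.si_signo)) {
		struct kernel_siginfo info = current->forced_info;

		current->forced_info.si_signo = 0;
		force_sig_info(&info);	/* now safe: preemptible context */
	}
}
```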
```diff
@@ -1907,8 +1819,7 @@
  */
 struct sigqueue *sigqueue_alloc(void)
 {
-	/* Preallocated sigqueue objects always from the slabcache ! */
-	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
 
 	if (q)
 		q->flags |= SIGQUEUE_PREALLOC;
```
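sigqueue_alloc() is the preallocation entry point used by POSIX timers: the sigqueue is allocated once at timer creation so that delivery at expiry cannot fail for lack of memory, and SIGQUEUE_PREALLOC makes __sigqueue_free() skip it (see the check at the top of __sigqueue_free() above). A simplified sketch of that life cycle (kernel context; the function name is hypothetical and error paths are trimmed):

```c
/* How posix-timers use the preallocated sigqueue, in outline. */
static int timer_lifecycle_sketch(struct pid *pid)
{
	struct sigqueue *q;

	q = sigqueue_alloc();		/* at timer_create(): may sleep */
	if (!q)
		return -EAGAIN;

	/* each expiry reuses the same queue entry, no allocation needed */
	send_sigqueue(q, pid, PIDTYPE_TGID);

	sigqueue_free(q);		/* at timer_delete() */
	return 0;
}
```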
```diff
@@ -2294,8 +2205,16 @@
 		if (gstop_done && ptrace_reparented(current))
 			do_notify_parent_cldstop(current, false, why);
 
+		/*
+		 * Don't want to allow preemption here, because
+		 * sys_ptrace() needs this task to be inactive.
+		 *
+		 * XXX: implement read_unlock_no_resched().
+		 */
+		preempt_disable();
 		read_unlock(&tasklist_lock);
 		cgroup_enter_frozen();
+		preempt_enable_no_resched();
 		freezable_schedule();
 		cgroup_leave_frozen(true);
 	} else {
```
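The re-added preempt_disable() region restores the mainline behaviour in ptrace_stop(): without it, the tracee can be preempted between dropping tasklist_lock and reaching freezable_schedule(), leaving a waiting sys_ptrace() caller to contend with a task that is still runnable rather than deactivated. preempt_enable_no_resched() then re-enables preemption without inserting a reschedule point, since the very next line schedules anyway. The general idiom, as a kernel-style sketch:

```c
/* Sketch of the unlock-then-sleep idiom: keep the task from being
 * preempted between releasing a lock and deliberately going to sleep. */
preempt_disable();
spin_unlock(&lock);		/* or read_unlock(): no preemption window here */
preempt_enable_no_resched();	/* re-enable without a resched check... */
schedule();			/* ...because we deschedule ourselves anyway */
```

RT trees avoided this region because cgroup_enter_frozen() can take locks that sleep under PREEMPT_RT inside the preempt-disabled section; this hunk trades that back for the mainline ptrace latency fix.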