hc
2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/kernel/signal.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * linux/kernel/signal.c
34 *
....@@ -20,7 +21,6 @@
2021 #include <linux/sched/task_stack.h>
2122 #include <linux/sched/cputime.h>
2223 #include <linux/file.h>
23
-#include <linux/sched/rt.h>
2424 #include <linux/fs.h>
2525 #include <linux/proc_fs.h>
2626 #include <linux/tty.h>
....@@ -45,6 +45,8 @@
4545 #include <linux/posix-timers.h>
4646 #include <linux/livepatch.h>
4747 #include <linux/cgroup.h>
48
+#include <linux/audit.h>
49
+#include <linux/oom.h>
4850
4951 #define CREATE_TRACE_POINTS
5052 #include <trace/events/signal.h>
....@@ -54,8 +56,9 @@
5456 #include <asm/unistd.h>
5557 #include <asm/siginfo.h>
5658 #include <asm/cacheflush.h>
57
-#include "audit.h" /* audit_signal_info() */
5859
60
+#undef CREATE_TRACE_POINTS
61
+#include <trace/hooks/signal.h>
5962 /*
6063 * SLAB caches for signal bits.
6164 */
....@@ -186,6 +189,7 @@
186189 clear_thread_flag(TIF_SIGPENDING);
187190
188191 }
192
+EXPORT_SYMBOL(recalc_sigpending);
189193
190194 void calculate_sigpending(void)
191195 {
....@@ -353,7 +357,7 @@
353357 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
354358 * Group stop states are cleared and the group stop count is consumed if
355359 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
356
- * stop, the appropriate %SIGNAL_* flags are set.
360
+ * stop, the appropriate `SIGNAL_*` flags are set.
357361 *
358362 * CONTEXT:
359363 * Must be called with @task->sighand->siglock held.
....@@ -403,30 +407,13 @@
403407 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
404408 }
405409
406
-static inline struct sigqueue *get_task_cache(struct task_struct *t)
407
-{
408
- struct sigqueue *q = t->sigqueue_cache;
409
-
410
- if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
411
- return NULL;
412
- return q;
413
-}
414
-
415
-static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
416
-{
417
- if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
418
- return 0;
419
- return 1;
420
-}
421
-
422410 /*
423411 * allocate a new signal queue record
424412 * - this may be called without locks if and only if t == current, otherwise an
425413 * appropriate lock must be held to stop the target task from exiting
426414 */
427415 static struct sigqueue *
428
-__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
429
- int override_rlimit, int fromslab)
416
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
430417 {
431418 struct sigqueue *q = NULL;
432419 struct user_struct *user;
....@@ -448,10 +435,7 @@
448435 rcu_read_unlock();
449436
450437 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
451
- if (!fromslab)
452
- q = get_task_cache(t);
453
- if (!q)
454
- q = kmem_cache_alloc(sigqueue_cachep, flags);
438
+ q = kmem_cache_alloc(sigqueue_cachep, flags);
455439 } else {
456440 print_dropped_signal(sig);
457441 }
....@@ -468,13 +452,6 @@
468452 return q;
469453 }
470454
471
-static struct sigqueue *
472
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
473
- int override_rlimit)
474
-{
475
- return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
476
-}
477
-
478455 static void __sigqueue_free(struct sigqueue *q)
479456 {
480457 if (q->flags & SIGQUEUE_PREALLOC)
....@@ -482,21 +459,6 @@
482459 if (atomic_dec_and_test(&q->user->sigpending))
483460 free_uid(q->user);
484461 kmem_cache_free(sigqueue_cachep, q);
485
-}
486
-
487
-static void sigqueue_free_current(struct sigqueue *q)
488
-{
489
- struct user_struct *up;
490
-
491
- if (q->flags & SIGQUEUE_PREALLOC)
492
- return;
493
-
494
- up = q->user;
495
- if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
496
- if (atomic_dec_and_test(&up->sigpending))
497
- free_uid(up);
498
- } else
499
- __sigqueue_free(q);
500462 }
501463
502464 void flush_sigqueue(struct sigpending *queue)
....@@ -512,21 +474,6 @@
512474 }
513475
514476 /*
515
- * Called from __exit_signal. Flush tsk->pending and
516
- * tsk->sigqueue_cache
517
- */
518
-void flush_task_sigqueue(struct task_struct *tsk)
519
-{
520
- struct sigqueue *q;
521
-
522
- flush_sigqueue(&tsk->pending);
523
-
524
- q = get_task_cache(tsk);
525
- if (q)
526
- kmem_cache_free(sigqueue_cachep, q);
527
-}
528
-
529
-/*
530477 * Flush all pending signals for this kthread.
531478 */
532479 void flush_signals(struct task_struct *t)
....@@ -539,6 +486,7 @@
539486 flush_sigqueue(&t->signal->shared_pending);
540487 spin_unlock_irqrestore(&t->sighand->siglock, flags);
541488 }
489
+EXPORT_SYMBOL(flush_signals);
542490
543491 #ifdef CONFIG_POSIX_TIMERS
544492 static void __flush_itimer_signals(struct sigpending *pending)
....@@ -620,7 +568,7 @@
620568 return !tsk->ptrace;
621569 }
622570
623
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
571
+static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
624572 bool *resched_timer)
625573 {
626574 struct sigqueue *q, *first = NULL;
....@@ -649,7 +597,7 @@
649597 (info->si_code == SI_TIMER) &&
650598 (info->si_sys_private);
651599
652
- sigqueue_free_current(first);
600
+ __sigqueue_free(first);
653601 } else {
654602 /*
655603 * Ok, it wasn't in the queue. This must be
....@@ -666,7 +614,7 @@
666614 }
667615
668616 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
669
- siginfo_t *info, bool *resched_timer)
617
+ kernel_siginfo_t *info, bool *resched_timer)
670618 {
671619 int sig = next_signal(pending, mask);
672620
....@@ -681,12 +629,10 @@
681629 *
682630 * All callers have to hold the siglock.
683631 */
684
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
632
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
685633 {
686634 bool resched_timer = false;
687635 int signr;
688
-
689
- WARN_ON_ONCE(tsk != current);
690636
691637 /* We only dequeue private signals from ourselves, we don't let
692638 * signalfd steal them
....@@ -759,8 +705,9 @@
759705 #endif
760706 return signr;
761707 }
708
+EXPORT_SYMBOL_GPL(dequeue_signal);
762709
763
-static int dequeue_synchronous_signal(siginfo_t *info)
710
+static int dequeue_synchronous_signal(kernel_siginfo_t *info)
764711 {
765712 struct task_struct *tsk = current;
766713 struct sigpending *pending = &tsk->pending;
....@@ -776,7 +723,7 @@
776723 * Return the first synchronous signal in the queue.
777724 */
778725 list_for_each_entry(q, &pending->list, list) {
779
- /* Synchronous signals have a postive si_code */
726
+ /* Synchronous signals have a positive si_code */
780727 if ((q->info.si_code > SI_USER) &&
781728 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
782729 sync = q;
....@@ -851,12 +798,12 @@
851798 }
852799 }
853800
854
-static inline int is_si_special(const struct siginfo *info)
801
+static inline int is_si_special(const struct kernel_siginfo *info)
855802 {
856
- return info <= SEND_SIG_FORCED;
803
+ return info <= SEND_SIG_PRIV;
857804 }
858805
859
-static inline bool si_fromuser(const struct siginfo *info)
806
+static inline bool si_fromuser(const struct kernel_siginfo *info)
860807 {
861808 return info == SEND_SIG_NOINFO ||
862809 (!is_si_special(info) && SI_FROMUSER(info));
....@@ -881,7 +828,7 @@
881828 * Bad permissions for sending the signal
882829 * - the caller must hold the RCU read lock
883830 */
884
-static int check_kill_permission(int sig, struct siginfo *info,
831
+static int check_kill_permission(int sig, struct kernel_siginfo *info,
885832 struct task_struct *t)
886833 {
887834 struct pid *sid;
....@@ -908,6 +855,7 @@
908855 */
909856 if (!sid || sid == task_session(current))
910857 break;
858
+ fallthrough;
911859 default:
912860 return -EPERM;
913861 }
....@@ -1006,7 +954,7 @@
1006954 /*
1007955 * The first thread which returns from do_signal_stop()
1008956 * will take ->siglock, notice SIGNAL_CLD_MASK, and
1009
- * notify its parent. See get_signal_to_deliver().
957
+ * notify its parent. See get_signal().
1010958 */
1011959 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
1012960 signal->group_stop_count = 0;
....@@ -1039,7 +987,7 @@
1039987 if (task_is_stopped_or_traced(p))
1040988 return false;
1041989
1042
- return task_curr(p) || !signal_pending(p);
990
+ return task_curr(p) || !task_sigpending(p);
1043991 }
1044992
1045993 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
....@@ -1123,29 +1071,8 @@
11231071 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
11241072 }
11251073
1126
-#ifdef CONFIG_USER_NS
1127
-static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1128
-{
1129
- if (current_user_ns() == task_cred_xxx(t, user_ns))
1130
- return;
1131
-
1132
- if (SI_FROMKERNEL(info))
1133
- return;
1134
-
1135
- rcu_read_lock();
1136
- info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1137
- make_kuid(current_user_ns(), info->si_uid));
1138
- rcu_read_unlock();
1139
-}
1140
-#else
1141
-static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1142
-{
1143
- return;
1144
-}
1145
-#endif
1146
-
1147
-static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1148
- enum pid_type type, int from_ancestor_ns)
1074
+static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1075
+ enum pid_type type, bool force)
11491076 {
11501077 struct sigpending *pending;
11511078 struct sigqueue *q;
....@@ -1155,8 +1082,7 @@
11551082 assert_spin_locked(&t->sighand->siglock);
11561083
11571084 result = TRACE_SIGNAL_IGNORED;
1158
- if (!prepare_signal(sig, t,
1159
- from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
1085
+ if (!prepare_signal(sig, t, force))
11601086 goto ret;
11611087
11621088 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
....@@ -1171,10 +1097,9 @@
11711097
11721098 result = TRACE_SIGNAL_DELIVERED;
11731099 /*
1174
- * fast-pathed signals for kernel-internal things like SIGSTOP
1175
- * or SIGKILL.
1100
+ * Skip useless siginfo allocation for SIGKILL and kernel threads.
11761101 */
1177
- if (info == SEND_SIG_FORCED)
1102
+ if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
11781103 goto out_set;
11791104
11801105 /*
....@@ -1202,7 +1127,11 @@
12021127 q->info.si_code = SI_USER;
12031128 q->info.si_pid = task_tgid_nr_ns(current,
12041129 task_active_pid_ns(t));
1205
- q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1130
+ rcu_read_lock();
1131
+ q->info.si_uid =
1132
+ from_kuid_munged(task_cred_xxx(t, user_ns),
1133
+ current_uid());
1134
+ rcu_read_unlock();
12061135 break;
12071136 case (unsigned long) SEND_SIG_PRIV:
12081137 clear_siginfo(&q->info);
....@@ -1214,30 +1143,24 @@
12141143 break;
12151144 default:
12161145 copy_siginfo(&q->info, info);
1217
- if (from_ancestor_ns)
1218
- q->info.si_pid = 0;
12191146 break;
12201147 }
1221
-
1222
- userns_fixup_signal_uid(&q->info, t);
1223
-
1224
- } else if (!is_si_special(info)) {
1225
- if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1226
- /*
1227
- * Queue overflow, abort. We may abort if the
1228
- * signal was rt and sent by user using something
1229
- * other than kill().
1230
- */
1231
- result = TRACE_SIGNAL_OVERFLOW_FAIL;
1232
- ret = -EAGAIN;
1233
- goto ret;
1234
- } else {
1235
- /*
1236
- * This is a silent loss of information. We still
1237
- * send the signal, but the *info bits are lost.
1238
- */
1239
- result = TRACE_SIGNAL_LOSE_INFO;
1240
- }
1148
+ } else if (!is_si_special(info) &&
1149
+ sig >= SIGRTMIN && info->si_code != SI_USER) {
1150
+ /*
1151
+ * Queue overflow, abort. We may abort if the
1152
+ * signal was rt and sent by user using something
1153
+ * other than kill().
1154
+ */
1155
+ result = TRACE_SIGNAL_OVERFLOW_FAIL;
1156
+ ret = -EAGAIN;
1157
+ goto ret;
1158
+ } else {
1159
+ /*
1160
+ * This is a silent loss of information. We still
1161
+ * send the signal, but the *info bits are lost.
1162
+ */
1163
+ result = TRACE_SIGNAL_LOSE_INFO;
12411164 }
12421165
12431166 out_set:
....@@ -1264,17 +1187,62 @@
12641187 return ret;
12651188 }
12661189
1267
-static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1190
+static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1191
+{
1192
+ bool ret = false;
1193
+ switch (siginfo_layout(info->si_signo, info->si_code)) {
1194
+ case SIL_KILL:
1195
+ case SIL_CHLD:
1196
+ case SIL_RT:
1197
+ ret = true;
1198
+ break;
1199
+ case SIL_TIMER:
1200
+ case SIL_POLL:
1201
+ case SIL_FAULT:
1202
+ case SIL_FAULT_MCEERR:
1203
+ case SIL_FAULT_BNDERR:
1204
+ case SIL_FAULT_PKUERR:
1205
+ case SIL_SYS:
1206
+ ret = false;
1207
+ break;
1208
+ }
1209
+ return ret;
1210
+}
1211
+
1212
+static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
12681213 enum pid_type type)
12691214 {
1270
- int from_ancestor_ns = 0;
1215
+ /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1216
+ bool force = false;
12711217
1272
-#ifdef CONFIG_PID_NS
1273
- from_ancestor_ns = si_fromuser(info) &&
1274
- !task_pid_nr_ns(current, task_active_pid_ns(t));
1275
-#endif
1218
+ if (info == SEND_SIG_NOINFO) {
1219
+ /* Force if sent from an ancestor pid namespace */
1220
+ force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1221
+ } else if (info == SEND_SIG_PRIV) {
1222
+ /* Don't ignore kernel generated signals */
1223
+ force = true;
1224
+ } else if (has_si_pid_and_uid(info)) {
1225
+ /* SIGKILL and SIGSTOP are special or have ids */
1226
+ struct user_namespace *t_user_ns;
12761227
1277
- return __send_signal(sig, info, t, type, from_ancestor_ns);
1228
+ rcu_read_lock();
1229
+ t_user_ns = task_cred_xxx(t, user_ns);
1230
+ if (current_user_ns() != t_user_ns) {
1231
+ kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1232
+ info->si_uid = from_kuid_munged(t_user_ns, uid);
1233
+ }
1234
+ rcu_read_unlock();
1235
+
1236
+ /* A kernel generated signal? */
1237
+ force = (info->si_code == SI_KERNEL);
1238
+
1239
+ /* From an ancestor pid namespace? */
1240
+ if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1241
+ info->si_pid = 0;
1242
+ force = true;
1243
+ }
1244
+ }
1245
+ return __send_signal(sig, info, t, type, force);
12781246 }
12791247
12801248 static void print_fatal_signal(int signr)
....@@ -1311,23 +1279,17 @@
13111279 __setup("print-fatal-signals=", setup_print_fatal_signals);
13121280
13131281 int
1314
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1282
+__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
13151283 {
13161284 return send_signal(sig, info, p, PIDTYPE_TGID);
13171285 }
13181286
1319
-static int
1320
-specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1321
-{
1322
- return send_signal(sig, info, t, PIDTYPE_PID);
1323
-}
1324
-
1325
-int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1287
+int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
13261288 enum pid_type type)
13271289 {
13281290 unsigned long flags;
13291291 int ret = -ESRCH;
1330
-
1292
+ trace_android_vh_do_send_sig_info(sig, current, p);
13311293 if (lock_task_sighand(p, &flags)) {
13321294 ret = send_signal(sig, info, p, type);
13331295 unlock_task_sighand(p, &flags);
....@@ -1348,11 +1310,12 @@
13481310 * that is why we also clear SIGNAL_UNKILLABLE.
13491311 */
13501312 static int
1351
-do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1313
+force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
13521314 {
13531315 unsigned long int flags;
13541316 int ret, blocked, ignored;
13551317 struct k_sigaction *action;
1318
+ int sig = info->si_signo;
13561319
13571320 spin_lock_irqsave(&t->sighand->siglock, flags);
13581321 action = &t->sighand->action[sig-1];
....@@ -1371,43 +1334,15 @@
13711334 */
13721335 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
13731336 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1374
- ret = specific_send_sig_info(sig, info, t);
1337
+ ret = send_signal(sig, info, t, PIDTYPE_PID);
13751338 spin_unlock_irqrestore(&t->sighand->siglock, flags);
13761339
13771340 return ret;
13781341 }
13791342
1380
-int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1343
+int force_sig_info(struct kernel_siginfo *info)
13811344 {
1382
-/*
1383
- * On some archs, PREEMPT_RT has to delay sending a signal from a trap
1384
- * since it can not enable preemption, and the signal code's spin_locks
1385
- * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
1386
- * send the signal on exit of the trap.
1387
- */
1388
-#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
1389
- if (in_atomic()) {
1390
- if (WARN_ON_ONCE(t != current))
1391
- return 0;
1392
- if (WARN_ON_ONCE(t->forced_info.si_signo))
1393
- return 0;
1394
-
1395
- if (is_si_special(info)) {
1396
- WARN_ON_ONCE(info != SEND_SIG_PRIV);
1397
- t->forced_info.si_signo = sig;
1398
- t->forced_info.si_errno = 0;
1399
- t->forced_info.si_code = SI_KERNEL;
1400
- t->forced_info.si_pid = 0;
1401
- t->forced_info.si_uid = 0;
1402
- } else {
1403
- t->forced_info = *info;
1404
- }
1405
-
1406
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1407
- return 0;
1408
- }
1409
-#endif
1410
- return do_force_sig_info(sig, info, t);
1345
+ return force_sig_info_to_task(info, current);
14111346 }
14121347
14131348 /*
....@@ -1457,7 +1392,7 @@
14571392 * must see ->sighand == NULL.
14581393 */
14591394 spin_lock_irqsave(&sighand->siglock, *flags);
1460
- if (likely(sighand == tsk->sighand))
1395
+ if (likely(sighand == rcu_access_pointer(tsk->sighand)))
14611396 break;
14621397 spin_unlock_irqrestore(&sighand->siglock, *flags);
14631398 }
....@@ -1469,8 +1404,8 @@
14691404 /*
14701405 * send signal info to all the members of a group
14711406 */
1472
-int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1473
- enum pid_type type)
1407
+int group_send_sig_info(int sig, struct kernel_siginfo *info,
1408
+ struct task_struct *p, enum pid_type type)
14741409 {
14751410 int ret;
14761411
....@@ -1478,8 +1413,17 @@
14781413 ret = check_kill_permission(sig, info, p);
14791414 rcu_read_unlock();
14801415
1481
- if (!ret && sig)
1416
+ if (!ret && sig) {
14821417 ret = do_send_sig_info(sig, info, p, type);
1418
+ if (!ret && sig == SIGKILL) {
1419
+ bool reap = false;
1420
+
1421
+ trace_android_vh_process_killed(current, &reap);
1422
+ trace_android_vh_killed_process(current, p, &reap);
1423
+ if (reap)
1424
+ add_to_oom_reaper(p);
1425
+ }
1426
+ }
14831427
14841428 return ret;
14851429 }
....@@ -1489,7 +1433,7 @@
14891433 * control characters do (^C, ^Z etc)
14901434 * - the caller must hold at least a readlock on tasklist_lock
14911435 */
1492
-int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1436
+int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
14931437 {
14941438 struct task_struct *p = NULL;
14951439 int retval, success;
....@@ -1504,7 +1448,7 @@
15041448 return success ? 0 : retval;
15051449 }
15061450
1507
-int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1451
+int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
15081452 {
15091453 int error = -ESRCH;
15101454 struct task_struct *p;
....@@ -1526,7 +1470,7 @@
15261470 }
15271471 }
15281472
1529
-static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1473
+static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
15301474 {
15311475 int error;
15321476 rcu_read_lock();
....@@ -1546,16 +1490,47 @@
15461490 uid_eq(cred->uid, pcred->uid);
15471491 }
15481492
1549
-/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1550
-int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1551
- const struct cred *cred)
1493
+/*
1494
+ * The usb asyncio usage of siginfo is wrong. The glibc support
1495
+ * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1496
+ * AKA after the generic fields:
1497
+ * kernel_pid_t si_pid;
1498
+ * kernel_uid32_t si_uid;
1499
+ * sigval_t si_value;
1500
+ *
1501
+ * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1502
+ * after the generic fields is:
1503
+ * void __user *si_addr;
1504
+ *
1505
+ * This is a practical problem when there is a 64bit big endian kernel
1506
+ * and a 32bit userspace. As the 32bit address will encoded in the low
1507
+ * 32bits of the pointer. Those low 32bits will be stored at higher
1508
+ * address than appear in a 32 bit pointer. So userspace will not
1509
+ * see the address it was expecting for it's completions.
1510
+ *
1511
+ * There is nothing in the encoding that can allow
1512
+ * copy_siginfo_to_user32 to detect this confusion of formats, so
1513
+ * handle this by requiring the caller of kill_pid_usb_asyncio to
1514
+ * notice when this situration takes place and to store the 32bit
1515
+ * pointer in sival_int, instead of sival_addr of the sigval_t addr
1516
+ * parameter.
1517
+ */
1518
+int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1519
+ struct pid *pid, const struct cred *cred)
15521520 {
1553
- int ret = -EINVAL;
1521
+ struct kernel_siginfo info;
15541522 struct task_struct *p;
15551523 unsigned long flags;
1524
+ int ret = -EINVAL;
15561525
15571526 if (!valid_signal(sig))
15581527 return ret;
1528
+
1529
+ clear_siginfo(&info);
1530
+ info.si_signo = sig;
1531
+ info.si_errno = errno;
1532
+ info.si_code = SI_ASYNCIO;
1533
+ *((sigval_t *)&info.si_pid) = addr;
15591534
15601535 rcu_read_lock();
15611536 p = pid_task(pid, PIDTYPE_PID);
....@@ -1563,17 +1538,17 @@
15631538 ret = -ESRCH;
15641539 goto out_unlock;
15651540 }
1566
- if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1541
+ if (!kill_as_cred_perm(cred, p)) {
15671542 ret = -EPERM;
15681543 goto out_unlock;
15691544 }
1570
- ret = security_task_kill(p, info, sig, cred);
1545
+ ret = security_task_kill(p, &info, sig, cred);
15711546 if (ret)
15721547 goto out_unlock;
15731548
15741549 if (sig) {
15751550 if (lock_task_sighand(p, &flags)) {
1576
- ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
1551
+ ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
15771552 unlock_task_sighand(p, &flags);
15781553 } else
15791554 ret = -ESRCH;
....@@ -1582,7 +1557,7 @@
15821557 rcu_read_unlock();
15831558 return ret;
15841559 }
1585
-EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1560
+EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
15861561
15871562 /*
15881563 * kill_something_info() interprets pid in interesting ways just like kill(2).
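A caller of kill_pid_usb_asyncio() has to honour the sival_int convention described in the comment above; a minimal sketch follows (the helper name, the compat flag and the chosen signal are assumptions for illustration, not taken from this diff):

static void example_usb_async_complete(struct pid *pid, const struct cred *cred,
					void __user *uaddr, bool compat_task)
{
	sigval_t addr;

	if (compat_task)
		/* 32bit userspace: hand the pointer over in sival_int */
		addr.sival_int = (int)(unsigned long)uaddr;
	else
		addr.sival_ptr = uaddr;

	kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
}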
....@@ -1591,16 +1566,12 @@
15911566 * is probably wrong. Should make it like BSD or SYSV.
15921567 */
15931568
1594
-static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1569
+static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
15951570 {
15961571 int ret;
15971572
1598
- if (pid > 0) {
1599
- rcu_read_lock();
1600
- ret = kill_pid_info(sig, info, find_vpid(pid));
1601
- rcu_read_unlock();
1602
- return ret;
1603
- }
1573
+ if (pid > 0)
1574
+ return kill_proc_info(sig, info, pid);
16041575
16051576 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
16061577 if (pid == INT_MIN)
....@@ -1635,7 +1606,7 @@
16351606 * These are for backward compatibility with the rest of the kernel source.
16361607 */
16371608
1638
-int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1609
+int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
16391610 {
16401611 /*
16411612 * Make sure legacy kernel users don't send in bad values
....@@ -1646,6 +1617,7 @@
16461617
16471618 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
16481619 }
1620
+EXPORT_SYMBOL(send_sig_info);
16491621
16501622 #define __si_special(priv) \
16511623 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
....@@ -1655,11 +1627,21 @@
16551627 {
16561628 return send_sig_info(sig, __si_special(priv), p);
16571629 }
1630
+EXPORT_SYMBOL(send_sig);
16581631
1659
-void force_sig(int sig, struct task_struct *p)
1632
+void force_sig(int sig)
16601633 {
1661
- force_sig_info(sig, SEND_SIG_PRIV, p);
1634
+ struct kernel_siginfo info;
1635
+
1636
+ clear_siginfo(&info);
1637
+ info.si_signo = sig;
1638
+ info.si_errno = 0;
1639
+ info.si_code = SI_KERNEL;
1640
+ info.si_pid = 0;
1641
+ info.si_uid = 0;
1642
+ force_sig_info(&info);
16621643 }
1644
+EXPORT_SYMBOL(force_sig);
16631645
16641646 /*
16651647 * When things go south during signal handling, we
....@@ -1667,23 +1649,25 @@
16671649 * the problem was already a SIGSEGV, we'll want to
16681650 * make sure we don't even try to deliver the signal..
16691651 */
1670
-void force_sigsegv(int sig, struct task_struct *p)
1652
+void force_sigsegv(int sig)
16711653 {
1654
+ struct task_struct *p = current;
1655
+
16721656 if (sig == SIGSEGV) {
16731657 unsigned long flags;
16741658 spin_lock_irqsave(&p->sighand->siglock, flags);
16751659 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
16761660 spin_unlock_irqrestore(&p->sighand->siglock, flags);
16771661 }
1678
- force_sig(SIGSEGV, p);
1662
+ force_sig(SIGSEGV);
16791663 }
16801664
1681
-int force_sig_fault(int sig, int code, void __user *addr
1665
+int force_sig_fault_to_task(int sig, int code, void __user *addr
16821666 ___ARCH_SI_TRAPNO(int trapno)
16831667 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
16841668 , struct task_struct *t)
16851669 {
1686
- struct siginfo info;
1670
+ struct kernel_siginfo info;
16871671
16881672 clear_siginfo(&info);
16891673 info.si_signo = sig;
....@@ -1698,7 +1682,16 @@
16981682 info.si_flags = flags;
16991683 info.si_isr = isr;
17001684 #endif
1701
- return force_sig_info(info.si_signo, &info, t);
1685
+ return force_sig_info_to_task(&info, t);
1686
+}
1687
+
1688
+int force_sig_fault(int sig, int code, void __user *addr
1689
+ ___ARCH_SI_TRAPNO(int trapno)
1690
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1691
+{
1692
+ return force_sig_fault_to_task(sig, code, addr
1693
+ ___ARCH_SI_TRAPNO(trapno)
1694
+ ___ARCH_SI_IA64(imm, flags, isr), current);
17021695 }
17031696
17041697 int send_sig_fault(int sig, int code, void __user *addr
....@@ -1706,7 +1699,7 @@
17061699 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
17071700 , struct task_struct *t)
17081701 {
1709
- struct siginfo info;
1702
+ struct kernel_siginfo info;
17101703
17111704 clear_siginfo(&info);
17121705 info.si_signo = sig;
....@@ -1724,9 +1717,9 @@
17241717 return send_sig_info(info.si_signo, &info, t);
17251718 }
17261719
1727
-int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1720
+int force_sig_mceerr(int code, void __user *addr, short lsb)
17281721 {
1729
- struct siginfo info;
1722
+ struct kernel_siginfo info;
17301723
17311724 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
17321725 clear_siginfo(&info);
....@@ -1735,12 +1728,12 @@
17351728 info.si_code = code;
17361729 info.si_addr = addr;
17371730 info.si_addr_lsb = lsb;
1738
- return force_sig_info(info.si_signo, &info, t);
1731
+ return force_sig_info(&info);
17391732 }
17401733
17411734 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
17421735 {
1743
- struct siginfo info;
1736
+ struct kernel_siginfo info;
17441737
17451738 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
17461739 clear_siginfo(&info);
....@@ -1755,7 +1748,7 @@
17551748
17561749 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
17571750 {
1758
- struct siginfo info;
1751
+ struct kernel_siginfo info;
17591752
17601753 clear_siginfo(&info);
17611754 info.si_signo = SIGSEGV;
....@@ -1764,13 +1757,13 @@
17641757 info.si_addr = addr;
17651758 info.si_lower = lower;
17661759 info.si_upper = upper;
1767
- return force_sig_info(info.si_signo, &info, current);
1760
+ return force_sig_info(&info);
17681761 }
17691762
17701763 #ifdef SEGV_PKUERR
17711764 int force_sig_pkuerr(void __user *addr, u32 pkey)
17721765 {
1773
- struct siginfo info;
1766
+ struct kernel_siginfo info;
17741767
17751768 clear_siginfo(&info);
17761769 info.si_signo = SIGSEGV;
....@@ -1778,7 +1771,7 @@
17781771 info.si_code = SEGV_PKUERR;
17791772 info.si_addr = addr;
17801773 info.si_pkey = pkey;
1781
- return force_sig_info(info.si_signo, &info, current);
1774
+ return force_sig_info(&info);
17821775 }
17831776 #endif
17841777
....@@ -1787,14 +1780,14 @@
17871780 */
17881781 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
17891782 {
1790
- struct siginfo info;
1783
+ struct kernel_siginfo info;
17911784
17921785 clear_siginfo(&info);
17931786 info.si_signo = SIGTRAP;
17941787 info.si_errno = errno;
17951788 info.si_code = TRAP_HWBKPT;
17961789 info.si_addr = addr;
1797
- return force_sig_info(info.si_signo, &info, current);
1790
+ return force_sig_info(&info);
17981791 }
17991792
18001793 int kill_pgrp(struct pid *pid, int sig, int priv)
....@@ -1826,8 +1819,7 @@
18261819 */
18271820 struct sigqueue *sigqueue_alloc(void)
18281821 {
1829
- /* Preallocated sigqueue objects always from the slabcache ! */
1830
- struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
1822
+ struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
18311823
18321824 if (q)
18331825 q->flags |= SIGQUEUE_PREALLOC;
....@@ -1912,6 +1904,7 @@
19121904 {
19131905 struct pid *pid;
19141906
1907
+ WARN_ON(task->exit_state == 0);
19151908 pid = task_pid(task);
19161909 wake_up_all(&pid->wait_pidfd);
19171910 }
....@@ -1925,18 +1918,18 @@
19251918 */
19261919 bool do_notify_parent(struct task_struct *tsk, int sig)
19271920 {
1928
- struct siginfo info;
1921
+ struct kernel_siginfo info;
19291922 unsigned long flags;
19301923 struct sighand_struct *psig;
19311924 bool autoreap = false;
19321925 u64 utime, stime;
19331926
1934
- BUG_ON(sig == -1);
1927
+ WARN_ON_ONCE(sig == -1);
19351928
1936
- /* do_notify_parent_cldstop should have been called instead. */
1937
- BUG_ON(task_is_stopped_or_traced(tsk));
1929
+ /* do_notify_parent_cldstop should have been called instead. */
1930
+ WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
19381931
1939
- BUG_ON(!tsk->ptrace &&
1932
+ WARN_ON_ONCE(!tsk->ptrace &&
19401933 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
19411934
19421935 /* Wake up all pidfd waiters */
....@@ -2009,8 +2002,12 @@
20092002 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
20102003 sig = 0;
20112004 }
2005
+ /*
2006
+ * Send with __send_signal as si_pid and si_uid are in the
2007
+ * parent's namespaces.
2008
+ */
20122009 if (valid_signal(sig) && sig)
2013
- __group_send_sig_info(sig, &info, tsk->parent);
2010
+ __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
20142011 __wake_up_parent(tsk, tsk->parent);
20152012 spin_unlock_irqrestore(&psig->siglock, flags);
20162013
....@@ -2033,7 +2030,7 @@
20332030 static void do_notify_parent_cldstop(struct task_struct *tsk,
20342031 bool for_ptracer, int why)
20352032 {
2036
- struct siginfo info;
2033
+ struct kernel_siginfo info;
20372034 unsigned long flags;
20382035 struct task_struct *parent;
20392036 struct sighand_struct *sighand;
....@@ -2124,7 +2121,7 @@
21242121 * If we actually decide not to stop at all because the tracer
21252122 * is gone, we keep current->exit_code unless clear_code.
21262123 */
2127
-static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
2124
+static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
21282125 __releases(&current->sighand->siglock)
21292126 __acquires(&current->sighand->siglock)
21302127 {
....@@ -2208,8 +2205,18 @@
22082205 if (gstop_done && ptrace_reparented(current))
22092206 do_notify_parent_cldstop(current, false, why);
22102207
2208
+ /*
2209
+ * Don't want to allow preemption here, because
2210
+ * sys_ptrace() needs this task to be inactive.
2211
+ *
2212
+ * XXX: implement read_unlock_no_resched().
2213
+ */
2214
+ preempt_disable();
22112215 read_unlock(&tasklist_lock);
2216
+ cgroup_enter_frozen();
2217
+ preempt_enable_no_resched();
22122218 freezable_schedule();
2219
+ cgroup_leave_frozen(true);
22132220 } else {
22142221 /*
22152222 * By the time we got the lock, our tracer went away.
....@@ -2252,7 +2259,7 @@
22522259
22532260 static void ptrace_do_notify(int signr, int exit_code, int why)
22542261 {
2255
- siginfo_t info;
2262
+ kernel_siginfo_t info;
22562263
22572264 clear_siginfo(&info);
22582265 info.si_signo = signr;
....@@ -2471,7 +2478,7 @@
24712478 freezable_schedule();
24722479 }
24732480
2474
-static int ptrace_signal(int signr, siginfo_t *info)
2481
+static int ptrace_signal(int signr, kernel_siginfo_t *info)
24752482 {
24762483 /*
24772484 * We do not check sig_kernel_stop(signr) but set this marker
....@@ -2512,11 +2519,31 @@
25122519
25132520 /* If the (new) signal is now blocked, requeue it. */
25142521 if (sigismember(&current->blocked, signr)) {
2515
- specific_send_sig_info(signr, info, current);
2522
+ send_signal(signr, info, current, PIDTYPE_PID);
25162523 signr = 0;
25172524 }
25182525
25192526 return signr;
2527
+}
2528
+
2529
+static void hide_si_addr_tag_bits(struct ksignal *ksig)
2530
+{
2531
+ switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2532
+ case SIL_FAULT:
2533
+ case SIL_FAULT_MCEERR:
2534
+ case SIL_FAULT_BNDERR:
2535
+ case SIL_FAULT_PKUERR:
2536
+ ksig->info.si_addr = arch_untagged_si_addr(
2537
+ ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2538
+ break;
2539
+ case SIL_KILL:
2540
+ case SIL_TIMER:
2541
+ case SIL_POLL:
2542
+ case SIL_CHLD:
2543
+ case SIL_RT:
2544
+ case SIL_SYS:
2545
+ break;
2546
+ }
25202547 }
25212548
25222549 bool get_signal(struct ksignal *ksig)
....@@ -2527,6 +2554,18 @@
25272554
25282555 if (unlikely(current->task_works))
25292556 task_work_run();
2557
+
2558
+ /*
2559
+ * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2560
+ * that the arch handlers don't all have to do it. If we get here
2561
+ * without TIF_SIGPENDING, just exit after running signal work.
2562
+ */
2563
+ if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2564
+ if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2565
+ tracehook_notify_signal();
2566
+ if (!task_sigpending(current))
2567
+ return false;
2568
+ }
25302569
25312570 if (unlikely(uprobe_deny_signal()))
25322571 return false;
....@@ -2540,6 +2579,7 @@
25402579
25412580 relock:
25422581 spin_lock_irq(&sighand->siglock);
2582
+
25432583 /*
25442584 * Every stopped thread goes here after wakeup. Check to see if
25452585 * we should notify the parent, prepare_signal(SIGCONT) encodes
....@@ -2583,10 +2623,6 @@
25832623 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
25842624 &sighand->action[SIGKILL - 1]);
25852625 recalc_sigpending();
2586
- current->jobctl &= ~JOBCTL_TRAP_FREEZE;
2587
- spin_unlock_irq(&sighand->siglock);
2588
- if (unlikely(cgroup_task_frozen(current)))
2589
- cgroup_leave_frozen(true);
25902626 goto fatal;
25912627 }
25922628
....@@ -2614,7 +2650,7 @@
26142650 */
26152651 if (unlikely(cgroup_task_frozen(current))) {
26162652 spin_unlock_irq(&sighand->siglock);
2617
- cgroup_leave_frozen(true);
2653
+ cgroup_leave_frozen(false);
26182654 goto relock;
26192655 }
26202656
....@@ -2708,8 +2744,10 @@
27082744 continue;
27092745 }
27102746
2711
- spin_unlock_irq(&sighand->siglock);
27122747 fatal:
2748
+ spin_unlock_irq(&sighand->siglock);
2749
+ if (unlikely(cgroup_task_frozen(current)))
2750
+ cgroup_leave_frozen(true);
27132751
27142752 /*
27152753 * Anything else is fatal, maybe with a core dump.
....@@ -2732,14 +2770,26 @@
27322770 }
27332771
27342772 /*
2773
+ * PF_IO_WORKER threads will catch and exit on fatal signals
2774
+ * themselves. They have cleanup that must be performed, so
2775
+ * we cannot call do_exit() on their behalf.
2776
+ */
2777
+ if (current->flags & PF_IO_WORKER)
2778
+ goto out;
2779
+
2780
+ /*
27352781 * Death signals, no core dump.
27362782 */
27372783 do_group_exit(ksig->info.si_signo);
27382784 /* NOTREACHED */
27392785 }
27402786 spin_unlock_irq(&sighand->siglock);
2741
-
2787
+out:
27422788 ksig->sig = signr;
2789
+
2790
+ if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2791
+ hide_si_addr_tag_bits(ksig);
2792
+
27432793 return ksig->sig > 0;
27442794 }
27452795
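hide_si_addr_tag_bits() strips architecture tag bits from si_addr unless the handler was registered with SA_EXPOSE_TAGBITS; a userspace sketch of opting in (the handler name is an assumption):

	struct sigaction sa = {
		.sa_sigaction	= tag_aware_handler,
		/* keep tag bits in si_addr instead of having them cleared */
		.sa_flags	= SA_SIGINFO | SA_EXPOSE_TAGBITS,
	};
	sigaction(SIGSEGV, &sa, NULL);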
....@@ -2773,7 +2823,7 @@
27732823 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
27742824 {
27752825 if (failed)
2776
- force_sigsegv(ksig->sig, current);
2826
+ force_sigsegv(ksig->sig);
27772827 else
27782828 signal_delivered(ksig, stepping);
27792829 }
....@@ -2802,7 +2852,7 @@
28022852 /* Remove the signals this thread can handle. */
28032853 sigandsets(&retarget, &retarget, &t->blocked);
28042854
2805
- if (!signal_pending(t))
2855
+ if (!task_sigpending(t))
28062856 signal_wake_up(t, 0);
28072857
28082858 if (sigisemptyset(&retarget))
....@@ -2836,7 +2886,7 @@
28362886
28372887 cgroup_threadgroup_change_end(tsk);
28382888
2839
- if (!signal_pending(tsk))
2889
+ if (!task_sigpending(tsk))
28402890 goto out;
28412891
28422892 unblocked = tsk->blocked;
....@@ -2860,14 +2910,6 @@
28602910 }
28612911 }
28622912
2863
-EXPORT_SYMBOL(recalc_sigpending);
2864
-EXPORT_SYMBOL_GPL(dequeue_signal);
2865
-EXPORT_SYMBOL(flush_signals);
2866
-EXPORT_SYMBOL(force_sig);
2867
-EXPORT_SYMBOL(send_sig);
2868
-EXPORT_SYMBOL(send_sig_info);
2869
-EXPORT_SYMBOL(sigprocmask);
2870
-
28712913 /*
28722914 * System call entry points.
28732915 */
....@@ -2888,7 +2930,7 @@
28882930
28892931 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
28902932 {
2891
- if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2933
+ if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
28922934 sigset_t newblocked;
28932935 /* A set of now blocked but previously unblocked signals. */
28942936 sigandnsets(&newblocked, newset, &current->blocked);
....@@ -2961,6 +3003,55 @@
29613003 __set_current_blocked(&newset);
29623004 return 0;
29633005 }
3006
+EXPORT_SYMBOL(sigprocmask);
3007
+
3008
+/*
3009
+ * This API helps set app-provided sigmasks.
3010
+ *
3011
+ * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3012
+ * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3013
+ *
3014
+ * Note that it does set_restore_sigmask() in advance, so it must be always
3015
+ * paired with restore_saved_sigmask_unless() before return from syscall.
3016
+ */
3017
+int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3018
+{
3019
+ sigset_t kmask;
3020
+
3021
+ if (!umask)
3022
+ return 0;
3023
+ if (sigsetsize != sizeof(sigset_t))
3024
+ return -EINVAL;
3025
+ if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3026
+ return -EFAULT;
3027
+
3028
+ set_restore_sigmask();
3029
+ current->saved_sigmask = current->blocked;
3030
+ set_current_blocked(&kmask);
3031
+
3032
+ return 0;
3033
+}
3034
+
3035
+#ifdef CONFIG_COMPAT
3036
+int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3037
+ size_t sigsetsize)
3038
+{
3039
+ sigset_t kmask;
3040
+
3041
+ if (!umask)
3042
+ return 0;
3043
+ if (sigsetsize != sizeof(compat_sigset_t))
3044
+ return -EINVAL;
3045
+ if (get_compat_sigset(&kmask, umask))
3046
+ return -EFAULT;
3047
+
3048
+ set_restore_sigmask();
3049
+ current->saved_sigmask = current->blocked;
3050
+ set_current_blocked(&kmask);
3051
+
3052
+ return 0;
3053
+}
3054
+#endif
29643055
29653056 /**
29663057 * sys_rt_sigprocmask - change the list of currently blocked signals
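The pairing demanded by the set_user_sigmask() comment looks roughly like this in a syscall such as ppoll (a sketch; do_sys_poll() and the local variables are assumed from the surrounding caller):

	ret = set_user_sigmask(sigmask, sigsetsize);
	if (ret)
		return ret;

	ret = do_sys_poll(ufds, nfds, to);
	/* keep the temporary mask only while a signal is being delivered */
	restore_saved_sigmask_unless(ret == -EINTR);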
....@@ -3071,27 +3162,48 @@
30713162 }
30723163 #endif
30733164
3165
+static const struct {
3166
+ unsigned char limit, layout;
3167
+} sig_sicodes[] = {
3168
+ [SIGILL] = { NSIGILL, SIL_FAULT },
3169
+ [SIGFPE] = { NSIGFPE, SIL_FAULT },
3170
+ [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3171
+ [SIGBUS] = { NSIGBUS, SIL_FAULT },
3172
+ [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3173
+#if defined(SIGEMT)
3174
+ [SIGEMT] = { NSIGEMT, SIL_FAULT },
3175
+#endif
3176
+ [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3177
+ [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3178
+ [SIGSYS] = { NSIGSYS, SIL_SYS },
3179
+};
3180
+
3181
+static bool known_siginfo_layout(unsigned sig, int si_code)
3182
+{
3183
+ if (si_code == SI_KERNEL)
3184
+ return true;
3185
+ else if ((si_code > SI_USER)) {
3186
+ if (sig_specific_sicodes(sig)) {
3187
+ if (si_code <= sig_sicodes[sig].limit)
3188
+ return true;
3189
+ }
3190
+ else if (si_code <= NSIGPOLL)
3191
+ return true;
3192
+ }
3193
+ else if (si_code >= SI_DETHREAD)
3194
+ return true;
3195
+ else if (si_code == SI_ASYNCNL)
3196
+ return true;
3197
+ return false;
3198
+}
3199
+
30743200 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
30753201 {
30763202 enum siginfo_layout layout = SIL_KILL;
30773203 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3078
- static const struct {
3079
- unsigned char limit, layout;
3080
- } filter[] = {
3081
- [SIGILL] = { NSIGILL, SIL_FAULT },
3082
- [SIGFPE] = { NSIGFPE, SIL_FAULT },
3083
- [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3084
- [SIGBUS] = { NSIGBUS, SIL_FAULT },
3085
- [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3086
-#if defined(SIGEMT) && defined(NSIGEMT)
3087
- [SIGEMT] = { NSIGEMT, SIL_FAULT },
3088
-#endif
3089
- [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3090
- [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3091
- [SIGSYS] = { NSIGSYS, SIL_SYS },
3092
- };
3093
- if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
3094
- layout = filter[sig].layout;
3204
+ if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3205
+ (si_code <= sig_sicodes[sig].limit)) {
3206
+ layout = sig_sicodes[sig].layout;
30953207 /* Handle the exceptions */
30963208 if ((sig == SIGBUS) &&
30973209 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
....@@ -3116,106 +3228,243 @@
31163228 return layout;
31173229 }
31183230
3119
-int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
3231
+static inline char __user *si_expansion(const siginfo_t __user *info)
31203232 {
3121
- if (copy_to_user(to, from , sizeof(struct siginfo)))
3233
+ return ((char __user *)info) + sizeof(struct kernel_siginfo);
3234
+}
3235
+
3236
+int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3237
+{
3238
+ char __user *expansion = si_expansion(to);
3239
+ if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3240
+ return -EFAULT;
3241
+ if (clear_user(expansion, SI_EXPANSION_SIZE))
31223242 return -EFAULT;
31233243 return 0;
3244
+}
3245
+
3246
+static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3247
+ const siginfo_t __user *from)
3248
+{
3249
+ if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3250
+ char __user *expansion = si_expansion(from);
3251
+ char buf[SI_EXPANSION_SIZE];
3252
+ int i;
3253
+ /*
3254
+ * An unknown si_code might need more than
3255
+ * sizeof(struct kernel_siginfo) bytes. Verify all of the
3256
+ * extra bytes are 0. This guarantees copy_siginfo_to_user
3257
+ * will return this data to userspace exactly.
3258
+ */
3259
+ if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3260
+ return -EFAULT;
3261
+ for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3262
+ if (buf[i] != 0)
3263
+ return -E2BIG;
3264
+ }
3265
+ }
3266
+ return 0;
3267
+}
3268
+
3269
+static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3270
+ const siginfo_t __user *from)
3271
+{
3272
+ if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3273
+ return -EFAULT;
3274
+ to->si_signo = signo;
3275
+ return post_copy_siginfo_from_user(to, from);
3276
+}
3277
+
3278
+int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3279
+{
3280
+ if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3281
+ return -EFAULT;
3282
+ return post_copy_siginfo_from_user(to, from);
31243283 }
31253284
31263285 #ifdef CONFIG_COMPAT
3127
-int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3128
- const struct siginfo *from)
3129
-#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3286
+/**
3287
+ * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3288
+ * @to: compat siginfo destination
3289
+ * @from: kernel siginfo source
3290
+ *
3291
+ * Note: This function does not work properly for the SIGCHLD on x32, but
3292
+ * fortunately it doesn't have to. The only valid callers for this function are
3293
+ * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3294
+ * The latter does not care because SIGCHLD will never cause a coredump.
3295
+ */
3296
+void copy_siginfo_to_external32(struct compat_siginfo *to,
3297
+ const struct kernel_siginfo *from)
31303298 {
3131
- return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3132
-}
3133
-int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3134
- const struct siginfo *from, bool x32_ABI)
3135
-#endif
3136
-{
3137
- struct compat_siginfo new;
3138
- memset(&new, 0, sizeof(new));
3299
+ memset(to, 0, sizeof(*to));
31393300
3140
- new.si_signo = from->si_signo;
3141
- new.si_errno = from->si_errno;
3142
- new.si_code = from->si_code;
3301
+ to->si_signo = from->si_signo;
3302
+ to->si_errno = from->si_errno;
3303
+ to->si_code = from->si_code;
31433304 switch(siginfo_layout(from->si_signo, from->si_code)) {
31443305 case SIL_KILL:
3145
- new.si_pid = from->si_pid;
3146
- new.si_uid = from->si_uid;
3306
+ to->si_pid = from->si_pid;
3307
+ to->si_uid = from->si_uid;
31473308 break;
31483309 case SIL_TIMER:
3149
- new.si_tid = from->si_tid;
3150
- new.si_overrun = from->si_overrun;
3151
- new.si_int = from->si_int;
3310
+ to->si_tid = from->si_tid;
3311
+ to->si_overrun = from->si_overrun;
3312
+ to->si_int = from->si_int;
31523313 break;
31533314 case SIL_POLL:
3154
- new.si_band = from->si_band;
3155
- new.si_fd = from->si_fd;
3315
+ to->si_band = from->si_band;
3316
+ to->si_fd = from->si_fd;
31563317 break;
31573318 case SIL_FAULT:
3158
- new.si_addr = ptr_to_compat(from->si_addr);
3319
+ to->si_addr = ptr_to_compat(from->si_addr);
31593320 #ifdef __ARCH_SI_TRAPNO
3160
- new.si_trapno = from->si_trapno;
3321
+ to->si_trapno = from->si_trapno;
31613322 #endif
31623323 break;
31633324 case SIL_FAULT_MCEERR:
3164
- new.si_addr = ptr_to_compat(from->si_addr);
3325
+ to->si_addr = ptr_to_compat(from->si_addr);
31653326 #ifdef __ARCH_SI_TRAPNO
3166
- new.si_trapno = from->si_trapno;
3327
+ to->si_trapno = from->si_trapno;
31673328 #endif
3168
- new.si_addr_lsb = from->si_addr_lsb;
3329
+ to->si_addr_lsb = from->si_addr_lsb;
31693330 break;
31703331 case SIL_FAULT_BNDERR:
3171
- new.si_addr = ptr_to_compat(from->si_addr);
3332
+ to->si_addr = ptr_to_compat(from->si_addr);
31723333 #ifdef __ARCH_SI_TRAPNO
3173
- new.si_trapno = from->si_trapno;
3334
+ to->si_trapno = from->si_trapno;
31743335 #endif
3175
- new.si_lower = ptr_to_compat(from->si_lower);
3176
- new.si_upper = ptr_to_compat(from->si_upper);
3336
+ to->si_lower = ptr_to_compat(from->si_lower);
3337
+ to->si_upper = ptr_to_compat(from->si_upper);
31773338 break;
31783339 case SIL_FAULT_PKUERR:
3179
- new.si_addr = ptr_to_compat(from->si_addr);
3340
+ to->si_addr = ptr_to_compat(from->si_addr);
31803341 #ifdef __ARCH_SI_TRAPNO
3181
- new.si_trapno = from->si_trapno;
3342
+ to->si_trapno = from->si_trapno;
31823343 #endif
3183
- new.si_pkey = from->si_pkey;
3344
+ to->si_pkey = from->si_pkey;
31843345 break;
31853346 case SIL_CHLD:
3186
- new.si_pid = from->si_pid;
3187
- new.si_uid = from->si_uid;
3188
- new.si_status = from->si_status;
3189
-#ifdef CONFIG_X86_X32_ABI
3190
- if (x32_ABI) {
3191
- new._sifields._sigchld_x32._utime = from->si_utime;
3192
- new._sifields._sigchld_x32._stime = from->si_stime;
3193
- } else
3194
-#endif
3195
- {
3196
- new.si_utime = from->si_utime;
3197
- new.si_stime = from->si_stime;
3198
- }
3347
+ to->si_pid = from->si_pid;
3348
+ to->si_uid = from->si_uid;
3349
+ to->si_status = from->si_status;
3350
+ to->si_utime = from->si_utime;
3351
+ to->si_stime = from->si_stime;
31993352 break;
32003353 case SIL_RT:
3201
- new.si_pid = from->si_pid;
3202
- new.si_uid = from->si_uid;
3203
- new.si_int = from->si_int;
3354
+ to->si_pid = from->si_pid;
3355
+ to->si_uid = from->si_uid;
3356
+ to->si_int = from->si_int;
32043357 break;
32053358 case SIL_SYS:
3206
- new.si_call_addr = ptr_to_compat(from->si_call_addr);
3207
- new.si_syscall = from->si_syscall;
3208
- new.si_arch = from->si_arch;
3359
+ to->si_call_addr = ptr_to_compat(from->si_call_addr);
3360
+ to->si_syscall = from->si_syscall;
3361
+ to->si_arch = from->si_arch;
32093362 break;
32103363 }
3364
+}
32113365
3366
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3367
+ const struct kernel_siginfo *from)
3368
+{
3369
+ struct compat_siginfo new;
3370
+
3371
+ copy_siginfo_to_external32(&new, from);
32123372 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
32133373 return -EFAULT;
3214
-
32153374 return 0;
32163375 }
32173376
3218
-int copy_siginfo_from_user32(struct siginfo *to,
3377
+static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3378
+ const struct compat_siginfo *from)
3379
+{
3380
+ clear_siginfo(to);
3381
+ to->si_signo = from->si_signo;
3382
+ to->si_errno = from->si_errno;
3383
+ to->si_code = from->si_code;
3384
+ switch(siginfo_layout(from->si_signo, from->si_code)) {
3385
+ case SIL_KILL:
3386
+ to->si_pid = from->si_pid;
3387
+ to->si_uid = from->si_uid;
3388
+ break;
3389
+ case SIL_TIMER:
3390
+ to->si_tid = from->si_tid;
3391
+ to->si_overrun = from->si_overrun;
3392
+ to->si_int = from->si_int;
3393
+ break;
3394
+ case SIL_POLL:
3395
+ to->si_band = from->si_band;
3396
+ to->si_fd = from->si_fd;
3397
+ break;
3398
+ case SIL_FAULT:
3399
+ to->si_addr = compat_ptr(from->si_addr);
3400
+#ifdef __ARCH_SI_TRAPNO
3401
+ to->si_trapno = from->si_trapno;
3402
+#endif
3403
+ break;
3404
+ case SIL_FAULT_MCEERR:
3405
+ to->si_addr = compat_ptr(from->si_addr);
3406
+#ifdef __ARCH_SI_TRAPNO
3407
+ to->si_trapno = from->si_trapno;
3408
+#endif
3409
+ to->si_addr_lsb = from->si_addr_lsb;
3410
+ break;
3411
+ case SIL_FAULT_BNDERR:
3412
+ to->si_addr = compat_ptr(from->si_addr);
3413
+#ifdef __ARCH_SI_TRAPNO
3414
+ to->si_trapno = from->si_trapno;
3415
+#endif
3416
+ to->si_lower = compat_ptr(from->si_lower);
3417
+ to->si_upper = compat_ptr(from->si_upper);
3418
+ break;
3419
+ case SIL_FAULT_PKUERR:
3420
+ to->si_addr = compat_ptr(from->si_addr);
3421
+#ifdef __ARCH_SI_TRAPNO
3422
+ to->si_trapno = from->si_trapno;
3423
+#endif
3424
+ to->si_pkey = from->si_pkey;
3425
+ break;
3426
+ case SIL_CHLD:
3427
+ to->si_pid = from->si_pid;
3428
+ to->si_uid = from->si_uid;
3429
+ to->si_status = from->si_status;
3430
+#ifdef CONFIG_X86_X32_ABI
3431
+ if (in_x32_syscall()) {
3432
+ to->si_utime = from->_sifields._sigchld_x32._utime;
3433
+ to->si_stime = from->_sifields._sigchld_x32._stime;
3434
+ } else
3435
+#endif
3436
+ {
3437
+ to->si_utime = from->si_utime;
3438
+ to->si_stime = from->si_stime;
3439
+ }
3440
+ break;
3441
+ case SIL_RT:
3442
+ to->si_pid = from->si_pid;
3443
+ to->si_uid = from->si_uid;
3444
+ to->si_int = from->si_int;
3445
+ break;
3446
+ case SIL_SYS:
3447
+ to->si_call_addr = compat_ptr(from->si_call_addr);
3448
+ to->si_syscall = from->si_syscall;
3449
+ to->si_arch = from->si_arch;
3450
+ break;
3451
+ }
3452
+ return 0;
3453
+}
3454
+
3455
+static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3456
+ const struct compat_siginfo __user *ufrom)
3457
+{
3458
+ struct compat_siginfo from;
3459
+
3460
+ if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3461
+ return -EFAULT;
3462
+
3463
+ from.si_signo = signo;
3464
+ return post_copy_siginfo_from_user32(to, &from);
3465
+}
3466
+
3467
+int copy_siginfo_from_user32(struct kernel_siginfo *to,
32193468 const struct compat_siginfo __user *ufrom)
32203469 {
32213470 struct compat_siginfo from;
....@@ -3223,79 +3472,7 @@
32233472 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
32243473 return -EFAULT;
32253474
3226
- clear_siginfo(to);
3227
- to->si_signo = from.si_signo;
3228
- to->si_errno = from.si_errno;
3229
- to->si_code = from.si_code;
3230
- switch(siginfo_layout(from.si_signo, from.si_code)) {
3231
- case SIL_KILL:
3232
- to->si_pid = from.si_pid;
3233
- to->si_uid = from.si_uid;
3234
- break;
3235
- case SIL_TIMER:
3236
- to->si_tid = from.si_tid;
3237
- to->si_overrun = from.si_overrun;
3238
- to->si_int = from.si_int;
3239
- break;
3240
- case SIL_POLL:
3241
- to->si_band = from.si_band;
3242
- to->si_fd = from.si_fd;
3243
- break;
3244
- case SIL_FAULT:
3245
- to->si_addr = compat_ptr(from.si_addr);
3246
-#ifdef __ARCH_SI_TRAPNO
3247
- to->si_trapno = from.si_trapno;
3248
-#endif
3249
- break;
3250
- case SIL_FAULT_MCEERR:
3251
- to->si_addr = compat_ptr(from.si_addr);
3252
-#ifdef __ARCH_SI_TRAPNO
3253
- to->si_trapno = from.si_trapno;
3254
-#endif
3255
- to->si_addr_lsb = from.si_addr_lsb;
3256
- break;
3257
- case SIL_FAULT_BNDERR:
3258
- to->si_addr = compat_ptr(from.si_addr);
3259
-#ifdef __ARCH_SI_TRAPNO
3260
- to->si_trapno = from.si_trapno;
3261
-#endif
3262
- to->si_lower = compat_ptr(from.si_lower);
3263
- to->si_upper = compat_ptr(from.si_upper);
3264
- break;
3265
- case SIL_FAULT_PKUERR:
3266
- to->si_addr = compat_ptr(from.si_addr);
3267
-#ifdef __ARCH_SI_TRAPNO
3268
- to->si_trapno = from.si_trapno;
3269
-#endif
3270
- to->si_pkey = from.si_pkey;
3271
- break;
3272
- case SIL_CHLD:
3273
- to->si_pid = from.si_pid;
3274
- to->si_uid = from.si_uid;
3275
- to->si_status = from.si_status;
3276
-#ifdef CONFIG_X86_X32_ABI
3277
- if (in_x32_syscall()) {
3278
- to->si_utime = from._sifields._sigchld_x32._utime;
3279
- to->si_stime = from._sifields._sigchld_x32._stime;
3280
- } else
3281
-#endif
3282
- {
3283
- to->si_utime = from.si_utime;
3284
- to->si_stime = from.si_stime;
3285
- }
3286
- break;
3287
- case SIL_RT:
3288
- to->si_pid = from.si_pid;
3289
- to->si_uid = from.si_uid;
3290
- to->si_int = from.si_int;
3291
- break;
3292
- case SIL_SYS:
3293
- to->si_call_addr = compat_ptr(from.si_call_addr);
3294
- to->si_syscall = from.si_syscall;
3295
- to->si_arch = from.si_arch;
3296
- break;
3297
- }
3298
- return 0;
3475
+ return post_copy_siginfo_from_user32(to, &from);
32993476 }
33003477 #endif /* CONFIG_COMPAT */
33013478
....@@ -3305,8 +3482,8 @@
33053482 * @info: if non-null, the signal's siginfo is returned here
33063483 * @ts: upper bound on process time suspension
33073484 */
3308
-static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3309
- const struct timespec *ts)
3485
+static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3486
+ const struct timespec64 *ts)
33103487 {
33113488 ktime_t *to = NULL, timeout = KTIME_MAX;
33123489 struct task_struct *tsk = current;
....@@ -3314,9 +3491,9 @@
33143491 int sig, ret = 0;
33153492
33163493 if (ts) {
3317
- if (!timespec_valid(ts))
3494
+ if (!timespec64_valid(ts))
33183495 return -EINVAL;
3319
- timeout = timespec_to_ktime(*ts);
3496
+ timeout = timespec64_to_ktime(*ts);
33203497 to = &timeout;
33213498 }
33223499
....@@ -3364,12 +3541,13 @@
33643541 * @sigsetsize: size of sigset_t type
33653542 */
33663543 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3367
- siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3544
+ siginfo_t __user *, uinfo,
3545
+ const struct __kernel_timespec __user *, uts,
33683546 size_t, sigsetsize)
33693547 {
33703548 sigset_t these;
3371
- struct timespec ts;
3372
- siginfo_t info;
3549
+ struct timespec64 ts;
3550
+ kernel_siginfo_t info;
33733551 int ret;
33743552
33753553 /* XXX: Don't preclude handling different sized sigset_t's. */
....@@ -3380,7 +3558,7 @@
33803558 return -EFAULT;
33813559
33823560 if (uts) {
3383
- if (copy_from_user(&ts, uts, sizeof(ts)))
3561
+ if (get_timespec64(&ts, uts))
33843562 return -EFAULT;
33853563 }
33863564
....@@ -3394,14 +3572,47 @@
33943572 return ret;
33953573 }
33963574
3575
+#ifdef CONFIG_COMPAT_32BIT_TIME
3576
+SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3577
+ siginfo_t __user *, uinfo,
3578
+ const struct old_timespec32 __user *, uts,
3579
+ size_t, sigsetsize)
3580
+{
3581
+ sigset_t these;
3582
+ struct timespec64 ts;
3583
+ kernel_siginfo_t info;
3584
+ int ret;
3585
+
3586
+ if (sigsetsize != sizeof(sigset_t))
3587
+ return -EINVAL;
3588
+
3589
+ if (copy_from_user(&these, uthese, sizeof(these)))
3590
+ return -EFAULT;
3591
+
3592
+ if (uts) {
3593
+ if (get_old_timespec32(&ts, uts))
3594
+ return -EFAULT;
3595
+ }
3596
+
3597
+ ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3598
+
3599
+ if (ret > 0 && uinfo) {
3600
+ if (copy_siginfo_to_user(uinfo, &info))
3601
+ ret = -EFAULT;
3602
+ }
3603
+
3604
+ return ret;
3605
+}
3606
+#endif
3607
+
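The native and time32 entry points above back the same userspace interface. A minimal sketch, assuming glibc's sigtimedwait() wrapper (which routes to whichever rt_sigtimedwait variant the ABI provides), of waiting for a blocked signal with a timeout:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	/* The signal must be blocked so it stays pending for sigtimedwait(). */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &timeout);
	if (sig < 0 && errno == EAGAIN)
		printf("timed out\n");
	else if (sig > 0)
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}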
33973608 #ifdef CONFIG_COMPAT
3398
-COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3609
+COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
33993610 struct compat_siginfo __user *, uinfo,
3400
- struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3611
+ struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
34013612 {
34023613 sigset_t s;
3403
- struct timespec t;
3404
- siginfo_t info;
3614
+ struct timespec64 t;
3615
+ kernel_siginfo_t info;
34053616 long ret;
34063617
34073618 if (sigsetsize != sizeof(sigset_t))
....@@ -3411,7 +3622,38 @@
34113622 return -EFAULT;
34123623
34133624 if (uts) {
3414
- if (compat_get_timespec(&t, uts))
3625
+ if (get_timespec64(&t, uts))
3626
+ return -EFAULT;
3627
+ }
3628
+
3629
+ ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3630
+
3631
+ if (ret > 0 && uinfo) {
3632
+ if (copy_siginfo_to_user32(uinfo, &info))
3633
+ ret = -EFAULT;
3634
+ }
3635
+
3636
+ return ret;
3637
+}
3638
+
3639
+#ifdef CONFIG_COMPAT_32BIT_TIME
3640
+COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3641
+ struct compat_siginfo __user *, uinfo,
3642
+ struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3643
+{
3644
+ sigset_t s;
3645
+ struct timespec64 t;
3646
+ kernel_siginfo_t info;
3647
+ long ret;
3648
+
3649
+ if (sigsetsize != sizeof(sigset_t))
3650
+ return -EINVAL;
3651
+
3652
+ if (get_compat_sigset(&s, uthese))
3653
+ return -EFAULT;
3654
+
3655
+ if (uts) {
3656
+ if (get_old_timespec32(&t, uts))
34153657 return -EFAULT;
34163658 }
34173659
....@@ -3425,8 +3667,9 @@
34253667 return ret;
34263668 }
34273669 #endif
3670
+#endif
34283671
3429
-static inline void prepare_kill_siginfo(int sig, struct siginfo *info)
3672
+static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
34303673 {
34313674 clear_siginfo(info);
34323675 info->si_signo = sig;
....@@ -3443,7 +3686,7 @@
34433686 */
34443687 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
34453688 {
3446
- struct siginfo info;
3689
+ struct kernel_siginfo info;
34473690
34483691 prepare_kill_siginfo(sig, &info);
34493692
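The kill() path above only changes the in-kernel siginfo type. For context, a small usage sketch of the syscall's "signal 0" convention, which runs the existence and permission checks without delivering anything:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

/* Probe whether a process exists and whether we may signal it. */
static int process_alive(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists and signalable */
	return errno == EPERM;		/* exists, but not ours */
}

int main(void)
{
	printf("pid 1 alive: %d\n", process_alive(1));
	return 0;
}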
....@@ -3471,7 +3714,7 @@
34713714 return true;
34723715 }
34733716
3474
-static int copy_siginfo_from_user_any(siginfo_t *kinfo, siginfo_t __user *info)
3717
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t __user *info)
34753718 {
34763719 #ifdef CONFIG_COMPAT
34773720 /*
....@@ -3483,13 +3726,16 @@
34833726 return copy_siginfo_from_user32(
34843727 kinfo, (struct compat_siginfo __user *)info);
34853728 #endif
3486
- return copy_from_user(kinfo, info, sizeof(siginfo_t));
3729
+ return copy_siginfo_from_user(kinfo, info);
34873730 }
34883731
34893732 static struct pid *pidfd_to_pid(const struct file *file)
34903733 {
3491
- if (file->f_op == &pidfd_fops)
3492
- return file->private_data;
3734
+ struct pid *pid;
3735
+
3736
+ pid = pidfd_pid(file);
3737
+ if (!IS_ERR(pid))
3738
+ return pid;
34933739
34943740 return tgid_pidfd_to_pid(file);
34953741 }
....@@ -3518,7 +3764,7 @@
35183764 int ret;
35193765 struct fd f;
35203766 struct pid *pid;
3521
- siginfo_t kinfo;
3767
+ kernel_siginfo_t kinfo;
35223768
35233769 /* Enforce flags be set to 0 until we add an extension. */
35243770 if (flags)
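pidfd_send_signal() is typically reached through raw syscalls. A hedged sketch, assuming the libc headers define SYS_pidfd_open and SYS_pidfd_send_signal; passing a NULL siginfo lets the kernel build the SI_USER-style record itself, as the code around this hunk does via prepare_kill_siginfo():

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	pid_t pid;
	int pidfd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);

	/* A pidfd pins the target's identity, so the signal cannot be
	 * misdirected if the pid is recycled after we looked it up. */
	pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}
	/* NULL info: the kernel fills in an SI_USER-style siginfo for us. */
	if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		return 1;
	}
	return 0;
}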
....@@ -3565,7 +3811,7 @@
35653811 }
35663812
35673813 static int
3568
-do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3814
+do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
35693815 {
35703816 struct task_struct *p;
35713817 int error = -ESRCH;
....@@ -3596,7 +3842,7 @@
35963842
35973843 static int do_tkill(pid_t tgid, pid_t pid, int sig)
35983844 {
3599
- struct siginfo info;
3845
+ struct kernel_siginfo info;
36003846
36013847 clear_siginfo(&info);
36023848 info.si_signo = sig;
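do_tkill() above serves both tkill() and tgkill(). A small sketch of directing a signal at one thread of the current process through the raw syscall, assuming SYS_gettid and SYS_tgkill are exposed by the libc headers:

#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid = syscall(SYS_gettid);

	/* Ignore the signal so the example does not terminate itself. */
	signal(SIGUSR1, SIG_IGN);

	/* tgkill() targets one thread; the tgid argument guards against the
	 * tid having been reused by another process. */
	if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) < 0) {
		perror("tgkill");
		return 1;
	}
	printf("sent SIGUSR1 to tid %d\n", (int)tid);
	return 0;
}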
....@@ -3643,7 +3889,7 @@
36433889 return do_tkill(0, pid, sig);
36443890 }
36453891
3646
-static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3892
+static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
36473893 {
36483894 /* Not even root can pretend to send signals from the kernel.
36493895 * Nor can they impersonate a kill()/tgkill(), which adds source info.
....@@ -3651,8 +3897,6 @@
36513897 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
36523898 (task_pid_vnr(current) != pid))
36533899 return -EPERM;
3654
-
3655
- info->si_signo = sig;
36563900
36573901 /* POSIX.1b doesn't mention process groups. */
36583902 return kill_proc_info(sig, info, pid);
....@@ -3667,9 +3911,10 @@
36673911 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
36683912 siginfo_t __user *, uinfo)
36693913 {
3670
- siginfo_t info;
3671
- if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3672
- return -EFAULT;
3914
+ kernel_siginfo_t info;
3915
+ int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3916
+ if (unlikely(ret))
3917
+ return ret;
36733918 return do_rt_sigqueueinfo(pid, sig, &info);
36743919 }
36753920
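rt_sigqueueinfo() is what glibc's sigqueue() wrapper ultimately calls. A minimal sketch of queueing a real-time signal with an attached value, which reaches the receiver as si_value/si_int (the SIL_RT fields):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_value;

static void rt_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig; (void)ucontext;
	got_value = info->si_value.sival_int;	/* the queued payload */
}

int main(void)
{
	struct sigaction sa = { 0 };
	union sigval val = { .sival_int = 42 };

	sa.sa_sigaction = rt_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMIN, &sa, NULL);

	/* Queue SIGRTMIN to ourselves; delivery to the calling process
	 * happens before sigqueue() returns, so the handler has run here. */
	sigqueue(getpid(), SIGRTMIN, val);
	printf("received payload %d\n", (int)got_value);
	return 0;
}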
....@@ -3679,15 +3924,15 @@
36793924 int, sig,
36803925 struct compat_siginfo __user *, uinfo)
36813926 {
3682
- siginfo_t info;
3683
- int ret = copy_siginfo_from_user32(&info, uinfo);
3927
+ kernel_siginfo_t info;
3928
+ int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
36843929 if (unlikely(ret))
36853930 return ret;
36863931 return do_rt_sigqueueinfo(pid, sig, &info);
36873932 }
36883933 #endif
36893934
3690
-static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3935
+static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
36913936 {
36923937 /* This is only valid for single tasks */
36933938 if (pid <= 0 || tgid <= 0)
....@@ -3700,19 +3945,16 @@
37003945 (task_pid_vnr(current) != pid))
37013946 return -EPERM;
37023947
3703
- info->si_signo = sig;
3704
-
37053948 return do_send_specific(tgid, pid, sig, info);
37063949 }
37073950
37083951 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
37093952 siginfo_t __user *, uinfo)
37103953 {
3711
- siginfo_t info;
3712
-
3713
- if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3714
- return -EFAULT;
3715
-
3954
+ kernel_siginfo_t info;
3955
+ int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3956
+ if (unlikely(ret))
3957
+ return ret;
37163958 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
37173959 }
37183960
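rt_tgsigqueueinfo() is the per-thread counterpart used by glibc's pthread_sigqueue() (a GNU extension, assumed available here). A sketch of queueing a payload-carrying signal to one thread and collecting it synchronously:

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *worker(void *arg)
{
	sigset_t set;
	siginfo_t info;

	(void)arg;
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	/* Wait synchronously for the queued signal and its payload. */
	sigwaitinfo(&set, &info);
	printf("worker got payload %d\n", info.si_value.sival_int);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	sigset_t set;
	union sigval val = { .sival_int = 7 };

	/* Block SIGRTMIN in all threads so the worker can sigwaitinfo() it. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	pthread_create(&tid, NULL, worker, NULL);
	/* pthread_sigqueue() targets one thread, like rt_tgsigqueueinfo(). */
	pthread_sigqueue(tid, SIGRTMIN, val);
	pthread_join(tid, NULL);
	return 0;
}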
....@@ -3723,10 +3965,10 @@
37233965 int, sig,
37243966 struct compat_siginfo __user *, uinfo)
37253967 {
3726
- siginfo_t info;
3727
-
3728
- if (copy_siginfo_from_user32(&info, uinfo))
3729
- return -EFAULT;
3968
+ kernel_siginfo_t info;
3969
+ int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3970
+ if (unlikely(ret))
3971
+ return ret;
37303972 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
37313973 }
37323974 #endif
....@@ -3771,6 +4013,22 @@
37714013 spin_lock_irq(&p->sighand->siglock);
37724014 if (oact)
37734015 *oact = *k;
4016
+
4017
+ /*
4018
+ * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4019
+ * e.g. by having an architecture use the bit in their uapi.
4020
+ */
4021
+ BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4022
+
4023
+ /*
4024
+ * Clear unknown flag bits in order to allow userspace to detect missing
4025
+ * support for flag bits and to allow the kernel to use non-uapi bits
4026
+ * internally.
4027
+ */
4028
+ if (act)
4029
+ act->sa.sa_flags &= UAPI_SA_FLAGS;
4030
+ if (oact)
4031
+ oact->sa.sa_flags &= UAPI_SA_FLAGS;
37744032
37754033 sigaction_compat_abi(act, oact);
37764034
....@@ -4117,7 +4375,7 @@
41174375
41184376 if (act) {
41194377 old_sigset_t mask;
4120
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
4378
+ if (!access_ok(act, sizeof(*act)) ||
41214379 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
41224380 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
41234381 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
....@@ -4132,7 +4390,7 @@
41324390 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
41334391
41344392 if (!ret && oact) {
4135
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
4393
+ if (!access_ok(oact, sizeof(*oact)) ||
41364394 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
41374395 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
41384396 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
....@@ -4154,7 +4412,7 @@
41544412 compat_uptr_t handler, restorer;
41554413
41564414 if (act) {
4157
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
4415
+ if (!access_ok(act, sizeof(*act)) ||
41584416 __get_user(handler, &act->sa_handler) ||
41594417 __get_user(restorer, &act->sa_restorer) ||
41604418 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
....@@ -4172,7 +4430,7 @@
41724430 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
41734431
41744432 if (!ret && oact) {
4175
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
4433
+ if (!access_ok(oact, sizeof(*oact)) ||
41764434 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
41774435 &oact->sa_handler) ||
41784436 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
....@@ -4309,12 +4567,78 @@
43094567 return NULL;
43104568 }
43114569
4570
+static inline void siginfo_buildtime_checks(void)
4571
+{
4572
+ BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4573
+
4574
+ /* Verify the offsets in the two siginfos match */
4575
+#define CHECK_OFFSET(field) \
4576
+ BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4577
+
4578
+ /* kill */
4579
+ CHECK_OFFSET(si_pid);
4580
+ CHECK_OFFSET(si_uid);
4581
+
4582
+ /* timer */
4583
+ CHECK_OFFSET(si_tid);
4584
+ CHECK_OFFSET(si_overrun);
4585
+ CHECK_OFFSET(si_value);
4586
+
4587
+ /* rt */
4588
+ CHECK_OFFSET(si_pid);
4589
+ CHECK_OFFSET(si_uid);
4590
+ CHECK_OFFSET(si_value);
4591
+
4592
+ /* sigchld */
4593
+ CHECK_OFFSET(si_pid);
4594
+ CHECK_OFFSET(si_uid);
4595
+ CHECK_OFFSET(si_status);
4596
+ CHECK_OFFSET(si_utime);
4597
+ CHECK_OFFSET(si_stime);
4598
+
4599
+ /* sigfault */
4600
+ CHECK_OFFSET(si_addr);
4601
+ CHECK_OFFSET(si_addr_lsb);
4602
+ CHECK_OFFSET(si_lower);
4603
+ CHECK_OFFSET(si_upper);
4604
+ CHECK_OFFSET(si_pkey);
4605
+
4606
+ /* sigpoll */
4607
+ CHECK_OFFSET(si_band);
4608
+ CHECK_OFFSET(si_fd);
4609
+
4610
+ /* sigsys */
4611
+ CHECK_OFFSET(si_call_addr);
4612
+ CHECK_OFFSET(si_syscall);
4613
+ CHECK_OFFSET(si_arch);
4614
+#undef CHECK_OFFSET
4615
+
4616
+ /* usb asyncio */
4617
+ BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4618
+ offsetof(struct siginfo, si_addr));
4619
+ if (sizeof(int) == sizeof(void __user *)) {
4620
+ BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4621
+ sizeof(void __user *));
4622
+ } else {
4623
+ BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4624
+ sizeof_field(struct siginfo, si_uid)) !=
4625
+ sizeof(void __user *));
4626
+ BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4627
+ offsetof(struct siginfo, si_uid));
4628
+ }
4629
+#ifdef CONFIG_COMPAT
4630
+ BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4631
+ offsetof(struct compat_siginfo, si_addr));
4632
+ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4633
+ sizeof(compat_uptr_t));
4634
+ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4635
+ sizeof_field(struct siginfo, si_pid));
4636
+#endif
4637
+}
4638
+
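siginfo_buildtime_checks() freezes the siginfo ABI with BUILD_BUG_ON; the same technique is available to ordinary C via _Static_assert. A small hedged analogue (the 128-byte figure is the usual Linux SI_MAX_SIZE, stated here as an assumption):

#include <signal.h>
#include <stddef.h>

/* Compile-time layout checks in userspace, mirroring the kernel's
 * CHECK_OFFSET()/BUILD_BUG_ON pattern: if either assertion does not hold
 * for the target ABI, the build fails instead of silently misreading data. */
_Static_assert(sizeof(siginfo_t) == 128,
	       "siginfo_t is expected to be SI_MAX_SIZE (128) bytes");
_Static_assert(offsetof(siginfo_t, si_signo) == 0,
	       "si_signo is expected to be the first member");

int main(void)
{
	return 0;
}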
43124639 void __init signals_init(void)
43134640 {
4314
- /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
4315
- BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
4316
- != offsetof(struct siginfo, _sifields._pad));
4317
- BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4641
+ siginfo_buildtime_checks();
43184642
43194643 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
43204644 }