hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/kernel/signal.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/kernel/signal.c
  *
@@ -19,6 +20,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/cputime.h>
+#include <linux/sched/rt.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/proc_fs.h>
@@ -44,6 +46,8 @@
 #include <linux/posix-timers.h>
 #include <linux/livepatch.h>
 #include <linux/cgroup.h>
+#include <linux/audit.h>
+#include <linux/oom.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/signal.h>
@@ -53,8 +57,9 @@
 #include <asm/unistd.h>
 #include <asm/siginfo.h>
 #include <asm/cacheflush.h>
-#include "audit.h" /* audit_signal_info() */
 
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/signal.h>
 /*
  * SLAB caches for signal bits.
  */
@@ -185,6 +190,7 @@
 		clear_thread_flag(TIF_SIGPENDING);
 
 }
+EXPORT_SYMBOL(recalc_sigpending);
 
 void calculate_sigpending(void)
 {
@@ -352,7 +358,7 @@
  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
  * Group stop states are cleared and the group stop count is consumed if
  * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
- * stop, the appropriate %SIGNAL_* flags are set.
+ * stop, the appropriate `SIGNAL_*` flags are set.
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
@@ -402,13 +408,30 @@
 	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 }
 
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+	struct sigqueue *q = t->sigqueue_cache;
+
+	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
+		return NULL;
+	return q;
+}
+
+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
+{
+	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
+		return 0;
+	return 1;
+}
+
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
  *   appropriate lock must be held to stop the target task from exiting
  */
 static struct sigqueue *
-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
+		    int override_rlimit, int fromslab)
 {
 	struct sigqueue *q = NULL;
 	struct user_struct *user;
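Note: get_task_cache()/put_task_cache() above form a lockless, single-slot per-task cache of one struct sigqueue, used later in this patch by sigqueue_free_current() so a real-time task can recycle its last-freed entry instead of going back to the slab. As a rough illustration only (not kernel code; the struct and names are stand-ins), the same single-slot cmpxchg pattern in portable C11:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sigqueue_stub { int sig; };              /* stand-in for struct sigqueue */
static _Atomic(struct sigqueue_stub *) cache;   /* stand-in for t->sigqueue_cache */

/* Take the cached entry, or NULL if the slot is empty or another path won the race. */
static struct sigqueue_stub *cache_get(void)
{
	struct sigqueue_stub *q = atomic_load(&cache);

	if (!q || !atomic_compare_exchange_strong(&cache, &q, NULL))
		return NULL;
	return q;
}

/* Park an entry in the slot; 0 on success, 1 if the slot was already occupied. */
static int cache_put(struct sigqueue_stub *q)
{
	struct sigqueue_stub *expected = NULL;

	return atomic_compare_exchange_strong(&cache, &expected, q) ? 0 : 1;
}

int main(void)
{
	struct sigqueue_stub *q = malloc(sizeof(*q));

	if (cache_put(q) == 0)          /* the free path parks the entry */
		puts("cached");
	if ((q = cache_get()) != NULL)  /* the next allocation reuses it */
		puts("reused");
	free(q);
	return 0;
}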
@@ -430,7 +453,10 @@
 	rcu_read_unlock();
 
 	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
-		q = kmem_cache_alloc(sigqueue_cachep, flags);
+		if (!fromslab)
+			q = get_task_cache(t);
+		if (!q)
+			q = kmem_cache_alloc(sigqueue_cachep, flags);
 	} else {
 		print_dropped_signal(sig);
 	}
@@ -447,6 +473,13 @@
 	return q;
 }
 
+static struct sigqueue *
+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
+		 int override_rlimit)
+{
+	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
+}
+
 static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
@@ -454,6 +487,21 @@
 	if (atomic_dec_and_test(&q->user->sigpending))
 		free_uid(q->user);
 	kmem_cache_free(sigqueue_cachep, q);
+}
+
+static void sigqueue_free_current(struct sigqueue *q)
+{
+	struct user_struct *up;
+
+	if (q->flags & SIGQUEUE_PREALLOC)
+		return;
+
+	up = q->user;
+	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
+		if (atomic_dec_and_test(&up->sigpending))
+			free_uid(up);
+	} else
+		__sigqueue_free(q);
 }
 
 void flush_sigqueue(struct sigpending *queue)
@@ -469,6 +517,21 @@
 }
 
 /*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
+void flush_task_sigqueue(struct task_struct *tsk)
+{
+	struct sigqueue *q;
+
+	flush_sigqueue(&tsk->pending);
+
+	q = get_task_cache(tsk);
+	if (q)
+		kmem_cache_free(sigqueue_cachep, q);
+}
+
+/*
  * Flush all pending signals for this kthread.
  */
 void flush_signals(struct task_struct *t)
@@ -481,6 +544,7 @@
 	flush_sigqueue(&t->signal->shared_pending);
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
+EXPORT_SYMBOL(flush_signals);
 
 #ifdef CONFIG_POSIX_TIMERS
 static void __flush_itimer_signals(struct sigpending *pending)
....@@ -562,7 +626,7 @@
562626 return !tsk->ptrace;
563627 }
564628
565
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
629
+static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
566630 bool *resched_timer)
567631 {
568632 struct sigqueue *q, *first = NULL;
....@@ -591,7 +655,7 @@
591655 (info->si_code == SI_TIMER) &&
592656 (info->si_sys_private);
593657
594
- __sigqueue_free(first);
658
+ sigqueue_free_current(first);
595659 } else {
596660 /*
597661 * Ok, it wasn't in the queue. This must be
....@@ -608,7 +672,7 @@
608672 }
609673
610674 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
611
- siginfo_t *info, bool *resched_timer)
675
+ kernel_siginfo_t *info, bool *resched_timer)
612676 {
613677 int sig = next_signal(pending, mask);
614678
....@@ -623,10 +687,12 @@
623687 *
624688 * All callers have to hold the siglock.
625689 */
626
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
690
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
627691 {
628692 bool resched_timer = false;
629693 int signr;
694
+
695
+ WARN_ON_ONCE(tsk != current);
630696
631697 /* We only dequeue private signals from ourselves, we don't let
632698 * signalfd steal them
....@@ -699,8 +765,9 @@
699765 #endif
700766 return signr;
701767 }
768
+EXPORT_SYMBOL_GPL(dequeue_signal);
702769
703
-static int dequeue_synchronous_signal(siginfo_t *info)
770
+static int dequeue_synchronous_signal(kernel_siginfo_t *info)
704771 {
705772 struct task_struct *tsk = current;
706773 struct sigpending *pending = &tsk->pending;
....@@ -716,7 +783,7 @@
716783 * Return the first synchronous signal in the queue.
717784 */
718785 list_for_each_entry(q, &pending->list, list) {
719
- /* Synchronous signals have a postive si_code */
786
+ /* Synchronous signals have a positive si_code */
720787 if ((q->info.si_code > SI_USER) &&
721788 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
722789 sync = q;
....@@ -791,12 +858,12 @@
791858 }
792859 }
793860
794
-static inline int is_si_special(const struct siginfo *info)
861
+static inline int is_si_special(const struct kernel_siginfo *info)
795862 {
796
- return info <= SEND_SIG_FORCED;
863
+ return info <= SEND_SIG_PRIV;
797864 }
798865
799
-static inline bool si_fromuser(const struct siginfo *info)
866
+static inline bool si_fromuser(const struct kernel_siginfo *info)
800867 {
801868 return info == SEND_SIG_NOINFO ||
802869 (!is_si_special(info) && SI_FROMUSER(info));
....@@ -821,7 +888,7 @@
821888 * Bad permissions for sending the signal
822889 * - the caller must hold the RCU read lock
823890 */
824
-static int check_kill_permission(int sig, struct siginfo *info,
891
+static int check_kill_permission(int sig, struct kernel_siginfo *info,
825892 struct task_struct *t)
826893 {
827894 struct pid *sid;
....@@ -848,6 +915,7 @@
848915 */
849916 if (!sid || sid == task_session(current))
850917 break;
918
+ fallthrough;
851919 default:
852920 return -EPERM;
853921 }
....@@ -946,7 +1014,7 @@
9461014 /*
9471015 * The first thread which returns from do_signal_stop()
9481016 * will take ->siglock, notice SIGNAL_CLD_MASK, and
949
- * notify its parent. See get_signal_to_deliver().
1017
+ * notify its parent. See get_signal().
9501018 */
9511019 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
9521020 signal->group_stop_count = 0;
....@@ -979,7 +1047,7 @@
9791047 if (task_is_stopped_or_traced(p))
9801048 return false;
9811049
982
- return task_curr(p) || !signal_pending(p);
1050
+ return task_curr(p) || !task_sigpending(p);
9831051 }
9841052
9851053 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
....@@ -1063,29 +1131,8 @@
10631131 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
10641132 }
10651133
1066
-#ifdef CONFIG_USER_NS
1067
-static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1068
-{
1069
- if (current_user_ns() == task_cred_xxx(t, user_ns))
1070
- return;
1071
-
1072
- if (SI_FROMKERNEL(info))
1073
- return;
1074
-
1075
- rcu_read_lock();
1076
- info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1077
- make_kuid(current_user_ns(), info->si_uid));
1078
- rcu_read_unlock();
1079
-}
1080
-#else
1081
-static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1082
-{
1083
- return;
1084
-}
1085
-#endif
1086
-
1087
-static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1088
- enum pid_type type, int from_ancestor_ns)
1134
+static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1135
+ enum pid_type type, bool force)
10891136 {
10901137 struct sigpending *pending;
10911138 struct sigqueue *q;
....@@ -1095,8 +1142,7 @@
10951142 assert_spin_locked(&t->sighand->siglock);
10961143
10971144 result = TRACE_SIGNAL_IGNORED;
1098
- if (!prepare_signal(sig, t,
1099
- from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
1145
+ if (!prepare_signal(sig, t, force))
11001146 goto ret;
11011147
11021148 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
....@@ -1111,10 +1157,9 @@
11111157
11121158 result = TRACE_SIGNAL_DELIVERED;
11131159 /*
1114
- * fast-pathed signals for kernel-internal things like SIGSTOP
1115
- * or SIGKILL.
1160
+ * Skip useless siginfo allocation for SIGKILL and kernel threads.
11161161 */
1117
- if (info == SEND_SIG_FORCED)
1162
+ if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
11181163 goto out_set;
11191164
11201165 /*
....@@ -1142,7 +1187,11 @@
11421187 q->info.si_code = SI_USER;
11431188 q->info.si_pid = task_tgid_nr_ns(current,
11441189 task_active_pid_ns(t));
1145
- q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1190
+ rcu_read_lock();
1191
+ q->info.si_uid =
1192
+ from_kuid_munged(task_cred_xxx(t, user_ns),
1193
+ current_uid());
1194
+ rcu_read_unlock();
11461195 break;
11471196 case (unsigned long) SEND_SIG_PRIV:
11481197 clear_siginfo(&q->info);
....@@ -1154,30 +1203,24 @@
11541203 break;
11551204 default:
11561205 copy_siginfo(&q->info, info);
1157
- if (from_ancestor_ns)
1158
- q->info.si_pid = 0;
11591206 break;
11601207 }
1161
-
1162
- userns_fixup_signal_uid(&q->info, t);
1163
-
1164
- } else if (!is_si_special(info)) {
1165
- if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1166
- /*
1167
- * Queue overflow, abort. We may abort if the
1168
- * signal was rt and sent by user using something
1169
- * other than kill().
1170
- */
1171
- result = TRACE_SIGNAL_OVERFLOW_FAIL;
1172
- ret = -EAGAIN;
1173
- goto ret;
1174
- } else {
1175
- /*
1176
- * This is a silent loss of information. We still
1177
- * send the signal, but the *info bits are lost.
1178
- */
1179
- result = TRACE_SIGNAL_LOSE_INFO;
1180
- }
1208
+ } else if (!is_si_special(info) &&
1209
+ sig >= SIGRTMIN && info->si_code != SI_USER) {
1210
+ /*
1211
+ * Queue overflow, abort. We may abort if the
1212
+ * signal was rt and sent by user using something
1213
+ * other than kill().
1214
+ */
1215
+ result = TRACE_SIGNAL_OVERFLOW_FAIL;
1216
+ ret = -EAGAIN;
1217
+ goto ret;
1218
+ } else {
1219
+ /*
1220
+ * This is a silent loss of information. We still
1221
+ * send the signal, but the *info bits are lost.
1222
+ */
1223
+ result = TRACE_SIGNAL_LOSE_INFO;
11811224 }
11821225
11831226 out_set:
....@@ -1204,17 +1247,62 @@
12041247 return ret;
12051248 }
12061249
1207
-static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1250
+static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1251
+{
1252
+ bool ret = false;
1253
+ switch (siginfo_layout(info->si_signo, info->si_code)) {
1254
+ case SIL_KILL:
1255
+ case SIL_CHLD:
1256
+ case SIL_RT:
1257
+ ret = true;
1258
+ break;
1259
+ case SIL_TIMER:
1260
+ case SIL_POLL:
1261
+ case SIL_FAULT:
1262
+ case SIL_FAULT_MCEERR:
1263
+ case SIL_FAULT_BNDERR:
1264
+ case SIL_FAULT_PKUERR:
1265
+ case SIL_SYS:
1266
+ ret = false;
1267
+ break;
1268
+ }
1269
+ return ret;
1270
+}
1271
+
1272
+static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
12081273 enum pid_type type)
12091274 {
1210
- int from_ancestor_ns = 0;
1275
+ /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1276
+ bool force = false;
12111277
1212
-#ifdef CONFIG_PID_NS
1213
- from_ancestor_ns = si_fromuser(info) &&
1214
- !task_pid_nr_ns(current, task_active_pid_ns(t));
1215
-#endif
1278
+ if (info == SEND_SIG_NOINFO) {
1279
+ /* Force if sent from an ancestor pid namespace */
1280
+ force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1281
+ } else if (info == SEND_SIG_PRIV) {
1282
+ /* Don't ignore kernel generated signals */
1283
+ force = true;
1284
+ } else if (has_si_pid_and_uid(info)) {
1285
+ /* SIGKILL and SIGSTOP is special or has ids */
1286
+ struct user_namespace *t_user_ns;
12161287
1217
- return __send_signal(sig, info, t, type, from_ancestor_ns);
1288
+ rcu_read_lock();
1289
+ t_user_ns = task_cred_xxx(t, user_ns);
1290
+ if (current_user_ns() != t_user_ns) {
1291
+ kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1292
+ info->si_uid = from_kuid_munged(t_user_ns, uid);
1293
+ }
1294
+ rcu_read_unlock();
1295
+
1296
+ /* A kernel generated signal? */
1297
+ force = (info->si_code == SI_KERNEL);
1298
+
1299
+ /* From an ancestor pid namespace? */
1300
+ if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1301
+ info->si_pid = 0;
1302
+ force = true;
1303
+ }
1304
+ }
1305
+ return __send_signal(sig, info, t, type, force);
12181306 }
12191307
12201308 static void print_fatal_signal(int signr)
....@@ -1251,23 +1339,17 @@
12511339 __setup("print-fatal-signals=", setup_print_fatal_signals);
12521340
12531341 int
1254
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1342
+__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
12551343 {
12561344 return send_signal(sig, info, p, PIDTYPE_TGID);
12571345 }
12581346
1259
-static int
1260
-specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1261
-{
1262
- return send_signal(sig, info, t, PIDTYPE_PID);
1263
-}
1264
-
1265
-int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1347
+int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
12661348 enum pid_type type)
12671349 {
12681350 unsigned long flags;
12691351 int ret = -ESRCH;
1270
-
1352
+ trace_android_vh_do_send_sig_info(sig, current, p);
12711353 if (lock_task_sighand(p, &flags)) {
12721354 ret = send_signal(sig, info, p, type);
12731355 unlock_task_sighand(p, &flags);
....@@ -1287,13 +1369,42 @@
12871369 * We don't want to have recursive SIGSEGV's etc, for example,
12881370 * that is why we also clear SIGNAL_UNKILLABLE.
12891371 */
1290
-int
1291
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1372
+static int
1373
+force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
12921374 {
12931375 unsigned long int flags;
12941376 int ret, blocked, ignored;
12951377 struct k_sigaction *action;
1378
+ int sig = info->si_signo;
12961379
1380
+ /*
1381
+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
1382
+ * since it can not enable preemption, and the signal code's spin_locks
1383
+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
1384
+ * send the signal on exit of the trap.
1385
+ */
1386
+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
1387
+ if (in_atomic()) {
1388
+ struct task_struct *t = current;
1389
+
1390
+ if (WARN_ON_ONCE(t->forced_info.si_signo))
1391
+ return 0;
1392
+
1393
+ if (is_si_special(info)) {
1394
+ WARN_ON_ONCE(info != SEND_SIG_PRIV);
1395
+ t->forced_info.si_signo = info->si_signo;
1396
+ t->forced_info.si_errno = 0;
1397
+ t->forced_info.si_code = SI_KERNEL;
1398
+ t->forced_info.si_pid = 0;
1399
+ t->forced_info.si_uid = 0;
1400
+ } else {
1401
+ t->forced_info = *info;
1402
+ }
1403
+
1404
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1405
+ return 0;
1406
+ }
1407
+#endif
12971408 spin_lock_irqsave(&t->sighand->siglock, flags);
12981409 action = &t->sighand->action[sig-1];
12991410 ignored = action->sa.sa_handler == SIG_IGN;
....@@ -1311,10 +1422,15 @@
13111422 */
13121423 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
13131424 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1314
- ret = specific_send_sig_info(sig, info, t);
1425
+ ret = send_signal(sig, info, t, PIDTYPE_PID);
13151426 spin_unlock_irqrestore(&t->sighand->siglock, flags);
13161427
13171428 return ret;
1429
+}
1430
+
1431
+int force_sig_info(struct kernel_siginfo *info)
1432
+{
1433
+ return force_sig_info_to_task(info, current);
13181434 }
13191435
13201436 /*
....@@ -1364,7 +1480,7 @@
13641480 * must see ->sighand == NULL.
13651481 */
13661482 spin_lock_irqsave(&sighand->siglock, *flags);
1367
- if (likely(sighand == tsk->sighand))
1483
+ if (likely(sighand == rcu_access_pointer(tsk->sighand)))
13681484 break;
13691485 spin_unlock_irqrestore(&sighand->siglock, *flags);
13701486 }
....@@ -1376,8 +1492,8 @@
13761492 /*
13771493 * send signal info to all the members of a group
13781494 */
1379
-int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1380
- enum pid_type type)
1495
+int group_send_sig_info(int sig, struct kernel_siginfo *info,
1496
+ struct task_struct *p, enum pid_type type)
13811497 {
13821498 int ret;
13831499
....@@ -1385,8 +1501,17 @@
13851501 ret = check_kill_permission(sig, info, p);
13861502 rcu_read_unlock();
13871503
1388
- if (!ret && sig)
1504
+ if (!ret && sig) {
13891505 ret = do_send_sig_info(sig, info, p, type);
1506
+ if (!ret && sig == SIGKILL) {
1507
+ bool reap = false;
1508
+
1509
+ trace_android_vh_process_killed(current, &reap);
1510
+ trace_android_vh_killed_process(current, p, &reap);
1511
+ if (reap)
1512
+ add_to_oom_reaper(p);
1513
+ }
1514
+ }
13901515
13911516 return ret;
13921517 }
....@@ -1396,7 +1521,7 @@
13961521 * control characters do (^C, ^Z etc)
13971522 * - the caller must hold at least a readlock on tasklist_lock
13981523 */
1399
-int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1524
+int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
14001525 {
14011526 struct task_struct *p = NULL;
14021527 int retval, success;
....@@ -1411,7 +1536,7 @@
14111536 return success ? 0 : retval;
14121537 }
14131538
1414
-int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1539
+int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
14151540 {
14161541 int error = -ESRCH;
14171542 struct task_struct *p;
....@@ -1433,7 +1558,7 @@
14331558 }
14341559 }
14351560
1436
-static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1561
+static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
14371562 {
14381563 int error;
14391564 rcu_read_lock();
....@@ -1453,16 +1578,47 @@
14531578 uid_eq(cred->uid, pcred->uid);
14541579 }
14551580
1456
-/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1457
-int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1458
- const struct cred *cred)
1581
+/*
1582
+ * The usb asyncio usage of siginfo is wrong. The glibc support
1583
+ * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1584
+ * AKA after the generic fields:
1585
+ * kernel_pid_t si_pid;
1586
+ * kernel_uid32_t si_uid;
1587
+ * sigval_t si_value;
1588
+ *
1589
+ * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1590
+ * after the generic fields is:
1591
+ * void __user *si_addr;
1592
+ *
1593
+ * This is a practical problem when there is a 64bit big endian kernel
1594
+ * and a 32bit userspace. As the 32bit address will encoded in the low
1595
+ * 32bits of the pointer. Those low 32bits will be stored at higher
1596
+ * address than appear in a 32 bit pointer. So userspace will not
1597
+ * see the address it was expecting for it's completions.
1598
+ *
1599
+ * There is nothing in the encoding that can allow
1600
+ * copy_siginfo_to_user32 to detect this confusion of formats, so
1601
+ * handle this by requiring the caller of kill_pid_usb_asyncio to
1602
+ * notice when this situration takes place and to store the 32bit
1603
+ * pointer in sival_int, instead of sival_addr of the sigval_t addr
1604
+ * parameter.
1605
+ */
1606
+int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1607
+ struct pid *pid, const struct cred *cred)
14591608 {
1460
- int ret = -EINVAL;
1609
+ struct kernel_siginfo info;
14611610 struct task_struct *p;
14621611 unsigned long flags;
1612
+ int ret = -EINVAL;
14631613
14641614 if (!valid_signal(sig))
14651615 return ret;
1616
+
1617
+ clear_siginfo(&info);
1618
+ info.si_signo = sig;
1619
+ info.si_errno = errno;
1620
+ info.si_code = SI_ASYNCIO;
1621
+ *((sigval_t *)&info.si_pid) = addr;
14661622
14671623 rcu_read_lock();
14681624 p = pid_task(pid, PIDTYPE_PID);
....@@ -1470,17 +1626,17 @@
14701626 ret = -ESRCH;
14711627 goto out_unlock;
14721628 }
1473
- if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1629
+ if (!kill_as_cred_perm(cred, p)) {
14741630 ret = -EPERM;
14751631 goto out_unlock;
14761632 }
1477
- ret = security_task_kill(p, info, sig, cred);
1633
+ ret = security_task_kill(p, &info, sig, cred);
14781634 if (ret)
14791635 goto out_unlock;
14801636
14811637 if (sig) {
14821638 if (lock_task_sighand(p, &flags)) {
1483
- ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
1639
+ ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
14841640 unlock_task_sighand(p, &flags);
14851641 } else
14861642 ret = -ESRCH;
....@@ -1489,7 +1645,7 @@
14891645 rcu_read_unlock();
14901646 return ret;
14911647 }
1492
-EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1648
+EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
14931649
14941650 /*
14951651 * kill_something_info() interprets pid in interesting ways just like kill(2).
....@@ -1498,16 +1654,12 @@
14981654 * is probably wrong. Should make it like BSD or SYSV.
14991655 */
15001656
1501
-static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1657
+static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
15021658 {
15031659 int ret;
15041660
1505
- if (pid > 0) {
1506
- rcu_read_lock();
1507
- ret = kill_pid_info(sig, info, find_vpid(pid));
1508
- rcu_read_unlock();
1509
- return ret;
1510
- }
1661
+ if (pid > 0)
1662
+ return kill_proc_info(sig, info, pid);
15111663
15121664 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
15131665 if (pid == INT_MIN)
....@@ -1542,7 +1694,7 @@
15421694 * These are for backward compatibility with the rest of the kernel source.
15431695 */
15441696
1545
-int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1697
+int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
15461698 {
15471699 /*
15481700 * Make sure legacy kernel users don't send in bad values
....@@ -1553,6 +1705,7 @@
15531705
15541706 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
15551707 }
1708
+EXPORT_SYMBOL(send_sig_info);
15561709
15571710 #define __si_special(priv) \
15581711 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
....@@ -1562,11 +1715,21 @@
15621715 {
15631716 return send_sig_info(sig, __si_special(priv), p);
15641717 }
1718
+EXPORT_SYMBOL(send_sig);
15651719
1566
-void force_sig(int sig, struct task_struct *p)
1720
+void force_sig(int sig)
15671721 {
1568
- force_sig_info(sig, SEND_SIG_PRIV, p);
1722
+ struct kernel_siginfo info;
1723
+
1724
+ clear_siginfo(&info);
1725
+ info.si_signo = sig;
1726
+ info.si_errno = 0;
1727
+ info.si_code = SI_KERNEL;
1728
+ info.si_pid = 0;
1729
+ info.si_uid = 0;
1730
+ force_sig_info(&info);
15691731 }
1732
+EXPORT_SYMBOL(force_sig);
15701733
15711734 /*
15721735 * When things go south during signal handling, we
....@@ -1574,23 +1737,25 @@
15741737 * the problem was already a SIGSEGV, we'll want to
15751738 * make sure we don't even try to deliver the signal..
15761739 */
1577
-void force_sigsegv(int sig, struct task_struct *p)
1740
+void force_sigsegv(int sig)
15781741 {
1742
+ struct task_struct *p = current;
1743
+
15791744 if (sig == SIGSEGV) {
15801745 unsigned long flags;
15811746 spin_lock_irqsave(&p->sighand->siglock, flags);
15821747 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
15831748 spin_unlock_irqrestore(&p->sighand->siglock, flags);
15841749 }
1585
- force_sig(SIGSEGV, p);
1750
+ force_sig(SIGSEGV);
15861751 }
15871752
1588
-int force_sig_fault(int sig, int code, void __user *addr
1753
+int force_sig_fault_to_task(int sig, int code, void __user *addr
15891754 ___ARCH_SI_TRAPNO(int trapno)
15901755 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
15911756 , struct task_struct *t)
15921757 {
1593
- struct siginfo info;
1758
+ struct kernel_siginfo info;
15941759
15951760 clear_siginfo(&info);
15961761 info.si_signo = sig;
....@@ -1605,7 +1770,16 @@
16051770 info.si_flags = flags;
16061771 info.si_isr = isr;
16071772 #endif
1608
- return force_sig_info(info.si_signo, &info, t);
1773
+ return force_sig_info_to_task(&info, t);
1774
+}
1775
+
1776
+int force_sig_fault(int sig, int code, void __user *addr
1777
+ ___ARCH_SI_TRAPNO(int trapno)
1778
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1779
+{
1780
+ return force_sig_fault_to_task(sig, code, addr
1781
+ ___ARCH_SI_TRAPNO(trapno)
1782
+ ___ARCH_SI_IA64(imm, flags, isr), current);
16091783 }
16101784
16111785 int send_sig_fault(int sig, int code, void __user *addr
....@@ -1613,7 +1787,7 @@
16131787 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
16141788 , struct task_struct *t)
16151789 {
1616
- struct siginfo info;
1790
+ struct kernel_siginfo info;
16171791
16181792 clear_siginfo(&info);
16191793 info.si_signo = sig;
....@@ -1631,9 +1805,9 @@
16311805 return send_sig_info(info.si_signo, &info, t);
16321806 }
16331807
1634
-int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1808
+int force_sig_mceerr(int code, void __user *addr, short lsb)
16351809 {
1636
- struct siginfo info;
1810
+ struct kernel_siginfo info;
16371811
16381812 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
16391813 clear_siginfo(&info);
....@@ -1642,12 +1816,12 @@
16421816 info.si_code = code;
16431817 info.si_addr = addr;
16441818 info.si_addr_lsb = lsb;
1645
- return force_sig_info(info.si_signo, &info, t);
1819
+ return force_sig_info(&info);
16461820 }
16471821
16481822 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
16491823 {
1650
- struct siginfo info;
1824
+ struct kernel_siginfo info;
16511825
16521826 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
16531827 clear_siginfo(&info);
....@@ -1662,7 +1836,7 @@
16621836
16631837 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
16641838 {
1665
- struct siginfo info;
1839
+ struct kernel_siginfo info;
16661840
16671841 clear_siginfo(&info);
16681842 info.si_signo = SIGSEGV;
....@@ -1671,13 +1845,13 @@
16711845 info.si_addr = addr;
16721846 info.si_lower = lower;
16731847 info.si_upper = upper;
1674
- return force_sig_info(info.si_signo, &info, current);
1848
+ return force_sig_info(&info);
16751849 }
16761850
16771851 #ifdef SEGV_PKUERR
16781852 int force_sig_pkuerr(void __user *addr, u32 pkey)
16791853 {
1680
- struct siginfo info;
1854
+ struct kernel_siginfo info;
16811855
16821856 clear_siginfo(&info);
16831857 info.si_signo = SIGSEGV;
....@@ -1685,7 +1859,7 @@
16851859 info.si_code = SEGV_PKUERR;
16861860 info.si_addr = addr;
16871861 info.si_pkey = pkey;
1688
- return force_sig_info(info.si_signo, &info, current);
1862
+ return force_sig_info(&info);
16891863 }
16901864 #endif
16911865
....@@ -1694,14 +1868,14 @@
16941868 */
16951869 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
16961870 {
1697
- struct siginfo info;
1871
+ struct kernel_siginfo info;
16981872
16991873 clear_siginfo(&info);
17001874 info.si_signo = SIGTRAP;
17011875 info.si_errno = errno;
17021876 info.si_code = TRAP_HWBKPT;
17031877 info.si_addr = addr;
1704
- return force_sig_info(info.si_signo, &info, current);
1878
+ return force_sig_info(&info);
17051879 }
17061880
17071881 int kill_pgrp(struct pid *pid, int sig, int priv)
....@@ -1733,7 +1907,8 @@
17331907 */
17341908 struct sigqueue *sigqueue_alloc(void)
17351909 {
1736
- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1910
+ /* Preallocated sigqueue objects always from the slabcache ! */
1911
+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
17371912
17381913 if (q)
17391914 q->flags |= SIGQUEUE_PREALLOC;
....@@ -1818,6 +1993,7 @@
18181993 {
18191994 struct pid *pid;
18201995
1996
+ WARN_ON(task->exit_state == 0);
18211997 pid = task_pid(task);
18221998 wake_up_all(&pid->wait_pidfd);
18231999 }
....@@ -1831,18 +2007,18 @@
18312007 */
18322008 bool do_notify_parent(struct task_struct *tsk, int sig)
18332009 {
1834
- struct siginfo info;
2010
+ struct kernel_siginfo info;
18352011 unsigned long flags;
18362012 struct sighand_struct *psig;
18372013 bool autoreap = false;
18382014 u64 utime, stime;
18392015
1840
- BUG_ON(sig == -1);
2016
+ WARN_ON_ONCE(sig == -1);
18412017
1842
- /* do_notify_parent_cldstop should have been called instead. */
1843
- BUG_ON(task_is_stopped_or_traced(tsk));
2018
+ /* do_notify_parent_cldstop should have been called instead. */
2019
+ WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
18442020
1845
- BUG_ON(!tsk->ptrace &&
2021
+ WARN_ON_ONCE(!tsk->ptrace &&
18462022 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
18472023
18482024 /* Wake up all pidfd waiters */
....@@ -1915,8 +2091,12 @@
19152091 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
19162092 sig = 0;
19172093 }
2094
+ /*
2095
+ * Send with __send_signal as si_pid and si_uid are in the
2096
+ * parent's namespaces.
2097
+ */
19182098 if (valid_signal(sig) && sig)
1919
- __group_send_sig_info(sig, &info, tsk->parent);
2099
+ __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
19202100 __wake_up_parent(tsk, tsk->parent);
19212101 spin_unlock_irqrestore(&psig->siglock, flags);
19222102
....@@ -1939,7 +2119,7 @@
19392119 static void do_notify_parent_cldstop(struct task_struct *tsk,
19402120 bool for_ptracer, int why)
19412121 {
1942
- struct siginfo info;
2122
+ struct kernel_siginfo info;
19432123 unsigned long flags;
19442124 struct task_struct *parent;
19452125 struct sighand_struct *sighand;
....@@ -2030,7 +2210,7 @@
20302210 * If we actually decide not to stop at all because the tracer
20312211 * is gone, we keep current->exit_code unless clear_code.
20322212 */
2033
-static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
2213
+static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
20342214 __releases(&current->sighand->siglock)
20352215 __acquires(&current->sighand->siglock)
20362216 {
....@@ -2114,16 +2294,8 @@
21142294 if (gstop_done && ptrace_reparented(current))
21152295 do_notify_parent_cldstop(current, false, why);
21162296
2117
- /*
2118
- * Don't want to allow preemption here, because
2119
- * sys_ptrace() needs this task to be inactive.
2120
- *
2121
- * XXX: implement read_unlock_no_resched().
2122
- */
2123
- preempt_disable();
21242297 read_unlock(&tasklist_lock);
21252298 cgroup_enter_frozen();
2126
- preempt_enable_no_resched();
21272299 freezable_schedule();
21282300 cgroup_leave_frozen(true);
21292301 } else {
....@@ -2168,7 +2340,7 @@
21682340
21692341 static void ptrace_do_notify(int signr, int exit_code, int why)
21702342 {
2171
- siginfo_t info;
2343
+ kernel_siginfo_t info;
21722344
21732345 clear_siginfo(&info);
21742346 info.si_signo = signr;
....@@ -2387,7 +2559,7 @@
23872559 freezable_schedule();
23882560 }
23892561
2390
-static int ptrace_signal(int signr, siginfo_t *info)
2562
+static int ptrace_signal(int signr, kernel_siginfo_t *info)
23912563 {
23922564 /*
23932565 * We do not check sig_kernel_stop(signr) but set this marker
@@ -2428,11 +2600,31 @@
 
 	/* If the (new) signal is now blocked, requeue it. */
 	if (sigismember(&current->blocked, signr)) {
-		specific_send_sig_info(signr, info, current);
+		send_signal(signr, info, current, PIDTYPE_PID);
 		signr = 0;
 	}
 
 	return signr;
+}
+
+static void hide_si_addr_tag_bits(struct ksignal *ksig)
+{
+	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
+	case SIL_FAULT:
+	case SIL_FAULT_MCEERR:
+	case SIL_FAULT_BNDERR:
+	case SIL_FAULT_PKUERR:
+		ksig->info.si_addr = arch_untagged_si_addr(
+			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
+		break;
+	case SIL_KILL:
+	case SIL_TIMER:
+	case SIL_POLL:
+	case SIL_CHLD:
+	case SIL_RT:
+	case SIL_SYS:
+		break;
+	}
 }
 
 bool get_signal(struct ksignal *ksig)
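Note: hide_si_addr_tag_bits() strips architecture-specific tag bits from si_addr before a fault signal is delivered, unless the handler asked for them with SA_EXPOSE_TAGBITS (see the sa_flags check near the end of get_signal() later in this patch). As a rough illustration of what "untagging" means on an arm64-style top-byte-ignore layout (arch_untagged_si_addr() is per-architecture; the mask here is only an example):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: drop metadata carried in bits 63..56 of a tagged address. */
static uint64_t untag_addr(uint64_t addr)
{
	return addr & ~(0xffULL << 56);
}

int main(void)
{
	uint64_t tagged = 0x5a00004000001000ULL;	/* tag 0x5a in the top byte */

	printf("tagged:   0x%016llx\n", (unsigned long long)tagged);
	printf("untagged: 0x%016llx\n", (unsigned long long)untag_addr(tagged));
	return 0;
}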
....@@ -2443,6 +2635,18 @@
24432635
24442636 if (unlikely(current->task_works))
24452637 task_work_run();
2638
+
2639
+ /*
2640
+ * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2641
+ * that the arch handlers don't all have to do it. If we get here
2642
+ * without TIF_SIGPENDING, just exit after running signal work.
2643
+ */
2644
+ if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2645
+ if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2646
+ tracehook_notify_signal();
2647
+ if (!task_sigpending(current))
2648
+ return false;
2649
+ }
24462650
24472651 if (unlikely(uprobe_deny_signal()))
24482652 return false;
....@@ -2456,6 +2660,7 @@
24562660
24572661 relock:
24582662 spin_lock_irq(&sighand->siglock);
2663
+
24592664 /*
24602665 * Every stopped thread goes here after wakeup. Check to see if
24612666 * we should notify the parent, prepare_signal(SIGCONT) encodes
....@@ -2499,10 +2704,6 @@
24992704 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
25002705 &sighand->action[SIGKILL - 1]);
25012706 recalc_sigpending();
2502
- current->jobctl &= ~JOBCTL_TRAP_FREEZE;
2503
- spin_unlock_irq(&sighand->siglock);
2504
- if (unlikely(cgroup_task_frozen(current)))
2505
- cgroup_leave_frozen(true);
25062707 goto fatal;
25072708 }
25082709
....@@ -2530,7 +2731,7 @@
25302731 */
25312732 if (unlikely(cgroup_task_frozen(current))) {
25322733 spin_unlock_irq(&sighand->siglock);
2533
- cgroup_leave_frozen(true);
2734
+ cgroup_leave_frozen(false);
25342735 goto relock;
25352736 }
25362737
....@@ -2624,8 +2825,10 @@
26242825 continue;
26252826 }
26262827
2627
- spin_unlock_irq(&sighand->siglock);
26282828 fatal:
2829
+ spin_unlock_irq(&sighand->siglock);
2830
+ if (unlikely(cgroup_task_frozen(current)))
2831
+ cgroup_leave_frozen(true);
26292832
26302833 /*
26312834 * Anything else is fatal, maybe with a core dump.
....@@ -2648,14 +2851,26 @@
26482851 }
26492852
26502853 /*
2854
+ * PF_IO_WORKER threads will catch and exit on fatal signals
2855
+ * themselves. They have cleanup that must be performed, so
2856
+ * we cannot call do_exit() on their behalf.
2857
+ */
2858
+ if (current->flags & PF_IO_WORKER)
2859
+ goto out;
2860
+
2861
+ /*
26512862 * Death signals, no core dump.
26522863 */
26532864 do_group_exit(ksig->info.si_signo);
26542865 /* NOTREACHED */
26552866 }
26562867 spin_unlock_irq(&sighand->siglock);
2657
-
2868
+out:
26582869 ksig->sig = signr;
2870
+
2871
+ if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2872
+ hide_si_addr_tag_bits(ksig);
2873
+
26592874 return ksig->sig > 0;
26602875 }
26612876
....@@ -2689,7 +2904,7 @@
26892904 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
26902905 {
26912906 if (failed)
2692
- force_sigsegv(ksig->sig, current);
2907
+ force_sigsegv(ksig->sig);
26932908 else
26942909 signal_delivered(ksig, stepping);
26952910 }
....@@ -2718,7 +2933,7 @@
27182933 /* Remove the signals this thread can handle. */
27192934 sigandsets(&retarget, &retarget, &t->blocked);
27202935
2721
- if (!signal_pending(t))
2936
+ if (!task_sigpending(t))
27222937 signal_wake_up(t, 0);
27232938
27242939 if (sigisemptyset(&retarget))
....@@ -2752,7 +2967,7 @@
27522967
27532968 cgroup_threadgroup_change_end(tsk);
27542969
2755
- if (!signal_pending(tsk))
2970
+ if (!task_sigpending(tsk))
27562971 goto out;
27572972
27582973 unblocked = tsk->blocked;
....@@ -2776,14 +2991,6 @@
27762991 }
27772992 }
27782993
2779
-EXPORT_SYMBOL(recalc_sigpending);
2780
-EXPORT_SYMBOL_GPL(dequeue_signal);
2781
-EXPORT_SYMBOL(flush_signals);
2782
-EXPORT_SYMBOL(force_sig);
2783
-EXPORT_SYMBOL(send_sig);
2784
-EXPORT_SYMBOL(send_sig_info);
2785
-EXPORT_SYMBOL(sigprocmask);
2786
-
27872994 /*
27882995 * System call entry points.
27892996 */
....@@ -2804,7 +3011,7 @@
28043011
28053012 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
28063013 {
2807
- if (signal_pending(tsk) && !thread_group_empty(tsk)) {
3014
+ if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
28083015 sigset_t newblocked;
28093016 /* A set of now blocked but previously unblocked signals. */
28103017 sigandnsets(&newblocked, newset, &current->blocked);
....@@ -2877,6 +3084,55 @@
28773084 __set_current_blocked(&newset);
28783085 return 0;
28793086 }
3087
+EXPORT_SYMBOL(sigprocmask);
3088
+
3089
+/*
3090
+ * The api helps set app-provided sigmasks.
3091
+ *
3092
+ * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3093
+ * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3094
+ *
3095
+ * Note that it does set_restore_sigmask() in advance, so it must be always
3096
+ * paired with restore_saved_sigmask_unless() before return from syscall.
3097
+ */
3098
+int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3099
+{
3100
+ sigset_t kmask;
3101
+
3102
+ if (!umask)
3103
+ return 0;
3104
+ if (sigsetsize != sizeof(sigset_t))
3105
+ return -EINVAL;
3106
+ if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3107
+ return -EFAULT;
3108
+
3109
+ set_restore_sigmask();
3110
+ current->saved_sigmask = current->blocked;
3111
+ set_current_blocked(&kmask);
3112
+
3113
+ return 0;
3114
+}
3115
+
3116
+#ifdef CONFIG_COMPAT
3117
+int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3118
+ size_t sigsetsize)
3119
+{
3120
+ sigset_t kmask;
3121
+
3122
+ if (!umask)
3123
+ return 0;
3124
+ if (sigsetsize != sizeof(compat_sigset_t))
3125
+ return -EINVAL;
3126
+ if (get_compat_sigset(&kmask, umask))
3127
+ return -EFAULT;
3128
+
3129
+ set_restore_sigmask();
3130
+ current->saved_sigmask = current->blocked;
3131
+ set_current_blocked(&kmask);
3132
+
3133
+ return 0;
3134
+}
3135
+#endif
28803136
28813137 /**
28823138 * sys_rt_sigprocmask - change the list of currently blocked signals
....@@ -2987,27 +3243,48 @@
29873243 }
29883244 #endif
29893245
3246
+static const struct {
3247
+ unsigned char limit, layout;
3248
+} sig_sicodes[] = {
3249
+ [SIGILL] = { NSIGILL, SIL_FAULT },
3250
+ [SIGFPE] = { NSIGFPE, SIL_FAULT },
3251
+ [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3252
+ [SIGBUS] = { NSIGBUS, SIL_FAULT },
3253
+ [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3254
+#if defined(SIGEMT)
3255
+ [SIGEMT] = { NSIGEMT, SIL_FAULT },
3256
+#endif
3257
+ [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3258
+ [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3259
+ [SIGSYS] = { NSIGSYS, SIL_SYS },
3260
+};
3261
+
3262
+static bool known_siginfo_layout(unsigned sig, int si_code)
3263
+{
3264
+ if (si_code == SI_KERNEL)
3265
+ return true;
3266
+ else if ((si_code > SI_USER)) {
3267
+ if (sig_specific_sicodes(sig)) {
3268
+ if (si_code <= sig_sicodes[sig].limit)
3269
+ return true;
3270
+ }
3271
+ else if (si_code <= NSIGPOLL)
3272
+ return true;
3273
+ }
3274
+ else if (si_code >= SI_DETHREAD)
3275
+ return true;
3276
+ else if (si_code == SI_ASYNCNL)
3277
+ return true;
3278
+ return false;
3279
+}
3280
+
29903281 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
29913282 {
29923283 enum siginfo_layout layout = SIL_KILL;
29933284 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2994
- static const struct {
2995
- unsigned char limit, layout;
2996
- } filter[] = {
2997
- [SIGILL] = { NSIGILL, SIL_FAULT },
2998
- [SIGFPE] = { NSIGFPE, SIL_FAULT },
2999
- [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3000
- [SIGBUS] = { NSIGBUS, SIL_FAULT },
3001
- [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3002
-#if defined(SIGEMT) && defined(NSIGEMT)
3003
- [SIGEMT] = { NSIGEMT, SIL_FAULT },
3004
-#endif
3005
- [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3006
- [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3007
- [SIGSYS] = { NSIGSYS, SIL_SYS },
3008
- };
3009
- if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
3010
- layout = filter[sig].layout;
3285
+ if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3286
+ (si_code <= sig_sicodes[sig].limit)) {
3287
+ layout = sig_sicodes[sig].layout;
30113288 /* Handle the exceptions */
30123289 if ((sig == SIGBUS) &&
30133290 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
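Note: the sig_sicodes[] table and known_siginfo_layout() above classify an si_code by bounds-checking it against the number of codes each signal defines. A deliberately simplified userspace sketch of that table-driven check (the kernel version also special-cases SI_KERNEL, the SIGPOLL range and the negative user codes; the limits below are written out only for illustration):

#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

static const unsigned char sicode_limit[NSIG] = {
	[SIGILL]  = 11,	/* roughly NSIGILL */
	[SIGFPE]  = 15,	/* roughly NSIGFPE */
	[SIGSEGV] = 7,	/* roughly NSIGSEGV */
	[SIGBUS]  = 5,	/* roughly NSIGBUS */
	[SIGTRAP] = 5,	/* roughly NSIGTRAP */
	[SIGCHLD] = 6,	/* roughly NSIGCHLD */
	[SIGSYS]  = 1,	/* roughly NSIGSYS */
};

static bool known_si_code(int sig, int si_code)
{
	if (si_code <= 0)	/* SI_USER and the kernel-internal senders */
		return true;
	return sig > 0 && sig < NSIG && si_code <= sicode_limit[sig];
}

int main(void)
{
	printf("SIGSEGV/1 known:  %d\n", known_si_code(SIGSEGV, 1));
	printf("SIGSEGV/99 known: %d\n", known_si_code(SIGSEGV, 99));
	return 0;
}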
....@@ -3032,106 +3309,243 @@
30323309 return layout;
30333310 }
30343311
3035
-int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
3312
+static inline char __user *si_expansion(const siginfo_t __user *info)
30363313 {
3037
- if (copy_to_user(to, from , sizeof(struct siginfo)))
3314
+ return ((char __user *)info) + sizeof(struct kernel_siginfo);
3315
+}
3316
+
3317
+int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3318
+{
3319
+ char __user *expansion = si_expansion(to);
3320
+ if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3321
+ return -EFAULT;
3322
+ if (clear_user(expansion, SI_EXPANSION_SIZE))
30383323 return -EFAULT;
30393324 return 0;
3325
+}
3326
+
3327
+static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3328
+ const siginfo_t __user *from)
3329
+{
3330
+ if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3331
+ char __user *expansion = si_expansion(from);
3332
+ char buf[SI_EXPANSION_SIZE];
3333
+ int i;
3334
+ /*
3335
+ * An unknown si_code might need more than
3336
+ * sizeof(struct kernel_siginfo) bytes. Verify all of the
3337
+ * extra bytes are 0. This guarantees copy_siginfo_to_user
3338
+ * will return this data to userspace exactly.
3339
+ */
3340
+ if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3341
+ return -EFAULT;
3342
+ for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3343
+ if (buf[i] != 0)
3344
+ return -E2BIG;
3345
+ }
3346
+ }
3347
+ return 0;
3348
+}
3349
+
3350
+static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3351
+ const siginfo_t __user *from)
3352
+{
3353
+ if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3354
+ return -EFAULT;
3355
+ to->si_signo = signo;
3356
+ return post_copy_siginfo_from_user(to, from);
3357
+}
3358
+
3359
+int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3360
+{
3361
+ if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3362
+ return -EFAULT;
3363
+ return post_copy_siginfo_from_user(to, from);
30403364 }
30413365
30423366 #ifdef CONFIG_COMPAT
3043
-int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3044
- const struct siginfo *from)
3045
-#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3367
+/**
3368
+ * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3369
+ * @to: compat siginfo destination
3370
+ * @from: kernel siginfo source
3371
+ *
3372
+ * Note: This function does not work properly for the SIGCHLD on x32, but
3373
+ * fortunately it doesn't have to. The only valid callers for this function are
3374
+ * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3375
+ * The latter does not care because SIGCHLD will never cause a coredump.
3376
+ */
3377
+void copy_siginfo_to_external32(struct compat_siginfo *to,
3378
+ const struct kernel_siginfo *from)
30463379 {
3047
- return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3048
-}
3049
-int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3050
- const struct siginfo *from, bool x32_ABI)
3051
-#endif
3052
-{
3053
- struct compat_siginfo new;
3054
- memset(&new, 0, sizeof(new));
3380
+ memset(to, 0, sizeof(*to));
30553381
3056
- new.si_signo = from->si_signo;
3057
- new.si_errno = from->si_errno;
3058
- new.si_code = from->si_code;
3382
+ to->si_signo = from->si_signo;
3383
+ to->si_errno = from->si_errno;
3384
+ to->si_code = from->si_code;
30593385 switch(siginfo_layout(from->si_signo, from->si_code)) {
30603386 case SIL_KILL:
3061
- new.si_pid = from->si_pid;
3062
- new.si_uid = from->si_uid;
3387
+ to->si_pid = from->si_pid;
3388
+ to->si_uid = from->si_uid;
30633389 break;
30643390 case SIL_TIMER:
3065
- new.si_tid = from->si_tid;
3066
- new.si_overrun = from->si_overrun;
3067
- new.si_int = from->si_int;
3391
+ to->si_tid = from->si_tid;
3392
+ to->si_overrun = from->si_overrun;
3393
+ to->si_int = from->si_int;
30683394 break;
30693395 case SIL_POLL:
3070
- new.si_band = from->si_band;
3071
- new.si_fd = from->si_fd;
3396
+ to->si_band = from->si_band;
3397
+ to->si_fd = from->si_fd;
30723398 break;
30733399 case SIL_FAULT:
3074
- new.si_addr = ptr_to_compat(from->si_addr);
3400
+ to->si_addr = ptr_to_compat(from->si_addr);
30753401 #ifdef __ARCH_SI_TRAPNO
3076
- new.si_trapno = from->si_trapno;
3402
+ to->si_trapno = from->si_trapno;
30773403 #endif
30783404 break;
30793405 case SIL_FAULT_MCEERR:
3080
- new.si_addr = ptr_to_compat(from->si_addr);
3406
+ to->si_addr = ptr_to_compat(from->si_addr);
30813407 #ifdef __ARCH_SI_TRAPNO
3082
- new.si_trapno = from->si_trapno;
3408
+ to->si_trapno = from->si_trapno;
30833409 #endif
3084
- new.si_addr_lsb = from->si_addr_lsb;
3410
+ to->si_addr_lsb = from->si_addr_lsb;
30853411 break;
30863412 case SIL_FAULT_BNDERR:
3087
- new.si_addr = ptr_to_compat(from->si_addr);
3413
+ to->si_addr = ptr_to_compat(from->si_addr);
30883414 #ifdef __ARCH_SI_TRAPNO
3089
- new.si_trapno = from->si_trapno;
3415
+ to->si_trapno = from->si_trapno;
30903416 #endif
3091
- new.si_lower = ptr_to_compat(from->si_lower);
3092
- new.si_upper = ptr_to_compat(from->si_upper);
3417
+ to->si_lower = ptr_to_compat(from->si_lower);
3418
+ to->si_upper = ptr_to_compat(from->si_upper);
30933419 break;
30943420 case SIL_FAULT_PKUERR:
3095
- new.si_addr = ptr_to_compat(from->si_addr);
3421
+ to->si_addr = ptr_to_compat(from->si_addr);
30963422 #ifdef __ARCH_SI_TRAPNO
3097
- new.si_trapno = from->si_trapno;
3423
+ to->si_trapno = from->si_trapno;
30983424 #endif
3099
- new.si_pkey = from->si_pkey;
3425
+ to->si_pkey = from->si_pkey;
31003426 break;
31013427 case SIL_CHLD:
3102
- new.si_pid = from->si_pid;
3103
- new.si_uid = from->si_uid;
3104
- new.si_status = from->si_status;
3105
-#ifdef CONFIG_X86_X32_ABI
3106
- if (x32_ABI) {
3107
- new._sifields._sigchld_x32._utime = from->si_utime;
3108
- new._sifields._sigchld_x32._stime = from->si_stime;
3109
- } else
3110
-#endif
3111
- {
3112
- new.si_utime = from->si_utime;
3113
- new.si_stime = from->si_stime;
3114
- }
3428
+ to->si_pid = from->si_pid;
3429
+ to->si_uid = from->si_uid;
3430
+ to->si_status = from->si_status;
3431
+ to->si_utime = from->si_utime;
3432
+ to->si_stime = from->si_stime;
31153433 break;
31163434 case SIL_RT:
3117
- new.si_pid = from->si_pid;
3118
- new.si_uid = from->si_uid;
3119
- new.si_int = from->si_int;
3435
+ to->si_pid = from->si_pid;
3436
+ to->si_uid = from->si_uid;
3437
+ to->si_int = from->si_int;
31203438 break;
31213439 case SIL_SYS:
3122
- new.si_call_addr = ptr_to_compat(from->si_call_addr);
3123
- new.si_syscall = from->si_syscall;
3124
- new.si_arch = from->si_arch;
3440
+ to->si_call_addr = ptr_to_compat(from->si_call_addr);
3441
+ to->si_syscall = from->si_syscall;
3442
+ to->si_arch = from->si_arch;
31253443 break;
31263444 }
3445
+}
31273446
3447
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3448
+ const struct kernel_siginfo *from)
3449
+{
3450
+ struct compat_siginfo new;
3451
+
3452
+ copy_siginfo_to_external32(&new, from);
31283453 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
31293454 return -EFAULT;
3130
-
31313455 return 0;
31323456 }
31333457
3134
-int copy_siginfo_from_user32(struct siginfo *to,
3458
+static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3459
+ const struct compat_siginfo *from)
3460
+{
3461
+ clear_siginfo(to);
3462
+ to->si_signo = from->si_signo;
3463
+ to->si_errno = from->si_errno;
3464
+ to->si_code = from->si_code;
3465
+ switch(siginfo_layout(from->si_signo, from->si_code)) {
3466
+ case SIL_KILL:
3467
+ to->si_pid = from->si_pid;
3468
+ to->si_uid = from->si_uid;
3469
+ break;
3470
+ case SIL_TIMER:
3471
+ to->si_tid = from->si_tid;
3472
+ to->si_overrun = from->si_overrun;
3473
+ to->si_int = from->si_int;
3474
+ break;
3475
+ case SIL_POLL:
3476
+ to->si_band = from->si_band;
3477
+ to->si_fd = from->si_fd;
3478
+ break;
3479
+ case SIL_FAULT:
3480
+ to->si_addr = compat_ptr(from->si_addr);
3481
+#ifdef __ARCH_SI_TRAPNO
3482
+ to->si_trapno = from->si_trapno;
3483
+#endif
3484
+ break;
3485
+ case SIL_FAULT_MCEERR:
3486
+ to->si_addr = compat_ptr(from->si_addr);
3487
+#ifdef __ARCH_SI_TRAPNO
3488
+ to->si_trapno = from->si_trapno;
3489
+#endif
3490
+ to->si_addr_lsb = from->si_addr_lsb;
3491
+ break;
3492
+ case SIL_FAULT_BNDERR:
3493
+ to->si_addr = compat_ptr(from->si_addr);
3494
+#ifdef __ARCH_SI_TRAPNO
3495
+ to->si_trapno = from->si_trapno;
3496
+#endif
3497
+ to->si_lower = compat_ptr(from->si_lower);
3498
+ to->si_upper = compat_ptr(from->si_upper);
3499
+ break;
3500
+ case SIL_FAULT_PKUERR:
3501
+ to->si_addr = compat_ptr(from->si_addr);
3502
+#ifdef __ARCH_SI_TRAPNO
3503
+ to->si_trapno = from->si_trapno;
3504
+#endif
3505
+ to->si_pkey = from->si_pkey;
3506
+ break;
3507
+ case SIL_CHLD:
3508
+ to->si_pid = from->si_pid;
3509
+ to->si_uid = from->si_uid;
3510
+ to->si_status = from->si_status;
3511
+#ifdef CONFIG_X86_X32_ABI
3512
+ if (in_x32_syscall()) {
3513
+ to->si_utime = from->_sifields._sigchld_x32._utime;
3514
+ to->si_stime = from->_sifields._sigchld_x32._stime;
3515
+ } else
3516
+#endif
3517
+ {
3518
+ to->si_utime = from->si_utime;
3519
+ to->si_stime = from->si_stime;
3520
+ }
3521
+ break;
3522
+ case SIL_RT:
3523
+ to->si_pid = from->si_pid;
3524
+ to->si_uid = from->si_uid;
3525
+ to->si_int = from->si_int;
3526
+ break;
3527
+ case SIL_SYS:
3528
+ to->si_call_addr = compat_ptr(from->si_call_addr);
3529
+ to->si_syscall = from->si_syscall;
3530
+ to->si_arch = from->si_arch;
3531
+ break;
3532
+ }
3533
+ return 0;
3534
+}
3535
+
3536
+static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3537
+ const struct compat_siginfo __user *ufrom)
3538
+{
3539
+ struct compat_siginfo from;
3540
+
3541
+ if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3542
+ return -EFAULT;
3543
+
3544
+ from.si_signo = signo;
3545
+ return post_copy_siginfo_from_user32(to, &from);
3546
+}
3547
+
3548
+int copy_siginfo_from_user32(struct kernel_siginfo *to,
31353549 const struct compat_siginfo __user *ufrom)
31363550 {
31373551 struct compat_siginfo from;
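Note: the copy helpers above rely on the split between struct kernel_siginfo (the part the kernel actually fills) and the larger userspace siginfo_t: copy_siginfo_to_user() zeroes the "expansion" tail, and post_copy_siginfo_from_user() accepts an unknown si_code only if that tail is all zeroes, so the data can later be echoed back to userspace verbatim. A toy model of that rule (sizes and names here are invented for the example):

#include <stdio.h>
#include <string.h>

#define KNOWN_SIZE	48	/* stands in for sizeof(struct kernel_siginfo) */
#define EXPANSION_SIZE	80	/* stands in for SI_EXPANSION_SIZE */

struct user_siginfo {
	char known[KNOWN_SIZE];
	char expansion[EXPANSION_SIZE];
};

static int check_expansion_clear(const struct user_siginfo *si)
{
	for (size_t i = 0; i < EXPANSION_SIZE; i++)
		if (si->expansion[i] != 0)
			return -1;	/* the kernel returns -E2BIG here */
	return 0;
}

int main(void)
{
	struct user_siginfo si;

	memset(&si, 0, sizeof(si));
	memset(si.known, 0xab, sizeof(si.known));	/* kernel-filled part */
	printf("expansion clear: %d\n", check_expansion_clear(&si));

	si.expansion[3] = 1;				/* stray user data */
	printf("expansion clear: %d\n", check_expansion_clear(&si));
	return 0;
}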
....@@ -3139,79 +3553,7 @@
31393553 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
31403554 return -EFAULT;
31413555
3142
- clear_siginfo(to);
3143
- to->si_signo = from.si_signo;
3144
- to->si_errno = from.si_errno;
3145
- to->si_code = from.si_code;
3146
- switch(siginfo_layout(from.si_signo, from.si_code)) {
3147
- case SIL_KILL:
3148
- to->si_pid = from.si_pid;
3149
- to->si_uid = from.si_uid;
3150
- break;
3151
- case SIL_TIMER:
3152
- to->si_tid = from.si_tid;
3153
- to->si_overrun = from.si_overrun;
3154
- to->si_int = from.si_int;
3155
- break;
3156
- case SIL_POLL:
3157
- to->si_band = from.si_band;
3158
- to->si_fd = from.si_fd;
3159
- break;
3160
- case SIL_FAULT:
3161
- to->si_addr = compat_ptr(from.si_addr);
3162
-#ifdef __ARCH_SI_TRAPNO
3163
- to->si_trapno = from.si_trapno;
3164
-#endif
3165
- break;
3166
- case SIL_FAULT_MCEERR:
3167
- to->si_addr = compat_ptr(from.si_addr);
3168
-#ifdef __ARCH_SI_TRAPNO
3169
- to->si_trapno = from.si_trapno;
3170
-#endif
3171
- to->si_addr_lsb = from.si_addr_lsb;
3172
- break;
3173
- case SIL_FAULT_BNDERR:
3174
- to->si_addr = compat_ptr(from.si_addr);
3175
-#ifdef __ARCH_SI_TRAPNO
3176
- to->si_trapno = from.si_trapno;
3177
-#endif
3178
- to->si_lower = compat_ptr(from.si_lower);
3179
- to->si_upper = compat_ptr(from.si_upper);
3180
- break;
3181
- case SIL_FAULT_PKUERR:
3182
- to->si_addr = compat_ptr(from.si_addr);
3183
-#ifdef __ARCH_SI_TRAPNO
3184
- to->si_trapno = from.si_trapno;
3185
-#endif
3186
- to->si_pkey = from.si_pkey;
3187
- break;
3188
- case SIL_CHLD:
3189
- to->si_pid = from.si_pid;
3190
- to->si_uid = from.si_uid;
3191
- to->si_status = from.si_status;
3192
-#ifdef CONFIG_X86_X32_ABI
3193
- if (in_x32_syscall()) {
3194
- to->si_utime = from._sifields._sigchld_x32._utime;
3195
- to->si_stime = from._sifields._sigchld_x32._stime;
3196
- } else
3197
-#endif
3198
- {
3199
- to->si_utime = from.si_utime;
3200
- to->si_stime = from.si_stime;
3201
- }
3202
- break;
3203
- case SIL_RT:
3204
- to->si_pid = from.si_pid;
3205
- to->si_uid = from.si_uid;
3206
- to->si_int = from.si_int;
3207
- break;
3208
- case SIL_SYS:
3209
- to->si_call_addr = compat_ptr(from.si_call_addr);
3210
- to->si_syscall = from.si_syscall;
3211
- to->si_arch = from.si_arch;
3212
- break;
3213
- }
3214
- return 0;
3556
+ return post_copy_siginfo_from_user32(to, &from);
32153557 }
32163558 #endif /* CONFIG_COMPAT */
32173559
....@@ -3221,8 +3563,8 @@
32213563 * @info: if non-null, the signal's siginfo is returned here
32223564 * @ts: upper bound on process time suspension
32233565 */
3224
-static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3225
- const struct timespec *ts)
3566
+static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3567
+ const struct timespec64 *ts)
32263568 {
32273569 ktime_t *to = NULL, timeout = KTIME_MAX;
32283570 struct task_struct *tsk = current;
....@@ -3230,9 +3572,9 @@
32303572 int sig, ret = 0;
32313573
32323574 if (ts) {
3233
- if (!timespec_valid(ts))
3575
+ if (!timespec64_valid(ts))
32343576 return -EINVAL;
3235
- timeout = timespec_to_ktime(*ts);
3577
+ timeout = timespec64_to_ktime(*ts);
32363578 to = &timeout;
32373579 }
32383580
@@ -3280,12 +3622,13 @@
  * @sigsetsize: size of sigset_t type
  */
 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
-		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
+		siginfo_t __user *, uinfo,
+		const struct __kernel_timespec __user *, uts,
 		size_t, sigsetsize)
 {
 	sigset_t these;
-	struct timespec ts;
-	siginfo_t info;
+	struct timespec64 ts;
+	kernel_siginfo_t info;
 	int ret;
 
 	/* XXX: Don't preclude handling different sized sigset_t's. */
@@ -3296,7 +3639,7 @@
 		return -EFAULT;
 
 	if (uts) {
-		if (copy_from_user(&ts, uts, sizeof(ts)))
+		if (get_timespec64(&ts, uts))
 			return -EFAULT;
 	}
 
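Note: the timespec64/get_timespec64() conversion above is internal plumbing; the userspace contract of rt_sigtimedwait() is unchanged. For reference, a minimal example of the syscall as seen through the glibc sigtimedwait() wrapper:

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* the signal must be blocked first */

	sig = sigtimedwait(&set, &info, &timeout);
	if (sig < 0)
		perror("sigtimedwait");	/* EAGAIN on timeout, EINTR, ... */
	else
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}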
....@@ -3310,14 +3653,47 @@
33103653 return ret;
33113654 }
33123655
3656
+#ifdef CONFIG_COMPAT_32BIT_TIME
3657
+SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3658
+ siginfo_t __user *, uinfo,
3659
+ const struct old_timespec32 __user *, uts,
3660
+ size_t, sigsetsize)
3661
+{
3662
+ sigset_t these;
3663
+ struct timespec64 ts;
3664
+ kernel_siginfo_t info;
3665
+ int ret;
3666
+
3667
+ if (sigsetsize != sizeof(sigset_t))
3668
+ return -EINVAL;
3669
+
3670
+ if (copy_from_user(&these, uthese, sizeof(these)))
3671
+ return -EFAULT;
3672
+
3673
+ if (uts) {
3674
+ if (get_old_timespec32(&ts, uts))
3675
+ return -EFAULT;
3676
+ }
3677
+
3678
+ ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3679
+
3680
+ if (ret > 0 && uinfo) {
3681
+ if (copy_siginfo_to_user(uinfo, &info))
3682
+ ret = -EFAULT;
3683
+ }
3684
+
3685
+ return ret;
3686
+}
3687
+#endif
3688
+
33133689 #ifdef CONFIG_COMPAT
3314
-COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3690
+COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
33153691 struct compat_siginfo __user *, uinfo,
3316
- struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3692
+ struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
33173693 {
33183694 sigset_t s;
3319
- struct timespec t;
3320
- siginfo_t info;
3695
+ struct timespec64 t;
3696
+ kernel_siginfo_t info;
33213697 long ret;
33223698
33233699 if (sigsetsize != sizeof(sigset_t))
....@@ -3327,7 +3703,38 @@
33273703 return -EFAULT;
33283704
33293705 if (uts) {
3330
- if (compat_get_timespec(&t, uts))
3706
+ if (get_timespec64(&t, uts))
3707
+ return -EFAULT;
3708
+ }
3709
+
3710
+ ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3711
+
3712
+ if (ret > 0 && uinfo) {
3713
+ if (copy_siginfo_to_user32(uinfo, &info))
3714
+ ret = -EFAULT;
3715
+ }
3716
+
3717
+ return ret;
3718
+}
3719
+
3720
+#ifdef CONFIG_COMPAT_32BIT_TIME
3721
+COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3722
+ struct compat_siginfo __user *, uinfo,
3723
+ struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3724
+{
3725
+ sigset_t s;
3726
+ struct timespec64 t;
3727
+ kernel_siginfo_t info;
3728
+ long ret;
3729
+
3730
+ if (sigsetsize != sizeof(sigset_t))
3731
+ return -EINVAL;
3732
+
3733
+ if (get_compat_sigset(&s, uthese))
3734
+ return -EFAULT;
3735
+
3736
+ if (uts) {
3737
+ if (get_old_timespec32(&t, uts))
33313738 return -EFAULT;
33323739 }
33333740
....@@ -3341,8 +3748,9 @@
33413748 return ret;
33423749 }
33433750 #endif
3751
+#endif
33443752
3345
-static inline void prepare_kill_siginfo(int sig, struct siginfo *info)
3753
+static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
33463754 {
33473755 clear_siginfo(info);
33483756 info->si_signo = sig;
....@@ -3359,7 +3767,7 @@
33593767 */
33603768 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
33613769 {
3362
- struct siginfo info;
3770
+ struct kernel_siginfo info;
33633771
33643772 prepare_kill_siginfo(sig, &info);
33653773
....@@ -3387,7 +3795,7 @@
33873795 return true;
33883796 }
33893797
3390
-static int copy_siginfo_from_user_any(siginfo_t *kinfo, siginfo_t __user *info)
3798
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t __user *info)
33913799 {
33923800 #ifdef CONFIG_COMPAT
33933801 /*
....@@ -3399,13 +3807,16 @@
33993807 return copy_siginfo_from_user32(
34003808 kinfo, (struct compat_siginfo __user *)info);
34013809 #endif
3402
- return copy_from_user(kinfo, info, sizeof(siginfo_t));
3810
+ return copy_siginfo_from_user(kinfo, info);
34033811 }
34043812
34053813 static struct pid *pidfd_to_pid(const struct file *file)
34063814 {
3407
- if (file->f_op == &pidfd_fops)
3408
- return file->private_data;
3815
+ struct pid *pid;
3816
+
3817
+ pid = pidfd_pid(file);
3818
+ if (!IS_ERR(pid))
3819
+ return pid;
34093820
34103821 return tgid_pidfd_to_pid(file);
34113822 }
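A hedged userspace sketch (not from this diff) of the pidfd_send_signal() path handled below; the raw syscall() form is used because libc wrappers may be missing, and the SYS_pidfd_* fallback numbers are the asm-generic values, which is an assumption for older headers or unusual architectures.

/* Illustrative sketch only. */
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SYS_pidfd_open
#define SYS_pidfd_open 434		/* asm-generic number; assumption */
#endif
#ifndef SYS_pidfd_send_signal
#define SYS_pidfd_send_signal 424	/* asm-generic number; assumption */
#endif

static int pidfd_kill(pid_t pid, int sig)
{
	long pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0) {
		perror("pidfd_open");
		return -1;
	}
	/* A NULL siginfo asks the kernel to build a SI_USER one, as kill() does. */
	if (syscall(SYS_pidfd_send_signal, (int)pidfd, sig, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		close(pidfd);
		return -1;
	}
	close(pidfd);
	return 0;
}
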
....@@ -3434,7 +3845,7 @@
34343845 int ret;
34353846 struct fd f;
34363847 struct pid *pid;
3437
- siginfo_t kinfo;
3848
+ kernel_siginfo_t kinfo;
34383849
34393850 /* Enforce flags be set to 0 until we add an extension. */
34403851 if (flags)
....@@ -3481,7 +3892,7 @@
34813892 }
34823893
34833894 static int
3484
-do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3895
+do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
34853896 {
34863897 struct task_struct *p;
34873898 int error = -ESRCH;
....@@ -3512,7 +3923,7 @@
35123923
35133924 static int do_tkill(pid_t tgid, pid_t pid, int sig)
35143925 {
3515
- struct siginfo info;
3926
+ struct kernel_siginfo info;
35163927
35173928 clear_siginfo(&info);
35183929 info.si_signo = sig;
....@@ -3559,7 +3970,7 @@
35593970 return do_tkill(0, pid, sig);
35603971 }
35613972
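A brief sketch (not from this diff) of tgkill(2), the thread-directed front end to do_tkill()/do_send_specific() in this area of the file.

/* Illustrative sketch only; assumes a glibc environment. */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int kill_current_thread(int sig)
{
	pid_t tgid = getpid();
	pid_t tid = (pid_t)syscall(SYS_gettid);	/* raw syscall; gettid() wrapper needs glibc >= 2.30 */

	/* Deliver sig to exactly this thread of the current thread group. */
	return (int)syscall(SYS_tgkill, tgid, tid, sig);
}
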
3562
-static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3973
+static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
35633974 {
35643975 /* Not even root can pretend to send signals from the kernel.
35653976 * Nor can they impersonate a kill()/tgkill(), which adds source info.
....@@ -3567,8 +3978,6 @@
35673978 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
35683979 (task_pid_vnr(current) != pid))
35693980 return -EPERM;
3570
-
3571
- info->si_signo = sig;
35723981
35733982 /* POSIX.1b doesn't mention process groups. */
35743983 return kill_proc_info(sig, info, pid);
....@@ -3583,9 +3992,10 @@
35833992 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
35843993 siginfo_t __user *, uinfo)
35853994 {
3586
- siginfo_t info;
3587
- if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3588
- return -EFAULT;
3995
+ kernel_siginfo_t info;
3996
+ int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3997
+ if (unlikely(ret))
3998
+ return ret;
35893999 return do_rt_sigqueueinfo(pid, sig, &info);
35904000 }
35914001
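A short sketch (not from this diff) of sigqueue(3), the usual userspace route into rt_sigqueueinfo(); glibc fills in si_code as SI_QUEUE, a negative value, which is why the "not even root can pretend to send signals from the kernel" check in do_rt_sigqueueinfo() above does not fire for it.

/* Illustrative sketch only. */
#include <signal.h>
#include <stdio.h>

static int queue_value(pid_t pid, int value)
{
	union sigval sv = { .sival_int = value };

	if (sigqueue(pid, SIGRTMIN, sv) < 0) {
		perror("sigqueue");
		return -1;
	}
	return 0;
}
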
....@@ -3595,15 +4005,15 @@
35954005 int, sig,
35964006 struct compat_siginfo __user *, uinfo)
35974007 {
3598
- siginfo_t info;
3599
- int ret = copy_siginfo_from_user32(&info, uinfo);
4008
+ kernel_siginfo_t info;
4009
+ int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
36004010 if (unlikely(ret))
36014011 return ret;
36024012 return do_rt_sigqueueinfo(pid, sig, &info);
36034013 }
36044014 #endif
36054015
3606
-static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
4016
+static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
36074017 {
36084018 /* This is only valid for single tasks */
36094019 if (pid <= 0 || tgid <= 0)
....@@ -3616,19 +4026,16 @@
36164026 (task_pid_vnr(current) != pid))
36174027 return -EPERM;
36184028
3619
- info->si_signo = sig;
3620
-
36214029 return do_send_specific(tgid, pid, sig, info);
36224030 }
36234031
36244032 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
36254033 siginfo_t __user *, uinfo)
36264034 {
3627
- siginfo_t info;
3628
-
3629
- if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3630
- return -EFAULT;
3631
-
4035
+ kernel_siginfo_t info;
4036
+ int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4037
+ if (unlikely(ret))
4038
+ return ret;
36324039 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
36334040 }
36344041
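A sketch (not from this diff): glibc's pthread_sigqueue(), a GNU extension, is built on the rt_tgsigqueueinfo() syscall above and directs a queued signal plus value at one specific thread.

/* Illustrative sketch only. */
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>

static int queue_to_thread(pthread_t thread, int value)
{
	union sigval sv = { .sival_int = value };

	/* Queue SIGRTMIN with an accompanying value to one specific thread. */
	return pthread_sigqueue(thread, SIGRTMIN, sv);
}
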
....@@ -3639,10 +4046,10 @@
36394046 int, sig,
36404047 struct compat_siginfo __user *, uinfo)
36414048 {
3642
- siginfo_t info;
3643
-
3644
- if (copy_siginfo_from_user32(&info, uinfo))
3645
- return -EFAULT;
4049
+ kernel_siginfo_t info;
4050
+ int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4051
+ if (unlikely(ret))
4052
+ return ret;
36464053 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
36474054 }
36484055 #endif
....@@ -3687,6 +4094,22 @@
36874094 spin_lock_irq(&p->sighand->siglock);
36884095 if (oact)
36894096 *oact = *k;
4097
+
4098
+ /*
4099
+ * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4100
+ * e.g. by having an architecture use the bit in their uapi.
4101
+ */
4102
+ BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4103
+
4104
+ /*
4105
+ * Clear unknown flag bits in order to allow userspace to detect missing
4106
+ * support for flag bits and to allow the kernel to use non-uapi bits
4107
+ * internally.
4108
+ */
4109
+ if (act)
4110
+ act->sa.sa_flags &= UAPI_SA_FLAGS;
4111
+ if (oact)
4112
+ oact->sa.sa_flags &= UAPI_SA_FLAGS;
36904113
36914114 sigaction_compat_abi(act, oact);
36924115
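The flag clearing above is what lets userspace probe for new sa_flags bits. A hedged sketch of that detection protocol follows (not from this diff): register with SA_UNSUPPORTED (never supported) plus the flag of interest, read the action back, and look at which bits survived. The SA_* fallback values are the asm-generic uapi ones and are an assumption if <signal.h> does not already provide them.

/* Illustrative sketch only. */
#include <signal.h>
#include <stdbool.h>

#ifndef SA_UNSUPPORTED
#define SA_UNSUPPORTED 0x00000400	/* asm-generic value; assumption */
#endif
#ifndef SA_EXPOSE_TAGBITS
#define SA_EXPOSE_TAGBITS 0x00000800	/* asm-generic value; assumption */
#endif

static void probe_handler(int sig) { (void)sig; }

static bool kernel_supports_expose_tagbits(void)
{
	struct sigaction act = { 0 }, old = { 0 }, probed = { 0 };
	bool supported = false;

	sigaction(SIGUSR1, NULL, &old);		/* remember the current action */

	act.sa_handler = probe_handler;
	act.sa_flags = SA_UNSUPPORTED | SA_EXPOSE_TAGBITS;
	sigaction(SIGUSR1, &act, NULL);

	sigaction(SIGUSR1, NULL, &probed);
	if (!(probed.sa_flags & SA_UNSUPPORTED))
		/* Kernel clears unknown bits, so the surviving bit is meaningful. */
		supported = probed.sa_flags & SA_EXPOSE_TAGBITS;

	sigaction(SIGUSR1, &old, NULL);		/* restore the original action */
	return supported;
}
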
....@@ -4033,7 +4456,7 @@
40334456
40344457 if (act) {
40354458 old_sigset_t mask;
4036
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
4459
+ if (!access_ok(act, sizeof(*act)) ||
40374460 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
40384461 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
40394462 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
....@@ -4048,7 +4471,7 @@
40484471 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
40494472
40504473 if (!ret && oact) {
4051
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
4474
+ if (!access_ok(oact, sizeof(*oact)) ||
40524475 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
40534476 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
40544477 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
....@@ -4070,7 +4493,7 @@
40704493 compat_uptr_t handler, restorer;
40714494
40724495 if (act) {
4073
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
4496
+ if (!access_ok(act, sizeof(*act)) ||
40744497 __get_user(handler, &act->sa_handler) ||
40754498 __get_user(restorer, &act->sa_restorer) ||
40764499 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
....@@ -4088,7 +4511,7 @@
40884511 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
40894512
40904513 if (!ret && oact) {
4091
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
4514
+ if (!access_ok(oact, sizeof(*oact)) ||
40924515 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
40934516 &oact->sa_handler) ||
40944517 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
....@@ -4225,12 +4648,78 @@
42254648 return NULL;
42264649 }
42274650
4651
+static inline void siginfo_buildtime_checks(void)
4652
+{
4653
+ BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4654
+
4655
+ /* Verify the offsets in the two siginfos match */
4656
+#define CHECK_OFFSET(field) \
4657
+ BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4658
+
4659
+ /* kill */
4660
+ CHECK_OFFSET(si_pid);
4661
+ CHECK_OFFSET(si_uid);
4662
+
4663
+ /* timer */
4664
+ CHECK_OFFSET(si_tid);
4665
+ CHECK_OFFSET(si_overrun);
4666
+ CHECK_OFFSET(si_value);
4667
+
4668
+ /* rt */
4669
+ CHECK_OFFSET(si_pid);
4670
+ CHECK_OFFSET(si_uid);
4671
+ CHECK_OFFSET(si_value);
4672
+
4673
+ /* sigchld */
4674
+ CHECK_OFFSET(si_pid);
4675
+ CHECK_OFFSET(si_uid);
4676
+ CHECK_OFFSET(si_status);
4677
+ CHECK_OFFSET(si_utime);
4678
+ CHECK_OFFSET(si_stime);
4679
+
4680
+ /* sigfault */
4681
+ CHECK_OFFSET(si_addr);
4682
+ CHECK_OFFSET(si_addr_lsb);
4683
+ CHECK_OFFSET(si_lower);
4684
+ CHECK_OFFSET(si_upper);
4685
+ CHECK_OFFSET(si_pkey);
4686
+
4687
+ /* sigpoll */
4688
+ CHECK_OFFSET(si_band);
4689
+ CHECK_OFFSET(si_fd);
4690
+
4691
+ /* sigsys */
4692
+ CHECK_OFFSET(si_call_addr);
4693
+ CHECK_OFFSET(si_syscall);
4694
+ CHECK_OFFSET(si_arch);
4695
+#undef CHECK_OFFSET
4696
+
4697
+ /* usb asyncio */
4698
+ BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4699
+ offsetof(struct siginfo, si_addr));
4700
+ if (sizeof(int) == sizeof(void __user *)) {
4701
+ BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4702
+ sizeof(void __user *));
4703
+ } else {
4704
+ BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4705
+ sizeof_field(struct siginfo, si_uid)) !=
4706
+ sizeof(void __user *));
4707
+ BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4708
+ offsetof(struct siginfo, si_uid));
4709
+ }
4710
+#ifdef CONFIG_COMPAT
4711
+ BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4712
+ offsetof(struct compat_siginfo, si_addr));
4713
+ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4714
+ sizeof(compat_uptr_t));
4715
+ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4716
+ sizeof_field(struct siginfo, si_pid));
4717
+#endif
4718
+}
4719
+
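The BUILD_BUG_ON() layout checks added above have a plain C11 analogue; as a sketch of the same compile-time pinning technique, here is _Static_assert applied to a made-up (hypothetical) structure, not a kernel type.

/* Illustrative sketch only. */
#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */
#include <stdint.h>

struct wire_header {		/* hypothetical fixed-layout structure */
	uint32_t id;
	uint32_t flags;
	uint64_t payload_offset;
};

static_assert(sizeof(struct wire_header) == 16,
	      "wire_header must stay 16 bytes");
static_assert(offsetof(struct wire_header, payload_offset) == 8,
	      "payload_offset must sit at byte 8");
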
42284720 void __init signals_init(void)
42294721 {
4230
- /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
4231
- BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
4232
- != offsetof(struct siginfo, _sifields._pad));
4233
- BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4722
+ siginfo_buildtime_checks();
42344723
42354724 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
42364725 }