// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/irq_pipeline.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
        struct siginfo info;
        struct ucontext uc;
};

struct frame_record {
        u64 fp;
        u64 lr;
};

struct rt_sigframe_user_layout {
        struct rt_sigframe __user *sigframe;
        struct frame_record __user *next_frame;

        unsigned long size;     /* size of allocated sigframe data */
        unsigned long limit;    /* largest allowed size */

        unsigned long fpsimd_offset;
        unsigned long esr_offset;
        unsigned long sve_offset;
        unsigned long extra_offset;
        unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
        const size_t reserved_size =
                sizeof(user->sigframe->uc.uc_mcontext.__reserved);

        memset(user, 0, sizeof(*user));
        user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

        user->limit = user->size + reserved_size;

        /* Reserve space for extension and terminator: */
        user->limit -= TERMINATOR_SIZE;
        user->limit -= EXTRA_CONTEXT_SIZE;
}
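
/*
 * Illustrative sketch of the bookkeeping above (not ABI): user->size
 * starts at the offset of __reserved[] within the frame and grows as
 * records are allocated, while user->limit caps growth at the end of
 * __reserved[] minus the space held back for the extension record and
 * the terminator.  Assuming the usual 4096-byte __reserved[] area:
 *
 *      size  = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved)
 *      limit = size + 4096 - TERMINATOR_SIZE - EXTRA_CONTEXT_SIZE
 *
 * so sigframe_alloc() can hand out records in-line until that budget
 * is exhausted, at which point the spill-over path in
 * __sigframe_alloc() below takes over.
 */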

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
        return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_64K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
                            unsigned long *offset, size_t size, bool extend)
{
        size_t padded_size = round_up(size, 16);

        if (padded_size > user->limit - user->size &&
            !user->extra_offset &&
            extend) {
                int ret;

                user->limit += EXTRA_CONTEXT_SIZE;
                ret = __sigframe_alloc(user, &user->extra_offset,
                                       sizeof(struct extra_context), false);
                if (ret) {
                        user->limit -= EXTRA_CONTEXT_SIZE;
                        return ret;
                }

                /* Reserve space for the __reserved[] terminator */
                user->size += TERMINATOR_SIZE;

                /*
                 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
                 * the terminator:
                 */
                user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
        }

        /* Still not enough space?  Bad luck! */
        if (padded_size > user->limit - user->size)
                return -ENOMEM;

        *offset = user->size;
        user->size += padded_size;

        return 0;
}
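
/*
 * A sketch of the spill-over behaviour above, assuming one oversized
 * record (e.g. a large SVE block) is the trigger: the first
 * allocation that no longer fits in __reserved[] first allocates an
 * extra_context record in-line, then retries against the enlarged
 * SIGFRAME_MAXSZ budget.  The caller still receives a single offset
 * and never needs to know whether its record landed in __reserved[]
 * or in the out-of-line extra space.
 */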

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
                          unsigned long *offset, size_t size)
{
        return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
        int ret;

        /* Un-reserve the space reserved for the terminator: */
        user->limit += TERMINATOR_SIZE;

        ret = sigframe_alloc(user, &user->end_offset,
                             sizeof(struct _aarch64_ctx));
        if (ret)
                return ret;

        /* Prevent further allocation: */
        user->limit = user->size;
        return 0;
}

static void __user *apply_user_offset(
        struct rt_sigframe_user_layout const *user, unsigned long offset)
{
        char __user *base = (char __user *)user->sigframe;

        return base + offset;
}

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
        struct user_fpsimd_state const *fpsimd =
                &current->thread.uw.fpsimd_state;
        int err;

        /* copy the FP and status/control registers */
        err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
        __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
        __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

        /* copy the magic/size information */
        __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
        __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

        return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
        struct user_fpsimd_state fpsimd;
        __u32 magic, size;
        int err = 0;

        /* check the magic/size information */
        __get_user_error(magic, &ctx->head.magic, err);
        __get_user_error(size, &ctx->head.size, err);
        if (err)
                return -EFAULT;
        if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
                return -EINVAL;

        /* copy the FP and status/control registers */
        err = __copy_from_user(fpsimd.vregs, ctx->vregs,
                               sizeof(fpsimd.vregs));
        __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
        __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

        clear_thread_flag(TIF_SVE);

        /* load the hardware registers from the fpsimd_state structure */
        if (!err)
                fpsimd_update_current_state(&fpsimd);

        return err ? -EFAULT : 0;
}

struct user_ctxs {
        struct fpsimd_context __user *fpsimd;
        struct sve_context __user *sve;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
        int err = 0;
        u16 reserved[ARRAY_SIZE(ctx->__reserved)];
        unsigned int vl = current->thread.sve_vl;
        unsigned int vq = 0;

        if (test_thread_flag(TIF_SVE))
                vq = sve_vq_from_vl(vl);

        memset(reserved, 0, sizeof(reserved));

        __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
        __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
                         &ctx->head.size, err);
        __put_user_error(vl, &ctx->vl, err);
        BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
        err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

        if (vq) {
                /*
                 * This assumes that the SVE state has already been saved to
                 * the task struct by calling the function
                 * fpsimd_signal_preserve_current_state().
                 */
                err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
                                      current->thread.sve_state,
                                      SVE_SIG_REGS_SIZE(vq));
        }

        return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
        int err;
        unsigned int vq;
        struct user_fpsimd_state fpsimd;
        struct sve_context sve;

        if (__copy_from_user(&sve, user->sve, sizeof(sve)))
                return -EFAULT;

        if (sve.vl != current->thread.sve_vl)
                return -EINVAL;

        if (sve.head.size <= sizeof(*user->sve)) {
                clear_thread_flag(TIF_SVE);
                goto fpsimd_only;
        }

        vq = sve_vq_from_vl(sve.vl);

        if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
                return -EINVAL;

        /*
         * Careful: we are about to __copy_from_user() directly into
         * thread.sve_state with preemption enabled, so protection is
         * needed to prevent a racing context switch from writing stale
         * registers back over the new data.
         */

        fpsimd_flush_task_state(current);
        /* From now, fpsimd_thread_switch() won't touch thread.sve_state */

        sve_alloc(current);
        err = __copy_from_user(current->thread.sve_state,
                               (char __user const *)user->sve +
                                        SVE_SIG_REGS_OFFSET,
                               SVE_SIG_REGS_SIZE(vq));
        if (err)
                return -EFAULT;

        set_thread_flag(TIF_SVE);

fpsimd_only:
        /* copy the FP and status/control registers */
        /* restore_sigframe() already checked that user->fpsimd != NULL. */
        err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
                               sizeof(fpsimd.vregs));
        __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
        __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

        /* load the hardware registers from the fpsimd_state structure */
        if (!err)
                fpsimd_update_current_state(&fpsimd);

        return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_sve_context(void __user *ctx);
extern int restore_sve_fpsimd_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SVE */
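
/*
 * Rough shape of the sve_context record written above, assuming a
 * vector length vl with vq = sve_vq_from_vl(vl) (illustration only;
 * see <uapi/asm/sigcontext.h> for the authoritative layout):
 *
 *      sve_context header      magic/size, vl, __reserved[]
 *      Z0..Z31                 vq * 16 bytes each, at SVE_SIG_REGS_OFFSET
 *      P0..P15, FFR            vq * 2 bytes each
 *
 * When TIF_SVE is clear, vq is 0 and only the header is emitted.
 */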

static int parse_user_sigframe(struct user_ctxs *user,
                               struct rt_sigframe __user *sf)
{
        struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
        struct _aarch64_ctx __user *head;
        char __user *base = (char __user *)&sc->__reserved;
        size_t offset = 0;
        size_t limit = sizeof(sc->__reserved);
        bool have_extra_context = false;
        char const __user *const sfp = (char const __user *)sf;

        user->fpsimd = NULL;
        user->sve = NULL;

        if (!IS_ALIGNED((unsigned long)base, 16))
                goto invalid;

        while (1) {
                int err = 0;
                u32 magic, size;
                char const __user *userp;
                struct extra_context const __user *extra;
                u64 extra_datap;
                u32 extra_size;
                struct _aarch64_ctx const __user *end;
                u32 end_magic, end_size;

                if (limit - offset < sizeof(*head))
                        goto invalid;

                if (!IS_ALIGNED(offset, 16))
                        goto invalid;

                head = (struct _aarch64_ctx __user *)(base + offset);
                __get_user_error(magic, &head->magic, err);
                __get_user_error(size, &head->size, err);
                if (err)
                        return err;

                if (limit - offset < size)
                        goto invalid;

                switch (magic) {
                case 0:
                        if (size)
                                goto invalid;

                        goto done;

                case FPSIMD_MAGIC:
                        if (!system_supports_fpsimd())
                                goto invalid;
                        if (user->fpsimd)
                                goto invalid;

                        if (size < sizeof(*user->fpsimd))
                                goto invalid;

                        user->fpsimd = (struct fpsimd_context __user *)head;
                        break;

                case ESR_MAGIC:
                        /* ignore */
                        break;

                case SVE_MAGIC:
                        if (!system_supports_sve())
                                goto invalid;

                        if (user->sve)
                                goto invalid;

                        if (size < sizeof(*user->sve))
                                goto invalid;

                        user->sve = (struct sve_context __user *)head;
                        break;

                case EXTRA_MAGIC:
                        if (have_extra_context)
                                goto invalid;

                        if (size < sizeof(*extra))
                                goto invalid;

                        userp = (char const __user *)head;

                        extra = (struct extra_context const __user *)userp;
                        userp += size;

                        __get_user_error(extra_datap, &extra->datap, err);
                        __get_user_error(extra_size, &extra->size, err);
                        if (err)
                                return err;

                        /* Check for the dummy terminator in __reserved[]: */

                        if (limit - offset - size < TERMINATOR_SIZE)
                                goto invalid;

                        end = (struct _aarch64_ctx const __user *)userp;
                        userp += TERMINATOR_SIZE;

                        __get_user_error(end_magic, &end->magic, err);
                        __get_user_error(end_size, &end->size, err);
                        if (err)
                                return err;

                        if (end_magic || end_size)
                                goto invalid;

                        /* Prevent looping/repeated parsing of extra_context */
                        have_extra_context = true;

                        base = (__force void __user *)extra_datap;
                        if (!IS_ALIGNED((unsigned long)base, 16))
                                goto invalid;

                        if (!IS_ALIGNED(extra_size, 16))
                                goto invalid;

                        if (base != userp)
                                goto invalid;

                        /* Reject "unreasonably large" frames: */
                        if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
                                goto invalid;

                        /*
                         * Ignore trailing terminator in __reserved[]
                         * and start parsing extra data:
                         */
                        offset = 0;
                        limit = extra_size;

                        if (!access_ok(base, limit))
                                goto invalid;

                        continue;

                default:
                        goto invalid;
                }

                if (size < sizeof(*head))
                        goto invalid;

                if (limit - offset < size)
                        goto invalid;

                offset += size;
        }

done:
        return 0;

invalid:
        return -EINVAL;
}
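
/*
 * For reference, userspace walks the same record chain from a signal
 * handler.  A minimal sketch (hypothetical handler, error handling
 * elided) of finding one record by magic:
 *
 *      void handler(int sig, siginfo_t *info, void *ucp)
 *      {
 *              ucontext_t *uc = ucp;
 *              struct _aarch64_ctx *ctx =
 *                      (void *)uc->uc_mcontext.__reserved;
 *
 *              while (ctx->magic) {
 *                      if (ctx->magic == FPSIMD_MAGIC)
 *                              break;  // found it
 *                      ctx = (void *)((char *)ctx + ctx->size);
 *              }
 *      }
 *
 * A robust walker must also follow EXTRA_MAGIC records out to their
 * out-of-line data, exactly as the kernel-side parser above does.
 */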

static int restore_sigframe(struct pt_regs *regs,
                            struct rt_sigframe __user *sf)
{
        sigset_t set;
        int i, err;
        struct user_ctxs user;

        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0)
                set_current_blocked(&set);

        for (i = 0; i < 31; i++)
                __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                                 err);
        __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
        __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
        __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

        /*
         * Avoid sys_rt_sigreturn() restarting.
         */
        forget_syscall(regs);

        err |= !valid_user_regs(&regs->user_regs, current);
        if (err == 0)
                err = parse_user_sigframe(&user, sf);

        if (err == 0 && system_supports_fpsimd()) {
                if (!user.fpsimd)
                        return -EINVAL;

                if (user.sve) {
                        if (!system_supports_sve())
                                return -EINVAL;

                        err = restore_sve_fpsimd_context(&user);
                } else {
                        err = restore_fpsimd_context(user.fpsimd);
                }
        }

        return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 128-bit boundary, then 'sp'
         * should be word aligned here.
         */
        if (regs->sp & 15)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->sp;

        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;

        if (restore_sigframe(regs, frame))
                goto badframe;

        if (restore_altstack(&frame->uc.uc_stack))
                goto badframe;

        return regs->regs[0];

badframe:
        arm64_notify_segfault(regs->sp);
        return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
                                 bool add_all)
{
        int err;

        if (system_supports_fpsimd()) {
                err = sigframe_alloc(user, &user->fpsimd_offset,
                                     sizeof(struct fpsimd_context));
                if (err)
                        return err;
        }

        /* fault information, if valid */
        if (add_all || current->thread.fault_code) {
                err = sigframe_alloc(user, &user->esr_offset,
                                     sizeof(struct esr_context));
                if (err)
                        return err;
        }

        if (system_supports_sve()) {
                unsigned int vq = 0;

                if (add_all || test_thread_flag(TIF_SVE)) {
                        int vl = sve_max_vl;

                        if (!add_all)
                                vl = current->thread.sve_vl;

                        vq = sve_vq_from_vl(vl);
                }

                err = sigframe_alloc(user, &user->sve_offset,
                                     SVE_SIG_CONTEXT_SIZE(vq));
                if (err)
                        return err;
        }

        return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
                          struct pt_regs *regs, sigset_t *set)
{
        int i, err = 0;
        struct rt_sigframe __user *sf = user->sigframe;

        /* set up the stack frame for unwinding */
        __put_user_error(regs->regs[29], &user->next_frame->fp, err);
        __put_user_error(regs->regs[30], &user->next_frame->lr, err);

        for (i = 0; i < 31; i++)
                __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                                 err);
        __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
        __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
        __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

        __put_user_error(current->thread.fault_address,
                         &sf->uc.uc_mcontext.fault_address, err);

        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

        if (err == 0 && system_supports_fpsimd()) {
                struct fpsimd_context __user *fpsimd_ctx =
                        apply_user_offset(user, user->fpsimd_offset);
                err |= preserve_fpsimd_context(fpsimd_ctx);
        }

        /* fault information, if valid */
        if (err == 0 && user->esr_offset) {
                struct esr_context __user *esr_ctx =
                        apply_user_offset(user, user->esr_offset);

                __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
                __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
                __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
        }

        /* Scalable Vector Extension state, if present */
        if (system_supports_sve() && err == 0 && user->sve_offset) {
                struct sve_context __user *sve_ctx =
                        apply_user_offset(user, user->sve_offset);
                err |= preserve_sve_context(sve_ctx);
        }

        if (err == 0 && user->extra_offset) {
                char __user *sfp = (char __user *)user->sigframe;
                char __user *userp =
                        apply_user_offset(user, user->extra_offset);

                struct extra_context __user *extra;
                struct _aarch64_ctx __user *end;
                u64 extra_datap;
                u32 extra_size;

                extra = (struct extra_context __user *)userp;
                userp += EXTRA_CONTEXT_SIZE;

                end = (struct _aarch64_ctx __user *)userp;
                userp += TERMINATOR_SIZE;

                /*
                 * extra_datap is just written to the signal frame.
                 * The value gets cast back to a void __user *
                 * during sigreturn.
                 */
                extra_datap = (__force u64)userp;
                extra_size = sfp + round_up(user->size, 16) - userp;

                __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
                __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
                __put_user_error(extra_datap, &extra->datap, err);
                __put_user_error(extra_size, &extra->size, err);

                /* Add the terminator */
                __put_user_error(0, &end->magic, err);
                __put_user_error(0, &end->size, err);
        }

        /* set the "end" magic */
        if (err == 0) {
                struct _aarch64_ctx __user *end =
                        apply_user_offset(user, user->end_offset);

                __put_user_error(0, &end->magic, err);
                __put_user_error(0, &end->size, err);
        }

        return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
                        struct ksignal *ksig, struct pt_regs *regs)
{
        unsigned long sp, sp_top;
        int err;

        init_user_layout(user);
        err = setup_sigframe_layout(user, false);
        if (err)
                return err;

        sp = sp_top = sigsp(regs->sp, ksig);

        sp = round_down(sp - sizeof(struct frame_record), 16);
        user->next_frame = (struct frame_record __user *)sp;

        sp = round_down(sp, 16) - sigframe_size(user);
        user->sigframe = (struct rt_sigframe __user *)sp;

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(user->sigframe, sp_top - sp))
                return -EFAULT;

        return 0;
}
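
/*
 * Resulting stack picture (illustrative; addresses grow downwards,
 * not an ABI statement):
 *
 *      sp_top ->  original sp, or top of the alternate signal stack
 *                 struct frame_record { fp; lr; }   <- new regs[29]
 *      sp     ->  struct rt_sigframe (siginfo + ucontext + records)
 *
 * Both blocks are 16-byte aligned, matching AAPCS64 stack alignment,
 * so unwinders can walk from the handler through the frame record
 * back into the interrupted context.
 */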

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                         struct rt_sigframe_user_layout *user, int usig)
{
        __sigrestore_t sigtramp;

        regs->regs[0] = usig;
        regs->sp = (unsigned long)user->sigframe;
        regs->regs[29] = (unsigned long)&user->next_frame->fp;
        regs->pc = (unsigned long)ka->sa.sa_handler;

        /*
         * Signal delivery is a (wacky) indirect function call in
         * userspace, so simulate the same setting of BTYPE as a BLR
         * <register containing the signal handler entry point>.
         * Signal delivery to a location in a PROT_BTI guarded page
         * that is not a function entry point will now trigger a
         * SIGILL in userspace.
         *
         * If the signal handler entry point is not in a PROT_BTI
         * guarded page, this is harmless.
         */
        if (system_supports_bti()) {
                regs->pstate &= ~PSR_BTYPE_MASK;
                regs->pstate |= PSR_BTYPE_C;
        }

        /* TCO (Tag Check Override) always cleared for signal handlers */
        regs->pstate &= ~PSR_TCO_BIT;

        if (ka->sa.sa_flags & SA_RESTORER)
                sigtramp = ka->sa.sa_restorer;
        else
                sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

        regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
                          struct pt_regs *regs)
{
        struct rt_sigframe_user_layout user;
        struct rt_sigframe __user *frame;
        int err = 0;

        fpsimd_signal_preserve_current_state();

        if (get_sigframe(&user, ksig, regs))
                return 1;

        frame = user.sigframe;

        __put_user_error(0, &frame->uc.uc_flags, err);
        __put_user_error(NULL, &frame->uc.uc_link, err);

        err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
        err |= setup_sigframe(&user, regs, set);
        if (err == 0) {
                setup_return(regs, &ksig->ka, &user, usig);
                if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
                        err |= copy_siginfo_to_user(&frame->info, &ksig->info);
                        regs->regs[1] = (unsigned long)&frame->info;
                        regs->regs[2] = (unsigned long)&frame->uc;
                }
        }

        return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
        if (is_compat_task())
                compat_setup_restart_syscall(regs);
        else
                regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int usig = ksig->sig;
        int ret;

        rseq_signal_deliver(ksig, regs);

        /*
         * Set up the stack frame
         */
        if (is_compat_task()) {
                if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                        ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
                else
                        ret = compat_setup_frame(usig, ksig, oldset, regs);
        } else {
                ret = setup_rt_frame(usig, ksig, oldset, regs);
        }

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(&regs->user_regs, current);

        /* Step into the signal handler if we are stepping */
        signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
        unsigned long continue_addr = 0, restart_addr = 0;
        int retval = 0;
        struct ksignal ksig;
        bool syscall = in_syscall(regs);

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                continue_addr = regs->pc;
                restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
                retval = regs->regs[0];

                /*
                 * Avoid additional syscall restarting via ret_to_user.
                 */
                forget_syscall(regs);

                /*
                 * Prepare for system call restart.  We do this here so that a
                 * debugger will see the already changed PC.
                 */
                switch (retval) {
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                case -ERESTART_RESTARTBLOCK:
                        regs->regs[0] = regs->orig_x0;
                        regs->pc = restart_addr;
                        break;
                }
        }

        /*
         * Get the signal to deliver.  When running under ptrace, at this
         * point the debugger may change all of our registers.
         */
        if (get_signal(&ksig)) {
                /*
                 * Depending on the signal settings, we may need to revert the
                 * decision to restart the system call, but skip this if a
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->pc == restart_addr &&
                    (retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK ||
                     (retval == -ERESTARTSYS &&
                      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
                        syscall_set_return_value(current, regs, -EINTR, 0);
                        regs->pc = continue_addr;
                }

                handle_signal(&ksig, regs);
                return;
        }

        /*
         * Handle restarting a different system call.  As above, if a debugger
         * has chosen to restart at a different PC, ignore the restart.
         */
        if (syscall && regs->pc == restart_addr) {
                if (retval == -ERESTART_RESTARTBLOCK)
                        setup_restart_syscall(regs);
                user_rewind_single_step(current);
        }

        restore_saved_sigmask();
}
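
/*
 * Worked example of the restart logic above (illustrative): a read()
 * blocked in the kernel returns -ERESTARTSYS when a signal arrives.
 * If the handler was installed with SA_RESTART, x0 is rewound to
 * orig_x0 and the PC backed up by one instruction so the svc is
 * re-executed on return from the handler; otherwise the return value
 * is rewritten to -EINTR and execution continues after the syscall.
 */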

static inline void do_retuser(void)
{
        unsigned long thread_flags;

        if (dovetailing()) {
                thread_flags = current_thread_info()->flags;
                if (thread_flags & _TIF_RETUSER)
                        inband_retuser_notify();
        }
}
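
/*
 * Dovetail: the _TIF_RETUSER hook above lets a companion core (e.g.
 * the EVL core) run a callback on the in-band return-to-user path.
 * On a kernel with no out-of-band stage attached, dovetailing() is
 * false and do_retuser() is a no-op.
 */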

asmlinkage void do_notify_resume(struct pt_regs *regs,
                                 unsigned long thread_flags)
{
        WARN_ON_ONCE(irq_pipeline_debug() && running_oob());

        stall_inband();

        do {
                /* Check valid user FS if needed */
                addr_limit_user_check();

                if (thread_flags & _TIF_NEED_RESCHED) {
                        /* Unmask Debug and SError for the next task */
                        local_daif_restore(irqs_pipelined() ? DAIF_PROCCTX :
                                           DAIF_PROCCTX_NOIRQ);

                        schedule();
                } else {
                        unstall_inband();
                        local_daif_restore(DAIF_PROCCTX);

                        if (thread_flags & _TIF_UPROBE)
                                uprobe_notify_resume(regs);

                        if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
                                clear_thread_flag(TIF_MTE_ASYNC_FAULT);
                                send_sig_fault(SIGSEGV, SEGV_MTEAERR,
                                               (void __user *)NULL, current);
                        }

                        if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                                do_signal(regs);

                        if (thread_flags & _TIF_NOTIFY_RESUME) {
                                tracehook_notify_resume(regs);
                                rseq_handle_notify_resume(NULL, regs);
                        }

                        if (thread_flags & _TIF_FOREIGN_FPSTATE)
                                fpsimd_restore_current_state();

                        do_retuser();
                        /* RETUSER might have switched oob */
                        if (running_oob()) {
                                local_daif_mask();
                                return;
                        }
                }

                /*
                 * Dovetail: we may have restored the fpsimd state for
                 * current with no other opportunity to check for
                 * _TIF_FOREIGN_FPSTATE until we are back running on
                 * el0, so we must not take any interrupt until then,
                 * otherwise we may end up resuming with some OOB
                 * thread's fpsimd state.
                 */
                local_daif_mask();
                stall_inband();
                thread_flags = READ_ONCE(current_thread_info()->flags);
        } while (thread_flags & _TIF_WORK_MASK);

        unstall_inband();
}
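
/*
 * Note on the stall/unstall pairing above (Dovetail, stated as a
 * sketch of the pipeline model rather than a spec): stalling the
 * in-band stage keeps in-band interrupt delivery logically disabled
 * while the work flags are sampled, mirroring the interrupts-off
 * entry state, while out-of-band interrupts keep flowing through the
 * pipeline untouched.
 */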

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
        struct rt_sigframe_user_layout user;

        init_user_layout(&user);

        /*
         * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
         * be big enough, but it's our best guess:
         */
        if (WARN_ON(setup_sigframe_layout(&user, true)))
                return;

        signal_minsigstksz = sigframe_size(&user) +
                round_up(sizeof(struct frame_record), 16) +
                16; /* max alignment padding */
}
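
/*
 * Userspace consumes this value through the auxiliary vector.  A
 * minimal sketch of a consumer (hypothetical, assuming glibc's
 * getauxval() and an AT_MINSIGSTKSZ-aware kernel):
 *
 *      #include <sys/auxv.h>
 *
 *      size_t min = getauxval(AT_MINSIGSTKSZ);
 *      if (!min)
 *              min = MINSIGSTKSZ;      // fallback for older kernels
 *      stack_t ss = {
 *              .ss_sp = malloc(min),
 *              .ss_size = min,
 *      };
 *      sigaltstack(&ss, NULL);
 *
 * so that SVE-heavy signal frames still fit on the alternate stack.
 */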