@@ -1,25 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * AArch64 loadable module support.
  *
  * Copyright (C) 2012 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  *
  * Author: Will Deacon <will.deacon@arm.com>
  */
 
 #include <linux/bitops.h>
 #include <linux/elf.h>
+#include <linux/ftrace.h>
 #include <linux/gfp.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
@@ -40,28 +30,32 @@
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
 
-	if (IS_ENABLED(CONFIG_KASAN))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/* don't exceed the static module region - see below */
 		module_alloc_end = MODULES_END;
 
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
+				module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-	    !IS_ENABLED(CONFIG_KASAN))
+	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
+	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
 		/*
-		 * KASAN can only deal with module allocations being served
-		 * from the reserved module region, since the remainder of
-		 * the vmalloc region is already backed by zero shadow pages,
-		 * and punching holes into it is non-trivial. Since the module
-		 * region is not randomized when KASAN is enabled, it is even
+		 * KASAN without KASAN_VMALLOC can only deal with module
+		 * allocations being served from the reserved module region,
+		 * since the remainder of the vmalloc region is already
+		 * backed by zero shadow pages, and punching holes into it
+		 * is non-trivial. Since the module region is not randomized
+		 * when KASAN is enabled without KASAN_VMALLOC, it is even
 		 * less likely that the module region gets exhausted, so we
 		 * can simply omit this fallback in that case.
 		 */
 		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
 				module_alloc_base + SZ_2G, GFP_KERNEL,
-				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				PAGE_KERNEL, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 
 	if (p && (kasan_module_alloc(p, size) < 0)) {
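
Note: the fallback condition above stacks three config checks. As a way to see the logic at a glance, here is a hypothetical predicate (the helper name and the standalone form are ours, not the kernel's) that mirrors the test in the hunk:

static bool module_plt_fallback_allowed(void)
{
	/*
	 * Mirrors the hunk above: the [module_alloc_base,
	 * module_alloc_base + SZ_2G) fallback window is usable when
	 * KASAN can back arbitrary vmalloc addresses with real shadow
	 * (KASAN_VMALLOC), or when no shadow-based KASAN mode is
	 * configured at all.
	 */
	if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		return false;
	return IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		!IS_ENABLED(CONFIG_KASAN_SW_TAGS));
}
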
@@ -100,16 +94,50 @@
 {
 	s64 sval = do_reloc(op, place, val);
 
+	/*
+	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+	 * relative and absolute relocations as having a range of [-2^15, 2^16)
+	 * or [-2^31, 2^32), respectively. However, in order to be able to
+	 * detect overflows reliably, we have to choose whether we interpret
+	 * such quantities as signed or as unsigned, and stick with it.
+	 * The way we organize our address space requires a signed
+	 * interpretation of 32-bit relative references, so let's use that
+	 * for all R_AARCH64_PRELxx relocations. This means our upper
+	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+	 */
+
 	switch (len) {
 	case 16:
 		*(s16 *)place = sval;
-		if (sval < S16_MIN || sval > U16_MAX)
-			return -ERANGE;
+		switch (op) {
+		case RELOC_OP_ABS:
+			if (sval < 0 || sval > U16_MAX)
+				return -ERANGE;
+			break;
+		case RELOC_OP_PREL:
+			if (sval < S16_MIN || sval > S16_MAX)
+				return -ERANGE;
+			break;
+		default:
+			pr_err("Invalid 16-bit data relocation (%d)\n", op);
+			return 0;
+		}
 		break;
 	case 32:
 		*(s32 *)place = sval;
-		if (sval < S32_MIN || sval > U32_MAX)
-			return -ERANGE;
+		switch (op) {
+		case RELOC_OP_ABS:
+			if (sval < 0 || sval > U32_MAX)
+				return -ERANGE;
+			break;
+		case RELOC_OP_PREL:
+			if (sval < S32_MIN || sval > S32_MAX)
+				return -ERANGE;
+			break;
+		default:
+			pr_err("Invalid 32-bit data relocation (%d)\n", op);
+			return 0;
+		}
 		break;
 	case 64:
 		*(s64 *)place = sval;
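
Note: the practical effect of splitting the bounds by operation is that a value like 0xffff is a valid 16-bit absolute quantity but overflows a signed 16-bit place-relative one, where the old combined [S16_MIN, U16_MAX] check accepted both. A small userspace sketch of the new bounds (illustration only, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t sval = 0xffff;	/* 65535 */

	/* RELOC_OP_ABS, len 16: within [0, U16_MAX], accepted */
	assert(sval >= 0 && sval <= UINT16_MAX);

	/* RELOC_OP_PREL, len 16: outside [S16_MIN, S16_MAX], -ERANGE */
	assert(sval < INT16_MIN || sval > INT16_MAX);
	return 0;
}
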
@@ -202,13 +230,12 @@
 	return 0;
 }
 
-static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
+static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+			   __le32 *place, u64 val)
 {
 	u32 insn;
 
-	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
-	    !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
-	    ((u64)place & 0xfff) < 0xff8)
+	if (!is_forbidden_offset_for_adrp(place))
 		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
 				      AARCH64_INSN_IMM_ADR);
 
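
Note: is_forbidden_offset_for_adrp() is defined elsewhere (this patch only calls it), but the three removed lines show exactly what it must encapsulate; inverting them gives a sketch like the following (the sketch body is ours, reconstructed from the removed condition):

static bool is_forbidden_offset_for_adrp(void *place)
{
	/*
	 * Erratum 843419 only bites when the workaround is built in,
	 * the running CPU is actually affected, and the ADRP occupies
	 * one of the last two instruction slots of a 4 KiB page.
	 */
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}
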
@@ -219,7 +246,7 @@
 		insn &= ~BIT(31);
 	} else {
 		/* out of range for ADR -> emit a veneer */
-		val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
+		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
 		if (!val)
 			return -ENOEXEC;
 		insn = aarch64_insn_gen_branch_imm((u64)place, val,
@@ -292,18 +319,21 @@
 		/* MOVW instruction relocations. */
 		case R_AARCH64_MOVW_UABS_G0_NC:
 			overflow_check = false;
+			fallthrough;
 		case R_AARCH64_MOVW_UABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
 					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G1_NC:
 			overflow_check = false;
+			fallthrough;
 		case R_AARCH64_MOVW_UABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
 					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G2_NC:
 			overflow_check = false;
+			fallthrough;
 		case R_AARCH64_MOVW_UABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
 					      AARCH64_INSN_IMM_MOVKZ);
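
Note: for context on why the _NC ("no check") variants disable overflow_check, each MOVW_UABS_Gn relocation patches one 16-bit slice of the value, and a slice below the topmost one carries no overflow information of its own. A userspace sketch of the group extraction (the example address is arbitrary):

#include <assert.h>
#include <stdint.h>

static uint16_t movw_group(uint64_t val, unsigned int shift)
{
	return (uint16_t)(val >> shift);	/* G0/G1/G2/G3 slice */
}

int main(void)
{
	uint64_t addr = 0xffff000010083fd0ULL;	/* arbitrary example */

	/* MOVZ #g0, then MOVK with g1/g2/g3, rebuilds the full value */
	uint64_t rebuilt = ((uint64_t)movw_group(addr, 48) << 48) |
			   ((uint64_t)movw_group(addr, 32) << 32) |
			   ((uint64_t)movw_group(addr, 16) << 16) |
			    (uint64_t)movw_group(addr, 0);
	assert(rebuilt == addr);
	return 0;
}
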
@@ -371,8 +401,9 @@
 			break;
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 			overflow_check = false;
+			fallthrough;
 		case R_AARCH64_ADR_PREL_PG_HI21:
-			ovf = reloc_insn_adrp(me, loc, val);
+			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
 			if (ovf && ovf != -ERANGE)
 				return ovf;
 			break;
@@ -417,7 +448,7 @@
 
 		if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
 		    ovf == -ERANGE) {
-			val = module_emit_plt_entry(me, loc, &rel[i], sym);
+			val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
 			if (!val)
 				return -ENOEXEC;
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
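
Note: the -ERANGE that triggers the PLT path comes from the reloc_insn_imm() call below it, which patches a 26-bit branch immediate at bit 2 (the "2, 26" arguments). Scaled by the 4-byte instruction size, a direct B/BL therefore reaches only +/-128 MiB, which is why distant symbols need a PLT veneer. A quick arithmetic check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 26-bit signed immediate, shifted left by 2 (lsb = 2) */
	int64_t max_fwd = (((int64_t)1 << 25) - 1) << 2;
	int64_t max_bwd = -(((int64_t)1 << 25) << 2);

	assert(max_fwd == 128 * 1024 * 1024 - 4);	/* +128 MiB - 4 */
	assert(max_bwd == -128 * 1024 * 1024);		/* -128 MiB */
	return 0;
}
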
@@ -444,22 +475,58 @@
 	return -ENOEXEC;
 }
 
-int module_finalize(const Elf_Ehdr *hdr,
-		    const Elf_Shdr *sechdrs,
-		    struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+				    const Elf_Shdr *sechdrs,
+				    const char *name)
 {
 	const Elf_Shdr *s, *se;
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
 	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
-		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
-			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
-#ifdef CONFIG_ARM64_MODULE_PLTS
-		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
-		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
-			me->arch.ftrace_trampoline = (void *)s->sh_addr;
-#endif
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
 	}
 
+	return NULL;
+}
+
+static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
+{
+	*plt = get_plt_entry(addr, plt);
+}
+
+static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
+				  const Elf_Shdr *sechdrs,
+				  struct module *mod)
+{
+#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
+	const Elf_Shdr *s;
+	struct plt_entry *plts;
+
+	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
+	if (!s)
+		return -ENOEXEC;
+
+	plts = (void *)s->sh_addr;
+
+	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
+
+	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+		__init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);
+
+	mod->arch.ftrace_trampolines = plts;
+#endif
 	return 0;
 }
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	const Elf_Shdr *s;
+	s = find_section(hdr, sechdrs, ".altinstructions");
+	if (s)
+		apply_alternatives_module((void *)s->sh_addr, s->sh_size);
+
+	return module_init_ftrace_plt(hdr, sechdrs, me);
+}
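
Note: for readers unfamiliar with what __init_plt() writes, struct plt_entry and get_plt_entry() come from arch/arm64/include/asm/module.h rather than from this patch. To the best of our reading of the kernels this patch targets (quoted as background, and possibly differing in detail across versions), the entry is a three-instruction veneer:

struct plt_entry {
	/*
	 * A veneer may clobber only the IP registers, so the
	 * trampoline materializes its target in x16 and branches there.
	 */
	__le32	adrp;	/* adrp	x16, <target page>	 */
	__le32	add;	/* add	x16, x16, <page offset>	 */
	__le32	br;	/* br	x16			 */
};

get_plt_entry(addr, plt) fills those three instructions relative to the PLT's own address, so the trampoline slot at FTRACE_PLT_IDX lands on FTRACE_ADDR and, with DYNAMIC_FTRACE_WITH_REGS, the slot at FTRACE_REGS_PLT_IDX lands on FTRACE_REGS_ADDR.
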