2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
--- a/kernel/arch/arm64/kernel/module.c
+++ b/kernel/arch/arm64/kernel/module.c
@@ -1,25 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * AArch64 loadable module support.
  *
  * Copyright (C) 2012 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  *
  * Author: Will Deacon <will.deacon@arm.com>
  */

 #include <linux/bitops.h>
 #include <linux/elf.h>
+#include <linux/ftrace.h>
 #include <linux/gfp.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
@@ -40,28 +30,32 @@
        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
                gfp_mask |= __GFP_NOWARN;

-       if (IS_ENABLED(CONFIG_KASAN))
+       if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+           IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                /* don't exceed the static module region - see below */
                module_alloc_end = MODULES_END;

        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-                               module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
+                               module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
                                NUMA_NO_NODE, __builtin_return_address(0));

        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-           !IS_ENABLED(CONFIG_KASAN))
+           (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
+            (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+             !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
                /*
-                * KASAN can only deal with module allocations being served
-                * from the reserved module region, since the remainder of
-                * the vmalloc region is already backed by zero shadow pages,
-                * and punching holes into it is non-trivial. Since the module
-                * region is not randomized when KASAN is enabled, it is even
+                * KASAN without KASAN_VMALLOC can only deal with module
+                * allocations being served from the reserved module region,
+                * since the remainder of the vmalloc region is already
+                * backed by zero shadow pages, and punching holes into it
+                * is non-trivial. Since the module region is not randomized
+                * when KASAN is enabled without KASAN_VMALLOC, it is even
                 * less likely that the module region gets exhausted, so we
                 * can simply omit this fallback in that case.
                 */
                p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                module_alloc_base + SZ_2G, GFP_KERNEL,
-                               PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+                               PAGE_KERNEL, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));

        if (p && (kasan_module_alloc(p, size) < 0)) {
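
Note: a minimal userspace sketch (not kernel code) of the two-stage strategy this hunk leaves module_alloc() with: try the default module window first, then retry within a 2 GiB range only when PLT veneers can bridge the distance and KASAN (without KASAN_VMALLOC) does not pin modules to the static region. try_range() is a hypothetical stand-in for __vmalloc_node_range(), and the window sizes are illustrative.

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-in for __vmalloc_node_range(); the range
 * arguments are accepted but ignored in this sketch. */
static void *try_range(size_t size, unsigned long start, unsigned long end)
{
        (void)start;
        (void)end;
        return malloc(size);
}

static void *module_alloc_sketch(size_t size, unsigned long base,
                                 unsigned long end, bool have_plts,
                                 bool kasan_static_only)
{
        void *p;

        /* KASAN without KASAN_VMALLOC must stay in the static window. */
        if (kasan_static_only)
                end = base + (128UL << 20);     /* illustrative window size */

        p = try_range(size, base, end);

        /* Fallback: anywhere within 2 GiB, reachable via PLT veneers. */
        if (!p && have_plts && !kasan_static_only)
                p = try_range(size, base, base + (2UL << 30));

        return p;
}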
@@ -100,16 +94,50 @@
 {
        s64 sval = do_reloc(op, place, val);

+       /*
+        * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+        * relative and absolute relocations as having a range of [-2^15, 2^16)
+        * or [-2^31, 2^32), respectively. However, in order to be able to
+        * detect overflows reliably, we have to choose whether we interpret
+        * such quantities as signed or as unsigned, and stick with it.
+        * The way we organize our address space requires a signed
+        * interpretation of 32-bit relative references, so let's use that
+        * for all R_AARCH64_PRELxx relocations. This means our upper
+        * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+        */
+
        switch (len) {
        case 16:
                *(s16 *)place = sval;
-               if (sval < S16_MIN || sval > U16_MAX)
-                       return -ERANGE;
+               switch (op) {
+               case RELOC_OP_ABS:
+                       if (sval < 0 || sval > U16_MAX)
+                               return -ERANGE;
+                       break;
+               case RELOC_OP_PREL:
+                       if (sval < S16_MIN || sval > S16_MAX)
+                               return -ERANGE;
+                       break;
+               default:
+                       pr_err("Invalid 16-bit data relocation (%d)\n", op);
+                       return 0;
+               }
                break;
        case 32:
                *(s32 *)place = sval;
-               if (sval < S32_MIN || sval > U32_MAX)
-                       return -ERANGE;
+               switch (op) {
+               case RELOC_OP_ABS:
+                       if (sval < 0 || sval > U32_MAX)
+                               return -ERANGE;
+                       break;
+               case RELOC_OP_PREL:
+                       if (sval < S32_MIN || sval > S32_MAX)
+                               return -ERANGE;
+                       break;
+               default:
+                       pr_err("Invalid 32-bit data relocation (%d)\n", op);
+                       return 0;
+               }
                break;
        case 64:
                *(s64 *)place = sval;
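
Note: the split bounds in the hunk above can be checked in isolation. A standalone demo of the 16-bit case, assuming Linux errno semantics (ERANGE is 34 on glibc): 0xffff is a valid unsigned absolute value but overflows the signed PC-relative range.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum reloc_op { RELOC_OP_ABS, RELOC_OP_PREL };

/* Mirrors the 16-bit bounds from the patch: unsigned interpretation
 * for absolute relocations, signed for place-relative ones. */
static int check16(enum reloc_op op, int64_t sval)
{
        switch (op) {
        case RELOC_OP_ABS:      /* valid range: [0, 2^16) */
                return (sval < 0 || sval > UINT16_MAX) ? -ERANGE : 0;
        case RELOC_OP_PREL:     /* valid range: [-2^15, 2^15) */
                return (sval < INT16_MIN || sval > INT16_MAX) ? -ERANGE : 0;
        }
        return 0;
}

int main(void)
{
        /* 0xffff fits as an absolute value but overflows as PC-relative. */
        printf("%d %d\n", check16(RELOC_OP_ABS, 0xffff),
               check16(RELOC_OP_PREL, 0xffff));        /* prints "0 -34" on glibc */
        return 0;
}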
@@ -202,13 +230,12 @@
        return 0;
 }

-static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
+static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+                          __le32 *place, u64 val)
 {
        u32 insn;

-       if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
-           !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
-           ((u64)place & 0xfff) < 0xff8)
+       if (!is_forbidden_offset_for_adrp(place))
                return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
                                      AARCH64_INSN_IMM_ADR);

@@ -219,7 +246,7 @@
                insn &= ~BIT(31);
        } else {
                /* out of range for ADR -> emit a veneer */
-               val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
+               val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
                if (!val)
                        return -ENOEXEC;
                insn = aarch64_insn_gen_branch_imm((u64)place, val,
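
Note: the two hunks above replace the open-coded erratum test with is_forbidden_offset_for_adrp() and thread sechdrs through to the veneer emitter. The removed lines show the address portion of that test; a standalone restatement of just that part (the real helper also checks CONFIG_ARM64_ERRATUM_843419 and the ARM64_WORKAROUND_843419 CPU capability, as the removed lines did):

#include <stdbool.h>
#include <stdint.h>

/* Erratum 843419 affects ADRP instructions in the last two 4-byte
 * slots of a 4 KiB page (page offsets 0xff8 and 0xffc), so such
 * locations must be rewritten to ADR or routed through a veneer. */
static bool forbidden_offset_for_adrp(const void *place)
{
        return ((uintptr_t)place & 0xfff) >= 0xff8;
}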
@@ -292,18 +319,21 @@
                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
+                       fallthrough;
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
+                       fallthrough;
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
+                       fallthrough;
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
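
Note: the fallthrough; statements added above are the kernel's portable spelling of an intentional case fall-through (it silences -Wimplicit-fallthrough). It is defined in include/linux/compiler_attributes.h roughly as:

#if __has_attribute(__fallthrough__)
# define fallthrough    __attribute__((__fallthrough__))
#else
# define fallthrough    do {} while (0) /* fallthrough */
#endif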
@@ -371,8 +401,9 @@
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
+                       fallthrough;
                case R_AARCH64_ADR_PREL_PG_HI21:
-                       ovf = reloc_insn_adrp(me, loc, val);
+                       ovf = reloc_insn_adrp(me, sechdrs, loc, val);
                        if (ovf && ovf != -ERANGE)
                                return ovf;
                        break;
@@ -417,7 +448,7 @@

                        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
                            ovf == -ERANGE) {
-                               val = module_emit_plt_entry(me, loc, &rel[i], sym);
+                               val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
                                if (!val)
                                        return -ENOEXEC;
                                ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
@@ -444,22 +475,58 @@
        return -ENOEXEC;
 }

-int module_finalize(const Elf_Ehdr *hdr,
-                   const Elf_Shdr *sechdrs,
-                   struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+                                   const Elf_Shdr *sechdrs,
+                                   const char *name)
 {
        const Elf_Shdr *s, *se;
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
-               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
-                       apply_alternatives_module((void *)s->sh_addr, s->sh_size);
-#ifdef CONFIG_ARM64_MODULE_PLTS
-               if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
-                   !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
-                       me->arch.ftrace_trampoline = (void *)s->sh_addr;
-#endif
+               if (strcmp(name, secstrs + s->sh_name) == 0)
+                       return s;
        }

+       return NULL;
+}
+
+static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
+{
+       *plt = get_plt_entry(addr, plt);
+}
+
+static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
+                                 const Elf_Shdr *sechdrs,
+                                 struct module *mod)
+{
+#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
+       const Elf_Shdr *s;
+       struct plt_entry *plts;
+
+       s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
+       if (!s)
+               return -ENOEXEC;
+
+       plts = (void *)s->sh_addr;
+
+       __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
+
+       if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+               __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);
+
+       mod->arch.ftrace_trampolines = plts;
+#endif
        return 0;
 }
+
+int module_finalize(const Elf_Ehdr *hdr,
+                   const Elf_Shdr *sechdrs,
+                   struct module *me)
+{
+       const Elf_Shdr *s;
+       s = find_section(hdr, sechdrs, ".altinstructions");
+       if (s)
+               apply_alternatives_module((void *)s->sh_addr, s->sh_size);
+
+       return module_init_ftrace_plt(hdr, sechdrs, me);
+}
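
Note: find_section() is plain ELF section-header traversal, so it can be exercised outside the kernel. A userspace analogue using <elf.h>, assuming the whole ELF image is mapped contiguously at hdr (as it is for the module loader's temporary image):

#include <elf.h>
#include <stddef.h>
#include <string.h>

static const Elf64_Shdr *find_section(const Elf64_Ehdr *hdr,
                                      const Elf64_Shdr *sechdrs,
                                      const char *name)
{
        /* Section names live in the section-header string table,
         * indexed by e_shstrndx. */
        const char *secstrs = (const char *)hdr +
                              sechdrs[hdr->e_shstrndx].sh_offset;
        const Elf64_Shdr *s, *se;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
                if (strcmp(name, secstrs + s->sh_name) == 0)
                        return s;

        return NULL;
}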