2024-10-16 50a212ec906f7524620675f0c57357691c26c81f
kernel/arch/arm64/kernel/alternative.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * alternative runtime patching
  * inspired by the x86 version
  *
  * Copyright (C) 2014 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #define pr_fmt(fmt) "alternatives: " fmt
@@ -32,17 +21,28 @@
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

-int alternatives_applied;
+/* Volatile, as we may be patching the guts of READ_ONCE() */
+static volatile int all_alternatives_applied;
+
+static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

 struct alt_region {
 	struct alt_instr *begin;
 	struct alt_instr *end;
 };

+bool alternative_is_applied(u16 cpufeature)
+{
+	if (WARN_ON(cpufeature >= ARM64_NCAPS))
+		return false;
+
+	return test_bit(cpufeature, applied_alternatives);
+}
+
 /*
  * Check if the target PC is within an alternative block.
  */
-static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
 	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
 	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
@@ -50,7 +50,7 @@

 #define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

-static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
+static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
 {
 	u32 insn;

@@ -95,7 +95,7 @@
 	return insn;
 }

-static void patch_alternative(struct alt_instr *alt,
+static noinstr void patch_alternative(struct alt_instr *alt,
 			      __le32 *origptr, __le32 *updptr, int nr_inst)
 {
 	__le32 *replptr;
@@ -133,7 +133,8 @@
 	} while (cur += d_size, cur < end);
 }

-static void __nocfi __apply_alternatives(void *alt_region, bool is_module)
+static void __nocfi __apply_alternatives(void *alt_region, bool is_module,
+					 unsigned long *feature_mask)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
@@ -142,6 +143,9 @@

 	for (alt = region->begin; alt < region->end; alt++) {
 		int nr_inst;
+
+		if (!test_bit(alt->cpufeature, feature_mask))
+			continue;

 		/* Use ARM64_CB_PATCH as an unconditional patch */
 		if (alt->cpufeature < ARM64_CB_PATCH &&
@@ -180,6 +184,12 @@
 		dsb(ish);
 		__flush_icache_all();
 		isb();
+
+		/* Ignore ARM64_CB bit from feature mask */
+		bitmap_or(applied_alternatives, applied_alternatives,
+			  feature_mask, ARM64_NCAPS);
+		bitmap_and(applied_alternatives, applied_alternatives,
+			   cpu_hwcaps, ARM64_NCAPS);
 	}
 }

@@ -196,14 +206,19 @@

 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
-		while (!READ_ONCE(alternatives_applied))
+		while (!all_alternatives_applied)
 			cpu_relax();
 		isb();
 	} else {
-		BUG_ON(alternatives_applied);
-		__apply_alternatives(&region, false);
+		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
+
+		bitmap_complement(remaining_capabilities, boot_capabilities,
+				  ARM64_NPATCHABLE);
+
+		BUG_ON(all_alternatives_applied);
+		__apply_alternatives(&region, false, remaining_capabilities);
 		/* Barriers provided by the cache flushing */
-		WRITE_ONCE(alternatives_applied, 1);
+		all_alternatives_applied = 1;
 	}

 	return 0;
@@ -212,8 +227,25 @@
 void __init apply_alternatives_all(void)
 {
 	/* better not try code patching on a live SMP system */
-	kvm_compute_layout();
 	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
+}
+
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void __init apply_boot_alternatives(void)
+{
+	struct alt_region region = {
+		.begin	= (struct alt_instr *)__alt_instructions,
+		.end	= (struct alt_instr *)__alt_instructions_end,
+	};
+
+	/* If called on non-boot cpu things could go wrong */
+	WARN_ON(smp_processor_id() != 0);
+
+	__apply_alternatives(&region, false, &boot_capabilities[0]);
 }

 #ifdef CONFIG_MODULES
@@ -223,7 +255,10 @@
 		.begin	= start,
 		.end	= start + length,
 	};
+	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);

-	__apply_alternatives(&region, true);
+	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
+
+	__apply_alternatives(&region, true, &all_capabilities[0]);
 }
 #endif
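
The core change above is that __apply_alternatives() now takes a feature-mask bitmap and only patches entries whose cpufeature bit is set, which lets boot-CPU alternatives be applied early and the remainder (the complement of boot_capabilities) be applied later under stop_machine(). The following is a standalone userspace sketch of that two-pass gating pattern, not kernel code: all names here (NCAPS, struct fake_alt, apply_pass, boot_caps) are illustrative stand-ins for the kernel identifiers, and plain byte arrays stand in for DECLARE_BITMAP()/test_bit().

/*
 * Illustrative sketch only; mirrors the test_bit() gate added to the
 * patch loop and the bitmap_complement() used for the SMP pass.
 * Build: cc -o demo demo.c && ./demo
 */
#include <stdio.h>
#include <string.h>

#define NCAPS 8                          /* stand-in for ARM64_NCAPS */

static unsigned char boot_caps[NCAPS];   /* features found on the boot CPU */
static unsigned char applied[NCAPS];     /* stand-in for applied_alternatives */

struct fake_alt {
	int cpufeature;                  /* capability this entry depends on */
	const char *what;
};

/* One patching pass: skip entries not selected by the caller's mask. */
static void apply_pass(const struct fake_alt *alts, int n,
		       const unsigned char *mask)
{
	for (int i = 0; i < n; i++) {
		if (!mask[alts[i].cpufeature])
			continue;        /* the new feature_mask gate */
		printf("patching %s (feature %d)\n",
		       alts[i].what, alts[i].cpufeature);
		applied[alts[i].cpufeature] = 1;
	}
}

int main(void)
{
	const struct fake_alt alts[] = {
		{ 1, "boot-cpu workaround" },
		{ 5, "late SMP feature" },
	};
	unsigned char mask[NCAPS];

	boot_caps[1] = 1;                /* feature 1 detected at boot */

	/* Early pass: boot-CPU features only (apply_boot_alternatives). */
	memcpy(mask, boot_caps, sizeof(mask));
	apply_pass(alts, 2, mask);

	/* SMP pass: the complement, i.e. everything not yet applied
	 * (bitmap_complement() in __apply_alternatives_multi_stop). */
	for (int i = 0; i < NCAPS; i++)
		mask[i] = !boot_caps[i];
	apply_pass(alts, 2, mask);
	return 0;
}

Run as sketched, the first pass patches only the boot-cpu workaround and the second pass picks up the late SMP feature, matching how the patch splits work between apply_boot_alternatives() and apply_alternatives_all(); the module path instead passes a filled mask (bitmap_fill) so module regions are patched against every capability in one go.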