forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 10ebd8556b7990499c896a550e3d416b444211e6
kernel/arch/arm64/kernel/cpufeature.c
@@ -1,42 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Contains CPU feature definitions
  *
  * Copyright (C) 2015 ARM Ltd.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * A note for the weary kernel hacker: the code here is confusing and hard to
+ * follow! That's partly because it's solving a nasty problem, but also because
+ * there's a little bit of over-abstraction that tends to obscure what's going
+ * on behind a maze of helper functions and macros.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * The basic problem is that hardware folks have started gluing together CPUs
+ * with distinct architectural features; in some cases even creating SoCs where
+ * user-visible instructions are available only on a subset of the available
+ * cores. We try to address this by snapshotting the feature registers of the
+ * boot CPU and comparing these with the feature registers of each secondary
+ * CPU when bringing them up. If there is a mismatch, then we update the
+ * snapshot state to indicate the lowest-common denominator of the feature,
+ * known as the "safe" value. This snapshot state can be queried to view the
+ * "sanitised" value of a feature register.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ * The sanitised register values are used to decide which capabilities we
+ * have in the system. These may be in the form of traditional "hwcaps"
+ * advertised to userspace or internal "cpucaps" which are used to configure
+ * things like alternative patching and static keys. While a feature mismatch
+ * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
+ * may prevent a CPU from being onlined at all.
+ *
+ * Some implementation details worth remembering:
+ *
+ * - Mismatched features are *always* sanitised to a "safe" value, which
+ *   usually indicates that the feature is not supported.
+ *
+ * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
+ *   warning when onlining an offending CPU and the kernel will be tainted
+ *   with TAINT_CPU_OUT_OF_SPEC.
+ *
+ * - Features marked as FTR_VISIBLE have their sanitised value visible to
+ *   userspace. FTR_VISIBLE features in registers that are only visible
+ *   to EL0 by trapping *must* have a corresponding HWCAP so that late
+ *   onlining of CPUs cannot lead to features disappearing at runtime.
+ *
+ * - A "feature" is typically a 4-bit register field. A "capability" is the
+ *   high-level description derived from the sanitised field value.
+ *
+ * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
+ *   scheme for fields in ID registers") to understand when feature fields
+ *   may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly).
+ *
+ * - KVM exposes its own view of the feature registers to guest operating
+ *   systems regardless of FTR_VISIBLE. This is typically driven from the
+ *   sanitised register values to allow virtual CPUs to be migrated between
+ *   arbitrary physical CPUs, but some features not present on the host are
+ *   also advertised and emulated. Look at sys_reg_descs[] for the gory
+ *   details.
+ *
+ * - If the arm64_ftr_bits[] for a register has a missing field, then this
+ *   field is treated as STRICT RES0, including for read_sanitised_ftr_reg().
+ *   This is stronger than FTR_HIDDEN and can be used to hide features from
+ *   KVM guests.
 */

 #define pr_fmt(fmt) "CPU features: " fmt

 #include <linux/bsearch.h>
 #include <linux/cpumask.h>
+#include <linux/crash_dump.h>
+#include <linux/percpu.h>
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
+#include <linux/sysfs.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/kasan.h>
+
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/fpsimd.h>
+#include <asm/kvm_host.h>
+#include <asm/hwcap.h>
 #include <asm/mmu_context.h>
+#include <asm/mte.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
 #include <asm/traps.h>
+#include <asm/vectors.h>
 #include <asm/virt.h>

-unsigned long elf_hwcap __read_mostly;
-EXPORT_SYMBOL_GPL(elf_hwcap);
+/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+static unsigned long elf_hwcap __read_mostly;

 #ifdef CONFIG_COMPAT
 #define COMPAT_ELF_HWCAP_DEFAULT	\
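
The sanitisation scheme described in the header comment boils down to a per-field fold across CPUs. A minimal standalone sketch of the idea for one unsigned 4-bit FTR_LOWER_SAFE field (hypothetical helper names; the kernel's real bookkeeping lives in arm64_ftr_bits and update_cpu_ftr_reg()):

```c
#include <stdint.h>

/* Extract a 4-bit ID register field at the given shift (illustrative helper). */
static uint64_t field_of(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}

/* Fold a newly-onlined CPU's value into the system-wide snapshot. */
static uint64_t sanitise_lower_safe(uint64_t sys_val, uint64_t cpu_val,
				    unsigned int shift)
{
	uint64_t cur = field_of(sys_val, shift);
	uint64_t new = field_of(cpu_val, shift);
	uint64_t safe = new < cur ? new : cur;	/* lowest common denominator */

	sys_val &= ~(0xfULL << shift);
	return sys_val | (safe << shift);
}
```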
@@ -50,6 +103,33 @@

 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcaps);
+static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
+
+/* Need also bit for ARM64_CB_PATCH */
+DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
+DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+
+/*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+ */
+static bool __read_mostly allow_mismatched_32bit_el0;
+
+/*
+ * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
+ * seen at least one CPU capable of 32-bit EL0.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+/*
+ * Mask of CPUs supporting 32-bit EL0.
+ * Only valid if arm64_mismatched_32bit_el0 is enabled.
+ */
+static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;

 /*
  * Flag to indicate if we have computed the system wide
@@ -57,33 +137,21 @@
  * will be used to determine if a new booting CPU should
  * go through the verification process to make sure that it
  * supports the system capabilities, without using a hotplug
- * notifier.
+ * notifier. This is also used to decide if we could use
+ * the fast path for checking constant CPU caps.
  */
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+static inline void finalize_system_capabilities(void)
 {
-	sys_caps_initialised = true;
+	static_branch_enable(&arm64_const_caps_ready);
 }

-static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
+void dump_cpu_features(void)
 {
 	/* file-wide pr_fmt adds "CPU features: " prefix */
 	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
-	return 0;
 }
-
-static struct notifier_block cpu_hwcaps_notifier = {
-	.notifier_call = dump_cpu_hwcaps
-};
-
-static int __init register_cpu_hwcaps_dumper(void)
-{
-	atomic_notifier_chain_register(&panic_notifier_list,
-				       &cpu_hwcaps_notifier);
-	return 0;
-}
-__initcall(register_cpu_hwcaps_dumper);

 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcap_keys);
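
The switch from a plain bool to a static key is what the amended comment means by a "fast path": once arm64_const_caps_ready is enabled, capability queries can compile down to a patched branch instead of a bitmap test. A sketch of the assumed shape (see cpus_have_const_cap() in <asm/cpufeature.h> for the real inline):

```c
/* Sketch of the fast-path pattern enabled by arm64_const_caps_ready. */
static __always_inline bool sketch_cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		/* Fast path: a branch patched at boot, no memory load. */
		return static_branch_unlikely(&cpu_hwcap_keys[num]);

	/* Slow path, used before the system capabilities are finalized. */
	return test_bit(num, cpu_hwcaps);
}
```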
@@ -116,12 +184,17 @@
 static bool __maybe_unused
 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);

+static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
+
+static bool __system_matches_cap(unsigned int n);

 /*
  * NOTE: Any changes to the visibility of features should be kept in
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
@@ -138,10 +211,30 @@
 };

 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

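Each ARM64_FTR_BITS() entry in these tables encodes one ID register field. A decoded sketch of what the parameters mean (simplified and illustrative; the real structure is arm64_ftr_bits in <asm/cpufeature.h>):

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Simplified sketch of what one ARM64_FTR_BITS(vis, strict, type, shift,
 * width, safe_val) entry above encodes; field names here are illustrative.
 */
struct ftr_field_desc {
	bool	visible;	/* sanitised value exposed to EL0 (FTR_VISIBLE)? */
	bool	strict;		/* mismatch taints the kernel (FTR_STRICT)?      */
	int	type;		/* FTR_LOWER_SAFE / FTR_HIGHER_SAFE / FTR_EXACT  */
	uint8_t	shift, width;	/* position of the (usually 4-bit) field         */
	int64_t	safe_val;	/* fallback when no safe common value exists     */
};
```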
@@ -149,6 +242,9 @@
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
@@ -163,11 +259,60 @@
 };

 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+	/*
+	 * Page size not being supported at Stage-2 is not fatal. You
+	 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
+	 * your favourite nesting hypervisor.
+	 *
+	 * There is a small corner case where the hypervisor explicitly
+	 * advertises a given granule size at Stage-2 (value 2) on some
+	 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
+	 * vCPUs. Although this is not forbidden by the architecture, it
+	 * indicates that the hypervisor is being silly (or buggy).
+	 *
+	 * We make no effort to cope with this and pretend that if these
+	 * fields are inconsistent across vCPUs, then it isn't worth
+	 * trying to bring KVM up.
+	 */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
 	/*
 	 * We already refuse to boot CPUs that don't support our configured
 	 * page size, so we can only detect mismatches for a page size other
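
The TGRAN*_2 entries above use FTR_EXACT with a safe value of 1: there is no meaningful ordering across these encodings, so any mismatch between vCPUs snaps the field straight to "granule not supported at Stage-2". The policy in isolation (standalone sketch):

```c
/*
 * FTR_EXACT in isolation (sketch): no compromise value is computed; a
 * mismatch falls back to safe_val, which is 1 for the fields above.
 */
static long long ftr_exact(long long cur, long long new, long long safe_val)
{
	return new == cur ? new : safe_val;
}
```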
@@ -193,6 +338,11 @@
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
@@ -203,10 +353,18 @@
 };

 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
@@ -225,30 +383,33 @@
 	 * make use of *minLine.
 	 * If we have differing I-cache policies, report it as the weakest - VIPT.
 	 */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

+static struct arm64_ftr_override __ro_after_init no_override = { };
+
 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
 	.name		= "SYS_CTR_EL0",
-	.ftr_bits	= ftr_ctr
+	.ftr_bits	= ftr_ctr,
+	.override	= &no_override,
 };

 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),	/* InnerShr */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),	/* FCSE */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),	/* TCM */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* ShareLvl */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),	/* OuterShr */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* PMSA */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* VMSA */
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
@@ -263,17 +424,27 @@
 };

 static const struct arm64_ftr_bits ftr_mvfr2[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* FPMisc */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* SIMDMisc */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_dczid[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),	/* DZP */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_DZP_SHIFT, 1, 1),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

+static const struct arm64_ftr_bits ftr_id_isar0[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};

 static const struct arm64_ftr_bits ftr_id_isar5[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
@@ -286,27 +457,94 @@
 };

 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* ac2 */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),
+
+	/*
+	 * SpecSEI = 1 indicates that the PE might generate an SError on an
+	 * external abort on speculative read. It is safer to assume that an
+	 * SError might be generated than that it will not be. Hence it has
+	 * been classified as FTR_HIGHER_SAFE.
+	 */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar4[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar6[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* State3 */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),	/* State2 */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* State1 */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* State0 */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_pfr1[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_pfr2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
 	/* [31:28] TraceFilt */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_dfr1[] = {
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

@@ -320,7 +558,7 @@
  * Common ftr bits for a 32bit register with all hidden, strict
  * attributes, with 4bit feature fields and a default safe value of
  * 0. Covers the following 32bit registers:
- * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
+ * id_isar[1-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
  */
 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
@@ -344,12 +582,23 @@
 	ARM64_FTR_END,
 };

-#define ARM64_FTR_REG(id, table) {		\
-	.sys_id = id,				\
-	.reg = 	&(struct arm64_ftr_reg){	\
-		.name = #id,			\
-		.ftr_bits = &((table)[0]),	\
+#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) {	\
+	.sys_id = id,						\
+	.reg = 	&(struct arm64_ftr_reg){			\
+		.name = id_str,					\
+		.override = (ovr),				\
+		.ftr_bits = &((table)[0]),			\
 	}}
+
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr)	\
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
+
+#define ARM64_FTR_REG(id, table)		\
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
+
+struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64isar1_override;

 static const struct __ftr_reg_entry {
 	u32			sys_id;
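
The rewritten macro keeps the original's trick of a compound literal taken by address: at file scope, &(struct arm64_ftr_reg){ ... } creates a static anonymous object per table entry, so no named variable is needed. The same idiom in miniature (hypothetical types, sketch only):

```c
#include <stdint.h>

/* Miniature of the compound-literal idiom used by ARM64_FTR_REG above. */
struct mini_reg {
	const char *name;
	uint32_t sys_id;
};

struct mini_entry {
	uint32_t sys_id;
	struct mini_reg *reg;
};

/* At file scope the compound literal has static storage duration. */
static struct mini_entry mini_table[] = {
	{ .sys_id = 0x1234,
	  .reg = &(struct mini_reg){ .name = "MINI_REG", .sys_id = 0x1234 } },
};
```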
@@ -358,7 +607,7 @@

 	/* Op1 = 0, CRn = 0, CRm = 1 */
 	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
-	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
+	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
 	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
 	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
 	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
@@ -366,23 +615,28 @@
 	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

 	/* Op1 = 0, CRn = 0, CRm = 2 */
-	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
+	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
 	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
-	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
+	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
 	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
 	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+	ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),

 	/* Op1 = 0, CRn = 0, CRm = 3 */
 	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
+	ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
+	ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
+	ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),

 	/* Op1 = 0, CRn = 0, CRm = 4 */
 	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
-	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
-	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
+			       &id_aa64pfr1_override),
+	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),

 	/* Op1 = 0, CRn = 0, CRm = 5 */
 	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -390,11 +644,14 @@

 	/* Op1 = 0, CRn = 0, CRm = 6 */
 	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
-	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
+			       &id_aa64isar1_override),
+	ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),

 	/* Op1 = 0, CRn = 0, CRm = 7 */
 	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
-	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
+			       &id_aa64mmfr1_override),
 	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

 	/* Op1 = 0, CRn = 1, CRm = 2 */
@@ -414,16 +671,16 @@
 }

 /*
- * get_arm64_ftr_reg - Lookup a feature register entry using its
- * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
- * ascending order of sys_id , we use binary search to find a matching
+ * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
+ * its sys_reg() encoding. With the array arm64_ftr_regs sorted in the
+ * ascending order of sys_id, we use binary search to find a matching
  * entry.
  *
  * returns - Upon success, matching ftr_reg entry for id.
  *         - NULL on failure. It is up to the caller to decide
  *           the impact of a failure.
  */
-static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
 {
 	const struct __ftr_reg_entry *ret;

@@ -435,6 +692,27 @@
 	if (ret)
 		return ret->reg;
 	return NULL;
+}
+
+/*
+ * get_arm64_ftr_reg - Looks up a feature register entry using
+ * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn().
+ *
+ * returns - Upon success, matching ftr_reg entry for id.
+ *         - NULL on failure but with a WARN_ON().
+ */
+static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+{
+	struct arm64_ftr_reg *reg;
+
+	reg = get_arm64_ftr_reg_nowarn(sys_id);
+
+	/*
+	 * Requesting a non-existent register search is an error. Warn
+	 * and let the caller handle it.
+	 */
+	WARN_ON(!reg);
+	return reg;
 }

 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
@@ -462,7 +740,7 @@
 	case FTR_HIGHER_OR_ZERO_SAFE:
 		if (!cur || !new)
 			break;
-		/* Fallthrough */
+		fallthrough;
 	case FTR_HIGHER_SAFE:
 		ret = new > cur ? new : cur;
 		break;
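
The /* Fallthrough */ comment becomes the fallthrough pseudo-keyword so the compiler can check it. For context, the enclosing selection logic is roughly the following (sketch assuming the kernel's FTR_* constants and fallthrough macro; see arm64_ftr_safe_value() for the full version):

```c
/* Sketch of the arm64_ftr_safe_value() selection logic around the hunk above. */
static long long sketch_safe_value(int type, long long safe_val,
				   long long new, long long cur)
{
	switch (type) {
	case FTR_EXACT:
		return safe_val;		/* no compromise possible */
	case FTR_LOWER_SAFE:
		return new < cur ? new : cur;	/* lower value is safer */
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			return 0;		/* zero wins if either side is zero */
		fallthrough;
	case FTR_HIGHER_SAFE:
		return new > cur ? new : cur;	/* higher value is safer */
	}
	return safe_val;
}
```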
@@ -475,11 +753,52 @@

 static void __init sort_ftr_regs(void)
 {
-	int i;
+	unsigned int i;

-	/* Check that the array is sorted so that we can do the binary search */
-	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
+	for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
+		const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
+		const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
+		unsigned int j = 0;
+
+		/*
+		 * Features here must be sorted in descending order with respect
+		 * to their shift values and should not overlap with each other.
+		 */
+		for (; ftr_bits->width != 0; ftr_bits++, j++) {
+			unsigned int width = ftr_reg->ftr_bits[j].width;
+			unsigned int shift = ftr_reg->ftr_bits[j].shift;
+			unsigned int prev_shift;
+
+			WARN((shift + width) > 64,
+				"%s has invalid feature at shift %d\n",
+				ftr_reg->name, shift);
+
+			/*
+			 * Skip the first feature. There is nothing to
+			 * compare against for now.
+			 */
+			if (j == 0)
+				continue;
+
+			prev_shift = ftr_reg->ftr_bits[j - 1].shift;
+			WARN((shift + width) > prev_shift,
+				"%s has feature overlap at shift %d\n",
+				ftr_reg->name, shift);
+		}
+
+		/*
+		 * Skip the first register. There is nothing to
+		 * compare against for now.
+		 */
+		if (i == 0)
+			continue;
+		/*
+		 * Registers here must be sorted in ascending order with respect
+		 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
+		 * to work correctly.
+		 */
 		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+	}
 }

 /*
@@ -488,7 +807,7 @@
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
 {
 	u64 val = 0;
 	u64 strict_mask = ~0x0ULL;
@@ -498,11 +817,39 @@
 	const struct arm64_ftr_bits *ftrp;
 	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

-	BUG_ON(!reg);
+	if (!reg)
+		return;

-	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
+	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
 		u64 ftr_mask = arm64_ftr_mask(ftrp);
 		s64 ftr_new = arm64_ftr_value(ftrp, new);
+		s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);
+
+		if ((ftr_mask & reg->override->mask) == ftr_mask) {
+			s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
+			char *str = NULL;
+
+			if (ftr_ovr != tmp) {
+				/* Unsafe, remove the override */
+				reg->override->mask &= ~ftr_mask;
+				reg->override->val &= ~ftr_mask;
+				tmp = ftr_ovr;
+				str = "ignoring override";
+			} else if (ftr_new != tmp) {
+				/* Override was valid */
+				ftr_new = tmp;
+				str = "forced";
+			} else if (ftr_ovr == tmp) {
+				/* Override was the safe value */
+				str = "already set";
+			}
+
+			if (str)
+				pr_warn("%s[%d:%d]: %s to %llx\n",
+					reg->name,
+					ftrp->shift + ftrp->width - 1,
+					ftrp->shift, str, tmp);
+		}

 		val = arm64_ftr_set_value(ftrp, val, ftr_new);

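The new override block resolves each field three ways: an override that is less safe than the hardware value is discarded ("ignoring override"), a safe-and-different override wins ("forced"), and an override equal to the hardware value is a no-op ("already set"). Stripped of the register bookkeeping, the decision is (sketch):

```c
/* Sketch of the per-field override resolution above. */
enum ovr_outcome { OVR_IGNORED, OVR_FORCED, OVR_ALREADY_SET };

static enum ovr_outcome resolve_override(long long hw, long long ovr,
					 long long safe /* = safe_value(ovr, hw) */,
					 long long *out)
{
	if (ovr != safe) {	/* override less safe than hardware: drop it */
		*out = hw;
		return OVR_IGNORED;
	}
	if (hw != safe) {	/* override is safe and different: it wins */
		*out = ovr;
		return OVR_FORCED;
	}
	*out = hw;		/* override merely restates the hardware value */
	return OVR_ALREADY_SET;
}
```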
@@ -525,7 +872,55 @@
 }

 extern const struct arm64_cpu_capabilities arm64_errata[];
+static const struct arm64_cpu_capabilities arm64_features[];
+
+static void __init
+init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
+{
+	for (; caps->matches; caps++) {
+		if (WARN(caps->capability >= ARM64_NCAPS,
+			"Invalid capability %d\n", caps->capability))
+			continue;
+		if (WARN(cpu_hwcaps_ptrs[caps->capability],
+			"Duplicate entry for capability %d\n",
+			caps->capability))
+			continue;
+		cpu_hwcaps_ptrs[caps->capability] = caps;
+	}
+}
+
+static void __init init_cpu_hwcaps_indirect_list(void)
+{
+	init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+	init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
+}
+
 static void __init setup_boot_cpu_capabilities(void);
+
+static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
+{
+	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+	init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+	init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+	init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+	init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+	init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}

 void __init init_cpu_features(struct cpuinfo_arm64 *info)
 {
@@ -539,6 +934,7 @@
 	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
 	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
+	init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
@@ -546,29 +942,19 @@
 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);

-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
-		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
-		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
-		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
-		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
-		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
-		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
-		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
-		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
-		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
-		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
-		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
-		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
-		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
-		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
-		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
-	}
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+		init_32bit_cpu_features(&info->aarch32);

 	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
 		sve_init_vq_map();
 	}
+
+	/*
+	 * Initialize the indirect array of CPU hwcaps capabilities pointers
+	 * before we handle the boot CPU below.
+	 */
+	init_cpu_hwcaps_indirect_list();

 	/*
 	 * Detect and enable early CPU capabilities based on the boot CPU,
@@ -598,13 +984,121 @@
 {
 	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

-	BUG_ON(!regp);
+	if (!regp)
+		return 0;
+
 	update_cpu_ftr_reg(regp, val);
 	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
 		return 0;
 	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
 			regp->name, boot, cpu, val);
 	return 1;
+}
+
+static void relax_cpu_ftr_reg(u32 sys_id, int field)
+{
+	const struct arm64_ftr_bits *ftrp;
+	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
+
+	if (!regp)
+		return;
+
+	for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
+		if (ftrp->shift == field) {
+			regp->strict_mask &= ~arm64_ftr_mask(ftrp);
+			break;
+		}
+	}
+
+	/* Bogus field? */
+	WARN_ON(!ftrp->width);
+}
+
+static void update_mismatched_32bit_el0_cpu_features(struct cpuinfo_arm64 *info,
+						     struct cpuinfo_arm64 *boot)
+{
+	static bool boot_cpu_32bit_regs_overridden = false;
+
+	if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
+		return;
+
+	if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
+		return;
+
+	boot->aarch32 = info->aarch32;
+	init_32bit_cpu_features(&boot->aarch32);
+	boot_cpu_32bit_regs_overridden = true;
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
+				     struct cpuinfo_32bit *boot)
+{
+	int taint = 0;
+	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	/*
+	 * If we don't have AArch32 at EL1, then relax the strictness of
+	 * EL1-dependent register fields to avoid spurious sanity check fails.
+	 */
+	if (!id_aa64pfr0_32bit_el1(pfr0)) {
+		relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
+	}
+
+	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
+				      info->reg_id_dfr0, boot->reg_id_dfr0);
+	taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
+				      info->reg_id_dfr1, boot->reg_id_dfr1);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
+				      info->reg_id_isar0, boot->reg_id_isar0);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
+				      info->reg_id_isar1, boot->reg_id_isar1);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
+				      info->reg_id_isar2, boot->reg_id_isar2);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
+				      info->reg_id_isar3, boot->reg_id_isar3);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
+				      info->reg_id_isar4, boot->reg_id_isar4);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
+				      info->reg_id_isar5, boot->reg_id_isar5);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
+				      info->reg_id_isar6, boot->reg_id_isar6);
+
+	/*
+	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+	 * ACTLR formats could differ across CPUs and therefore would have to
+	 * be trapped for virtualization anyway.
+	 */
+	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
+				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
+				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
+				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
+				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
+				      info->reg_id_mmfr4, boot->reg_id_mmfr4);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
+				      info->reg_id_mmfr5, boot->reg_id_mmfr5);
+	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
+				      info->reg_id_pfr0, boot->reg_id_pfr0);
+	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
+				      info->reg_id_pfr1, boot->reg_id_pfr1);
+	taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
+				      info->reg_id_pfr2, boot->reg_id_pfr2);
+	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
+				      info->reg_mvfr0, boot->reg_mvfr0);
+	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
+				      info->reg_mvfr1, boot->reg_mvfr1);
+	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
+				      info->reg_mvfr2, boot->reg_mvfr2);
+
+	return taint;
 }

 /*
@@ -656,6 +1150,8 @@
 				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
 	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
 				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
+	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
+				      info->reg_id_aa64isar2, boot->reg_id_aa64isar2);

 	/*
 	 * Differing PARange support is fine as long as all peripherals and
@@ -677,61 +1173,28 @@
 	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
 				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

-	/*
-	 * If we have AArch32, we care about 32-bit features for compat.
-	 * If the system doesn't support AArch32, don't update them.
-	 */
-	if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
-		id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-
-		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
-					info->reg_id_dfr0, boot->reg_id_dfr0);
-		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
-					info->reg_id_isar0, boot->reg_id_isar0);
-		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
-					info->reg_id_isar1, boot->reg_id_isar1);
-		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
-					info->reg_id_isar2, boot->reg_id_isar2);
-		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
-					info->reg_id_isar3, boot->reg_id_isar3);
-		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
-					info->reg_id_isar4, boot->reg_id_isar4);
-		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
-					info->reg_id_isar5, boot->reg_id_isar5);
-
-		/*
-		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
-		 * ACTLR formats could differ across CPUs and therefore would have to
-		 * be trapped for virtualization anyway.
-		 */
-		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
-					info->reg_id_mmfr0, boot->reg_id_mmfr0);
-		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
-					info->reg_id_mmfr1, boot->reg_id_mmfr1);
-		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
-					info->reg_id_mmfr2, boot->reg_id_mmfr2);
-		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
-					info->reg_id_mmfr3, boot->reg_id_mmfr3);
-		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
-					info->reg_id_pfr0, boot->reg_id_pfr0);
-		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
-					info->reg_id_pfr1, boot->reg_id_pfr1);
-		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
-					info->reg_mvfr0, boot->reg_mvfr0);
-		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
-					info->reg_mvfr1, boot->reg_mvfr1);
-		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
-					info->reg_mvfr2, boot->reg_mvfr2);
-	}
-
 	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
 					info->reg_zcr, boot->reg_zcr);

 		/* Probe vector lengths, unless we already gave up on SVE */
 		if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
-		    !sys_caps_initialised)
+		    !system_capabilities_finalized())
 			sve_update_vq_map();
+	}
+
+	/*
+	 * If we don't have AArch32 at all then skip the checks entirely
+	 * as the register values may be UNKNOWN and we're not going to be
+	 * using them for anything.
+	 *
+	 * This relies on a sanitised view of the AArch64 ID registers
+	 * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
+	 */
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+		update_mismatched_32bit_el0_cpu_features(info, boot);
+		taint |= update_32bit_cpu_features(cpu, &info->aarch32,
+						   &boot->aarch32);
 	}

 	/*
@@ -748,40 +1211,50 @@
 {
 	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

-	/* We shouldn't get a request for an unsupported register */
-	BUG_ON(!regp);
+	if (!regp)
+		return 0;
 	return regp->sys_val;
 }
+EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);

 #define read_sysreg_case(r)	\
-	case r:		return read_sysreg_s(r)
+	case r:		val = read_sysreg_s(r); break;

 /*
  * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
  * Read the system register on the current CPU
  */
-static u64 __read_sysreg_by_encoding(u32 sys_id)
+u64 __read_sysreg_by_encoding(u32 sys_id)
 {
+	struct arm64_ftr_reg *regp;
+	u64 val;
+
 	switch (sys_id) {
 	read_sysreg_case(SYS_ID_PFR0_EL1);
 	read_sysreg_case(SYS_ID_PFR1_EL1);
+	read_sysreg_case(SYS_ID_PFR2_EL1);
 	read_sysreg_case(SYS_ID_DFR0_EL1);
+	read_sysreg_case(SYS_ID_DFR1_EL1);
 	read_sysreg_case(SYS_ID_MMFR0_EL1);
 	read_sysreg_case(SYS_ID_MMFR1_EL1);
 	read_sysreg_case(SYS_ID_MMFR2_EL1);
 	read_sysreg_case(SYS_ID_MMFR3_EL1);
+	read_sysreg_case(SYS_ID_MMFR4_EL1);
+	read_sysreg_case(SYS_ID_MMFR5_EL1);
 	read_sysreg_case(SYS_ID_ISAR0_EL1);
 	read_sysreg_case(SYS_ID_ISAR1_EL1);
 	read_sysreg_case(SYS_ID_ISAR2_EL1);
 	read_sysreg_case(SYS_ID_ISAR3_EL1);
 	read_sysreg_case(SYS_ID_ISAR4_EL1);
 	read_sysreg_case(SYS_ID_ISAR5_EL1);
+	read_sysreg_case(SYS_ID_ISAR6_EL1);
 	read_sysreg_case(SYS_MVFR0_EL1);
 	read_sysreg_case(SYS_MVFR1_EL1);
 	read_sysreg_case(SYS_MVFR2_EL1);

 	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
 	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
+	read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
 	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
 	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
 	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
@@ -789,6 +1262,7 @@
 	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
 	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
 	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
+	read_sysreg_case(SYS_ID_AA64ISAR2_EL1);

 	read_sysreg_case(SYS_CNTFRQ_EL0);
 	read_sysreg_case(SYS_CTR_EL0);
@@ -798,6 +1272,14 @@
 		BUG();
 		return 0;
 	}
+
+	regp = get_arm64_ftr_reg(sys_id);
+	if (regp) {
+		val &= ~regp->override->mask;
+		val |= (regp->override->val & regp->override->mask);
+	}
+
+	return val;
 }

 #include <linux/irqchip/arm-gic-v3.h>
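
The tail added to __read_sysreg_by_encoding() splices the surviving override bits into the raw register value with the usual clear-then-or on the mask. Worked in isolation (sketch):

```c
#include <stdint.h>

/* The mask/merge above as a standalone helper (sketch). */
static uint64_t apply_override(uint64_t val, uint64_t ovr_mask, uint64_t ovr_val)
{
	val &= ~ovr_mask;		/* clear the overridden fields       */
	val |= (ovr_val & ovr_mask);	/* splice in the override's contents */
	return val;
}

/* e.g. apply_override(0x123, 0x0f0, 0x050) == 0x153 */
```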
@@ -822,6 +1304,54 @@
 	val = __read_sysreg_by_encoding(entry->sys_reg);

 	return feature_matches(val, entry);
+}
+
+const struct cpumask *system_32bit_el0_cpumask(void)
+{
+	if (!system_supports_32bit_el0())
+		return cpu_none_mask;
+
+	if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+		return cpu_32bit_el0_mask;
+
+	return cpu_possible_mask;
+}
+EXPORT_SYMBOL_GPL(system_32bit_el0_cpumask);
+
+static int __init parse_32bit_el0_param(char *str)
+{
+	allow_mismatched_32bit_el0 = true;
+	return 0;
+}
+early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param);
+
+static ssize_t aarch32_el0_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const struct cpumask *mask = system_32bit_el0_cpumask();
+
+	return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask));
+}
+static const DEVICE_ATTR_RO(aarch32_el0);
+
+static int __init aarch32_el0_sysfs_init(void)
+{
+	if (!allow_mismatched_32bit_el0)
+		return 0;
+
+	return device_create_file(cpu_subsys.dev_root, &dev_attr_aarch32_el0);
+}
+device_initcall(aarch32_el0_sysfs_init);
+
+static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	if (!has_cpuid_feature(entry, scope))
+		return allow_mismatched_32bit_el0;
+
+	if (scope == SCOPE_SYSTEM)
+		pr_info("detected: 32-bit EL0 Support\n");
+
+	return true;
 }

 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
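
system_32bit_el0_cpumask() gives callers one answer for "where can a compat task run": nowhere, the recorded 32-bit-capable subset, or anywhere. A hypothetical caller would use it like any other cpumask source (sketch; the function and mask are real, the caller is illustrative):

```c
/* Hypothetical caller: keep a 32-bit task on CPUs that can run it. */
static void sketch_fixup_compat_affinity(struct task_struct *p)
{
	const struct cpumask *mask = system_32bit_el0_cpumask();

	/* If the task currently sits on a 64-bit-only CPU, migrate it. */
	if (!cpumask_test_cpu(task_cpu(p), mask))
		WARN_ON(set_cpus_allowed_ptr(p, mask));
}
```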
@@ -865,9 +1395,21 @@
 	if (scope == SCOPE_SYSTEM)
 		ctr = arm64_ftr_reg_ctrel0.sys_val;
 	else
-		ctr = read_cpuid_cachetype();
+		ctr = read_cpuid_effective_cachetype();

 	return ctr & BIT(CTR_IDC_SHIFT);
+}
+
+static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
+{
+	/*
+	 * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
+	 * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
+	 * to the CTR_EL0 on this CPU and emulate it with the real/safe
+	 * value.
+	 */
+	if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
+		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }

 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
@@ -883,6 +1425,60 @@
 	return ctr & BIT(CTR_DIC_SHIFT);
 }

+static bool __maybe_unused
+has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	/*
+	 * Kdump isn't guaranteed to power-off all secondary CPUs, CNP
+	 * may share TLB entries with a CPU stuck in the crashed
+	 * kernel.
+	 */
+	if (is_kdump_kernel())
+		return false;
+
+	return has_cpuid_feature(entry, scope);
+}
+
+/*
+ * This check is triggered during the early boot before the cpufeature
+ * is initialised. Checking the status on the local CPU allows the boot
+ * CPU to detect the need for non-global mappings and thus avoiding a
+ * pagetable re-write after all the CPUs are booted. This check will be
+ * anyway run on individual CPUs, allowing us to get the consistent
+ * state once the SMP CPUs are up and thus make the switch to non-global
+ * mappings if required.
+ */
+bool kaslr_requires_kpti(void)
+{
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return false;
+
+	/*
+	 * E0PD does a similar job to KPTI so can be used instead
+	 * where available.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+		if (cpuid_feature_extract_unsigned_field(mmfr2,
+						ID_AA64MMFR2_E0PD_SHIFT))
+			return false;
+	}
+
+	/*
+	 * Systems affected by Cavium erratum 27456 are incompatible
+	 * with KPTI.
+	 */
+	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+		extern const struct midr_range cavium_erratum_27456_cpus[];
+
+		if (is_midr_in_range_list(read_cpuid_id(),
+					  cavium_erratum_27456_cpus))
+			return false;
+	}
+
+	return kaslr_offset() > 0;
+}
+
 static bool __meltdown_safe = true;
 static int __kpti_forced;	/* 0: not forced, >0: forced on, <0: forced off */

....@@ -893,6 +1489,7 @@
8931489 static const struct midr_range kpti_safe_list[] = {
8941490 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
8951491 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
1492
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
8961493 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
8971494 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
8981495 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
....@@ -900,6 +1497,11 @@
9001497 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
9011498 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
9021499 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
1500
+ MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
1501
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD),
1502
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
1503
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
1504
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
9031505 { /* sentinel */ }
9041506 };
9051507 char const *str = "kpti command line option";
....@@ -925,7 +1527,7 @@
9251527 }
9261528
9271529 /* Useful for KASLR robustness */
928
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
1530
+ if (kaslr_requires_kpti()) {
9291531 if (!__kpti_forced) {
9301532 str = "KASLR";
9311533 __kpti_forced = 1;
....@@ -960,10 +1562,20 @@
9601562 extern kpti_remap_fn idmap_kpti_install_ng_mappings;
9611563 kpti_remap_fn *remap_fn;
9621564
963
- static bool kpti_applied = false;
9641565 int cpu = smp_processor_id();
9651566
966
- if (kpti_applied)
1567
+ if (__this_cpu_read(this_cpu_vector) == vectors) {
1568
+ const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
1569
+
1570
+ __this_cpu_write(this_cpu_vector, v);
1571
+ }
1572
+
1573
+ /*
1574
+ * We don't need to rewrite the page-tables if either we've done
1575
+ * it already or we have KASLR enabled and therefore have not
1576
+ * created any global mappings at all.
1577
+ */
1578
+ if (arm64_use_ng_mappings)
9671579 return;
9681580
9691581 remap_fn = (void *)__pa_function(idmap_kpti_install_ng_mappings);
....@@ -973,7 +1585,7 @@
9731585 cpu_uninstall_idmap();
9741586
9751587 if (!cpu)
976
- kpti_applied = true;
1588
+ arm64_use_ng_mappings = true;
9771589
9781590 return;
9791591 }
....@@ -1004,6 +1616,7 @@
10041616
10051617 write_sysreg(tcr, tcr_el1);
10061618 isb();
1619
+ local_flush_tlb_all();
10071620 }
10081621
10091622 static bool cpu_has_broken_dbm(void)
....@@ -1012,6 +1625,11 @@
10121625 static const struct midr_range cpus[] = {
10131626 #ifdef CONFIG_ARM64_ERRATUM_1024718
10141627 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1628
+ /* Kryo4xx Silver (rdpe => r1p0) */
1629
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
1630
+#endif
1631
+#ifdef CONFIG_ARM64_ERRATUM_2051678
1632
+ MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
10151633 #endif
10161634 {},
10171635 };
....@@ -1062,6 +1680,60 @@
10621680
10631681 #endif
10641682
1683
+#ifdef CONFIG_ARM64_AMU_EXTN
1684
+
1685
+/*
1686
+ * The "amu_cpus" cpumask only signals that the CPU implementation for the
1687
+ * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide
1688
+ * information regarding all the events that it supports. When a CPU bit is
1689
+ * set in the cpumask, the user of this feature can only rely on the presence
1690
+ * of the 4 fixed counters for that CPU. But this does not guarantee that the
1691
+ * counters are enabled or access to these counters is enabled by code
1692
+ * executed at higher exception levels (firmware).
1693
+ */
1694
+static struct cpumask amu_cpus __read_mostly;
1695
+
1696
+bool cpu_has_amu_feat(int cpu)
1697
+{
1698
+ return cpumask_test_cpu(cpu, &amu_cpus);
1699
+}
1700
+
1701
+/* Initialize the use of AMU counters for frequency invariance */
1702
+extern void init_cpu_freq_invariance_counters(void);
1703
+
1704
+static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
1705
+{
1706
+ if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
1707
+ pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
1708
+ smp_processor_id());
1709
+ cpumask_set_cpu(smp_processor_id(), &amu_cpus);
1710
+
1711
+ /* 0 reference values signal broken/disabled counters */
1712
+ if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168))
1713
+ init_cpu_freq_invariance_counters();
1714
+ }
1715
+}
1716
+
1717
+static bool has_amu(const struct arm64_cpu_capabilities *cap,
1718
+ int __unused)
1719
+{
1720
+ /*
1721
+ * The AMU extension is a non-conflicting feature: the kernel can
1722
+ * safely run a mix of CPUs with and without support for the
1723
+ * activity monitors extension. Therefore, unconditionally enable
1724
+ * the capability to allow any late CPU to use the feature.
1725
+ *
1726
+ * With this feature unconditionally enabled, the cpu_enable
1727
+ * function will be called for all CPUs that match the criteria,
1728
+ * including secondary and hotplugged, marking this feature as
1729
+ * present on that respective CPU. The enable function will also
1730
+ * print a detection message.
1731
+ */
1732
+
1733
+ return true;
1734
+}
1735
+#endif
1736
+
10651737 #ifdef CONFIG_ARM64_VHE
10661738 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
10671739 {
....@@ -1078,7 +1750,7 @@
10781750 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
10791751 * do anything here.
10801752 */
1081
- if (!alternatives_applied)
1753
+ if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
10821754 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
10831755 }
10841756 #endif
....@@ -1091,53 +1763,174 @@
10911763 WARN_ON(val & (7 << 27 | 7 << 21));
10921764 }
10931765
1094
-#ifdef CONFIG_ARM64_SSBD
1095
-static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
1766
+#ifdef CONFIG_ARM64_PAN
1767
+static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
10961768 {
1097
- if (user_mode(regs))
1098
- return 1;
1769
+ /*
1770
+ * We modify PSTATE. This won't work from irq context as the PSTATE
1771
+ * is discarded once we return from the exception.
1772
+ */
1773
+ WARN_ON_ONCE(in_interrupt());
10991774
1100
- if (instr & BIT(PSTATE_Imm_shift))
1101
- regs->pstate |= PSR_SSBS_BIT;
1102
- else
1103
- regs->pstate &= ~PSR_SSBS_BIT;
1775
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
1776
+ set_pstate_pan(1);
1777
+}
1778
+#endif /* CONFIG_ARM64_PAN */
11041779
1105
- arm64_skip_faulting_instruction(regs, 4);
1106
- return 0;
1780
+#ifdef CONFIG_ARM64_RAS_EXTN
1781
+static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
1782
+{
1783
+ /* Firmware may have left a deferred SError in this register. */
1784
+ write_sysreg_s(0, SYS_DISR_EL1);
1785
+}
1786
+#endif /* CONFIG_ARM64_RAS_EXTN */
1787
+
1788
+#ifdef CONFIG_ARM64_PTR_AUTH
1789
+static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
1790
+{
1791
+ int boot_val, sec_val;
1792
+
1793
+ /* We don't expect to be called with SCOPE_SYSTEM */
1794
+ WARN_ON(scope == SCOPE_SYSTEM);
1795
+ /*
1796
+ * The ptr-auth feature levels are not intercompatible with lower
1797
+ * levels. Hence we must match ptr-auth feature level of the secondary
1798
+ * CPUs with that of the boot CPU. The level of boot cpu is fetched
1799
+ * from the sanitised register whereas direct register read is done for
1800
+ * the secondary CPUs.
1801
+ * The sanitised feature state is guaranteed to match that of the
1802
+ * boot CPU as a mismatched secondary CPU is parked before it gets
1803
+ * a chance to update the state, with the capability.
1804
+ */
1805
+ boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg),
1806
+ entry->field_pos, entry->sign);
1807
+ if (scope & SCOPE_BOOT_CPU)
1808
+ return boot_val >= entry->min_field_value;
1809
+ /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */
1810
+ sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
1811
+ entry->field_pos, entry->sign);
1812
+ return sec_val == boot_val;
11071813 }
11081814
1109
-static struct undef_hook ssbs_emulation_hook = {
1110
- .instr_mask = ~(1U << PSTATE_Imm_shift),
1111
- .instr_val = 0xd500401f | PSTATE_SSBS,
1112
- .fn = ssbs_emulation_handler,
1113
-};
1114
-
1115
-static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
1815
+static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
1816
+ int scope)
11161817 {
1117
- static bool undef_hook_registered = false;
1118
- static DEFINE_RAW_SPINLOCK(hook_lock);
1119
-
1120
- raw_spin_lock(&hook_lock);
1121
- if (!undef_hook_registered) {
1122
- register_undef_hook(&ssbs_emulation_hook);
1123
- undef_hook_registered = true;
1124
- }
1125
- raw_spin_unlock(&hook_lock);
1126
-
1127
- if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
1128
- sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
1129
- arm64_set_ssbd_mitigation(false);
1130
- } else {
1131
- arm64_set_ssbd_mitigation(true);
1132
- }
1818
+ return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
1819
+ has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
11331820 }
1134
-#endif /* CONFIG_ARM64_SSBD */
1821
+
1822
+static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
1823
+ int __unused)
1824
+{
1825
+ return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
1826
+ __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
1827
+}
1828
+#endif /* CONFIG_ARM64_PTR_AUTH */
1829
+
1830
+#ifdef CONFIG_ARM64_E0PD
1831
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
1832
+{
1833
+ if (this_cpu_has_cap(ARM64_HAS_E0PD))
1834
+ sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
1835
+}
1836
+#endif /* CONFIG_ARM64_E0PD */
1837
+
1838
+#ifdef CONFIG_ARM64_PSEUDO_NMI
1839
+static bool enable_pseudo_nmi;
1840
+
1841
+static int __init early_enable_pseudo_nmi(char *p)
1842
+{
1843
+ return strtobool(p, &enable_pseudo_nmi);
1844
+}
1845
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
1846
+
1847
+static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
1848
+ int scope)
1849
+{
1850
+ return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
1851
+}
1852
+#endif
1853
+
1854
+#ifdef CONFIG_ARM64_BTI
1855
+static void bti_enable(const struct arm64_cpu_capabilities *__unused)
1856
+{
1857
+ /*
1858
+ * Use of X16/X17 for tail-calls and trampolines that jump to
1859
+ * function entry points using BR is a requirement for
1860
+ * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI.
1861
+ * So, be strict and forbid other BRs using other registers to
1862
+ * jump onto a PACIxSP instruction:
1863
+ */
1864
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1);
1865
+ isb();
1866
+}
1867
+#endif /* CONFIG_ARM64_BTI */
1868
+
1869
+#ifdef CONFIG_ARM64_MTE
1870
+static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
1871
+{
1872
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
1873
+
1874
+ mte_cpu_setup();
1875
+
1876
+ /*
1877
+ * Clear the tags in the zero page. This needs to be done via the
1878
+ * linear map which has the Tagged attribute.
1879
+ */
1880
+ if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
1881
+ mte_clear_page_tags(lm_alias(empty_zero_page));
1882
+
1883
+ kasan_init_hw_tags_cpu();
1884
+}
1885
+#endif /* CONFIG_ARM64_MTE */
1886
+
1887
+#ifdef CONFIG_KVM
1888
+static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
1889
+{
1890
+ if (kvm_get_mode() != KVM_MODE_PROTECTED)
1891
+ return false;
1892
+
1893
+ if (is_kernel_in_hyp_mode()) {
1894
+ pr_warn("Protected KVM not available with VHE\n");
1895
+ return false;
1896
+ }
1897
+
1898
+ return true;
1899
+}
1900
+#endif /* CONFIG_KVM */
1901
+
1902
+static void elf_hwcap_fixup(void)
1903
+{
1904
+#ifdef CONFIG_ARM64_ERRATUM_1742098
1905
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
1906
+ compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
1907
+#endif /* ARM64_ERRATUM_1742098 */
1908
+}
1909
+
1910
+/* Internal helper functions to match cpu capability type */
1911
+static bool
1912
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
1913
+{
1914
+ return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
1915
+}
1916
+
1917
+static bool
1918
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
1919
+{
1920
+ return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
1921
+}
1922
+
1923
+static bool
1924
+cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
1925
+{
1926
+ return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
1927
+}
11351928
11361929 static const struct arm64_cpu_capabilities arm64_features[] = {
11371930 {
11381931 .desc = "GIC system register CPU interface",
11391932 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1140
- .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1933
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
11411934 .matches = has_useable_gicv3_cpuif,
11421935 .sys_reg = SYS_ID_AA64PFR0_EL1,
11431936 .field_pos = ID_AA64PFR0_GIC_SHIFT,
....@@ -1157,7 +1950,7 @@
11571950 .cpu_enable = cpu_enable_pan,
11581951 },
11591952 #endif /* CONFIG_ARM64_PAN */
1160
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
1953
+#ifdef CONFIG_ARM64_LSE_ATOMICS
11611954 {
11621955 .desc = "LSE atomic instructions",
11631956 .capability = ARM64_HAS_LSE_ATOMICS,
....@@ -1168,7 +1961,7 @@
11681961 .sign = FTR_UNSIGNED,
11691962 .min_field_value = 2,
11701963 },
1171
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
1964
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
11721965 {
11731966 .desc = "Software prefetching using PRFM",
11741967 .capability = ARM64_HAS_NO_HW_PREFETCH,
....@@ -1207,15 +2000,32 @@
12072000 },
12082001 #endif /* CONFIG_ARM64_VHE */
12092002 {
1210
- .desc = "32-bit EL0 Support",
1211
- .capability = ARM64_HAS_32BIT_EL0,
2003
+ .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
12122004 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1213
- .matches = has_cpuid_feature,
2005
+ .matches = has_32bit_el0,
12142006 .sys_reg = SYS_ID_AA64PFR0_EL1,
12152007 .sign = FTR_UNSIGNED,
12162008 .field_pos = ID_AA64PFR0_EL0_SHIFT,
12172009 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
12182010 },
2011
+#ifdef CONFIG_KVM
2012
+ {
2013
+ .desc = "32-bit EL1 Support",
2014
+ .capability = ARM64_HAS_32BIT_EL1,
2015
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2016
+ .matches = has_cpuid_feature,
2017
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
2018
+ .sign = FTR_UNSIGNED,
2019
+ .field_pos = ID_AA64PFR0_EL1_SHIFT,
2020
+ .min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT,
2021
+ },
2022
+ {
2023
+ .desc = "Protected KVM",
2024
+ .capability = ARM64_KVM_PROTECTED_MODE,
2025
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2026
+ .matches = is_kvm_protected_mode,
2027
+ },
2028
+#endif
12192029 {
12202030 .desc = "Kernel page table isolation (KPTI)",
12212031 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
....@@ -1248,6 +2058,16 @@
12482058 .field_pos = ID_AA64ISAR1_DPB_SHIFT,
12492059 .min_field_value = 1,
12502060 },
2061
+ {
2062
+ .desc = "Data cache clean to Point of Deep Persistence",
2063
+ .capability = ARM64_HAS_DCPODP,
2064
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2065
+ .matches = has_cpuid_feature,
2066
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
2067
+ .sign = FTR_UNSIGNED,
2068
+ .field_pos = ID_AA64ISAR1_DPB_SHIFT,
2069
+ .min_field_value = 2,
2070
+ },
12512071 #endif
12522072 #ifdef CONFIG_ARM64_SVE
12532073 {
....@@ -1275,11 +2095,30 @@
12752095 .cpu_enable = cpu_clear_disr,
12762096 },
12772097 #endif /* CONFIG_ARM64_RAS_EXTN */
2098
+#ifdef CONFIG_ARM64_AMU_EXTN
2099
+ {
2100
+ /*
2101
+ * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
2102
+ * Therefore, don't provide .desc as we don't want the detection
2103
+ * message to be shown until at least one CPU is detected to
2104
+ * support the feature.
2105
+ */
2106
+ .capability = ARM64_HAS_AMU_EXTN,
2107
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2108
+ .matches = has_amu,
2109
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
2110
+ .sign = FTR_UNSIGNED,
2111
+ .field_pos = ID_AA64PFR0_AMU_SHIFT,
2112
+ .min_field_value = ID_AA64PFR0_AMU,
2113
+ .cpu_enable = cpu_amu_enable,
2114
+ },
2115
+#endif /* CONFIG_ARM64_AMU_EXTN */
12782116 {
12792117 .desc = "Data cache clean to the PoU not required for I/D coherence",
12802118 .capability = ARM64_HAS_CACHE_IDC,
12812119 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
12822120 .matches = has_cache_idc,
2121
+ .cpu_enable = cpu_emulate_effective_ctr,
12832122 },
12842123 {
12852124 .desc = "Instruction cache invalidation not required for I/D coherence",
....@@ -1297,6 +2136,26 @@
12972136 .min_field_value = 1,
12982137 .matches = has_cpuid_feature,
12992138 .cpu_enable = cpu_has_fwb,
2139
+ },
2140
+ {
2141
+ .desc = "ARMv8.4 Translation Table Level",
2142
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2143
+ .capability = ARM64_HAS_ARMv8_4_TTL,
2144
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
2145
+ .sign = FTR_UNSIGNED,
2146
+ .field_pos = ID_AA64MMFR2_TTL_SHIFT,
2147
+ .min_field_value = 1,
2148
+ .matches = has_cpuid_feature,
2149
+ },
2150
+ {
2151
+ .desc = "TLB range maintenance instructions",
2152
+ .capability = ARM64_HAS_TLB_RANGE,
2153
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2154
+ .matches = has_cpuid_feature,
2155
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
2156
+ .field_pos = ID_AA64ISAR0_TLB_SHIFT,
2157
+ .sign = FTR_UNSIGNED,
2158
+ .min_field_value = ID_AA64ISAR0_TLB_RANGE,
13002159 },
13012160 #ifdef CONFIG_ARM64_HW_AFDBM
13022161 {
....@@ -1318,78 +2177,302 @@
13182177 .cpu_enable = cpu_enable_hw_dbm,
13192178 },
13202179 #endif
1321
-#ifdef CONFIG_ARM64_SSBD
2180
+ {
2181
+ .desc = "CRC32 instructions",
2182
+ .capability = ARM64_HAS_CRC32,
2183
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2184
+ .matches = has_cpuid_feature,
2185
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
2186
+ .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
2187
+ .min_field_value = 1,
2188
+ },
13222189 {
13232190 .desc = "Speculative Store Bypassing Safe (SSBS)",
13242191 .capability = ARM64_SSBS,
1325
- .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
2192
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
13262193 .matches = has_cpuid_feature,
13272194 .sys_reg = SYS_ID_AA64PFR1_EL1,
13282195 .field_pos = ID_AA64PFR1_SSBS_SHIFT,
13292196 .sign = FTR_UNSIGNED,
13302197 .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
1331
- .cpu_enable = cpu_enable_ssbs,
2198
+ },
2199
+#ifdef CONFIG_ARM64_CNP
2200
+ {
2201
+ .desc = "Common not Private translations",
2202
+ .capability = ARM64_HAS_CNP,
2203
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2204
+ .matches = has_useable_cnp,
2205
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
2206
+ .sign = FTR_UNSIGNED,
2207
+ .field_pos = ID_AA64MMFR2_CNP_SHIFT,
2208
+ .min_field_value = 1,
2209
+ .cpu_enable = cpu_enable_cnp,
13322210 },
13332211 #endif
2212
+ {
2213
+ .desc = "Speculation barrier (SB)",
2214
+ .capability = ARM64_HAS_SB,
2215
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2216
+ .matches = has_cpuid_feature,
2217
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
2218
+ .field_pos = ID_AA64ISAR1_SB_SHIFT,
2219
+ .sign = FTR_UNSIGNED,
2220
+ .min_field_value = 1,
2221
+ },
2222
+#ifdef CONFIG_ARM64_PTR_AUTH
2223
+ {
2224
+ .desc = "Address authentication (architected algorithm)",
2225
+ .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
2226
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2227
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
2228
+ .sign = FTR_UNSIGNED,
2229
+ .field_pos = ID_AA64ISAR1_APA_SHIFT,
2230
+ .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
2231
+ .matches = has_address_auth_cpucap,
2232
+ },
2233
+ {
2234
+ .desc = "Address authentication (IMP DEF algorithm)",
2235
+ .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
2236
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2237
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
2238
+ .sign = FTR_UNSIGNED,
2239
+ .field_pos = ID_AA64ISAR1_API_SHIFT,
2240
+ .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
2241
+ .matches = has_address_auth_cpucap,
2242
+ },
2243
+ {
2244
+ .capability = ARM64_HAS_ADDRESS_AUTH,
2245
+ .type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
2246
+ .matches = has_address_auth_metacap,
2247
+ },
2248
+ {
2249
+ .desc = "Generic authentication (architected algorithm)",
2250
+ .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
2251
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2252
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
2253
+ .sign = FTR_UNSIGNED,
2254
+ .field_pos = ID_AA64ISAR1_GPA_SHIFT,
2255
+ .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
2256
+ .matches = has_cpuid_feature,
2257
+ },
2258
+ {
2259
+ .desc = "Generic authentication (IMP DEF algorithm)",
2260
+ .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
2261
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2262
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
2263
+ .sign = FTR_UNSIGNED,
2264
+ .field_pos = ID_AA64ISAR1_GPI_SHIFT,
2265
+ .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
2266
+ .matches = has_cpuid_feature,
2267
+ },
2268
+ {
2269
+ .capability = ARM64_HAS_GENERIC_AUTH,
2270
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2271
+ .matches = has_generic_auth,
2272
+ },
2273
+#endif /* CONFIG_ARM64_PTR_AUTH */
2274
+#ifdef CONFIG_ARM64_PSEUDO_NMI
2275
+ {
2276
+ /*
2277
+ * Depends on having GICv3
2278
+ */
2279
+ .desc = "IRQ priority masking",
2280
+ .capability = ARM64_HAS_IRQ_PRIO_MASKING,
2281
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2282
+ .matches = can_use_gic_priorities,
2283
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
2284
+ .field_pos = ID_AA64PFR0_GIC_SHIFT,
2285
+ .sign = FTR_UNSIGNED,
2286
+ .min_field_value = 1,
2287
+ },
2288
+#endif
2289
+#ifdef CONFIG_ARM64_E0PD
2290
+ {
2291
+ .desc = "E0PD",
2292
+ .capability = ARM64_HAS_E0PD,
2293
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2294
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
2295
+ .sign = FTR_UNSIGNED,
2296
+ .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
2297
+ .matches = has_cpuid_feature,
2298
+ .min_field_value = 1,
2299
+ .cpu_enable = cpu_enable_e0pd,
2300
+ },
2301
+#endif
2302
+#ifdef CONFIG_ARCH_RANDOM
2303
+ {
2304
+ .desc = "Random Number Generator",
2305
+ .capability = ARM64_HAS_RNG,
2306
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2307
+ .matches = has_cpuid_feature,
2308
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
2309
+ .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
2310
+ .sign = FTR_UNSIGNED,
2311
+ .min_field_value = 1,
2312
+ },
2313
+#endif
2314
+#ifdef CONFIG_ARM64_BTI
2315
+ {
2316
+ .desc = "Branch Target Identification",
2317
+ .capability = ARM64_BTI,
2318
+#ifdef CONFIG_ARM64_BTI_KERNEL
2319
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2320
+#else
2321
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2322
+#endif
2323
+ .matches = has_cpuid_feature,
2324
+ .cpu_enable = bti_enable,
2325
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
2326
+ .field_pos = ID_AA64PFR1_BT_SHIFT,
2327
+ .min_field_value = ID_AA64PFR1_BT_BTI,
2328
+ .sign = FTR_UNSIGNED,
2329
+ },
2330
+#endif
2331
+#ifdef CONFIG_ARM64_MTE
2332
+ {
2333
+ .desc = "Memory Tagging Extension",
2334
+ .capability = ARM64_MTE,
2335
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
2336
+ .matches = has_cpuid_feature,
2337
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
2338
+ .field_pos = ID_AA64PFR1_MTE_SHIFT,
2339
+ .min_field_value = ID_AA64PFR1_MTE,
2340
+ .sign = FTR_UNSIGNED,
2341
+ .cpu_enable = cpu_enable_mte,
2342
+ },
2343
+#endif /* CONFIG_ARM64_MTE */
2344
+ {
2345
+ .desc = "RCpc load-acquire (LDAPR)",
2346
+ .capability = ARM64_HAS_LDAPR,
2347
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
2348
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
2349
+ .sign = FTR_UNSIGNED,
2350
+ .field_pos = ID_AA64ISAR1_LRCPC_SHIFT,
2351
+ .matches = has_cpuid_feature,
2352
+ .min_field_value = 1,
2353
+ },
13342354 {},
13352355 };
13362356
2357
+#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
2358
+ .matches = has_cpuid_feature, \
2359
+ .sys_reg = reg, \
2360
+ .field_pos = field, \
2361
+ .sign = s, \
2362
+ .min_field_value = min_value,
13372363
1338
-#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
1339
- .matches = has_cpuid_feature, \
1340
- .sys_reg = reg, \
1341
- .field_pos = field, \
1342
- .sign = s, \
1343
- .min_field_value = min_value, \
2364
+#define __HWCAP_CAP(name, cap_type, cap) \
2365
+ .desc = name, \
2366
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
2367
+ .hwcap_type = cap_type, \
2368
+ .hwcap = cap, \
13442369
1345
-#define __HWCAP_CAP(name, cap_type, cap) \
1346
- .desc = name, \
1347
- .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
1348
- .hwcap_type = cap_type, \
1349
- .hwcap = cap, \
1350
-
1351
-#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
1352
- { \
1353
- __HWCAP_CAP(#cap, cap_type, cap) \
1354
- HWCAP_CPUID_MATCH(reg, field, s, min_value) \
2370
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
2371
+ { \
2372
+ __HWCAP_CAP(#cap, cap_type, cap) \
2373
+ HWCAP_CPUID_MATCH(reg, field, s, min_value) \
13552374 }
13562375
1357
-#define HWCAP_CAP_MATCH(match, cap_type, cap) \
1358
- { \
1359
- __HWCAP_CAP(#cap, cap_type, cap) \
1360
- .matches = match, \
2376
+#define HWCAP_MULTI_CAP(list, cap_type, cap) \
2377
+ { \
2378
+ __HWCAP_CAP(#cap, cap_type, cap) \
2379
+ .matches = cpucap_multi_entry_cap_matches, \
2380
+ .match_list = list, \
13612381 }
2382
+
2383
+#define HWCAP_CAP_MATCH(match, cap_type, cap) \
2384
+ { \
2385
+ __HWCAP_CAP(#cap, cap_type, cap) \
2386
+ .matches = match, \
2387
+ }
2388
+
2389
+#ifdef CONFIG_ARM64_PTR_AUTH
2390
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
2391
+ {
2392
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
2393
+ FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
2394
+ },
2395
+ {
2396
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
2397
+ FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
2398
+ },
2399
+ {},
2400
+};
2401
+
2402
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
2403
+ {
2404
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
2405
+ FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
2406
+ },
2407
+ {
2408
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
2409
+ FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
2410
+ },
2411
+ {},
2412
+};
2413
+#endif
13622414
13632415 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
1364
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
1365
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
1366
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
1367
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
1368
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
1369
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
1370
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
1371
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
1372
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
1373
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
1374
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
1375
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
1376
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
1377
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
1378
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
1379
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
1380
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
1381
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
1382
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
1383
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
1384
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
1385
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
1386
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
1387
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
1388
- HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
2416
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
2417
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
2418
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
2419
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
2420
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
2421
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
2422
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
2423
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
2424
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
2425
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
2426
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
2427
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
2428
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
2429
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
2430
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
2431
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
2432
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
2433
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
2434
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
2435
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
2436
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
2437
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
2438
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
2439
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
2440
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
2441
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
2442
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
2443
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
2444
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
2445
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
2446
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
2447
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
2448
+ HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
13892449 #ifdef CONFIG_ARM64_SVE
1390
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
2450
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
2451
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
2452
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
2453
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
2454
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
2455
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
2456
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
2457
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
2458
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
2459
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
2460
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
13912461 #endif
1392
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
2462
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
2463
+#ifdef CONFIG_ARM64_BTI
2464
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI),
2465
+#endif
2466
+#ifdef CONFIG_ARM64_PTR_AUTH
2467
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
2468
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
2469
+#endif
2470
+#ifdef CONFIG_ARM64_MTE
2471
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
2472
+#endif /* CONFIG_ARM64_MTE */
2473
+ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
2474
+ HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
2475
+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
13932476 {},
13942477 };
13952478
....@@ -1431,11 +2514,11 @@
14312514 {},
14322515 };
14332516
1434
-static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
2517
+static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
14352518 {
14362519 switch (cap->hwcap_type) {
14372520 case CAP_HWCAP:
1438
- elf_hwcap |= cap->hwcap;
2521
+ cpu_set_feature(cap->hwcap);
14392522 break;
14402523 #ifdef CONFIG_COMPAT
14412524 case CAP_COMPAT_HWCAP:
....@@ -1458,7 +2541,7 @@
14582541
14592542 switch (cap->hwcap_type) {
14602543 case CAP_HWCAP:
1461
- rc = (elf_hwcap & cap->hwcap) != 0;
2544
+ rc = cpu_have_feature(cap->hwcap);
14622545 break;
14632546 #ifdef CONFIG_COMPAT
14642547 case CAP_COMPAT_HWCAP:
....@@ -1476,61 +2559,58 @@
14762559 return rc;
14772560 }
14782561
1479
-static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
2562
+static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
14802563 {
14812564 /* We support emulation of accesses to CPU ID feature registers */
1482
- elf_hwcap |= HWCAP_CPUID;
2565
+ cpu_set_named_feature(CPUID);
14832566 for (; hwcaps->matches; hwcaps++)
14842567 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
14852568 cap_set_elf_hwcap(hwcaps);
14862569 }
14872570
1488
-/*
1489
- * Check if the current CPU has a given feature capability.
1490
- * Should be called from non-preemptible context.
1491
- */
1492
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
1493
- unsigned int cap)
2571
+static void update_cpu_capabilities(u16 scope_mask)
14942572 {
2573
+ int i;
14952574 const struct arm64_cpu_capabilities *caps;
14962575
1497
- if (WARN_ON(preemptible()))
1498
- return false;
1499
-
1500
- for (caps = cap_array; caps->matches; caps++)
1501
- if (caps->capability == cap)
1502
- return caps->matches(caps, SCOPE_LOCAL_CPU);
1503
-
1504
- return false;
1505
-}
1506
-
1507
-static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1508
- u16 scope_mask, const char *info)
1509
-{
15102576 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1511
- for (; caps->matches; caps++) {
1512
- if (!(caps->type & scope_mask) ||
2577
+ for (i = 0; i < ARM64_NCAPS; i++) {
2578
+ caps = cpu_hwcaps_ptrs[i];
2579
+ if (!caps || !(caps->type & scope_mask) ||
2580
+ cpus_have_cap(caps->capability) ||
15132581 !caps->matches(caps, cpucap_default_scope(caps)))
15142582 continue;
15152583
1516
- if (!cpus_have_cap(caps->capability) && caps->desc)
1517
- pr_info("%s %s\n", info, caps->desc);
2584
+ if (caps->desc)
2585
+ pr_info("detected: %s\n", caps->desc);
15182586 cpus_set_cap(caps->capability);
2587
+
2588
+ if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
2589
+ set_bit(caps->capability, boot_capabilities);
15192590 }
15202591 }
15212592
1522
-static void update_cpu_capabilities(u16 scope_mask)
2593
+/*
2594
+ * Enable all the available capabilities on this CPU. The capabilities
2595
+ * with BOOT_CPU scope are handled separately and hence skipped here.
2596
+ */
2597
+static int cpu_enable_non_boot_scope_capabilities(void *__unused)
15232598 {
1524
- __update_cpu_capabilities(arm64_errata, scope_mask,
1525
- "enabling workaround for");
1526
- __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1527
-}
2599
+ int i;
2600
+ u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
15282601
1529
-static int __enable_cpu_capability(void *arg)
1530
-{
1531
- const struct arm64_cpu_capabilities *cap = arg;
2602
+ for_each_available_cap(i) {
2603
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
15322604
1533
- cap->cpu_enable(cap);
2605
+ if (WARN_ON(!cap))
2606
+ continue;
2607
+
2608
+ if (!(cap->type & non_boot_scope))
2609
+ continue;
2610
+
2611
+ if (cap->cpu_enable)
2612
+ cap->cpu_enable(cap);
2613
+ }
15342614 return 0;
15352615 }
15362616
....@@ -1538,21 +2618,29 @@
15382618 * Run through the enabled capabilities and enable() it on all active
15392619 * CPUs
15402620 */
1541
-static void __init
1542
-__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1543
- u16 scope_mask)
2621
+static void __init enable_cpu_capabilities(u16 scope_mask)
15442622 {
1545
- scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1546
- for (; caps->matches; caps++) {
1547
- unsigned int num = caps->capability;
2623
+ int i;
2624
+ const struct arm64_cpu_capabilities *caps;
2625
+ bool boot_scope;
15482626
1549
- if (!(caps->type & scope_mask) || !cpus_have_cap(num))
2627
+ scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2628
+ boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
2629
+
2630
+ for (i = 0; i < ARM64_NCAPS; i++) {
2631
+ unsigned int num;
2632
+
2633
+ caps = cpu_hwcaps_ptrs[i];
2634
+ if (!caps || !(caps->type & scope_mask))
2635
+ continue;
2636
+ num = caps->capability;
2637
+ if (!cpus_have_cap(num))
15502638 continue;
15512639
15522640 /* Ensure cpus_have_const_cap(num) works */
15532641 static_branch_enable(&cpu_hwcap_keys[num]);
15542642
1555
- if (caps->cpu_enable) {
2643
+ if (boot_scope && caps->cpu_enable)
15562644 /*
15572645 * Capabilities with SCOPE_BOOT_CPU scope are finalised
15582646 * before any secondary CPU boots. Thus, each secondary
....@@ -1561,44 +2649,37 @@
15612649 * the boot CPU, for which the capability must be
15622650 * enabled here. This approach avoids costly
15632651 * stop_machine() calls for this case.
1564
- *
1565
- * Otherwise, use stop_machine() as it schedules the
1566
- * work allowing us to modify PSTATE, instead of
1567
- * on_each_cpu() which uses an IPI, giving us a PSTATE
1568
- * that disappears when we return.
15692652 */
1570
- if (scope_mask & SCOPE_BOOT_CPU)
1571
- caps->cpu_enable(caps);
1572
- else
1573
- stop_machine(__enable_cpu_capability,
1574
- (void *)caps, cpu_online_mask);
1575
- }
2653
+ caps->cpu_enable(caps);
15762654 }
1577
-}
15782655
1579
-static void __init enable_cpu_capabilities(u16 scope_mask)
1580
-{
1581
- __enable_cpu_capabilities(arm64_errata, scope_mask);
1582
- __enable_cpu_capabilities(arm64_features, scope_mask);
2656
+ /*
2657
+ * For all non-boot scope capabilities, use stop_machine()
2658
+ * as it schedules the work allowing us to modify PSTATE,
2659
+ * instead of on_each_cpu() which uses an IPI, giving us a
2660
+ * PSTATE that disappears when we return.
2661
+ */
2662
+ if (!boot_scope)
2663
+ stop_machine(cpu_enable_non_boot_scope_capabilities,
2664
+ NULL, cpu_online_mask);
15832665 }
15842666
15852667 /*
15862668 * Run through the list of capabilities to check for conflicts.
15872669 * If the system has already detected a capability, take necessary
15882670 * action on this CPU.
1589
- *
1590
- * Returns "false" on conflicts.
15912671 */
1592
-static bool
1593
-__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
1594
- u16 scope_mask)
2672
+static void verify_local_cpu_caps(u16 scope_mask)
15952673 {
2674
+ int i;
15962675 bool cpu_has_cap, system_has_cap;
2676
+ const struct arm64_cpu_capabilities *caps;
15972677
15982678 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
15992679
1600
- for (; caps->matches; caps++) {
1601
- if (!(caps->type & scope_mask))
2680
+ for (i = 0; i < ARM64_NCAPS; i++) {
2681
+ caps = cpu_hwcaps_ptrs[i];
2682
+ if (!caps || !(caps->type & scope_mask))
16022683 continue;
16032684
16042685 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
....@@ -1629,20 +2710,16 @@
16292710 }
16302711 }
16312712
1632
- if (caps->matches) {
2713
+ if (i < ARM64_NCAPS) {
16332714 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
16342715 smp_processor_id(), caps->capability,
16352716 caps->desc, system_has_cap, cpu_has_cap);
1636
- return false;
2717
+
2718
+ if (cpucap_panic_on_conflict(caps))
2719
+ cpu_panic_kernel();
2720
+ else
2721
+ cpu_die_early();
16372722 }
1638
-
1639
- return true;
1640
-}
1641
-
1642
-static bool verify_local_cpu_caps(u16 scope_mask)
1643
-{
1644
- return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
1645
- __verify_local_cpu_caps(arm64_features, scope_mask);
16462723 }
16472724
16482725 /*
....@@ -1652,16 +2729,12 @@
16522729 static void check_early_cpu_features(void)
16532730 {
16542731 verify_cpu_asid_bits();
1655
- /*
1656
- * Early features are used by the kernel already. If there
1657
- * is a conflict, we cannot proceed further.
1658
- */
1659
- if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
1660
- cpu_panic_kernel();
2732
+
2733
+ verify_local_cpu_caps(SCOPE_BOOT_CPU);
16612734 }
16622735
16632736 static void
1664
-verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
2737
+__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
16652738 {
16662739
16672740 for (; caps->matches; caps++)
....@@ -1670,6 +2743,14 @@
16702743 smp_processor_id(), caps->desc);
16712744 cpu_die_early();
16722745 }
2746
+}
2747
+
2748
+static void verify_local_elf_hwcaps(void)
2749
+{
2750
+ __verify_local_elf_hwcaps(arm64_elf_hwcaps);
2751
+
2752
+ if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
2753
+ __verify_local_elf_hwcaps(compat_elf_hwcaps);
16732754 }
16742755
16752756 static void verify_sve_features(void)
....@@ -1681,7 +2762,7 @@
16812762 unsigned int len = zcr & ZCR_ELx_LEN_MASK;
16822763
16832764 if (len < safe_len || sve_verify_vq_map()) {
1684
- pr_crit("CPU%d: SVE: required vector length(s) missing\n",
2765
+ pr_crit("CPU%d: SVE: vector length support mismatch\n",
16852766 smp_processor_id());
16862767 cpu_die_early();
16872768 }
....@@ -1689,6 +2770,36 @@
16892770 /* Add checks on other ZCR bits here if necessary */
16902771 }
16912772
2773
+static void verify_hyp_capabilities(void)
2774
+{
2775
+ u64 safe_mmfr1, mmfr0, mmfr1;
2776
+ int parange, ipa_max;
2777
+ unsigned int safe_vmid_bits, vmid_bits;
2778
+
2779
+ if (!IS_ENABLED(CONFIG_KVM))
2780
+ return;
2781
+
2782
+ safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2783
+ mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
2784
+ mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
2785
+
2786
+ /* Verify VMID bits */
2787
+ safe_vmid_bits = get_vmid_bits(safe_mmfr1);
2788
+ vmid_bits = get_vmid_bits(mmfr1);
2789
+ if (vmid_bits < safe_vmid_bits) {
2790
+ pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id());
2791
+ cpu_die_early();
2792
+ }
2793
+
2794
+ /* Verify IPA range */
2795
+ parange = cpuid_feature_extract_unsigned_field(mmfr0,
2796
+ ID_AA64MMFR0_PARANGE_SHIFT);
2797
+ ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
2798
+ if (ipa_max < get_kvm_ipa_limit()) {
2799
+ pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
2800
+ cpu_die_early();
2801
+ }
2802
+}
16922803
16932804 /*
16942805 * Run through the enabled system capabilities and enable() it on this CPU.
....@@ -1705,16 +2816,14 @@
17052816 * check_early_cpu_features(), as they need to be verified
17062817 * on all secondary CPUs.
17072818 */
1708
- if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
1709
- cpu_die_early();
1710
-
1711
- verify_local_elf_hwcaps(arm64_elf_hwcaps);
1712
-
1713
- if (system_supports_32bit_el0())
1714
- verify_local_elf_hwcaps(compat_elf_hwcaps);
2819
+ verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
2820
+ verify_local_elf_hwcaps();
17152821
17162822 if (system_supports_sve())
17172823 verify_sve_features();
2824
+
2825
+ if (is_hyp_mode_available())
2826
+ verify_hyp_capabilities();
17182827 }
17192828
17202829 void check_local_cpu_capabilities(void)
....@@ -1731,7 +2840,7 @@
17312840 * Otherwise, this CPU should verify that it has all the system
17322841 * advertised capabilities.
17332842 */
1734
- if (!sys_caps_initialised)
2843
+ if (!system_capabilities_finalized())
17352844 update_cpu_capabilities(SCOPE_LOCAL_CPU);
17362845 else
17372846 verify_local_cpu_capabilities();
....@@ -1745,20 +2854,62 @@
17452854 enable_cpu_capabilities(SCOPE_BOOT_CPU);
17462855 }
17472856
1748
-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1749
-EXPORT_SYMBOL(arm64_const_caps_ready);
1750
-
1751
-static void __init mark_const_caps_ready(void)
2857
+bool this_cpu_has_cap(unsigned int n)
17522858 {
1753
- static_branch_enable(&arm64_const_caps_ready);
2859
+ if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
2860
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2861
+
2862
+ if (cap)
2863
+ return cap->matches(cap, SCOPE_LOCAL_CPU);
2864
+ }
2865
+
2866
+ return false;
17542867 }
17552868
1756
-extern const struct arm64_cpu_capabilities arm64_errata[];
1757
-
1758
-bool this_cpu_has_cap(unsigned int cap)
2869
+/*
2870
+ * This helper function is used in a narrow window when,
2871
+ * - The system wide safe registers are set with all the SMP CPUs and,
2872
+ * - The SYSTEM_FEATURE cpu_hwcaps may not have been set.
2873
+ * In all other cases cpus_have_{const_}cap() should be used.
2874
+ */
2875
+static bool __system_matches_cap(unsigned int n)
17592876 {
1760
- return (__this_cpu_has_cap(arm64_features, cap) ||
1761
- __this_cpu_has_cap(arm64_errata, cap));
2877
+ if (n < ARM64_NCAPS) {
2878
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2879
+
2880
+ if (cap)
2881
+ return cap->matches(cap, SCOPE_SYSTEM);
2882
+ }
2883
+ return false;
2884
+}
2885
+
2886
+void cpu_set_feature(unsigned int num)
2887
+{
2888
+ WARN_ON(num >= MAX_CPU_FEATURES);
2889
+ elf_hwcap |= BIT(num);
2890
+}
2891
+EXPORT_SYMBOL_GPL(cpu_set_feature);
2892
+
2893
+bool cpu_have_feature(unsigned int num)
2894
+{
2895
+ WARN_ON(num >= MAX_CPU_FEATURES);
2896
+ return elf_hwcap & BIT(num);
2897
+}
2898
+EXPORT_SYMBOL_GPL(cpu_have_feature);
2899
+
2900
+unsigned long cpu_get_elf_hwcap(void)
2901
+{
2902
+ /*
2903
+ * We currently only populate the first 32 bits of AT_HWCAP. Please
2904
+ * note that for userspace compatibility we guarantee that bits 62
2905
+ * and 63 will always be returned as 0.
2906
+ */
2907
+ return lower_32_bits(elf_hwcap);
2908
+}
2909
+
2910
+unsigned long cpu_get_elf_hwcap2(void)
2911
+{
2912
+ return upper_32_bits(elf_hwcap);
17622913 }
17632914
17642915 static void __init setup_system_capabilities(void)
....@@ -1778,11 +2929,12 @@
17782929 u32 cwg;
17792930
17802931 setup_system_capabilities();
1781
- mark_const_caps_ready();
17822932 setup_elf_hwcaps(arm64_elf_hwcaps);
17832933
1784
- if (system_supports_32bit_el0())
2934
+ if (system_supports_32bit_el0()) {
17852935 setup_elf_hwcaps(compat_elf_hwcaps);
2936
+ elf_hwcap_fixup();
2937
+ }
17862938
17872939 if (system_uses_ttbr0_pan())
17882940 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
....@@ -1791,7 +2943,7 @@
17912943 minsigstksz_setup();
17922944
17932945 /* Advertise that we have computed the system capabilities */
1794
- set_sys_caps_initialised();
2946
+ finalize_system_capabilities();
17952947
17962948 /*
17972949 * Check for sane CTR_EL0.CWG value.
....@@ -1802,10 +2954,61 @@
18022954 ARCH_DMA_MINALIGN);
18032955 }
18042956
2957
+static int enable_mismatched_32bit_el0(unsigned int cpu)
2958
+{
2959
+ static int lucky_winner = -1;
2960
+
2961
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
2962
+ bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
2963
+
2964
+ if (cpu_32bit) {
2965
+ cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
2966
+ static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
2967
+ }
2968
+
2969
+ if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
2970
+ return 0;
2971
+
2972
+ if (lucky_winner >= 0)
2973
+ return 0;
2974
+
2975
+ /*
2976
+ * We've detected a mismatch. We need to keep one of our CPUs with
2977
+ * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting
2978
+ * every CPU in the system for a 32-bit task.
2979
+ */
2980
+ lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
2981
+ cpu_active_mask);
2982
+ get_cpu_device(lucky_winner)->offline_disabled = true;
2983
+ setup_elf_hwcaps(compat_elf_hwcaps);
2984
+ pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
2985
+ cpu, lucky_winner);
2986
+ return 0;
2987
+}
2988
+
2989
+static int __init init_32bit_el0_mask(void)
2990
+{
2991
+ if (!allow_mismatched_32bit_el0)
2992
+ return 0;
2993
+
2994
+ if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
2995
+ return -ENOMEM;
2996
+
2997
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
2998
+ "arm64/mismatched_32bit_el0:online",
2999
+ enable_mismatched_32bit_el0, NULL);
3000
+}
3001
+subsys_initcall_sync(init_32bit_el0_mask);
3002
+
18053003 static bool __maybe_unused
18063004 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
18073005 {
1808
- return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
3006
+ return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
3007
+}
3008
+
3009
+static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
3010
+{
3011
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
18093012 }
18103013
18113014 /*
....@@ -1857,7 +3060,7 @@
18573060 if (sys_reg_CRm(id) == 0)
18583061 return emulate_id_reg(id, valp);
18593062
1860
- regp = get_arm64_ftr_reg(id);
3063
+ regp = get_arm64_ftr_reg_nowarn(id);
18613064 if (regp)
18623065 *valp = arm64_ftr_reg_user_value(regp);
18633066 else
....@@ -1869,25 +3072,30 @@
18693072 return 0;
18703073 }
18713074
1872
-static int emulate_mrs(struct pt_regs *regs, u32 insn)
3075
+int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
18733076 {
18743077 int rc;
1875
- u32 sys_reg, dst;
18763078 u64 val;
3079
+
3080
+ rc = emulate_sys_reg(sys_reg, &val);
3081
+ if (!rc) {
3082
+ pt_regs_write_reg(regs, rt, val);
3083
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
3084
+ }
3085
+ return rc;
3086
+}
3087
+
3088
+static int emulate_mrs(struct pt_regs *regs, u32 insn)
3089
+{
3090
+ u32 sys_reg, rt;
18773091
18783092 /*
18793093 * sys_reg values are defined as used in mrs/msr instruction.
18803094 * shift the imm value to get the encoding.
18813095 */
18823096 sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
1883
- rc = emulate_sys_reg(sys_reg, &val);
1884
- if (!rc) {
1885
- dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
1886
- pt_regs_write_reg(regs, dst, val);
1887
- arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1888
- }
1889
-
1890
- return rc;
3097
+ rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
3098
+ return do_emulate_mrs(regs, sys_reg, rt);
18913099 }
18923100
18933101 static struct undef_hook mrs_hook = {
....@@ -1906,20 +3114,28 @@
19063114
19073115 core_initcall(enable_mrs_emulation);
19083116
1909
-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
3117
+enum mitigation_state arm64_get_meltdown_state(void)
19103118 {
1911
- /* Firmware may have left a deferred SError in this register. */
1912
- write_sysreg_s(0, SYS_DISR_EL1);
3119
+ if (__meltdown_safe)
3120
+ return SPECTRE_UNAFFECTED;
3121
+
3122
+ if (arm64_kernel_unmapped_at_el0())
3123
+ return SPECTRE_MITIGATED;
3124
+
3125
+ return SPECTRE_VULNERABLE;
19133126 }
19143127
19153128 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
19163129 char *buf)
19173130 {
1918
- if (__meltdown_safe)
3131
+ switch (arm64_get_meltdown_state()) {
3132
+ case SPECTRE_UNAFFECTED:
19193133 return sprintf(buf, "Not affected\n");
19203134
1921
- if (arm64_kernel_unmapped_at_el0())
3135
+ case SPECTRE_MITIGATED:
19223136 return sprintf(buf, "Mitigation: PTI\n");
19233137
1924
- return sprintf(buf, "Vulnerable\n");
3138
+ default:
3139
+ return sprintf(buf, "Vulnerable\n");
3140
+ }
19253141 }