+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Contains CPU feature definitions
  *
  * Copyright (C) 2015 ARM Ltd.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * A note for the weary kernel hacker: the code here is confusing and hard to
+ * follow! That's partly because it's solving a nasty problem, but also because
+ * there's a little bit of over-abstraction that tends to obscure what's going
+ * on behind a maze of helper functions and macros.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * The basic problem is that hardware folks have started gluing together CPUs
+ * with distinct architectural features; in some cases even creating SoCs where
+ * user-visible instructions are available only on a subset of the available
+ * cores. We try to address this by snapshotting the feature registers of the
+ * boot CPU and comparing these with the feature registers of each secondary
+ * CPU when bringing them up. If there is a mismatch, then we update the
+ * snapshot state to indicate the lowest common denominator of the feature,
+ * known as the "safe" value. This snapshot state can be queried to view the
+ * "sanitised" value of a feature register.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * The sanitised register values are used to decide which capabilities we
+ * have in the system. These may be in the form of traditional "hwcaps"
+ * advertised to userspace or internal "cpucaps" which are used to configure
+ * things like alternative patching and static keys. While a feature mismatch
+ * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
+ * may prevent a CPU from being onlined at all.
+ *
+ * Some implementation details worth remembering:
+ *
+ * - Mismatched features are *always* sanitised to a "safe" value, which
+ *   usually indicates that the feature is not supported.
+ *
+ * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
+ *   warning when onlining an offending CPU and the kernel will be tainted
+ *   with TAINT_CPU_OUT_OF_SPEC.
+ *
+ * - Features marked as FTR_VISIBLE have their sanitised value visible to
+ *   userspace. FTR_VISIBLE features in registers that are only visible
+ *   to EL0 by trapping *must* have a corresponding HWCAP so that late
+ *   onlining of CPUs cannot lead to features disappearing at runtime.
+ *
+ * - A "feature" is typically a 4-bit register field. A "capability" is the
+ *   high-level description derived from the sanitised field value.
+ *
+ * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
+ *   scheme for fields in ID registers") to understand when feature fields
+ *   may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly).
+ *
+ * - KVM exposes its own view of the feature registers to guest operating
+ *   systems regardless of FTR_VISIBLE. This is typically driven from the
+ *   sanitised register values to allow virtual CPUs to be migrated between
+ *   arbitrary physical CPUs, but some features not present on the host are
+ *   also advertised and emulated. Look at sys_reg_descs[] for the gory
+ *   details.
+ *
+ * - If the arm64_ftr_bits[] for a register has a missing field, then this
+ *   field is treated as STRICT RES0, including for read_sanitised_ftr_reg().
+ *   This is stronger than FTR_HIDDEN and can be used to hide features from
+ *   KVM guests.
  */
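The sanitisation described above boils down to per-field min/max arithmetic over the snapshot. As a minimal standalone sketch (not the kernel's implementation, which goes through arm64_ftr_safe_value() and arm64_ftr_set_value()), merging one unsigned 4-bit FTR_LOWER_SAFE field from a secondary CPU's register into the snapshot looks like this:

static u64 sanitise_lower_safe(u64 snapshot, u64 secondary, unsigned int shift)
{
	u64 cur = (snapshot >> shift) & 0xf;
	u64 new = (secondary >> shift) & 0xf;
	u64 safe = new < cur ? new : cur;	/* the lower value is "safe" */

	return (snapshot & ~(0xfULL << shift)) | (safe << shift);
}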
---|
 
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
 #include <linux/cpumask.h>
+#include <linux/crash_dump.h>
+#include <linux/percpu.h>
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
+#include <linux/sysfs.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/kasan.h>
+
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/fpsimd.h>
+#include <asm/kvm_host.h>
+#include <asm/hwcap.h>
 #include <asm/mmu_context.h>
+#include <asm/mte.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
 #include <asm/traps.h>
+#include <asm/vectors.h>
 #include <asm/virt.h>
 
-unsigned long elf_hwcap __read_mostly;
-EXPORT_SYMBOL_GPL(elf_hwcap);
+/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+static unsigned long elf_hwcap __read_mostly;
 
 #ifdef CONFIG_COMPAT
 #define COMPAT_ELF_HWCAP_DEFAULT	\
...
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcaps);
+static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
+
+/* We also need a bit for ARM64_CB_PATCH */
+DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
+
+bool arm64_use_ng_mappings = false;
+EXPORT_SYMBOL(arm64_use_ng_mappings);
+
+DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+
+/*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+ */
+static bool __read_mostly allow_mismatched_32bit_el0;
+
+/*
+ * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
+ * seen at least one CPU capable of 32-bit EL0.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+/*
+ * Mask of CPUs supporting 32-bit EL0.
+ * Only valid if arm64_mismatched_32bit_el0 is enabled.
+ */
+static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
 
 /*
  * Flag to indicate if we have computed the system wide
...
  * will be used to determine if a new booting CPU should
  * go through the verification process to make sure that it
  * supports the system capabilities, without using a hotplug
- * notifier.
+ * notifier. This is also used to decide if we could use
+ * the fast path for checking constant CPU caps.
  */
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+static inline void finalize_system_capabilities(void)
 {
-	sys_caps_initialised = true;
+	static_branch_enable(&arm64_const_caps_ready);
 }
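Once this static key is enabled, capability queries can be resolved by a patched branch rather than a bitmap test. A simplified sketch of the consumer side, modelled on the cpus_have_const_cap() helper in <asm/cpufeature.h> from this era (the exact body there may differ):

static __always_inline bool cpus_have_const_cap(int num)
{
	if (static_branch_likely(&arm64_const_caps_ready))
		/* One patched static key per capability: no memory load */
		return static_branch_unlikely(&cpu_hwcap_keys[num]);

	/* Before capabilities are finalized, fall back to the bitmap */
	return num < ARM64_NCAPS && test_bit(num, cpu_hwcaps);
}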
---|
 
-static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
+void dump_cpu_features(void)
 {
 	/* file-wide pr_fmt adds "CPU features: " prefix */
 	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
-	return 0;
 }
-
-static struct notifier_block cpu_hwcaps_notifier = {
-	.notifier_call = dump_cpu_hwcaps
-};
-
-static int __init register_cpu_hwcaps_dumper(void)
-{
-	atomic_notifier_chain_register(&panic_notifier_list,
-				       &cpu_hwcaps_notifier);
-	return 0;
-}
-__initcall(register_cpu_hwcaps_dumper);
 
 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcap_keys);
...
 static bool __maybe_unused
 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
 
+static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
+
+static bool __system_matches_cap(unsigned int n);
 
 /*
  * NOTE: Any changes to the visibility of features should be kept in
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TLB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
...
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
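Each ARM64_FTR_BITS(visibility, strictness, policy, shift, width, safe_val) entry above describes one ID-register field: whether userspace may see it, whether a mismatch taints the kernel, how conflicting values are merged, and where the field sits in the register. Extracting such a field is plain shift-and-mask; for example (a sketch, relying on TS occupying bits [55:52] of ID_AA64ISAR0_EL1):

static inline unsigned int id_aa64isar0_ts(u64 isar0)
{
	return (isar0 >> ID_AA64ISAR0_TS_SHIFT) & 0xf;	/* 4-bit field */
}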
---|
 
...
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_AMU_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_MPAM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SEL2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
...
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+	/*
+	 * Page size not being supported at Stage-2 is not fatal. You
+	 * just give up KVM if PAGE_SIZE isn't supported there. Go fix
+	 * your favourite nesting hypervisor.
+	 *
+	 * There is a small corner case where the hypervisor explicitly
+	 * advertises a given granule size at Stage-2 (value 2) on some
+	 * vCPUs, and uses the fallback to Stage-1 (value 0) for other
+	 * vCPUs. Although this is not forbidden by the architecture, it
+	 * indicates that the hypervisor is being silly (or buggy).
+	 *
+	 * We make no effort to cope with this and pretend that if these
+	 * fields are inconsistent across vCPUs, then it isn't worth
+	 * trying to bring KVM up.
+	 */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
 	/*
 	 * We already refuse to boot CPUs that don't support our configured
 	 * page size, so we can only detect mismatches for a page size other
...
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
...
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
...
 	 * make use of *minLine.
 	 * If we have differing I-cache policies, report it as the weakest - VIPT.
 	 */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
+static struct arm64_ftr_override __ro_after_init no_override = { };
+
 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
 	.name = "SYS_CTR_EL0",
-	.ftr_bits = ftr_ctr
+	.ftr_bits = ftr_ctr,
+	.override = &no_override,
 };
 
 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),	/* InnerShr */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),	/* FCSE */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),	/* TCM */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* ShareLvl */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),	/* OuterShr */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* PMSA */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* VMSA */
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
...
 };
 
 static const struct arm64_ftr_bits ftr_mvfr2[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* FPMisc */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* SIMDMisc */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_dczid[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),	/* DZP */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_DZP_SHIFT, 1, 1),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_id_isar0[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_COPROC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_CMPBRANCH_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITFIELD_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_BITCOUNT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_SWAP_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
 
 static const struct arm64_ftr_bits ftr_id_isar5[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
...
 };
 
 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* ac2 */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EVT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CCIDX_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_LSM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),
+
---|
+	/*
+	 * SpecSEI = 1 indicates that the PE might generate an SError on an
+	 * external abort on speculative read. It is safer to assume that an
+	 * SError might be generated than that it will not be. Hence it has
+	 * been classified as FTR_HIGHER_SAFE.
+	 */
---|
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_SPECSEI_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar4[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SWP_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_PSR_M_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_BARRIER_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_SMC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WRITEBACK_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_WITHSHIFTS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_UNPRIV_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_ETS_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_isar6[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* State3 */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),	/* State2 */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* State1 */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* State0 */
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_pfr1[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GIC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRT_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SEC_FRAC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_GENTIMER_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_VIRTUALIZATION_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_MPROGMOD_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_SECURITY_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_PROGMOD_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_pfr2[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_SSBS_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_CSV3_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
 	/* [31:28] TraceFilt */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_PERFMON_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
+	ARM64_FTR_END,
+};
+
+static const struct arm64_ftr_bits ftr_id_dfr1[] = {
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_MTPMU_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
...
  * Common ftr bits for a 32bit register with all hidden, strict
  * attributes, with 4bit feature fields and a default safe value of
  * 0. Covers the following 32bit registers:
- * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
+ * id_isar[1-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
  */
 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
...
 	ARM64_FTR_END,
 };
 
-#define ARM64_FTR_REG(id, table) {		\
-	.sys_id = id,				\
-	.reg = &(struct arm64_ftr_reg){		\
-		.name = #id,			\
-		.ftr_bits = &((table)[0]),	\
+#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) {	\
+	.sys_id = id,						\
+	.reg = &(struct arm64_ftr_reg){				\
+		.name = id_str,					\
+		.override = (ovr),				\
+		.ftr_bits = &((table)[0]),			\
 	}}
+
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr)	\
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
+
+#define ARM64_FTR_REG(id, table)		\
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
+
+struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
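These override structs let early boot code (the command-line ID-register override parser) force individual fields before sanitisation runs: each carries a (val, mask) pair that init_cpu_ftr_reg() consults field by field. A hypothetical standalone illustration of the basic merge (the real code additionally checks the override against the CPU's reported value, as shown further down):

/* Fields selected by ovr->mask take their value from ovr->val */
static u64 apply_override(u64 reg_val, const struct arm64_ftr_override *ovr)
{
	return (reg_val & ~ovr->mask) | (ovr->val & ovr->mask);
}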
---|
 
 static const struct __ftr_reg_entry {
 	u32 sys_id;
...
 
 	/* Op1 = 0, CRn = 0, CRm = 1 */
 	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
-	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
+	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
 	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
 	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
 	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
...
 	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
 
 	/* Op1 = 0, CRn = 0, CRm = 2 */
-	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
+	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
 	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
-	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
+	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
 	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
 	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+	ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),
 
 	/* Op1 = 0, CRn = 0, CRm = 3 */
 	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
+	ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
+	ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
+	ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),
 
 	/* Op1 = 0, CRn = 0, CRm = 4 */
 	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
-	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
-	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
+			       &id_aa64pfr1_override),
+	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
 
 	/* Op1 = 0, CRn = 0, CRm = 5 */
 	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
...
 
 	/* Op1 = 0, CRn = 0, CRm = 6 */
 	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
-	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
+			       &id_aa64isar1_override),
+	ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
 
 	/* Op1 = 0, CRn = 0, CRm = 7 */
 	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
-	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
+			       &id_aa64mmfr1_override),
 	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
 
 	/* Op1 = 0, CRn = 1, CRm = 2 */
...
 }
 
 /*
- * get_arm64_ftr_reg - Lookup a feature register entry using its
- * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
- * ascending order of sys_id , we use binary search to find a matching
+ * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
+ * its sys_reg() encoding. With the array arm64_ftr_regs sorted in the
+ * ascending order of sys_id, we use binary search to find a matching
  * entry.
  *
  * returns - Upon success, matching ftr_reg entry for id.
  *         - NULL on failure. It is upto the caller to decide
  *           the impact of a failure.
  */
-static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
 {
 	const struct __ftr_reg_entry *ret;
 
...
 	if (ret)
 		return ret->reg;
 	return NULL;
+}
+
---|
+/*
+ * get_arm64_ftr_reg - Looks up a feature register entry using
+ * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn().
+ *
+ * returns - Upon success, matching ftr_reg entry for id.
+ *         - NULL on failure, but with a WARN_ON().
+ */
---|
+static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+{
+	struct arm64_ftr_reg *reg;
+
+	reg = get_arm64_ftr_reg_nowarn(sys_id);
+
+	/*
+	 * Requesting a non-existent register search is an error. Warn
+	 * and let the caller handle it.
+	 */
+	WARN_ON(!reg);
+	return reg;
 }
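A typical caller tolerates the NULL by substituting a zero ("no features") value; this is essentially the shape of read_sanitised_ftr_reg(), which lives elsewhere in this file (reproduced here as a sketch, not verbatim):

u64 read_sanitised_ftr_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	if (!regp)
		return 0;

	return regp->sys_val;	/* the system-wide sanitised value */
}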
---|
 
 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
...
 	case FTR_HIGHER_OR_ZERO_SAFE:
 		if (!cur || !new)
 			break;
-		/* Fallthrough */
+		fallthrough;
 	case FTR_HIGHER_SAFE:
 		ret = new > cur ? new : cur;
 		break;
...
 
 static void __init sort_ftr_regs(void)
 {
-	int i;
+	unsigned int i;
 
-	/* Check that the array is sorted so that we can do the binary search */
-	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
+	for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
+		const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
+		const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
+		unsigned int j = 0;
+
+		/*
+		 * Features here must be sorted in descending order with respect
+		 * to their shift values and should not overlap with each other.
+		 */
+		for (; ftr_bits->width != 0; ftr_bits++, j++) {
+			unsigned int width = ftr_reg->ftr_bits[j].width;
+			unsigned int shift = ftr_reg->ftr_bits[j].shift;
+			unsigned int prev_shift;
+
+			WARN((shift + width) > 64,
+				"%s has invalid feature at shift %d\n",
+				ftr_reg->name, shift);
+
+			/*
+			 * Skip the first feature. There is nothing to
+			 * compare against for now.
+			 */
+			if (j == 0)
+				continue;
+
+			prev_shift = ftr_reg->ftr_bits[j - 1].shift;
+			WARN((shift + width) > prev_shift,
+				"%s has feature overlap at shift %d\n",
+				ftr_reg->name, shift);
+		}
+
+		/*
+		 * Skip the first register. There is nothing to
+		 * compare against for now.
+		 */
+		if (i == 0)
+			continue;
+		/*
+		 * Registers here must be sorted in ascending order with respect
+		 * to sys_id for subsequent binary search in get_arm64_ftr_reg()
+		 * to work correctly.
+		 */
 		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
+	}
 }
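Concretely, the per-register check accepts tables whose fields run from high shifts to low with no overlap, i.e. each entry must satisfy shift + width <= the previous entry's shift. A made-up table that passes (purely illustrative, not from the kernel):

static const struct arm64_ftr_bits example_ftr_bits[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),	/* bits [11:8] */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* bits [7:4]  */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* bits [3:0]  */
	ARM64_FTR_END,
};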
---|
 
 /*
...
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
 {
 	u64 val = 0;
 	u64 strict_mask = ~0x0ULL;
...
 	const struct arm64_ftr_bits *ftrp;
 	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
 
-	BUG_ON(!reg);
+	if (!reg)
+		return;
 
-	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
+	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
 		u64 ftr_mask = arm64_ftr_mask(ftrp);
 		s64 ftr_new = arm64_ftr_value(ftrp, new);
+		s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);
+
+		if ((ftr_mask & reg->override->mask) == ftr_mask) {
+			s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
+			char *str = NULL;
+
+			if (ftr_ovr != tmp) {
+				/* Unsafe, remove the override */
+				reg->override->mask &= ~ftr_mask;
+				reg->override->val &= ~ftr_mask;
+				tmp = ftr_ovr;
+				str = "ignoring override";
+			} else if (ftr_new != tmp) {
+				/* Override was valid */
+				ftr_new = tmp;
+				str = "forced";
+			} else if (ftr_ovr == tmp) {
+				/* Override was the safe value */
+				str = "already set";
+			}
+
+			if (str)
+				pr_warn("%s[%d:%d]: %s to %llx\n",
+					reg->name,
+					ftrp->shift + ftrp->width - 1,
+					ftrp->shift, str, tmp);
+		}
 
 		val = arm64_ftr_set_value(ftrp, val, ftr_new);
 
...
 }
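The three pr_warn() strings correspond to three outcomes, which are easiest to see with concrete numbers. A worked illustration for a FTR_LOWER_SAFE field that the CPU reports as 2, where arm64_ftr_safe_value() effectively computes min(override, cpu):

/*
 *   override = 1: min(1, 2) == 1 == override  ->  "forced" (field limited)
 *   override = 3: min(3, 2) == 2 != override  ->  unsafe, "ignoring override"
 *   override = 2: min(2, 2) == 2 == cpu       ->  "already set"
 */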
---|
 
 extern const struct arm64_cpu_capabilities arm64_errata[];
+static const struct arm64_cpu_capabilities arm64_features[];
+
+static void __init
+init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
+{
+	for (; caps->matches; caps++) {
+		if (WARN(caps->capability >= ARM64_NCAPS,
+			"Invalid capability %d\n", caps->capability))
+			continue;
+		if (WARN(cpu_hwcaps_ptrs[caps->capability],
+			"Duplicate entry for capability %d\n",
+			caps->capability))
+			continue;
+		cpu_hwcaps_ptrs[caps->capability] = caps;
+	}
+}
+
+static void __init init_cpu_hwcaps_indirect_list(void)
+{
+	init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+	init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
+}
---|
 static void __init setup_boot_cpu_capabilities(void);
+
+static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
+{
+	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+	init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+	init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+	init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+	init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+	init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
 
 void __init init_cpu_features(struct cpuinfo_arm64 *info)
 {
...
 	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
 	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
+	init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
...
 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 
-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
-		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
-		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
-		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
-		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
-		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
-		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
-		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
-		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
-		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
-		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
-		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
-		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
-		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
-		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
-		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
-	}
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+		init_32bit_cpu_features(&info->aarch32);
 
 	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
 		sve_init_vq_map();
 	}
+
+	/*
+	 * Initialize the indirect array of CPU hwcaps capabilities pointers
+	 * before we handle the boot CPU below.
+	 */
+	init_cpu_hwcaps_indirect_list();
 
 	/*
 	 * Detect and enable early CPU capabilities based on the boot CPU,
...
 {
 	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
 
-	BUG_ON(!regp);
+	if (!regp)
+		return 0;
+
 	update_cpu_ftr_reg(regp, val);
 	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
 		return 0;
 	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
 		regp->name, boot, cpu, val);
 	return 1;
+}
+
+static void relax_cpu_ftr_reg(u32 sys_id, int field)
+{
+	const struct arm64_ftr_bits *ftrp;
+	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
+
+	if (!regp)
+		return;
+
+	for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
+		if (ftrp->shift == field) {
+			regp->strict_mask &= ~arm64_ftr_mask(ftrp);
+			break;
+		}
+	}
+
+	/* Bogus field? */
+	WARN_ON(!ftrp->width);
+}
+
+static void update_mismatched_32bit_el0_cpu_features(struct cpuinfo_arm64 *info,
+						     struct cpuinfo_arm64 *boot)
+{
+	static bool boot_cpu_32bit_regs_overridden = false;
+
+	if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
+		return;
+
+	if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
+		return;
+
+	boot->aarch32 = info->aarch32;
+	init_32bit_cpu_features(&boot->aarch32);
+	boot_cpu_32bit_regs_overridden = true;
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
+				     struct cpuinfo_32bit *boot)
+{
+	int taint = 0;
+	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
---|
+	/*
+	 * If we don't have AArch32 at EL1, then relax the strictness of
+	 * EL1-dependent register fields to avoid spurious sanity check
+	 * failures.
+	 */
---|
+	if (!id_aa64pfr0_32bit_el1(pfr0)) {
+		relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
+	}
+
+	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
+				      info->reg_id_dfr0, boot->reg_id_dfr0);
+	taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
+				      info->reg_id_dfr1, boot->reg_id_dfr1);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
+				      info->reg_id_isar0, boot->reg_id_isar0);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
+				      info->reg_id_isar1, boot->reg_id_isar1);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
+				      info->reg_id_isar2, boot->reg_id_isar2);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
+				      info->reg_id_isar3, boot->reg_id_isar3);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
+				      info->reg_id_isar4, boot->reg_id_isar4);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
+				      info->reg_id_isar5, boot->reg_id_isar5);
+	taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
+				      info->reg_id_isar6, boot->reg_id_isar6);
+
+	/*
+	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+	 * ACTLR formats could differ across CPUs and therefore would have to
+	 * be trapped for virtualization anyway.
+	 */
+	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
+				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
+				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
+				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
+				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
+				      info->reg_id_mmfr4, boot->reg_id_mmfr4);
+	taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
+				      info->reg_id_mmfr5, boot->reg_id_mmfr5);
+	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
+				      info->reg_id_pfr0, boot->reg_id_pfr0);
+	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
+				      info->reg_id_pfr1, boot->reg_id_pfr1);
+	taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
+				      info->reg_id_pfr2, boot->reg_id_pfr2);
+	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
+				      info->reg_mvfr0, boot->reg_mvfr0);
+	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
+				      info->reg_mvfr1, boot->reg_mvfr1);
+	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
+				      info->reg_mvfr2, boot->reg_mvfr2);
+
+	return taint;
 }
---|
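For context, a minimal sketch of how the taint value accumulated above is consumed further up the call chain (assuming the kernel's usual TAINT_CPU_OUT_OF_SPEC handling; illustrative only, not part of this diff):

	/* Sketch: any non-zero taint from the checks above taints the kernel */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
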
609 | 1103 | |
---|
610 | 1104 | /* |
---|
.. | .. |
---|
656 | 1150 | info->reg_id_aa64isar0, boot->reg_id_aa64isar0); |
---|
657 | 1151 | taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, |
---|
658 | 1152 | info->reg_id_aa64isar1, boot->reg_id_aa64isar1); |
---|
| 1153 | + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, |
---|
| 1154 | + info->reg_id_aa64isar2, boot->reg_id_aa64isar2); |
---|
659 | 1155 | |
---|
660 | 1156 | /* |
---|
661 | 1157 | * Differing PARange support is fine as long as all peripherals and |
---|
.. | .. |
---|
677 | 1173 | taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu, |
---|
678 | 1174 | info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0); |
---|
679 | 1175 | |
---|
680 | | - /* |
---|
681 | | - * If we have AArch32, we care about 32-bit features for compat. |
---|
682 | | - * If the system doesn't support AArch32, don't update them. |
---|
683 | | - */ |
---|
684 | | - if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) && |
---|
685 | | - id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { |
---|
686 | | - |
---|
687 | | - taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu, |
---|
688 | | - info->reg_id_dfr0, boot->reg_id_dfr0); |
---|
689 | | - taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu, |
---|
690 | | - info->reg_id_isar0, boot->reg_id_isar0); |
---|
691 | | - taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu, |
---|
692 | | - info->reg_id_isar1, boot->reg_id_isar1); |
---|
693 | | - taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu, |
---|
694 | | - info->reg_id_isar2, boot->reg_id_isar2); |
---|
695 | | - taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu, |
---|
696 | | - info->reg_id_isar3, boot->reg_id_isar3); |
---|
697 | | - taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu, |
---|
698 | | - info->reg_id_isar4, boot->reg_id_isar4); |
---|
699 | | - taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu, |
---|
700 | | - info->reg_id_isar5, boot->reg_id_isar5); |
---|
701 | | - |
---|
702 | | - /* |
---|
703 | | - * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and |
---|
704 | | - * ACTLR formats could differ across CPUs and therefore would have to |
---|
705 | | - * be trapped for virtualization anyway. |
---|
706 | | - */ |
---|
707 | | - taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu, |
---|
708 | | - info->reg_id_mmfr0, boot->reg_id_mmfr0); |
---|
709 | | - taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu, |
---|
710 | | - info->reg_id_mmfr1, boot->reg_id_mmfr1); |
---|
711 | | - taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu, |
---|
712 | | - info->reg_id_mmfr2, boot->reg_id_mmfr2); |
---|
713 | | - taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu, |
---|
714 | | - info->reg_id_mmfr3, boot->reg_id_mmfr3); |
---|
715 | | - taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu, |
---|
716 | | - info->reg_id_pfr0, boot->reg_id_pfr0); |
---|
717 | | - taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu, |
---|
718 | | - info->reg_id_pfr1, boot->reg_id_pfr1); |
---|
719 | | - taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu, |
---|
720 | | - info->reg_mvfr0, boot->reg_mvfr0); |
---|
721 | | - taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu, |
---|
722 | | - info->reg_mvfr1, boot->reg_mvfr1); |
---|
723 | | - taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu, |
---|
724 | | - info->reg_mvfr2, boot->reg_mvfr2); |
---|
725 | | - } |
---|
726 | | - |
---|
727 | 1176 | if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) { |
---|
728 | 1177 | taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu, |
---|
729 | 1178 | info->reg_zcr, boot->reg_zcr); |
---|
730 | 1179 | |
---|
731 | 1180 | /* Probe vector lengths, unless we already gave up on SVE */ |
---|
732 | 1181 | if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) && |
---|
733 | | - !sys_caps_initialised) |
---|
| 1182 | + !system_capabilities_finalized()) |
---|
734 | 1183 | sve_update_vq_map(); |
---|
| 1184 | + } |
---|
| 1185 | + |
---|
| 1186 | + /* |
---|
| 1187 | + * If we don't have AArch32 at all then skip the checks entirely |
---|
| 1188 | + * as the register values may be UNKNOWN and we're not going to be |
---|
| 1189 | + * using them for anything. |
---|
| 1190 | + * |
---|
| 1191 | + * This relies on a sanitised view of the AArch64 ID registers |
---|
| 1192 | + * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last. |
---|
| 1193 | + */ |
---|
| 1194 | + if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) { |
---|
| 1195 | + update_mismatched_32bit_el0_cpu_features(info, boot); |
---|
| 1196 | + taint |= update_32bit_cpu_features(cpu, &info->aarch32, |
---|
| 1197 | + &boot->aarch32); |
---|
735 | 1198 | } |
---|
736 | 1199 | |
---|
737 | 1200 | /* |
---|
.. | .. |
---|
748 | 1211 | { |
---|
749 | 1212 | struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id); |
---|
750 | 1213 | |
---|
751 | | - /* We shouldn't get a request for an unsupported register */ |
---|
752 | | - BUG_ON(!regp); |
---|
| 1214 | + if (!regp) |
---|
| 1215 | + return 0; |
---|
753 | 1216 | return regp->sys_val; |
---|
754 | 1217 | } |
---|
| 1218 | +EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg); |
---|
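With read_sanitised_ftr_reg() now exported, modular code can query the system-wide safe view of an ID register; a minimal sketch (the helper name is hypothetical):

	static bool sketch_have_tlb_range(void)
	{
		u64 isar0 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);

		/* Extract the unsigned TLB field from the sanitised value */
		return cpuid_feature_extract_unsigned_field(isar0,
				ID_AA64ISAR0_TLB_SHIFT) >= ID_AA64ISAR0_TLB_RANGE;
	}
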
755 | 1219 | |
---|
756 | 1220 | #define read_sysreg_case(r) \ |
---|
757 | | - case r: return read_sysreg_s(r) |
---|
| 1221 | + case r: val = read_sysreg_s(r); break; |
---|
758 | 1222 | |
---|
759 | 1223 | /* |
---|
760 | 1224 | * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated. |
---|
761 | 1225 | * Read the system register on the current CPU |
---|
762 | 1226 | */ |
---|
763 | | -static u64 __read_sysreg_by_encoding(u32 sys_id) |
---|
| 1227 | +u64 __read_sysreg_by_encoding(u32 sys_id) |
---|
764 | 1228 | { |
---|
| 1229 | + struct arm64_ftr_reg *regp; |
---|
| 1230 | + u64 val; |
---|
| 1231 | + |
---|
765 | 1232 | switch (sys_id) { |
---|
766 | 1233 | read_sysreg_case(SYS_ID_PFR0_EL1); |
---|
767 | 1234 | read_sysreg_case(SYS_ID_PFR1_EL1); |
---|
| 1235 | + read_sysreg_case(SYS_ID_PFR2_EL1); |
---|
768 | 1236 | read_sysreg_case(SYS_ID_DFR0_EL1); |
---|
| 1237 | + read_sysreg_case(SYS_ID_DFR1_EL1); |
---|
769 | 1238 | read_sysreg_case(SYS_ID_MMFR0_EL1); |
---|
770 | 1239 | read_sysreg_case(SYS_ID_MMFR1_EL1); |
---|
771 | 1240 | read_sysreg_case(SYS_ID_MMFR2_EL1); |
---|
772 | 1241 | read_sysreg_case(SYS_ID_MMFR3_EL1); |
---|
| 1242 | + read_sysreg_case(SYS_ID_MMFR4_EL1); |
---|
| 1243 | + read_sysreg_case(SYS_ID_MMFR5_EL1); |
---|
773 | 1244 | read_sysreg_case(SYS_ID_ISAR0_EL1); |
---|
774 | 1245 | read_sysreg_case(SYS_ID_ISAR1_EL1); |
---|
775 | 1246 | read_sysreg_case(SYS_ID_ISAR2_EL1); |
---|
776 | 1247 | read_sysreg_case(SYS_ID_ISAR3_EL1); |
---|
777 | 1248 | read_sysreg_case(SYS_ID_ISAR4_EL1); |
---|
778 | 1249 | read_sysreg_case(SYS_ID_ISAR5_EL1); |
---|
| 1250 | + read_sysreg_case(SYS_ID_ISAR6_EL1); |
---|
779 | 1251 | read_sysreg_case(SYS_MVFR0_EL1); |
---|
780 | 1252 | read_sysreg_case(SYS_MVFR1_EL1); |
---|
781 | 1253 | read_sysreg_case(SYS_MVFR2_EL1); |
---|
782 | 1254 | |
---|
783 | 1255 | read_sysreg_case(SYS_ID_AA64PFR0_EL1); |
---|
784 | 1256 | read_sysreg_case(SYS_ID_AA64PFR1_EL1); |
---|
| 1257 | + read_sysreg_case(SYS_ID_AA64ZFR0_EL1); |
---|
785 | 1258 | read_sysreg_case(SYS_ID_AA64DFR0_EL1); |
---|
786 | 1259 | read_sysreg_case(SYS_ID_AA64DFR1_EL1); |
---|
787 | 1260 | read_sysreg_case(SYS_ID_AA64MMFR0_EL1); |
---|
.. | .. |
---|
789 | 1262 | read_sysreg_case(SYS_ID_AA64MMFR2_EL1); |
---|
790 | 1263 | read_sysreg_case(SYS_ID_AA64ISAR0_EL1); |
---|
791 | 1264 | read_sysreg_case(SYS_ID_AA64ISAR1_EL1); |
---|
| 1265 | + read_sysreg_case(SYS_ID_AA64ISAR2_EL1); |
---|
792 | 1266 | |
---|
793 | 1267 | read_sysreg_case(SYS_CNTFRQ_EL0); |
---|
794 | 1268 | read_sysreg_case(SYS_CTR_EL0); |
---|
.. | .. |
---|
798 | 1272 | BUG(); |
---|
799 | 1273 | return 0; |
---|
800 | 1274 | } |
---|
| 1275 | + |
---|
| 1276 | + regp = get_arm64_ftr_reg(sys_id); |
---|
| 1277 | + if (regp) { |
---|
| 1278 | + val &= ~regp->override->mask; |
---|
| 1279 | + val |= (regp->override->val & regp->override->mask); |
---|
| 1280 | + } |
---|
| 1281 | + |
---|
| 1282 | + return val; |
---|
801 | 1283 | } |
---|
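The override consulted above is populated by the early ID-register override machinery (e.g. from command-line options); as a rough illustration, forcing a single 4-bit field to zero amounts to something like this (VH is chosen only as an example field):

	/* Illustrative only: force one 4-bit ID field to read as 0 */
	regp->override->mask |= 0xfUL << ID_AA64MMFR1_VH_SHIFT;
	regp->override->val  &= ~(0xfUL << ID_AA64MMFR1_VH_SHIFT);
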
802 | 1284 | |
---|
803 | 1285 | #include <linux/irqchip/arm-gic-v3.h> |
---|
.. | .. |
---|
822 | 1304 | val = __read_sysreg_by_encoding(entry->sys_reg); |
---|
823 | 1305 | |
---|
824 | 1306 | return feature_matches(val, entry); |
---|
| 1307 | +} |
---|
| 1308 | + |
---|
| 1309 | +const struct cpumask *system_32bit_el0_cpumask(void) |
---|
| 1310 | +{ |
---|
| 1311 | + if (!system_supports_32bit_el0()) |
---|
| 1312 | + return cpu_none_mask; |
---|
| 1313 | + |
---|
| 1314 | + if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) |
---|
| 1315 | + return cpu_32bit_el0_mask; |
---|
| 1316 | + |
---|
| 1317 | + return cpu_possible_mask; |
---|
| 1318 | +} |
---|
| 1319 | +EXPORT_SYMBOL_GPL(system_32bit_el0_cpumask); |
---|
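A usage sketch for the new helper (the function name is hypothetical): restrict a compat task's affinity to the CPUs that actually implement AArch32 at EL0:

	static void sketch_fixup_compat_affinity(struct task_struct *p)
	{
		const struct cpumask *mask = system_32bit_el0_cpumask();

		/* cpu_none_mask: no 32-bit EL0 support anywhere, nothing to do */
		if (!cpumask_empty(mask))
			set_cpus_allowed_ptr(p, mask);
	}
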
| 1320 | + |
---|
| 1321 | +static int __init parse_32bit_el0_param(char *str) |
---|
| 1322 | +{ |
---|
| 1323 | + allow_mismatched_32bit_el0 = true; |
---|
| 1324 | + return 0; |
---|
| 1325 | +} |
---|
| 1326 | +early_param("allow_mismatched_32bit_el0", parse_32bit_el0_param); |
---|
| 1327 | + |
---|
| 1328 | +static ssize_t aarch32_el0_show(struct device *dev, |
---|
| 1329 | + struct device_attribute *attr, char *buf) |
---|
| 1330 | +{ |
---|
| 1331 | + const struct cpumask *mask = system_32bit_el0_cpumask(); |
---|
| 1332 | + |
---|
| 1333 | + return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(mask)); |
---|
| 1334 | +} |
---|
| 1335 | +static const DEVICE_ATTR_RO(aarch32_el0); |
---|
| 1336 | + |
---|
| 1337 | +static int __init aarch32_el0_sysfs_init(void) |
---|
| 1338 | +{ |
---|
| 1339 | + if (!allow_mismatched_32bit_el0) |
---|
| 1340 | + return 0; |
---|
| 1341 | + |
---|
| 1342 | + return device_create_file(cpu_subsys.dev_root, &dev_attr_aarch32_el0); |
---|
| 1343 | +} |
---|
| 1344 | +device_initcall(aarch32_el0_sysfs_init); |
---|
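When booted with allow_mismatched_32bit_el0, the new sysfs file exposes the 32-bit-capable CPUs; illustrative output only (the CPU numbers are assumed):

	/*
	 *   $ cat /sys/devices/system/cpu/aarch32_el0
	 *   0-3
	 */
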
| 1345 | + |
---|
| 1346 | +static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope) |
---|
| 1347 | +{ |
---|
| 1348 | + if (!has_cpuid_feature(entry, scope)) |
---|
| 1349 | + return allow_mismatched_32bit_el0; |
---|
| 1350 | + |
---|
| 1351 | + if (scope == SCOPE_SYSTEM) |
---|
| 1352 | + pr_info("detected: 32-bit EL0 Support\n"); |
---|
| 1353 | + |
---|
| 1354 | + return true; |
---|
825 | 1355 | } |
---|
826 | 1356 | |
---|
827 | 1357 | static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope) |
---|
.. | .. |
---|
865 | 1395 | if (scope == SCOPE_SYSTEM) |
---|
866 | 1396 | ctr = arm64_ftr_reg_ctrel0.sys_val; |
---|
867 | 1397 | else |
---|
868 | | - ctr = read_cpuid_cachetype(); |
---|
| 1398 | + ctr = read_cpuid_effective_cachetype(); |
---|
869 | 1399 | |
---|
870 | 1400 | return ctr & BIT(CTR_IDC_SHIFT); |
---|
| 1401 | +} |
---|
| 1402 | + |
---|
| 1403 | +static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused) |
---|
| 1404 | +{ |
---|
| 1405 | + /* |
---|
| 1406 | + * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively |
---|
| 1407 | + * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses |
---|
| 1408 | + * to the CTR_EL0 on this CPU and emulate it with the real/safe |
---|
| 1409 | + * value. |
---|
| 1410 | + */ |
---|
| 1411 | + if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT))) |
---|
| 1412 | + sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); |
---|
871 | 1413 | } |
---|
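A sketch of the effect of the trap set up above (descriptive only, not part of this diff):

	/*
	 * With SCTLR_EL1.UCT cleared, an EL0 "mrs x0, ctr_el0" traps to
	 * EL1, where the trap handler can return the system-wide safe CTR
	 * value instead of this CPU's raw, misleading one.
	 */
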
872 | 1414 | |
---|
873 | 1415 | static bool has_cache_dic(const struct arm64_cpu_capabilities *entry, |
---|
.. | .. |
---|
883 | 1425 | return ctr & BIT(CTR_DIC_SHIFT); |
---|
884 | 1426 | } |
---|
885 | 1427 | |
---|
| 1428 | +static bool __maybe_unused |
---|
| 1429 | +has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope) |
---|
| 1430 | +{ |
---|
| 1431 | + /* |
---|
| 1432 | + * Kdump isn't guaranteed to power off all secondary CPUs, so CNP
---|
| 1433 | + * may share TLB entries with a CPU stuck in the crashed |
---|
| 1434 | + * kernel. |
---|
| 1435 | + */ |
---|
| 1436 | + if (is_kdump_kernel()) |
---|
| 1437 | + return false; |
---|
| 1438 | + |
---|
| 1439 | + return has_cpuid_feature(entry, scope); |
---|
| 1440 | +} |
---|
| 1441 | + |
---|
| 1442 | +/* |
---|
| 1443 | + * This check is triggered during early boot, before the cpufeature
---|
| 1444 | + * code is initialised. Checking the status on the local CPU allows
---|
| 1445 | + * the boot CPU to detect the need for non-global mappings and thus
---|
| 1446 | + * avoid a pagetable rewrite after all the CPUs are booted. The check
---|
| 1447 | + * is run again on each individual CPU anyway, so we still converge on
---|
| 1448 | + * a consistent state once all the SMP CPUs are up and can then switch
---|
| 1449 | + * to non-global mappings if required.
---|
| 1450 | + */ |
---|
| 1451 | +bool kaslr_requires_kpti(void) |
---|
| 1452 | +{ |
---|
| 1453 | + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) |
---|
| 1454 | + return false; |
---|
| 1455 | + |
---|
| 1456 | + /* |
---|
| 1457 | + * E0PD does a similar job to KPTI so can be used instead |
---|
| 1458 | + * where available. |
---|
| 1459 | + */ |
---|
| 1460 | + if (IS_ENABLED(CONFIG_ARM64_E0PD)) { |
---|
| 1461 | + u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1); |
---|
| 1462 | + if (cpuid_feature_extract_unsigned_field(mmfr2, |
---|
| 1463 | + ID_AA64MMFR2_E0PD_SHIFT)) |
---|
| 1464 | + return false; |
---|
| 1465 | + } |
---|
| 1466 | + |
---|
| 1467 | + /* |
---|
| 1468 | + * Systems affected by Cavium erratum 27456 are incompatible
---|
| 1469 | + * with KPTI. |
---|
| 1470 | + */ |
---|
| 1471 | + if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) { |
---|
| 1472 | + extern const struct midr_range cavium_erratum_27456_cpus[]; |
---|
| 1473 | + |
---|
| 1474 | + if (is_midr_in_range_list(read_cpuid_id(), |
---|
| 1475 | + cavium_erratum_27456_cpus)) |
---|
| 1476 | + return false; |
---|
| 1477 | + } |
---|
| 1478 | + |
---|
| 1479 | + return kaslr_offset() > 0; |
---|
| 1480 | +} |
---|
| 1481 | + |
---|
886 | 1482 | static bool __meltdown_safe = true; |
---|
887 | 1483 | static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ |
---|
888 | 1484 | |
---|
.. | .. |
---|
893 | 1489 | static const struct midr_range kpti_safe_list[] = { |
---|
894 | 1490 | MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), |
---|
895 | 1491 | MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), |
---|
| 1492 | + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), |
---|
896 | 1493 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), |
---|
897 | 1494 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), |
---|
898 | 1495 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), |
---|
.. | .. |
---|
900 | 1497 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), |
---|
901 | 1498 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
---|
902 | 1499 | MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), |
---|
| 1500 | + MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), |
---|
| 1501 | + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_GOLD), |
---|
| 1502 | + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER), |
---|
| 1503 | + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), |
---|
| 1504 | + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), |
---|
903 | 1505 | { /* sentinel */ } |
---|
904 | 1506 | }; |
---|
905 | 1507 | char const *str = "kpti command line option"; |
---|
.. | .. |
---|
925 | 1527 | } |
---|
926 | 1528 | |
---|
927 | 1529 | /* Useful for KASLR robustness */ |
---|
928 | | - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) { |
---|
| 1530 | + if (kaslr_requires_kpti()) { |
---|
929 | 1531 | if (!__kpti_forced) { |
---|
930 | 1532 | str = "KASLR"; |
---|
931 | 1533 | __kpti_forced = 1; |
---|
.. | .. |
---|
960 | 1562 | extern kpti_remap_fn idmap_kpti_install_ng_mappings; |
---|
961 | 1563 | kpti_remap_fn *remap_fn; |
---|
962 | 1564 | |
---|
963 | | - static bool kpti_applied = false; |
---|
964 | 1565 | int cpu = smp_processor_id(); |
---|
965 | 1566 | |
---|
966 | | - if (kpti_applied) |
---|
| 1567 | + if (__this_cpu_read(this_cpu_vector) == vectors) { |
---|
| 1568 | + const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); |
---|
| 1569 | + |
---|
| 1570 | + __this_cpu_write(this_cpu_vector, v); |
---|
| 1571 | + } |
---|
| 1572 | + |
---|
| 1573 | + /* |
---|
| 1574 | + * We don't need to rewrite the page-tables if either we've done |
---|
| 1575 | + * it already or we have KASLR enabled and therefore have not |
---|
| 1576 | + * created any global mappings at all. |
---|
| 1577 | + */ |
---|
| 1578 | + if (arm64_use_ng_mappings) |
---|
967 | 1579 | return; |
---|
968 | 1580 | |
---|
969 | 1581 | remap_fn = (void *)__pa_function(idmap_kpti_install_ng_mappings); |
---|
.. | .. |
---|
973 | 1585 | cpu_uninstall_idmap(); |
---|
974 | 1586 | |
---|
975 | 1587 | if (!cpu) |
---|
976 | | - kpti_applied = true; |
---|
| 1588 | + arm64_use_ng_mappings = true; |
---|
977 | 1589 | |
---|
978 | 1590 | return; |
---|
979 | 1591 | } |
---|
.. | .. |
---|
1004 | 1616 | |
---|
1005 | 1617 | write_sysreg(tcr, tcr_el1); |
---|
1006 | 1618 | isb(); |
---|
| 1619 | + local_flush_tlb_all(); |
---|
1007 | 1620 | } |
---|
1008 | 1621 | |
---|
1009 | 1622 | static bool cpu_has_broken_dbm(void) |
---|
.. | .. |
---|
1012 | 1625 | static const struct midr_range cpus[] = { |
---|
1013 | 1626 | #ifdef CONFIG_ARM64_ERRATUM_1024718 |
---|
1014 | 1627 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), |
---|
| 1628 | + /* Kryo4xx Silver (rdpe => r1p0) */ |
---|
| 1629 | + MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), |
---|
| 1630 | +#endif |
---|
| 1631 | +#ifdef CONFIG_ARM64_ERRATUM_2051678 |
---|
| 1632 | + MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2), |
---|
1015 | 1633 | #endif |
---|
1016 | 1634 | {}, |
---|
1017 | 1635 | }; |
---|
.. | .. |
---|
1062 | 1680 | |
---|
1063 | 1681 | #endif |
---|
1064 | 1682 | |
---|
| 1683 | +#ifdef CONFIG_ARM64_AMU_EXTN |
---|
| 1684 | + |
---|
| 1685 | +/* |
---|
| 1686 | + * The "amu_cpus" cpumask only signals that the CPU implementation for the |
---|
| 1687 | + * flagged CPUs supports the Activity Monitors Unit (AMU) but does not provide |
---|
| 1688 | + * information regarding all the events that it supports. When a CPU bit is |
---|
| 1689 | + * set in the cpumask, the user of this feature can only rely on the presence |
---|
| 1690 | + * of the 4 fixed counters for that CPU. But this does not guarantee that the |
---|
| 1691 | + * counters are enabled, or that access to them has been granted by code
---|
| 1692 | + * executed at higher exception levels (firmware). |
---|
| 1693 | + */ |
---|
| 1694 | +static struct cpumask amu_cpus __read_mostly; |
---|
| 1695 | + |
---|
| 1696 | +bool cpu_has_amu_feat(int cpu) |
---|
| 1697 | +{ |
---|
| 1698 | + return cpumask_test_cpu(cpu, &amu_cpus); |
---|
| 1699 | +} |
---|
| 1700 | + |
---|
| 1701 | +/* Initialize the use of AMU counters for frequency invariance */ |
---|
| 1702 | +extern void init_cpu_freq_invariance_counters(void); |
---|
| 1703 | + |
---|
| 1704 | +static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) |
---|
| 1705 | +{ |
---|
| 1706 | + if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { |
---|
| 1707 | + pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", |
---|
| 1708 | + smp_processor_id()); |
---|
| 1709 | + cpumask_set_cpu(smp_processor_id(), &amu_cpus); |
---|
| 1710 | + |
---|
| 1711 | + /* 0 reference values signal broken/disabled counters */ |
---|
| 1712 | + if (!this_cpu_has_cap(ARM64_WORKAROUND_2457168)) |
---|
| 1713 | + init_cpu_freq_invariance_counters(); |
---|
| 1714 | + } |
---|
| 1715 | +} |
---|
| 1716 | + |
---|
| 1717 | +static bool has_amu(const struct arm64_cpu_capabilities *cap, |
---|
| 1718 | + int __unused) |
---|
| 1719 | +{ |
---|
| 1720 | + /* |
---|
| 1721 | + * The AMU extension is a non-conflicting feature: the kernel can |
---|
| 1722 | + * safely run a mix of CPUs with and without support for the |
---|
| 1723 | + * activity monitors extension. Therefore, unconditionally enable |
---|
| 1724 | + * the capability to allow any late CPU to use the feature. |
---|
| 1725 | + * |
---|
| 1726 | + * With this feature unconditionally enabled, the cpu_enable |
---|
| 1727 | + * function will be called for all CPUs that match the criteria, |
---|
| 1728 | + * including secondary and hotplugged, marking this feature as |
---|
| 1729 | + * present on that respective CPU. The enable function will also |
---|
| 1730 | + * print a detection message. |
---|
| 1731 | + */ |
---|
| 1732 | + |
---|
| 1733 | + return true; |
---|
| 1734 | +} |
---|
| 1735 | +#endif |
---|
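A usage sketch (the function name is hypothetical; the AMU counters are per-CPU system registers, so the caller must already be running on the CPU of interest):

	static u64 sketch_read_const_cycles(void)
	{
		if (!cpu_has_amu_feat(smp_processor_id()))
			return 0;

		/* Fixed constant-frequency cycle counter (AMEVCNTR02) */
		return read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0);
	}
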
| 1736 | + |
---|
1065 | 1737 | #ifdef CONFIG_ARM64_VHE |
---|
1066 | 1738 | static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) |
---|
1067 | 1739 | { |
---|
.. | .. |
---|
1078 | 1750 | * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to |
---|
1079 | 1751 | * do anything here. |
---|
1080 | 1752 | */ |
---|
1081 | | - if (!alternatives_applied) |
---|
| 1753 | + if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) |
---|
1082 | 1754 | write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); |
---|
1083 | 1755 | } |
---|
1084 | 1756 | #endif |
---|
.. | .. |
---|
1091 | 1763 | WARN_ON(val & (7 << 27 | 7 << 21)); |
---|
1092 | 1764 | } |
---|
1093 | 1765 | |
---|
1094 | | -#ifdef CONFIG_ARM64_SSBD |
---|
1095 | | -static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr) |
---|
| 1766 | +#ifdef CONFIG_ARM64_PAN |
---|
| 1767 | +static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) |
---|
1096 | 1768 | { |
---|
1097 | | - if (user_mode(regs)) |
---|
1098 | | - return 1; |
---|
| 1769 | + /* |
---|
| 1770 | + * We modify PSTATE. This won't work from irq context as the PSTATE |
---|
| 1771 | + * is discarded once we return from the exception. |
---|
| 1772 | + */ |
---|
| 1773 | + WARN_ON_ONCE(in_interrupt()); |
---|
1099 | 1774 | |
---|
1100 | | - if (instr & BIT(PSTATE_Imm_shift)) |
---|
1101 | | - regs->pstate |= PSR_SSBS_BIT; |
---|
1102 | | - else |
---|
1103 | | - regs->pstate &= ~PSR_SSBS_BIT; |
---|
| 1775 | + sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); |
---|
| 1776 | + set_pstate_pan(1); |
---|
| 1777 | +} |
---|
| 1778 | +#endif /* CONFIG_ARM64_PAN */ |
---|
1104 | 1779 | |
---|
1105 | | - arm64_skip_faulting_instruction(regs, 4); |
---|
1106 | | - return 0; |
---|
| 1780 | +#ifdef CONFIG_ARM64_RAS_EXTN |
---|
| 1781 | +static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) |
---|
| 1782 | +{ |
---|
| 1783 | + /* Firmware may have left a deferred SError in this register. */ |
---|
| 1784 | + write_sysreg_s(0, SYS_DISR_EL1); |
---|
| 1785 | +} |
---|
| 1786 | +#endif /* CONFIG_ARM64_RAS_EXTN */ |
---|
| 1787 | + |
---|
| 1788 | +#ifdef CONFIG_ARM64_PTR_AUTH |
---|
| 1789 | +static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope) |
---|
| 1790 | +{ |
---|
| 1791 | + int boot_val, sec_val; |
---|
| 1792 | + |
---|
| 1793 | + /* We don't expect to be called with SCOPE_SYSTEM */ |
---|
| 1794 | + WARN_ON(scope == SCOPE_SYSTEM); |
---|
| 1795 | + /* |
---|
| 1796 | + * Different ptr-auth feature levels are not compatible with one
---|
| 1797 | + * another, so we must match the ptr-auth feature level of the
---|
| 1798 | + * secondary CPUs with that of the boot CPU. The boot CPU's level is
---|
| 1799 | + * fetched from the sanitised register, whereas a direct register read
---|
| 1800 | + * is done for the secondary CPUs.
---|
| 1801 | + * The sanitised feature state is guaranteed to match that of the
---|
| 1802 | + * boot CPU, as a mismatched secondary CPU is parked before it gets a
---|
| 1803 | + * chance to update the state with the capability.
---|
| 1804 | + */ |
---|
| 1805 | + boot_val = cpuid_feature_extract_field(read_sanitised_ftr_reg(entry->sys_reg), |
---|
| 1806 | + entry->field_pos, entry->sign); |
---|
| 1807 | + if (scope & SCOPE_BOOT_CPU) |
---|
| 1808 | + return boot_val >= entry->min_field_value; |
---|
| 1809 | + /* Now check for the secondary CPUs with SCOPE_LOCAL_CPU scope */ |
---|
| 1810 | + sec_val = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg), |
---|
| 1811 | + entry->field_pos, entry->sign); |
---|
| 1812 | + return sec_val == boot_val; |
---|
1107 | 1813 | } |
---|
1108 | 1814 | |
---|
1109 | | -static struct undef_hook ssbs_emulation_hook = { |
---|
1110 | | - .instr_mask = ~(1U << PSTATE_Imm_shift), |
---|
1111 | | - .instr_val = 0xd500401f | PSTATE_SSBS, |
---|
1112 | | - .fn = ssbs_emulation_handler, |
---|
1113 | | -}; |
---|
1114 | | - |
---|
1115 | | -static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused) |
---|
| 1815 | +static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry, |
---|
| 1816 | + int scope) |
---|
1116 | 1817 | { |
---|
1117 | | - static bool undef_hook_registered = false; |
---|
1118 | | - static DEFINE_RAW_SPINLOCK(hook_lock); |
---|
1119 | | - |
---|
1120 | | - raw_spin_lock(&hook_lock); |
---|
1121 | | - if (!undef_hook_registered) { |
---|
1122 | | - register_undef_hook(&ssbs_emulation_hook); |
---|
1123 | | - undef_hook_registered = true; |
---|
1124 | | - } |
---|
1125 | | - raw_spin_unlock(&hook_lock); |
---|
1126 | | - |
---|
1127 | | - if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) { |
---|
1128 | | - sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); |
---|
1129 | | - arm64_set_ssbd_mitigation(false); |
---|
1130 | | - } else { |
---|
1131 | | - arm64_set_ssbd_mitigation(true); |
---|
1132 | | - } |
---|
| 1818 | + return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) || |
---|
| 1819 | + has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope); |
---|
1133 | 1820 | } |
---|
1134 | | -#endif /* CONFIG_ARM64_SSBD */ |
---|
| 1821 | + |
---|
| 1822 | +static bool has_generic_auth(const struct arm64_cpu_capabilities *entry, |
---|
| 1823 | + int __unused) |
---|
| 1824 | +{ |
---|
| 1825 | + return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) || |
---|
| 1826 | + __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF); |
---|
| 1827 | +} |
---|
| 1828 | +#endif /* CONFIG_ARM64_PTR_AUTH */ |
---|
| 1829 | + |
---|
| 1830 | +#ifdef CONFIG_ARM64_E0PD |
---|
| 1831 | +static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) |
---|
| 1832 | +{ |
---|
| 1833 | + if (this_cpu_has_cap(ARM64_HAS_E0PD)) |
---|
| 1834 | + sysreg_clear_set(tcr_el1, 0, TCR_E0PD1); |
---|
| 1835 | +} |
---|
| 1836 | +#endif /* CONFIG_ARM64_E0PD */ |
---|
| 1837 | + |
---|
| 1838 | +#ifdef CONFIG_ARM64_PSEUDO_NMI |
---|
| 1839 | +static bool enable_pseudo_nmi; |
---|
| 1840 | + |
---|
| 1841 | +static int __init early_enable_pseudo_nmi(char *p) |
---|
| 1842 | +{ |
---|
| 1843 | + return strtobool(p, &enable_pseudo_nmi); |
---|
| 1844 | +} |
---|
| 1845 | +early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); |
---|
| 1846 | + |
---|
| 1847 | +static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, |
---|
| 1848 | + int scope) |
---|
| 1849 | +{ |
---|
| 1850 | + return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope); |
---|
| 1851 | +} |
---|
| 1852 | +#endif |
---|
| 1853 | + |
---|
| 1854 | +#ifdef CONFIG_ARM64_BTI |
---|
| 1855 | +static void bti_enable(const struct arm64_cpu_capabilities *__unused) |
---|
| 1856 | +{ |
---|
| 1857 | + /* |
---|
| 1858 | + * Use of X16/X17 for tail-calls and trampolines that jump to |
---|
| 1859 | + * function entry points using BR is a requirement for |
---|
| 1860 | + * marking binaries with GNU_PROPERTY_AARCH64_FEATURE_1_BTI. |
---|
| 1861 | + * So, be strict and forbid other BRs using other registers to |
---|
| 1862 | + * jump onto a PACIxSP instruction: |
---|
| 1863 | + */ |
---|
| 1864 | + sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_BT0 | SCTLR_EL1_BT1); |
---|
| 1865 | + isb(); |
---|
| 1866 | +} |
---|
| 1867 | +#endif /* CONFIG_ARM64_BTI */ |
---|
| 1868 | + |
---|
| 1869 | +#ifdef CONFIG_ARM64_MTE |
---|
| 1870 | +static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) |
---|
| 1871 | +{ |
---|
| 1872 | + sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0); |
---|
| 1873 | + |
---|
| 1874 | + mte_cpu_setup(); |
---|
| 1875 | + |
---|
| 1876 | + /* |
---|
| 1877 | + * Clear the tags in the zero page. This needs to be done via the |
---|
| 1878 | + * linear map which has the Tagged attribute. |
---|
| 1879 | + */ |
---|
| 1880 | + if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags)) |
---|
| 1881 | + mte_clear_page_tags(lm_alias(empty_zero_page)); |
---|
| 1882 | + |
---|
| 1883 | + kasan_init_hw_tags_cpu(); |
---|
| 1884 | +} |
---|
| 1885 | +#endif /* CONFIG_ARM64_MTE */ |
---|
| 1886 | + |
---|
| 1887 | +#ifdef CONFIG_KVM |
---|
| 1888 | +static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused) |
---|
| 1889 | +{ |
---|
| 1890 | + if (kvm_get_mode() != KVM_MODE_PROTECTED) |
---|
| 1891 | + return false; |
---|
| 1892 | + |
---|
| 1893 | + if (is_kernel_in_hyp_mode()) { |
---|
| 1894 | + pr_warn("Protected KVM not available with VHE\n"); |
---|
| 1895 | + return false; |
---|
| 1896 | + } |
---|
| 1897 | + |
---|
| 1898 | + return true; |
---|
| 1899 | +} |
---|
| 1900 | +#endif /* CONFIG_KVM */ |
---|
| 1901 | + |
---|
| 1902 | +static void elf_hwcap_fixup(void) |
---|
| 1903 | +{ |
---|
| 1904 | +#ifdef CONFIG_ARM64_ERRATUM_1742098 |
---|
| 1905 | + if (cpus_have_const_cap(ARM64_WORKAROUND_1742098)) |
---|
| 1906 | + compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES; |
---|
| 1907 | +#endif /* ARM64_ERRATUM_1742098 */ |
---|
| 1908 | +} |
---|
| 1909 | + |
---|
| 1910 | +/* Internal helper functions to match cpu capability type */ |
---|
| 1911 | +static bool |
---|
| 1912 | +cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) |
---|
| 1913 | +{ |
---|
| 1914 | + return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); |
---|
| 1915 | +} |
---|
| 1916 | + |
---|
| 1917 | +static bool |
---|
| 1918 | +cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) |
---|
| 1919 | +{ |
---|
| 1920 | + return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); |
---|
| 1921 | +} |
---|
| 1922 | + |
---|
| 1923 | +static bool |
---|
| 1924 | +cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) |
---|
| 1925 | +{ |
---|
| 1926 | + return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); |
---|
| 1927 | +} |
---|
1135 | 1928 | |
---|
1136 | 1929 | static const struct arm64_cpu_capabilities arm64_features[] = { |
---|
1137 | 1930 | { |
---|
1138 | 1931 | .desc = "GIC system register CPU interface", |
---|
1139 | 1932 | .capability = ARM64_HAS_SYSREG_GIC_CPUIF, |
---|
1140 | | - .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 1933 | + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
---|
1141 | 1934 | .matches = has_useable_gicv3_cpuif, |
---|
1142 | 1935 | .sys_reg = SYS_ID_AA64PFR0_EL1, |
---|
1143 | 1936 | .field_pos = ID_AA64PFR0_GIC_SHIFT, |
---|
.. | .. |
---|
1157 | 1950 | .cpu_enable = cpu_enable_pan, |
---|
1158 | 1951 | }, |
---|
1159 | 1952 | #endif /* CONFIG_ARM64_PAN */ |
---|
1160 | | -#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) |
---|
| 1953 | +#ifdef CONFIG_ARM64_LSE_ATOMICS |
---|
1161 | 1954 | { |
---|
1162 | 1955 | .desc = "LSE atomic instructions", |
---|
1163 | 1956 | .capability = ARM64_HAS_LSE_ATOMICS, |
---|
.. | .. |
---|
1168 | 1961 | .sign = FTR_UNSIGNED, |
---|
1169 | 1962 | .min_field_value = 2, |
---|
1170 | 1963 | }, |
---|
1171 | | -#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ |
---|
| 1964 | +#endif /* CONFIG_ARM64_LSE_ATOMICS */ |
---|
1172 | 1965 | { |
---|
1173 | 1966 | .desc = "Software prefetching using PRFM", |
---|
1174 | 1967 | .capability = ARM64_HAS_NO_HW_PREFETCH, |
---|
.. | .. |
---|
1207 | 2000 | }, |
---|
1208 | 2001 | #endif /* CONFIG_ARM64_VHE */ |
---|
1209 | 2002 | { |
---|
1210 | | - .desc = "32-bit EL0 Support", |
---|
1211 | | - .capability = ARM64_HAS_32BIT_EL0, |
---|
| 2003 | + .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE, |
---|
1212 | 2004 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
1213 | | - .matches = has_cpuid_feature, |
---|
| 2005 | + .matches = has_32bit_el0, |
---|
1214 | 2006 | .sys_reg = SYS_ID_AA64PFR0_EL1, |
---|
1215 | 2007 | .sign = FTR_UNSIGNED, |
---|
1216 | 2008 | .field_pos = ID_AA64PFR0_EL0_SHIFT, |
---|
1217 | 2009 | .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT, |
---|
1218 | 2010 | }, |
---|
| 2011 | +#ifdef CONFIG_KVM |
---|
| 2012 | + { |
---|
| 2013 | + .desc = "32-bit EL1 Support", |
---|
| 2014 | + .capability = ARM64_HAS_32BIT_EL1, |
---|
| 2015 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2016 | + .matches = has_cpuid_feature, |
---|
| 2017 | + .sys_reg = SYS_ID_AA64PFR0_EL1, |
---|
| 2018 | + .sign = FTR_UNSIGNED, |
---|
| 2019 | + .field_pos = ID_AA64PFR0_EL1_SHIFT, |
---|
| 2020 | + .min_field_value = ID_AA64PFR0_EL1_32BIT_64BIT, |
---|
| 2021 | + }, |
---|
| 2022 | + { |
---|
| 2023 | + .desc = "Protected KVM", |
---|
| 2024 | + .capability = ARM64_KVM_PROTECTED_MODE, |
---|
| 2025 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2026 | + .matches = is_kvm_protected_mode, |
---|
| 2027 | + }, |
---|
| 2028 | +#endif |
---|
1219 | 2029 | { |
---|
1220 | 2030 | .desc = "Kernel page table isolation (KPTI)", |
---|
1221 | 2031 | .capability = ARM64_UNMAP_KERNEL_AT_EL0, |
---|
.. | .. |
---|
1248 | 2058 | .field_pos = ID_AA64ISAR1_DPB_SHIFT, |
---|
1249 | 2059 | .min_field_value = 1, |
---|
1250 | 2060 | }, |
---|
| 2061 | + { |
---|
| 2062 | + .desc = "Data cache clean to Point of Deep Persistence", |
---|
| 2063 | + .capability = ARM64_HAS_DCPODP, |
---|
| 2064 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2065 | + .matches = has_cpuid_feature, |
---|
| 2066 | + .sys_reg = SYS_ID_AA64ISAR1_EL1, |
---|
| 2067 | + .sign = FTR_UNSIGNED, |
---|
| 2068 | + .field_pos = ID_AA64ISAR1_DPB_SHIFT, |
---|
| 2069 | + .min_field_value = 2, |
---|
| 2070 | + }, |
---|
1251 | 2071 | #endif |
---|
1252 | 2072 | #ifdef CONFIG_ARM64_SVE |
---|
1253 | 2073 | { |
---|
.. | .. |
---|
1275 | 2095 | .cpu_enable = cpu_clear_disr, |
---|
1276 | 2096 | }, |
---|
1277 | 2097 | #endif /* CONFIG_ARM64_RAS_EXTN */ |
---|
| 2098 | +#ifdef CONFIG_ARM64_AMU_EXTN |
---|
| 2099 | + { |
---|
| 2100 | + /* |
---|
| 2101 | + * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y. |
---|
| 2102 | + * Therefore, don't provide .desc as we don't want the detection |
---|
| 2103 | + * message to be shown until at least one CPU is detected to |
---|
| 2104 | + * support the feature. |
---|
| 2105 | + */ |
---|
| 2106 | + .capability = ARM64_HAS_AMU_EXTN, |
---|
| 2107 | + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, |
---|
| 2108 | + .matches = has_amu, |
---|
| 2109 | + .sys_reg = SYS_ID_AA64PFR0_EL1, |
---|
| 2110 | + .sign = FTR_UNSIGNED, |
---|
| 2111 | + .field_pos = ID_AA64PFR0_AMU_SHIFT, |
---|
| 2112 | + .min_field_value = ID_AA64PFR0_AMU, |
---|
| 2113 | + .cpu_enable = cpu_amu_enable, |
---|
| 2114 | + }, |
---|
| 2115 | +#endif /* CONFIG_ARM64_AMU_EXTN */ |
---|
1278 | 2116 | { |
---|
1279 | 2117 | .desc = "Data cache clean to the PoU not required for I/D coherence", |
---|
1280 | 2118 | .capability = ARM64_HAS_CACHE_IDC, |
---|
1281 | 2119 | .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
1282 | 2120 | .matches = has_cache_idc, |
---|
| 2121 | + .cpu_enable = cpu_emulate_effective_ctr, |
---|
1283 | 2122 | }, |
---|
1284 | 2123 | { |
---|
1285 | 2124 | .desc = "Instruction cache invalidation not required for I/D coherence", |
---|
.. | .. |
---|
1297 | 2136 | .min_field_value = 1, |
---|
1298 | 2137 | .matches = has_cpuid_feature, |
---|
1299 | 2138 | .cpu_enable = cpu_has_fwb, |
---|
| 2139 | + }, |
---|
| 2140 | + { |
---|
| 2141 | + .desc = "ARMv8.4 Translation Table Level", |
---|
| 2142 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2143 | + .capability = ARM64_HAS_ARMv8_4_TTL, |
---|
| 2144 | + .sys_reg = SYS_ID_AA64MMFR2_EL1, |
---|
| 2145 | + .sign = FTR_UNSIGNED, |
---|
| 2146 | + .field_pos = ID_AA64MMFR2_TTL_SHIFT, |
---|
| 2147 | + .min_field_value = 1, |
---|
| 2148 | + .matches = has_cpuid_feature, |
---|
| 2149 | + }, |
---|
| 2150 | + { |
---|
| 2151 | + .desc = "TLB range maintenance instructions", |
---|
| 2152 | + .capability = ARM64_HAS_TLB_RANGE, |
---|
| 2153 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2154 | + .matches = has_cpuid_feature, |
---|
| 2155 | + .sys_reg = SYS_ID_AA64ISAR0_EL1, |
---|
| 2156 | + .field_pos = ID_AA64ISAR0_TLB_SHIFT, |
---|
| 2157 | + .sign = FTR_UNSIGNED, |
---|
| 2158 | + .min_field_value = ID_AA64ISAR0_TLB_RANGE, |
---|
1300 | 2159 | }, |
---|
1301 | 2160 | #ifdef CONFIG_ARM64_HW_AFDBM |
---|
1302 | 2161 | { |
---|
.. | .. |
---|
1318 | 2177 | .cpu_enable = cpu_enable_hw_dbm, |
---|
1319 | 2178 | }, |
---|
1320 | 2179 | #endif |
---|
1321 | | -#ifdef CONFIG_ARM64_SSBD |
---|
| 2180 | + { |
---|
| 2181 | + .desc = "CRC32 instructions", |
---|
| 2182 | + .capability = ARM64_HAS_CRC32, |
---|
| 2183 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2184 | + .matches = has_cpuid_feature, |
---|
| 2185 | + .sys_reg = SYS_ID_AA64ISAR0_EL1, |
---|
| 2186 | + .field_pos = ID_AA64ISAR0_CRC32_SHIFT, |
---|
| 2187 | + .min_field_value = 1, |
---|
| 2188 | + }, |
---|
1322 | 2189 | { |
---|
1323 | 2190 | .desc = "Speculative Store Bypassing Safe (SSBS)", |
---|
1324 | 2191 | .capability = ARM64_SSBS, |
---|
1325 | | - .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, |
---|
| 2192 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
1326 | 2193 | .matches = has_cpuid_feature, |
---|
1327 | 2194 | .sys_reg = SYS_ID_AA64PFR1_EL1, |
---|
1328 | 2195 | .field_pos = ID_AA64PFR1_SSBS_SHIFT, |
---|
1329 | 2196 | .sign = FTR_UNSIGNED, |
---|
1330 | 2197 | .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY, |
---|
1331 | | - .cpu_enable = cpu_enable_ssbs, |
---|
| 2198 | + }, |
---|
| 2199 | +#ifdef CONFIG_ARM64_CNP |
---|
| 2200 | + { |
---|
| 2201 | + .desc = "Common not Private translations", |
---|
| 2202 | + .capability = ARM64_HAS_CNP, |
---|
| 2203 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2204 | + .matches = has_useable_cnp, |
---|
| 2205 | + .sys_reg = SYS_ID_AA64MMFR2_EL1, |
---|
| 2206 | + .sign = FTR_UNSIGNED, |
---|
| 2207 | + .field_pos = ID_AA64MMFR2_CNP_SHIFT, |
---|
| 2208 | + .min_field_value = 1, |
---|
| 2209 | + .cpu_enable = cpu_enable_cnp, |
---|
1332 | 2210 | }, |
---|
1333 | 2211 | #endif |
---|
| 2212 | + { |
---|
| 2213 | + .desc = "Speculation barrier (SB)", |
---|
| 2214 | + .capability = ARM64_HAS_SB, |
---|
| 2215 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2216 | + .matches = has_cpuid_feature, |
---|
| 2217 | + .sys_reg = SYS_ID_AA64ISAR1_EL1, |
---|
| 2218 | + .field_pos = ID_AA64ISAR1_SB_SHIFT, |
---|
| 2219 | + .sign = FTR_UNSIGNED, |
---|
| 2220 | + .min_field_value = 1, |
---|
| 2221 | + }, |
---|
| 2222 | +#ifdef CONFIG_ARM64_PTR_AUTH |
---|
| 2223 | + { |
---|
| 2224 | + .desc = "Address authentication (architected algorithm)", |
---|
| 2225 | + .capability = ARM64_HAS_ADDRESS_AUTH_ARCH, |
---|
| 2226 | + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
---|
| 2227 | + .sys_reg = SYS_ID_AA64ISAR1_EL1, |
---|
| 2228 | + .sign = FTR_UNSIGNED, |
---|
| 2229 | + .field_pos = ID_AA64ISAR1_APA_SHIFT, |
---|
| 2230 | + .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED, |
---|
| 2231 | + .matches = has_address_auth_cpucap, |
---|
| 2232 | + }, |
---|
| 2233 | + { |
---|
| 2234 | + .desc = "Address authentication (IMP DEF algorithm)", |
---|
| 2235 | + .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, |
---|
| 2236 | + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
---|
| 2237 | + .sys_reg = SYS_ID_AA64ISAR1_EL1, |
---|
| 2238 | + .sign = FTR_UNSIGNED, |
---|
| 2239 | + .field_pos = ID_AA64ISAR1_API_SHIFT, |
---|
| 2240 | + .min_field_value = ID_AA64ISAR1_API_IMP_DEF, |
---|
| 2241 | + .matches = has_address_auth_cpucap, |
---|
| 2242 | + }, |
---|
| 2243 | + { |
---|
| 2244 | + .capability = ARM64_HAS_ADDRESS_AUTH, |
---|
| 2245 | + .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, |
---|
| 2246 | + .matches = has_address_auth_metacap, |
---|
| 2247 | + }, |
---|
| 2248 | + { |
---|
| 2249 | + .desc = "Generic authentication (architected algorithm)", |
---|
| 2250 | + .capability = ARM64_HAS_GENERIC_AUTH_ARCH, |
---|
| 2251 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2252 | + .sys_reg = SYS_ID_AA64ISAR1_EL1, |
---|
| 2253 | + .sign = FTR_UNSIGNED, |
---|
| 2254 | + .field_pos = ID_AA64ISAR1_GPA_SHIFT, |
---|
| 2255 | + .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED, |
---|
| 2256 | + .matches = has_cpuid_feature, |
---|
| 2257 | + }, |
---|
| 2258 | + { |
---|
| 2259 | + .desc = "Generic authentication (IMP DEF algorithm)", |
---|
| 2260 | + .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF, |
---|
| 2261 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2262 | + .sys_reg = SYS_ID_AA64ISAR1_EL1, |
---|
| 2263 | + .sign = FTR_UNSIGNED, |
---|
| 2264 | + .field_pos = ID_AA64ISAR1_GPI_SHIFT, |
---|
| 2265 | + .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF, |
---|
| 2266 | + .matches = has_cpuid_feature, |
---|
| 2267 | + }, |
---|
| 2268 | + { |
---|
| 2269 | + .capability = ARM64_HAS_GENERIC_AUTH, |
---|
| 2270 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2271 | + .matches = has_generic_auth, |
---|
| 2272 | + }, |
---|
| 2273 | +#endif /* CONFIG_ARM64_PTR_AUTH */ |
---|
| 2274 | +#ifdef CONFIG_ARM64_PSEUDO_NMI |
---|
| 2275 | + { |
---|
| 2276 | + /* |
---|
| 2277 | + * Depends on having GICv3 |
---|
| 2278 | + */ |
---|
| 2279 | + .desc = "IRQ priority masking", |
---|
| 2280 | + .capability = ARM64_HAS_IRQ_PRIO_MASKING, |
---|
| 2281 | + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
---|
| 2282 | + .matches = can_use_gic_priorities, |
---|
| 2283 | + .sys_reg = SYS_ID_AA64PFR0_EL1, |
---|
| 2284 | + .field_pos = ID_AA64PFR0_GIC_SHIFT, |
---|
| 2285 | + .sign = FTR_UNSIGNED, |
---|
| 2286 | + .min_field_value = 1, |
---|
| 2287 | + }, |
---|
| 2288 | +#endif |
---|
| 2289 | +#ifdef CONFIG_ARM64_E0PD |
---|
| 2290 | + { |
---|
| 2291 | + .desc = "E0PD", |
---|
| 2292 | + .capability = ARM64_HAS_E0PD, |
---|
| 2293 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2294 | + .sys_reg = SYS_ID_AA64MMFR2_EL1, |
---|
| 2295 | + .sign = FTR_UNSIGNED, |
---|
| 2296 | + .field_pos = ID_AA64MMFR2_E0PD_SHIFT, |
---|
| 2297 | + .matches = has_cpuid_feature, |
---|
| 2298 | + .min_field_value = 1, |
---|
| 2299 | + .cpu_enable = cpu_enable_e0pd, |
---|
| 2300 | + }, |
---|
| 2301 | +#endif |
---|
| 2302 | +#ifdef CONFIG_ARCH_RANDOM |
---|
| 2303 | + { |
---|
| 2304 | + .desc = "Random Number Generator", |
---|
| 2305 | + .capability = ARM64_HAS_RNG, |
---|
| 2306 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2307 | + .matches = has_cpuid_feature, |
---|
| 2308 | + .sys_reg = SYS_ID_AA64ISAR0_EL1, |
---|
| 2309 | + .field_pos = ID_AA64ISAR0_RNDR_SHIFT, |
---|
| 2310 | + .sign = FTR_UNSIGNED, |
---|
| 2311 | + .min_field_value = 1, |
---|
| 2312 | + }, |
---|
| 2313 | +#endif |
---|
| 2314 | +#ifdef CONFIG_ARM64_BTI |
---|
| 2315 | + { |
---|
| 2316 | + .desc = "Branch Target Identification", |
---|
| 2317 | + .capability = ARM64_BTI, |
---|
| 2318 | +#ifdef CONFIG_ARM64_BTI_KERNEL |
---|
| 2319 | + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
---|
| 2320 | +#else |
---|
| 2321 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2322 | +#endif |
---|
| 2323 | + .matches = has_cpuid_feature, |
---|
| 2324 | + .cpu_enable = bti_enable, |
---|
| 2325 | + .sys_reg = SYS_ID_AA64PFR1_EL1, |
---|
| 2326 | + .field_pos = ID_AA64PFR1_BT_SHIFT, |
---|
| 2327 | + .min_field_value = ID_AA64PFR1_BT_BTI, |
---|
| 2328 | + .sign = FTR_UNSIGNED, |
---|
| 2329 | + }, |
---|
| 2330 | +#endif |
---|
| 2331 | +#ifdef CONFIG_ARM64_MTE |
---|
| 2332 | + { |
---|
| 2333 | + .desc = "Memory Tagging Extension", |
---|
| 2334 | + .capability = ARM64_MTE, |
---|
| 2335 | + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, |
---|
| 2336 | + .matches = has_cpuid_feature, |
---|
| 2337 | + .sys_reg = SYS_ID_AA64PFR1_EL1, |
---|
| 2338 | + .field_pos = ID_AA64PFR1_MTE_SHIFT, |
---|
| 2339 | + .min_field_value = ID_AA64PFR1_MTE, |
---|
| 2340 | + .sign = FTR_UNSIGNED, |
---|
| 2341 | + .cpu_enable = cpu_enable_mte, |
---|
| 2342 | + }, |
---|
| 2343 | +#endif /* CONFIG_ARM64_MTE */ |
---|
| 2344 | + { |
---|
| 2345 | + .desc = "RCpc load-acquire (LDAPR)", |
---|
| 2346 | + .capability = ARM64_HAS_LDAPR, |
---|
| 2347 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, |
---|
| 2348 | + .sys_reg = SYS_ID_AA64ISAR1_EL1, |
---|
| 2349 | + .sign = FTR_UNSIGNED, |
---|
| 2350 | + .field_pos = ID_AA64ISAR1_LRCPC_SHIFT, |
---|
| 2351 | + .matches = has_cpuid_feature, |
---|
| 2352 | + .min_field_value = 1, |
---|
| 2353 | + }, |
---|
1334 | 2354 | {}, |
---|
1335 | 2355 | }; |
---|
1336 | 2356 | |
---|
| 2357 | +#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \ |
---|
| 2358 | + .matches = has_cpuid_feature, \ |
---|
| 2359 | + .sys_reg = reg, \ |
---|
| 2360 | + .field_pos = field, \ |
---|
| 2361 | + .sign = s, \ |
---|
| 2362 | + .min_field_value = min_value, |
---|
1337 | 2363 | |
---|
1338 | | -#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \ |
---|
1339 | | - .matches = has_cpuid_feature, \ |
---|
1340 | | - .sys_reg = reg, \ |
---|
1341 | | - .field_pos = field, \ |
---|
1342 | | - .sign = s, \ |
---|
1343 | | - .min_field_value = min_value, \ |
---|
| 2364 | +#define __HWCAP_CAP(name, cap_type, cap) \ |
---|
| 2365 | + .desc = name, \ |
---|
| 2366 | + .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ |
---|
| 2367 | + .hwcap_type = cap_type, \ |
---|
| 2368 | + .hwcap = cap, \ |
---|
1344 | 2369 | |
---|
1345 | | -#define __HWCAP_CAP(name, cap_type, cap) \ |
---|
1346 | | - .desc = name, \ |
---|
1347 | | - .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ |
---|
1348 | | - .hwcap_type = cap_type, \ |
---|
1349 | | - .hwcap = cap, \ |
---|
1350 | | - |
---|
1351 | | -#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ |
---|
1352 | | - { \ |
---|
1353 | | - __HWCAP_CAP(#cap, cap_type, cap) \ |
---|
1354 | | - HWCAP_CPUID_MATCH(reg, field, s, min_value) \ |
---|
| 2370 | +#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ |
---|
| 2371 | + { \ |
---|
| 2372 | + __HWCAP_CAP(#cap, cap_type, cap) \ |
---|
| 2373 | + HWCAP_CPUID_MATCH(reg, field, s, min_value) \ |
---|
1355 | 2374 | } |
---|
1356 | 2375 | |
---|
1357 | | -#define HWCAP_CAP_MATCH(match, cap_type, cap) \ |
---|
1358 | | - { \ |
---|
1359 | | - __HWCAP_CAP(#cap, cap_type, cap) \ |
---|
1360 | | - .matches = match, \ |
---|
| 2376 | +#define HWCAP_MULTI_CAP(list, cap_type, cap) \ |
---|
| 2377 | + { \ |
---|
| 2378 | + __HWCAP_CAP(#cap, cap_type, cap) \ |
---|
| 2379 | + .matches = cpucap_multi_entry_cap_matches, \ |
---|
| 2380 | + .match_list = list, \ |
---|
1361 | 2381 | } |
---|
| 2382 | + |
---|
| 2383 | +#define HWCAP_CAP_MATCH(match, cap_type, cap) \ |
---|
| 2384 | + { \ |
---|
| 2385 | + __HWCAP_CAP(#cap, cap_type, cap) \ |
---|
| 2386 | + .matches = match, \ |
---|
| 2387 | + } |
---|
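For reference, a HWCAP_CAP() entry expands roughly as follows, taking the existing SHA1 line as the worked example:

	{
		.desc = "KERNEL_HWCAP_SHA1",		/* #cap, stringified */
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.hwcap_type = CAP_HWCAP,
		.hwcap = KERNEL_HWCAP_SHA1,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_SHA1_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	}
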
| 2388 | + |
---|
| 2389 | +#ifdef CONFIG_ARM64_PTR_AUTH |
---|
| 2390 | +static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { |
---|
| 2391 | + { |
---|
| 2392 | + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT, |
---|
| 2393 | + FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED) |
---|
| 2394 | + }, |
---|
| 2395 | + { |
---|
| 2396 | + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT, |
---|
| 2397 | + FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF) |
---|
| 2398 | + }, |
---|
| 2399 | + {}, |
---|
| 2400 | +}; |
---|
| 2401 | + |
---|
| 2402 | +static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { |
---|
| 2403 | + { |
---|
| 2404 | + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT, |
---|
| 2405 | + FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED) |
---|
| 2406 | + }, |
---|
| 2407 | + { |
---|
| 2408 | + HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT, |
---|
| 2409 | + FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF) |
---|
| 2410 | + }, |
---|
| 2411 | + {}, |
---|
| 2412 | +}; |
---|
| 2413 | +#endif |
---|
1362 | 2414 | |
---|
1363 | 2415 | static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { |
---|
1364 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL), |
---|
1365 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES), |
---|
1366 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1), |
---|
1367 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2), |
---|
1368 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512), |
---|
1369 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32), |
---|
1370 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS), |
---|
1371 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM), |
---|
1372 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3), |
---|
1373 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3), |
---|
1374 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4), |
---|
1375 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP), |
---|
1376 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM), |
---|
1377 | | - HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM), |
---|
1378 | | - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP), |
---|
1379 | | - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP), |
---|
1380 | | - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD), |
---|
1381 | | - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP), |
---|
1382 | | - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT), |
---|
1383 | | - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP), |
---|
1384 | | - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT), |
---|
1385 | | - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA), |
---|
1386 | | - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC), |
---|
1387 | | - HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC), |
---|
1388 | | - HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT), |
---|
| 2416 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL), |
---|
| 2417 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES), |
---|
| 2418 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1), |
---|
| 2419 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2), |
---|
| 2420 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512), |
---|
| 2421 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32), |
---|
| 2422 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS), |
---|
| 2423 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM), |
---|
| 2424 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3), |
---|
| 2425 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3), |
---|
| 2426 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4), |
---|
| 2427 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP), |
---|
| 2428 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM), |
---|
| 2429 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM), |
---|
| 2430 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2), |
---|
| 2431 | + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG), |
---|
| 2432 | + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP), |
---|
| 2433 | + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP), |
---|
| 2434 | + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD), |
---|
| 2435 | + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP), |
---|
| 2436 | + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT), |
---|
| 2437 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP), |
---|
| 2438 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP), |
---|
| 2439 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT), |
---|
| 2440 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA), |
---|
| 2441 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC), |
---|
| 2442 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC), |
---|
| 2443 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT), |
---|
| 2444 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB), |
---|
| 2445 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16), |
---|
| 2446 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH), |
---|
| 2447 | + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM), |
---|
| 2448 | + HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT), |
---|
1389 | 2449 | #ifdef CONFIG_ARM64_SVE |
---|
1390 | | - HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE), |
---|
| 2450 | + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE), |
---|
| 2451 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2), |
---|
| 2452 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES), |
---|
| 2453 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL), |
---|
| 2454 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM), |
---|
| 2455 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16), |
---|
| 2456 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3), |
---|
| 2457 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4), |
---|
| 2458 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM), |
---|
| 2459 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM), |
---|
| 2460 | + HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM), |
---|
1391 | 2461 | #endif |
---|
1392 | | - HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS), |
---|
| 2462 | + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS), |
---|
| 2463 | +#ifdef CONFIG_ARM64_BTI |
---|
| 2464 | + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_BT_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_BT_BTI, CAP_HWCAP, KERNEL_HWCAP_BTI), |
---|
| 2465 | +#endif |
---|
| 2466 | +#ifdef CONFIG_ARM64_PTR_AUTH |
---|
| 2467 | + HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA), |
---|
| 2468 | + HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG), |
---|
| 2469 | +#endif |
---|
| 2470 | +#ifdef CONFIG_ARM64_MTE |
---|
| 2471 | + HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), |
---|
| 2472 | +#endif /* CONFIG_ARM64_MTE */ |
---|
| 2473 | + HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), |
---|
| 2474 | + HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), |
---|
| 2475 | + HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), |
---|
1393 | 2476 | {}, |
---|
1394 | 2477 | }; |
---|
1395 | 2478 | |
---|
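Each HWCAP_CAP() row above encodes one rule: advertise the named hwcap when a single ID-register field meets a minimum value. A minimal sketch of the check behind the ASIMDDP row, with an illustrative helper name (the real table entries are struct arm64_cpu_capabilities with a .matches callback):

```c
/*
 * Rough equivalent of the table row:
 *   HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED,
 *             1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP)
 * ID register fields are 4 bits wide; an unsigned DP value >= 1 means the
 * SDOT/UDOT instructions are implemented. Helper name is illustrative.
 */
static bool cpu_has_asimddp(u64 isar0)
{
	return ((isar0 >> ID_AA64ISAR0_DP_SHIFT) & 0xf) >= 1;
}
```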
.. | .. |
---|
1431 | 2514 | {}, |
---|
1432 | 2515 | }; |
---|
1433 | 2516 | |
---|
1434 | | -static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) |
---|
| 2517 | +static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap) |
---|
1435 | 2518 | { |
---|
1436 | 2519 | switch (cap->hwcap_type) { |
---|
1437 | 2520 | case CAP_HWCAP: |
---|
1438 | | - elf_hwcap |= cap->hwcap; |
---|
| 2521 | + cpu_set_feature(cap->hwcap); |
---|
1439 | 2522 | break; |
---|
1440 | 2523 | #ifdef CONFIG_COMPAT |
---|
1441 | 2524 | case CAP_COMPAT_HWCAP: |
---|
.. | .. |
---|
1458 | 2541 | |
---|
1459 | 2542 | switch (cap->hwcap_type) { |
---|
1460 | 2543 | case CAP_HWCAP: |
---|
1461 | | - rc = (elf_hwcap & cap->hwcap) != 0; |
---|
| 2544 | + rc = cpu_have_feature(cap->hwcap); |
---|
1462 | 2545 | break; |
---|
1463 | 2546 | #ifdef CONFIG_COMPAT |
---|
1464 | 2547 | case CAP_COMPAT_HWCAP: |
---|
.. | .. |
---|
1476 | 2559 | return rc; |
---|
1477 | 2560 | } |
---|
1478 | 2561 | |
---|
1479 | | -static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) |
---|
| 2562 | +static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) |
---|
1480 | 2563 | { |
---|
1481 | 2564 | /* We support emulation of accesses to CPU ID feature registers */ |
---|
1482 | | - elf_hwcap |= HWCAP_CPUID; |
---|
| 2565 | + cpu_set_named_feature(CPUID); |
---|
1483 | 2566 | for (; hwcaps->matches; hwcaps++) |
---|
1484 | 2567 | if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) |
---|
1485 | 2568 | cap_set_elf_hwcap(hwcaps); |
---|
1486 | 2569 | } |
---|
1487 | 2570 | |
---|
1488 | | -/* |
---|
1489 | | - * Check if the current CPU has a given feature capability. |
---|
1490 | | - * Should be called from non-preemptible context. |
---|
1491 | | - */ |
---|
1492 | | -static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, |
---|
1493 | | - unsigned int cap) |
---|
| 2571 | +static void update_cpu_capabilities(u16 scope_mask) |
---|
1494 | 2572 | { |
---|
| 2573 | + int i; |
---|
1495 | 2574 | const struct arm64_cpu_capabilities *caps; |
---|
1496 | 2575 | |
---|
1497 | | - if (WARN_ON(preemptible())) |
---|
1498 | | - return false; |
---|
1499 | | - |
---|
1500 | | - for (caps = cap_array; caps->matches; caps++) |
---|
1501 | | - if (caps->capability == cap) |
---|
1502 | | - return caps->matches(caps, SCOPE_LOCAL_CPU); |
---|
1503 | | - |
---|
1504 | | - return false; |
---|
1505 | | -} |
---|
1506 | | - |
---|
1507 | | -static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, |
---|
1508 | | - u16 scope_mask, const char *info) |
---|
1509 | | -{ |
---|
1510 | 2576 | scope_mask &= ARM64_CPUCAP_SCOPE_MASK; |
---|
1511 | | - for (; caps->matches; caps++) { |
---|
1512 | | - if (!(caps->type & scope_mask) || |
---|
| 2577 | + for (i = 0; i < ARM64_NCAPS; i++) { |
---|
| 2578 | + caps = cpu_hwcaps_ptrs[i]; |
---|
| 2579 | + if (!caps || !(caps->type & scope_mask) || |
---|
| 2580 | + cpus_have_cap(caps->capability) || |
---|
1513 | 2581 | !caps->matches(caps, cpucap_default_scope(caps))) |
---|
1514 | 2582 | continue; |
---|
1515 | 2583 | |
---|
1516 | | - if (!cpus_have_cap(caps->capability) && caps->desc) |
---|
1517 | | - pr_info("%s %s\n", info, caps->desc); |
---|
| 2584 | + if (caps->desc) |
---|
| 2585 | + pr_info("detected: %s\n", caps->desc); |
---|
1518 | 2586 | cpus_set_cap(caps->capability); |
---|
| 2587 | + |
---|
| 2588 | + if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) |
---|
| 2589 | + set_bit(caps->capability, boot_capabilities); |
---|
1519 | 2590 | } |
---|
1520 | 2591 | } |
---|
1521 | 2592 | |
---|
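For reference, the entries that update_cpu_capabilities() walks through cpu_hwcaps_ptrs[] have roughly the shape sketched below. The field names follow struct arm64_cpu_capabilities; the values are made up for illustration:

```c
/* Illustrative capability entry; the values here are not from mainline. */
static const struct arm64_cpu_capabilities example_cap = {
	.desc			= "Example dot product feature",
	.capability		= 0,	/* placeholder; real entries use an ARM64_* cap number */
	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
	.matches		= has_cpuid_feature,	/* generic ID-register field check */
	.sys_reg		= SYS_ID_AA64ISAR0_EL1,
	.field_pos		= ID_AA64ISAR0_DP_SHIFT,
	.sign			= FTR_UNSIGNED,
	.min_field_value	= 1,
};
```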
1522 | | -static void update_cpu_capabilities(u16 scope_mask) |
---|
| 2593 | +/* |
---|
| 2594 | + * Enable all the available capabilities on this CPU. The capabilities |
---|
| 2595 | + * with BOOT_CPU scope are handled separately and hence skipped here. |
---|
| 2596 | + */ |
---|
| 2597 | +static int cpu_enable_non_boot_scope_capabilities(void *__unused) |
---|
1523 | 2598 | { |
---|
1524 | | - __update_cpu_capabilities(arm64_errata, scope_mask, |
---|
1525 | | - "enabling workaround for"); |
---|
1526 | | - __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); |
---|
1527 | | -} |
---|
| 2599 | + int i; |
---|
| 2600 | + u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU; |
---|
1528 | 2601 | |
---|
1529 | | -static int __enable_cpu_capability(void *arg) |
---|
1530 | | -{ |
---|
1531 | | - const struct arm64_cpu_capabilities *cap = arg; |
---|
| 2602 | + for_each_available_cap(i) { |
---|
| 2603 | + const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i]; |
---|
1532 | 2604 | |
---|
1533 | | - cap->cpu_enable(cap); |
---|
| 2605 | + if (WARN_ON(!cap)) |
---|
| 2606 | + continue; |
---|
| 2607 | + |
---|
| 2608 | + if (!(cap->type & non_boot_scope)) |
---|
| 2609 | + continue; |
---|
| 2610 | + |
---|
| 2611 | + if (cap->cpu_enable) |
---|
| 2612 | + cap->cpu_enable(cap); |
---|
| 2613 | + } |
---|
1534 | 2614 | return 0; |
---|
1535 | 2615 | } |
---|
1536 | 2616 | |
---|
.. | .. |
---|
1538 | 2618 | * Run through the enabled capabilities and enable() them on all active |
---|
1539 | 2619 | * CPUs. |
---|
1540 | 2620 | */ |
---|
1541 | | -static void __init |
---|
1542 | | -__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps, |
---|
1543 | | - u16 scope_mask) |
---|
| 2621 | +static void __init enable_cpu_capabilities(u16 scope_mask) |
---|
1544 | 2622 | { |
---|
1545 | | - scope_mask &= ARM64_CPUCAP_SCOPE_MASK; |
---|
1546 | | - for (; caps->matches; caps++) { |
---|
1547 | | - unsigned int num = caps->capability; |
---|
| 2623 | + int i; |
---|
| 2624 | + const struct arm64_cpu_capabilities *caps; |
---|
| 2625 | + bool boot_scope; |
---|
1548 | 2626 | |
---|
1549 | | - if (!(caps->type & scope_mask) || !cpus_have_cap(num)) |
---|
| 2627 | + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; |
---|
| 2628 | + boot_scope = !!(scope_mask & SCOPE_BOOT_CPU); |
---|
| 2629 | + |
---|
| 2630 | + for (i = 0; i < ARM64_NCAPS; i++) { |
---|
| 2631 | + unsigned int num; |
---|
| 2632 | + |
---|
| 2633 | + caps = cpu_hwcaps_ptrs[i]; |
---|
| 2634 | + if (!caps || !(caps->type & scope_mask)) |
---|
| 2635 | + continue; |
---|
| 2636 | + num = caps->capability; |
---|
| 2637 | + if (!cpus_have_cap(num)) |
---|
1550 | 2638 | continue; |
---|
1551 | 2639 | |
---|
1552 | 2640 | /* Ensure cpus_have_const_cap(num) works */ |
---|
1553 | 2641 | static_branch_enable(&cpu_hwcap_keys[num]); |
---|
1554 | 2642 | |
---|
1555 | | - if (caps->cpu_enable) { |
---|
| 2643 | + if (boot_scope && caps->cpu_enable) |
---|
1556 | 2644 | /* |
---|
1557 | 2645 | * Capabilities with SCOPE_BOOT_CPU scope are finalised |
---|
1558 | 2646 | * before any secondary CPU boots. Thus, each secondary |
---|
.. | .. |
---|
1561 | 2649 | * the boot CPU, for which the capability must be |
---|
1562 | 2650 | * enabled here. This approach avoids costly |
---|
1563 | 2651 | * stop_machine() calls for this case. |
---|
1564 | | - * |
---|
1565 | | - * Otherwise, use stop_machine() as it schedules the |
---|
1566 | | - * work allowing us to modify PSTATE, instead of |
---|
1567 | | - * on_each_cpu() which uses an IPI, giving us a PSTATE |
---|
1568 | | - * that disappears when we return. |
---|
1569 | 2652 | */ |
---|
1570 | | - if (scope_mask & SCOPE_BOOT_CPU) |
---|
1571 | | - caps->cpu_enable(caps); |
---|
1572 | | - else |
---|
1573 | | - stop_machine(__enable_cpu_capability, |
---|
1574 | | - (void *)caps, cpu_online_mask); |
---|
1575 | | - } |
---|
| 2653 | + caps->cpu_enable(caps); |
---|
1576 | 2654 | } |
---|
1577 | | -} |
---|
1578 | 2655 | |
---|
1579 | | -static void __init enable_cpu_capabilities(u16 scope_mask) |
---|
1580 | | -{ |
---|
1581 | | - __enable_cpu_capabilities(arm64_errata, scope_mask); |
---|
1582 | | - __enable_cpu_capabilities(arm64_features, scope_mask); |
---|
| 2656 | + /* |
---|
| 2657 | + * For all non-boot scope capabilities, use stop_machine() |
---|
| 2658 | + * as it schedules the work allowing us to modify PSTATE, |
---|
| 2659 | + * instead of on_each_cpu() which uses an IPI, giving us a |
---|
| 2660 | + * PSTATE that disappears when we return. |
---|
| 2661 | + */ |
---|
| 2662 | + if (!boot_scope) |
---|
| 2663 | + stop_machine(cpu_enable_non_boot_scope_capabilities, |
---|
| 2664 | + NULL, cpu_online_mask); |
---|
1583 | 2665 | } |
---|
1584 | 2666 | |
---|
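The stop_machine() comment above matters because several cpu_enable() callbacks flip PSTATE bits in place. A sketch of such a callback, modelled on the PAN enable path (set_pstate_pan() is assumed to be available, as in mainline's <asm/sysreg.h>):

```c
/*
 * Sketch of a PSTATE-modifying cpu_enable() callback. It must run via
 * stop_machine() rather than an IPI: an IPI handler's PSTATE is restored
 * on exception return, so the update would silently disappear.
 */
static void cpu_enable_pan_sketch(const struct arm64_cpu_capabilities *cap)
{
	set_pstate_pan(1);	/* Privileged Access Never: on */
}
```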
1585 | 2667 | /* |
---|
1586 | 2668 | * Run through the list of capabilities to check for conflicts. |
---|
1587 | 2669 | * If the system has already detected a capability, take necessary |
---|
1588 | 2670 | * action on this CPU. |
---|
1589 | | - * |
---|
1590 | | - * Returns "false" on conflicts. |
---|
1591 | 2671 | */ |
---|
1592 | | -static bool |
---|
1593 | | -__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps, |
---|
1594 | | - u16 scope_mask) |
---|
| 2672 | +static void verify_local_cpu_caps(u16 scope_mask) |
---|
1595 | 2673 | { |
---|
| 2674 | + int i; |
---|
1596 | 2675 | bool cpu_has_cap, system_has_cap; |
---|
| 2676 | + const struct arm64_cpu_capabilities *caps; |
---|
1597 | 2677 | |
---|
1598 | 2678 | scope_mask &= ARM64_CPUCAP_SCOPE_MASK; |
---|
1599 | 2679 | |
---|
1600 | | - for (; caps->matches; caps++) { |
---|
1601 | | - if (!(caps->type & scope_mask)) |
---|
| 2680 | + for (i = 0; i < ARM64_NCAPS; i++) { |
---|
| 2681 | + caps = cpu_hwcaps_ptrs[i]; |
---|
| 2682 | + if (!caps || !(caps->type & scope_mask)) |
---|
1602 | 2683 | continue; |
---|
1603 | 2684 | |
---|
1604 | 2685 | cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); |
---|
.. | .. |
---|
1629 | 2710 | } |
---|
1630 | 2711 | } |
---|
1631 | 2712 | |
---|
1632 | | - if (caps->matches) { |
---|
| 2713 | + if (i < ARM64_NCAPS) { |
---|
1633 | 2714 | pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", |
---|
1634 | 2715 | smp_processor_id(), caps->capability, |
---|
1635 | 2716 | caps->desc, system_has_cap, cpu_has_cap); |
---|
1636 | | - return false; |
---|
| 2717 | + |
---|
| 2718 | + if (cpucap_panic_on_conflict(caps)) |
---|
| 2719 | + cpu_panic_kernel(); |
---|
| 2720 | + else |
---|
| 2721 | + cpu_die_early(); |
---|
1637 | 2722 | } |
---|
1638 | | - |
---|
1639 | | - return true; |
---|
1640 | | -} |
---|
1641 | | - |
---|
1642 | | -static bool verify_local_cpu_caps(u16 scope_mask) |
---|
1643 | | -{ |
---|
1644 | | - return __verify_local_cpu_caps(arm64_errata, scope_mask) && |
---|
1645 | | - __verify_local_cpu_caps(arm64_features, scope_mask); |
---|
1646 | 2723 | } |
---|
1647 | 2724 | |
---|
1648 | 2725 | /* |
---|
.. | .. |
---|
1652 | 2729 | static void check_early_cpu_features(void) |
---|
1653 | 2730 | { |
---|
1654 | 2731 | verify_cpu_asid_bits(); |
---|
1655 | | - /* |
---|
1656 | | - * Early features are used by the kernel already. If there |
---|
1657 | | - * is a conflict, we cannot proceed further. |
---|
1658 | | - */ |
---|
1659 | | - if (!verify_local_cpu_caps(SCOPE_BOOT_CPU)) |
---|
1660 | | - cpu_panic_kernel(); |
---|
| 2732 | + |
---|
| 2733 | + verify_local_cpu_caps(SCOPE_BOOT_CPU); |
---|
1661 | 2734 | } |
---|
1662 | 2735 | |
---|
1663 | 2736 | static void |
---|
1664 | | -verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) |
---|
| 2737 | +__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) |
---|
1665 | 2738 | { |
---|
1666 | 2739 | |
---|
1667 | 2740 | for (; caps->matches; caps++) |
---|
.. | .. |
---|
1670 | 2743 | smp_processor_id(), caps->desc); |
---|
1671 | 2744 | cpu_die_early(); |
---|
1672 | 2745 | } |
---|
| 2746 | +} |
---|
| 2747 | + |
---|
| 2748 | +static void verify_local_elf_hwcaps(void) |
---|
| 2749 | +{ |
---|
| 2750 | + __verify_local_elf_hwcaps(arm64_elf_hwcaps); |
---|
| 2751 | + |
---|
| 2752 | + if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1))) |
---|
| 2753 | + __verify_local_elf_hwcaps(compat_elf_hwcaps); |
---|
1673 | 2754 | } |
---|
1674 | 2755 | |
---|
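id_aa64pfr0_32bit_el0() above tests a single 4-bit field. A sketch of that check under the architectural encoding, where EL0 == 0b0010 means both AArch64 and AArch32 are supported at EL0 (helper name illustrative):

```c
static bool pfr0_has_32bit_el0(u64 pfr0)
{
	/* EL0 occupies bits [3:0] of ID_AA64PFR0_EL1. */
	return ((pfr0 >> ID_AA64PFR0_EL0_SHIFT) & 0xf) == 0x2;
}
```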
1675 | 2756 | static void verify_sve_features(void) |
---|
.. | .. |
---|
1681 | 2762 | unsigned int len = zcr & ZCR_ELx_LEN_MASK; |
---|
1682 | 2763 | |
---|
1683 | 2764 | if (len < safe_len || sve_verify_vq_map()) { |
---|
1684 | | - pr_crit("CPU%d: SVE: required vector length(s) missing\n", |
---|
| 2765 | + pr_crit("CPU%d: SVE: vector length support mismatch\n", |
---|
1685 | 2766 | smp_processor_id()); |
---|
1686 | 2767 | cpu_die_early(); |
---|
1687 | 2768 | } |
---|
.. | .. |
---|
1689 | 2770 | /* Add checks on other ZCR bits here if necessary */ |
---|
1690 | 2771 | } |
---|
1691 | 2772 | |
---|
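verify_sve_features() compares raw ZCR LEN fields directly, which is safe because the encoding is monotonic: architecturally, LEN holds (VL/16 - 1). A sketch of the mapping, for intuition:

```c
/* Convert a ZCR_ELx.LEN field value to a vector length in bytes. */
static unsigned int sve_vl_from_len(unsigned int len)
{
	return (len + 1) * 16;	/* SVE vectors come in 128-bit granules */
}
```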
| 2773 | +static void verify_hyp_capabilities(void) |
---|
| 2774 | +{ |
---|
| 2775 | + u64 safe_mmfr1, mmfr0, mmfr1; |
---|
| 2776 | + int parange, ipa_max; |
---|
| 2777 | + unsigned int safe_vmid_bits, vmid_bits; |
---|
| 2778 | + |
---|
| 2779 | + if (!IS_ENABLED(CONFIG_KVM)) |
---|
| 2780 | + return; |
---|
| 2781 | + |
---|
| 2782 | + safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
---|
| 2783 | + mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); |
---|
| 2784 | + mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); |
---|
| 2785 | + |
---|
| 2786 | + /* Verify VMID bits */ |
---|
| 2787 | + safe_vmid_bits = get_vmid_bits(safe_mmfr1); |
---|
| 2788 | + vmid_bits = get_vmid_bits(mmfr1); |
---|
| 2789 | + if (vmid_bits < safe_vmid_bits) { |
---|
| 2790 | + pr_crit("CPU%d: VMID width mismatch\n", smp_processor_id()); |
---|
| 2791 | + cpu_die_early(); |
---|
| 2792 | + } |
---|
| 2793 | + |
---|
| 2794 | + /* Verify IPA range */ |
---|
| 2795 | + parange = cpuid_feature_extract_unsigned_field(mmfr0, |
---|
| 2796 | + ID_AA64MMFR0_PARANGE_SHIFT); |
---|
| 2797 | + ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange); |
---|
| 2798 | + if (ipa_max < get_kvm_ipa_limit()) { |
---|
| 2799 | + pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id()); |
---|
| 2800 | + cpu_die_early(); |
---|
| 2801 | + } |
---|
| 2802 | +} |
---|
1692 | 2803 | |
---|
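get_vmid_bits() used above decodes ID_AA64MMFR1_EL1.VMIDBits, where 0 encodes 8-bit VMIDs and 2 encodes 16-bit VMIDs. A sketch of the decode, mirroring the behaviour the caller relies on:

```c
static unsigned int vmid_bits_sketch(u64 mmfr1)
{
	unsigned int field = cpuid_feature_extract_unsigned_field(mmfr1,
						ID_AA64MMFR1_VMIDBITS_SHIFT);

	return field == ID_AA64MMFR1_VMIDBITS_16 ? 16 : 8;
}
```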
1693 | 2804 | /* |
---|
1694 | 2805 | * Run through the enabled system capabilities and enable() them on this CPU. |
---|
.. | .. |
---|
1705 | 2816 | * check_early_cpu_features(), as they need to be verified |
---|
1706 | 2817 | * on all secondary CPUs. |
---|
1707 | 2818 | */ |
---|
1708 | | - if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU)) |
---|
1709 | | - cpu_die_early(); |
---|
1710 | | - |
---|
1711 | | - verify_local_elf_hwcaps(arm64_elf_hwcaps); |
---|
1712 | | - |
---|
1713 | | - if (system_supports_32bit_el0()) |
---|
1714 | | - verify_local_elf_hwcaps(compat_elf_hwcaps); |
---|
| 2819 | + verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU); |
---|
| 2820 | + verify_local_elf_hwcaps(); |
---|
1715 | 2821 | |
---|
1716 | 2822 | if (system_supports_sve()) |
---|
1717 | 2823 | verify_sve_features(); |
---|
| 2824 | + |
---|
| 2825 | + if (is_hyp_mode_available()) |
---|
| 2826 | + verify_hyp_capabilities(); |
---|
1718 | 2827 | } |
---|
1719 | 2828 | |
---|
1720 | 2829 | void check_local_cpu_capabilities(void) |
---|
.. | .. |
---|
1731 | 2840 | * Otherwise, this CPU should verify that it has all the system |
---|
1732 | 2841 | * advertised capabilities. |
---|
1733 | 2842 | */ |
---|
1734 | | - if (!sys_caps_initialised) |
---|
| 2843 | + if (!system_capabilities_finalized()) |
---|
1735 | 2844 | update_cpu_capabilities(SCOPE_LOCAL_CPU); |
---|
1736 | 2845 | else |
---|
1737 | 2846 | verify_local_cpu_capabilities(); |
---|
.. | .. |
---|
1745 | 2854 | enable_cpu_capabilities(SCOPE_BOOT_CPU); |
---|
1746 | 2855 | } |
---|
1747 | 2856 | |
---|
1748 | | -DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); |
---|
1749 | | -EXPORT_SYMBOL(arm64_const_caps_ready); |
---|
1750 | | - |
---|
1751 | | -static void __init mark_const_caps_ready(void) |
---|
| 2857 | +bool this_cpu_has_cap(unsigned int n) |
---|
1752 | 2858 | { |
---|
1753 | | - static_branch_enable(&arm64_const_caps_ready); |
---|
| 2859 | + if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) { |
---|
| 2860 | + const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; |
---|
| 2861 | + |
---|
| 2862 | + if (cap) |
---|
| 2863 | + return cap->matches(cap, SCOPE_LOCAL_CPU); |
---|
| 2864 | + } |
---|
| 2865 | + |
---|
| 2866 | + return false; |
---|
1754 | 2867 | } |
---|
1755 | 2868 | |
---|
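this_cpu_has_cap() queries the capability on the local CPU, so callers must be non-preemptible (hence the WARN above). A hypothetical wrapper that ensures this:

```c
/* Hypothetical caller: query a capability on the current CPU. */
static bool query_cap_on_this_cpu(unsigned int cap)
{
	bool has;

	preempt_disable();
	has = this_cpu_has_cap(cap);
	preempt_enable();

	return has;
}
```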
1756 | | -extern const struct arm64_cpu_capabilities arm64_errata[]; |
---|
1757 | | - |
---|
1758 | | -bool this_cpu_has_cap(unsigned int cap) |
---|
| 2869 | +/* |
---|
| 2870 | + * This helper function is used in a narrow window when: |
---|
| 2871 | + * - the system-wide safe registers have been set up for all SMP CPUs, and |
---|
| 2872 | + * - the SYSTEM_FEATURE cpu_hwcaps may not yet have been set. |
---|
| 2873 | + * In all other cases cpus_have_{const_}cap() should be used. |
---|
| 2874 | + */ |
---|
| 2875 | +static bool __system_matches_cap(unsigned int n) |
---|
1759 | 2876 | { |
---|
1760 | | - return (__this_cpu_has_cap(arm64_features, cap) || |
---|
1761 | | - __this_cpu_has_cap(arm64_errata, cap)); |
---|
| 2877 | + if (n < ARM64_NCAPS) { |
---|
| 2878 | + const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; |
---|
| 2879 | + |
---|
| 2880 | + if (cap) |
---|
| 2881 | + return cap->matches(cap, SCOPE_SYSTEM); |
---|
| 2882 | + } |
---|
| 2883 | + return false; |
---|
| 2884 | +} |
---|
| 2885 | + |
---|
| 2886 | +void cpu_set_feature(unsigned int num) |
---|
| 2887 | +{ |
---|
| 2888 | + WARN_ON(num >= MAX_CPU_FEATURES); |
---|
| 2889 | + elf_hwcap |= BIT(num); |
---|
| 2890 | +} |
---|
| 2891 | +EXPORT_SYMBOL_GPL(cpu_set_feature); |
---|
| 2892 | + |
---|
| 2893 | +bool cpu_have_feature(unsigned int num) |
---|
| 2894 | +{ |
---|
| 2895 | + WARN_ON(num >= MAX_CPU_FEATURES); |
---|
| 2896 | + return elf_hwcap & BIT(num); |
---|
| 2897 | +} |
---|
| 2898 | +EXPORT_SYMBOL_GPL(cpu_have_feature); |
---|
| 2899 | + |
---|
| 2900 | +unsigned long cpu_get_elf_hwcap(void) |
---|
| 2901 | +{ |
---|
| 2902 | + /* |
---|
| 2903 | + * We currently only populate the first 32 bits of AT_HWCAP. Please |
---|
| 2904 | + * note that for userspace compatibility we guarantee that bits 62 |
---|
| 2905 | + * and 63 will always be returned as 0. |
---|
| 2906 | + */ |
---|
| 2907 | + return lower_32_bits(elf_hwcap); |
---|
| 2908 | +} |
---|
| 2909 | + |
---|
| 2910 | +unsigned long cpu_get_elf_hwcap2(void) |
---|
| 2911 | +{ |
---|
| 2912 | + return upper_32_bits(elf_hwcap); |
---|
1762 | 2913 | } |
---|
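cpu_get_elf_hwcap() and cpu_get_elf_hwcap2() are what end up in AT_HWCAP and AT_HWCAP2 of the ELF auxiliary vector. A minimal userspace consumer, assuming glibc's getauxval():

```c
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP_* and HWCAP2_* bit definitions */

int main(void)
{
	if (getauxval(AT_HWCAP) & HWCAP_ASIMDDP)
		puts("SDOT/UDOT available");
	if (getauxval(AT_HWCAP2) & HWCAP2_SVE2)
		puts("SVE2 available");
	return 0;
}
```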
1763 | 2914 | |
---|
1764 | 2915 | static void __init setup_system_capabilities(void) |
---|
.. | .. |
---|
1778 | 2929 | u32 cwg; |
---|
1779 | 2930 | |
---|
1780 | 2931 | setup_system_capabilities(); |
---|
1781 | | - mark_const_caps_ready(); |
---|
1782 | 2932 | setup_elf_hwcaps(arm64_elf_hwcaps); |
---|
1783 | 2933 | |
---|
1784 | | - if (system_supports_32bit_el0()) |
---|
| 2934 | + if (system_supports_32bit_el0()) { |
---|
1785 | 2935 | setup_elf_hwcaps(compat_elf_hwcaps); |
---|
| 2936 | + elf_hwcap_fixup(); |
---|
| 2937 | + } |
---|
1786 | 2938 | |
---|
1787 | 2939 | if (system_uses_ttbr0_pan()) |
---|
1788 | 2940 | pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); |
---|
.. | .. |
---|
1791 | 2943 | minsigstksz_setup(); |
---|
1792 | 2944 | |
---|
1793 | 2945 | /* Advertise that we have computed the system capabilities */ |
---|
1794 | | - set_sys_caps_initialised(); |
---|
| 2946 | + finalize_system_capabilities(); |
---|
1795 | 2947 | |
---|
1796 | 2948 | /* |
---|
1797 | 2949 | * Check for sane CTR_EL0.CWG value. |
---|
.. | .. |
---|
1802 | 2954 | ARCH_DMA_MINALIGN); |
---|
1803 | 2955 | } |
---|
1804 | 2956 | |
---|
| 2957 | +static int enable_mismatched_32bit_el0(unsigned int cpu) |
---|
| 2958 | +{ |
---|
| 2959 | + static int lucky_winner = -1; |
---|
| 2960 | + |
---|
| 2961 | + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu); |
---|
| 2962 | + bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0); |
---|
| 2963 | + |
---|
| 2964 | + if (cpu_32bit) { |
---|
| 2965 | + cpumask_set_cpu(cpu, cpu_32bit_el0_mask); |
---|
| 2966 | + static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0); |
---|
| 2967 | + } |
---|
| 2968 | + |
---|
| 2969 | + if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit) |
---|
| 2970 | + return 0; |
---|
| 2971 | + |
---|
| 2972 | + if (lucky_winner >= 0) |
---|
| 2973 | + return 0; |
---|
| 2974 | + |
---|
| 2975 | + /* |
---|
| 2976 | + * We've detected a mismatch. We need to keep one of our CPUs with |
---|
| 2977 | + * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting |
---|
| 2978 | + * every CPU in the system for a 32-bit task. |
---|
| 2979 | + */ |
---|
| 2980 | + lucky_winner = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask, |
---|
| 2981 | + cpu_active_mask); |
---|
| 2982 | + get_cpu_device(lucky_winner)->offline_disabled = true; |
---|
| 2983 | + setup_elf_hwcaps(compat_elf_hwcaps); |
---|
| 2984 | + pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n", |
---|
| 2985 | + cpu, lucky_winner); |
---|
| 2986 | + return 0; |
---|
| 2987 | +} |
---|
| 2988 | + |
---|
| 2989 | +static int __init init_32bit_el0_mask(void) |
---|
| 2990 | +{ |
---|
| 2991 | + if (!allow_mismatched_32bit_el0) |
---|
| 2992 | + return 0; |
---|
| 2993 | + |
---|
| 2994 | + if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL)) |
---|
| 2995 | + return -ENOMEM; |
---|
| 2996 | + |
---|
| 2997 | + return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, |
---|
| 2998 | + "arm64/mismatched_32bit_el0:online", |
---|
| 2999 | + enable_mismatched_32bit_el0, NULL); |
---|
| 3000 | +} |
---|
| 3001 | +subsys_initcall_sync(init_32bit_el0_mask); |
---|
| 3002 | + |
---|
1805 | 3003 | static bool __maybe_unused |
---|
1806 | 3004 | cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) |
---|
1807 | 3005 | { |
---|
1808 | | - return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO)); |
---|
| 3006 | + return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO)); |
---|
| 3007 | +} |
---|
| 3008 | + |
---|
| 3009 | +static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) |
---|
| 3010 | +{ |
---|
| 3011 | + cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); |
---|
1809 | 3012 | } |
---|
1810 | 3013 | |
---|
1811 | 3014 | /* |
---|
.. | .. |
---|
1857 | 3060 | if (sys_reg_CRm(id) == 0) |
---|
1858 | 3061 | return emulate_id_reg(id, valp); |
---|
1859 | 3062 | |
---|
1860 | | - regp = get_arm64_ftr_reg(id); |
---|
| 3063 | + regp = get_arm64_ftr_reg_nowarn(id); |
---|
1861 | 3064 | if (regp) |
---|
1862 | 3065 | *valp = arm64_ftr_reg_user_value(regp); |
---|
1863 | 3066 | else |
---|
.. | .. |
---|
1869 | 3072 | return 0; |
---|
1870 | 3073 | } |
---|
1871 | 3074 | |
---|
1872 | | -static int emulate_mrs(struct pt_regs *regs, u32 insn) |
---|
| 3075 | +int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt) |
---|
1873 | 3076 | { |
---|
1874 | 3077 | int rc; |
---|
1875 | | - u32 sys_reg, dst; |
---|
1876 | 3078 | u64 val; |
---|
| 3079 | + |
---|
| 3080 | + rc = emulate_sys_reg(sys_reg, &val); |
---|
| 3081 | + if (!rc) { |
---|
| 3082 | + pt_regs_write_reg(regs, rt, val); |
---|
| 3083 | + arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
---|
| 3084 | + } |
---|
| 3085 | + return rc; |
---|
| 3086 | +} |
---|
| 3087 | + |
---|
| 3088 | +static int emulate_mrs(struct pt_regs *regs, u32 insn) |
---|
| 3089 | +{ |
---|
| 3090 | + u32 sys_reg, rt; |
---|
1877 | 3091 | |
---|
1878 | 3092 | /* |
---|
1879 | 3093 | * sys_reg values are defined as used in mrs/msr instruction. |
---|
1880 | 3094 | * shift the imm value to get the encoding. |
---|
1881 | 3095 | */ |
---|
1882 | 3096 | sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5; |
---|
1883 | | - rc = emulate_sys_reg(sys_reg, &val); |
---|
1884 | | - if (!rc) { |
---|
1885 | | - dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); |
---|
1886 | | - pt_regs_write_reg(regs, dst, val); |
---|
1887 | | - arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); |
---|
1888 | | - } |
---|
1889 | | - |
---|
1890 | | - return rc; |
---|
| 3097 | + rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn); |
---|
| 3098 | + return do_emulate_mrs(regs, sys_reg, rt); |
---|
1891 | 3099 | } |
---|
1892 | 3100 | |
---|
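This is the kernel side of the CPU feature registers ABI: with HWCAP_CPUID advertised (set unconditionally in setup_elf_hwcaps() above), an MRS of an ID register from EL0 traps, and do_emulate_mrs() writes back the sanitised value. A userspace sketch, following the form used in the kernel documentation:

```c
#include <stdint.h>

/* Traps to EL1; the kernel emulates the read with the sanitised value. */
static inline uint64_t read_id_aa64isar0(void)
{
	uint64_t val;

	asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (val));
	return val;
}
```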
1893 | 3101 | static struct undef_hook mrs_hook = { |
---|
.. | .. |
---|
1906 | 3114 | |
---|
1907 | 3115 | core_initcall(enable_mrs_emulation); |
---|
1908 | 3116 | |
---|
1909 | | -void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) |
---|
| 3117 | +enum mitigation_state arm64_get_meltdown_state(void) |
---|
1910 | 3118 | { |
---|
1911 | | - /* Firmware may have left a deferred SError in this register. */ |
---|
1912 | | - write_sysreg_s(0, SYS_DISR_EL1); |
---|
| 3119 | + if (__meltdown_safe) |
---|
| 3120 | + return SPECTRE_UNAFFECTED; |
---|
| 3121 | + |
---|
| 3122 | + if (arm64_kernel_unmapped_at_el0()) |
---|
| 3123 | + return SPECTRE_MITIGATED; |
---|
| 3124 | + |
---|
| 3125 | + return SPECTRE_VULNERABLE; |
---|
1913 | 3126 | } |
---|
1914 | 3127 | |
---|
1915 | 3128 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, |
---|
1916 | 3129 | char *buf) |
---|
1917 | 3130 | { |
---|
1918 | | - if (__meltdown_safe) |
---|
| 3131 | + switch (arm64_get_meltdown_state()) { |
---|
| 3132 | + case SPECTRE_UNAFFECTED: |
---|
1919 | 3133 | return sprintf(buf, "Not affected\n"); |
---|
1920 | 3134 | |
---|
1921 | | - if (arm64_kernel_unmapped_at_el0()) |
---|
| 3135 | + case SPECTRE_MITIGATED: |
---|
1922 | 3136 | return sprintf(buf, "Mitigation: PTI\n"); |
---|
1923 | 3137 | |
---|
1924 | | - return sprintf(buf, "Vulnerable\n"); |
---|
| 3138 | + default: |
---|
| 3139 | + return sprintf(buf, "Vulnerable\n"); |
---|
| 3140 | + } |
---|
1925 | 3141 | } |
---|
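cpu_show_meltdown() backs the sysfs vulnerabilities interface. A minimal userspace reader, using the path exposed by mainline:

```c
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");

	if (f) {
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "Mitigation: PTI" */
		fclose(f);
	}
	return 0;
}
```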