+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Contains CPU specific errata definitions
  *
  * Copyright (C) 2014 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

 #include <linux/arm-smccc.h>
...
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>

 static bool __maybe_unused
...
 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                           int scope)
 {
-        u64 mask = CTR_CACHE_MINLINE_MASK;
-
-        /* Skip matching the min line sizes for cache type check */
-        if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
-                mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
+        u64 ctr_raw, ctr_real;

         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return (read_cpuid_cachetype() & mask) !=
-               (arm64_ftr_reg_ctrel0.sys_val & mask);
+
+        /*
+         * We want to make sure that all the CPUs in the system expose
+         * a consistent CTR_EL0, so that applications behave correctly
+         * with migration.
+         *
+         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
+         *
+         * 1) It is safe if the system doesn't support IDC, as the CPU
+         *    reports IDC = 0 anyway, consistent with the rest.
+         *
+         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
+         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
+         *
+         * So, we need to make sure either the raw CTR_EL0 or the effective
+         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
+         */
+        ctr_raw = read_cpuid_cachetype() & mask;
+        ctr_real = read_cpuid_effective_cachetype() & mask;
+
+        return (ctr_real != sys) && (ctr_raw != sys);
 }
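
The rewritten check above compares two views of CTR_EL0 against the system-wide
copy: the raw value from read_cpuid_cachetype() and the effective value, which
folds in any IDC/DIC behaviour the kernel knows the CPU really has. A CPU is
only rejected when both views disagree with the system copy. A minimal
userspace sketch of that decision, with hypothetical register values (IDC is
CTR_EL0 bit 28; the variable names mirror the kernel code, but nothing here is
kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define CTR_IDC (1ULL << 28)    /* CTR_EL0.IDC */

    int main(void)
    {
            uint64_t mask = CTR_IDC;        /* slice of the strict mask */
            uint64_t sys = CTR_IDC;         /* system-wide copy: IDC set */
            uint64_t ctr_raw = 0;           /* this CPU fails to advertise IDC */
            uint64_t ctr_real = CTR_IDC;    /* effective view: IDC set */

            /* Mismatch only if both the raw and effective views differ. */
            int mismatched = ((ctr_real & mask) != sys) &&
                             ((ctr_raw & mask) != sys);

            printf("mismatched = %d\n", mismatched); /* 0: the CPU may boot */
            return 0;
    }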

 static void
-cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 {
-        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+        bool enable_uct_trap = false;
+
+        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
+        if ((read_cpuid_cachetype() & mask) !=
+            (arm64_ftr_reg_ctrel0.sys_val & mask))
+                enable_uct_trap = true;
+
+        /* ... or if the system is affected by an erratum */
+        if (cap->capability == ARM64_WORKAROUND_1542419)
+                enable_uct_trap = true;
+
+        if (enable_uct_trap)
+                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
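
Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, where the kernel
can emulate the access and return the sanitised system-wide value instead of
this CPU's mismatched one. A sketch of the userspace access that becomes
trappable (nothing beyond the architectural MRS encoding is assumed):

    /*
     * EL0 code: with SCTLR_EL1.UCT clear, this MRS traps to EL1 and the
     * kernel supplies the system-wide safe CTR_EL0 value instead.
     */
    static inline unsigned long read_ctr_el0(void)
    {
            unsigned long ctr;

            asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
            return ctr;
    }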
-
-atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
-
-static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
-                                const char *hyp_vecs_end)
-{
-        void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
-        int i;
-
-        for (i = 0; i < SZ_2K; i += 0x80)
-                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
-
-        __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
-}
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-                                    const char *hyp_vecs_start,
-                                    const char *hyp_vecs_end)
-{
-        static DEFINE_SPINLOCK(bp_lock);
-        int cpu, slot = -1;
-
-        spin_lock(&bp_lock);
-        for_each_possible_cpu(cpu) {
-                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
-                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
-                        break;
-                }
-        }
-
-        if (slot == -1) {
-                slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-                BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
-                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
-        }
-
-        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
-        __this_cpu_write(bp_hardening_data.fn, fn);
-        spin_unlock(&bp_lock);
-}
-#else
-#define __smccc_workaround_1_smc_start NULL
-#define __smccc_workaround_1_smc_end NULL
-
-static void install_bp_hardening_cb(bp_hardening_cb_t fn,
-                                    const char *hyp_vecs_start,
-                                    const char *hyp_vecs_end)
-{
-        __this_cpu_write(bp_hardening_data.fn, fn);
-}
-#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-
-#include <linux/arm-smccc.h>
-
-static void call_smc_arch_workaround_1(void)
-{
-        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void call_hvc_arch_workaround_1(void)
-{
-        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
-}
-
-static void qcom_link_stack_sanitization(void)
-{
-        u64 tmp;
-
-        asm volatile("mov %0, x30 \n"
-                     ".rept 16 \n"
-                     "bl . + 4 \n"
-                     ".endr \n"
-                     "mov x30, %0 \n"
-                     : "=&r" (tmp));
-}
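
A note on the Falkor callback removed above: each "bl . + 4" is a
branch-and-link to the very next instruction, so the ".rept 16" loop pushes
sixteen benign entries onto the CPU's return-address predictor stack,
displacing any attacker-trained entries; x30 is saved to a temporary register
beforehand and restored afterwards so the real return address survives.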
-
-static bool __nospectre_v2;
-static int __init parse_nospectre_v2(char *str)
-{
-        __nospectre_v2 = true;
-        return 0;
-}
-early_param("nospectre_v2", parse_nospectre_v2);
-
-/*
- * -1: No workaround
- *  0: No workaround required
- *  1: Workaround installed
- */
-static int detect_harden_bp_fw(void)
-{
-        bp_hardening_cb_t cb;
-        void *smccc_start, *smccc_end;
-        struct arm_smccc_res res;
-        u32 midr = read_cpuid_id();
-
-        switch (arm_smccc_1_1_get_conduit()) {
-        case SMCCC_CONDUIT_HVC:
-                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-                switch ((int)res.a0) {
-                case 1:
-                        /* Firmware says we're just fine */
-                        return 0;
-                case 0:
-                        cb = call_hvc_arch_workaround_1;
-                        /* This is a guest, no need to patch KVM vectors */
-                        smccc_start = NULL;
-                        smccc_end = NULL;
-                        break;
-                default:
-                        return -1;
-                }
-                break;
-
-        case SMCCC_CONDUIT_SMC:
-                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-                switch ((int)res.a0) {
-                case 1:
-                        /* Firmware says we're just fine */
-                        return 0;
-                case 0:
-                        cb = call_smc_arch_workaround_1;
-                        smccc_start = __smccc_workaround_1_smc_start;
-                        smccc_end = __smccc_workaround_1_smc_end;
-                        break;
-                default:
-                        return -1;
-                }
-                break;
-
-        default:
-                return -1;
-        }
-
-        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
-            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
-                cb = qcom_link_stack_sanitization;
-
-        if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-                install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
-        return 1;
-}
-
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
-static bool __ssb_safe = true;
-
-static const struct ssbd_options {
-        const char *str;
-        int state;
-} ssbd_options[] = {
-        { "force-on", ARM64_SSBD_FORCE_ENABLE, },
-        { "force-off", ARM64_SSBD_FORCE_DISABLE, },
-        { "kernel", ARM64_SSBD_KERNEL, },
-};
-
-static int __init ssbd_cfg(char *buf)
-{
-        int i;
-
-        if (!buf || !buf[0])
-                return -EINVAL;
-
-        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
-                int len = strlen(ssbd_options[i].str);
-
-                if (strncmp(buf, ssbd_options[i].str, len))
-                        continue;
-
-                ssbd_state = ssbd_options[i].state;
-                return 0;
-        }
-
-        return -EINVAL;
-}
-early_param("ssbd", ssbd_cfg);
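
The "ssbd=" early parameter above matches its argument by prefix against the
three option strings. Usage on the kernel command line, taken directly from
the ssbd_options table:

    ssbd=force-on     # ARM64_SSBD_FORCE_ENABLE: mitigation always on
    ssbd=force-off    # ARM64_SSBD_FORCE_DISABLE: mitigation always off
    ssbd=kernel       # ARM64_SSBD_KERNEL: on for the kernel; userspace can
                      # opt in or out via prctl (see the sysfs string at the
                      # end of this diff)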
-
-void __init arm64_update_smccc_conduit(struct alt_instr *alt,
-                                       __le32 *origptr, __le32 *updptr,
-                                       int nr_inst)
-{
-        u32 insn;
-
-        BUG_ON(nr_inst != 1);
-
-        switch (arm_smccc_1_1_get_conduit()) {
-        case SMCCC_CONDUIT_HVC:
-                insn = aarch64_insn_get_hvc_value();
-                break;
-        case SMCCC_CONDUIT_SMC:
-                insn = aarch64_insn_get_smc_value();
-                break;
-        default:
-                return;
-        }
-
-        *updptr = cpu_to_le32(insn);
-}
-
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
-                                      __le32 *origptr, __le32 *updptr,
-                                      int nr_inst)
-{
-        BUG_ON(nr_inst != 1);
-        /*
-         * Only allow mitigation on EL1 entry/exit and guest
-         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
-         * be flipped.
-         */
-        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
-                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
-void arm64_set_ssbd_mitigation(bool state)
-{
-        if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-                pr_info_once("SSBD disabled by kernel configuration\n");
-                return;
-        }
-
-        if (this_cpu_has_cap(ARM64_SSBS)) {
-                if (state)
-                        asm volatile(SET_PSTATE_SSBS(0));
-                else
-                        asm volatile(SET_PSTATE_SSBS(1));
-                return;
-        }
-
-        switch (arm_smccc_1_1_get_conduit()) {
-        case SMCCC_CONDUIT_HVC:
-                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
-                break;
-
-        case SMCCC_CONDUIT_SMC:
-                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
-                break;
-
-        default:
-                WARN_ON_ONCE(1);
-                break;
-        }
-}
-
-static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
-                                int scope)
-{
-        struct arm_smccc_res res;
-        bool required = true;
-        s32 val;
-        bool this_cpu_safe = false;
-
-        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-        if (cpu_mitigations_off())
-                ssbd_state = ARM64_SSBD_FORCE_DISABLE;
-
-        /* delay setting __ssb_safe until we get a firmware response */
-        if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
-                this_cpu_safe = true;
-
-        if (this_cpu_has_cap(ARM64_SSBS)) {
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                required = false;
-                goto out_printmsg;
-        }
-
-        switch (arm_smccc_1_1_get_conduit()) {
-        case SMCCC_CONDUIT_HVC:
-                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-                break;
-
-        case SMCCC_CONDUIT_SMC:
-                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
-                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
-                break;
-
-        default:
-                ssbd_state = ARM64_SSBD_UNKNOWN;
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                return false;
-        }
-
-        val = (s32)res.a0;
-
-        switch (val) {
-        case SMCCC_RET_NOT_SUPPORTED:
-                ssbd_state = ARM64_SSBD_UNKNOWN;
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                return false;
-
-        /* machines with mixed mitigation requirements must not return this */
-        case SMCCC_RET_NOT_REQUIRED:
-                pr_info_once("%s mitigation not required\n", entry->desc);
-                ssbd_state = ARM64_SSBD_MITIGATED;
-                return false;
-
-        case SMCCC_RET_SUCCESS:
-                __ssb_safe = false;
-                required = true;
-                break;
-
-        case 1: /* Mitigation not required on this CPU */
-                required = false;
-                break;
-
-        default:
-                WARN_ON(1);
-                if (!this_cpu_safe)
-                        __ssb_safe = false;
-                return false;
-        }
-
-        switch (ssbd_state) {
-        case ARM64_SSBD_FORCE_DISABLE:
-                arm64_set_ssbd_mitigation(false);
-                required = false;
-                break;
-
-        case ARM64_SSBD_KERNEL:
-                if (required) {
-                        __this_cpu_write(arm64_ssbd_callback_required, 1);
-                        arm64_set_ssbd_mitigation(true);
-                }
-                break;
-
-        case ARM64_SSBD_FORCE_ENABLE:
-                arm64_set_ssbd_mitigation(true);
-                required = true;
-                break;
-
-        default:
-                WARN_ON(1);
-                break;
-        }
-
-out_printmsg:
-        switch (ssbd_state) {
-        case ARM64_SSBD_FORCE_DISABLE:
-                pr_info_once("%s disabled from command-line\n", entry->desc);
-                break;
-
-        case ARM64_SSBD_FORCE_ENABLE:
-                pr_info_once("%s forced from command-line\n", entry->desc);
-                break;
-        }
-
-        return required;
-}
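
To summarise the firmware probe in has_ssbd_mitigation() above: querying
ARCH_FEATURES for ARM_SMCCC_ARCH_WORKAROUND_2 yields SMCCC_RET_NOT_SUPPORTED
(state unknown; assume vulnerable unless the MIDR is in the safe list),
SMCCC_RET_NOT_REQUIRED (the whole machine is mitigated, and machines with
mixed requirements must not return this), SMCCC_RET_SUCCESS (a dynamic
workaround is available and this CPU needs it), or 1 (this particular CPU
does not need it).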
-
-/* known invulnerable cores */
-static const struct midr_range arm64_ssb_cpus[] = {
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-        {},
-};

 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
...
 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                                int scope)
 {
-        u32 midr = read_cpuid_id();
-        /* Cortex-A76 r0p0 - r3p1 */
-        struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-
-        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-        return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+        return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
 }
 #endif
+
+static void __maybe_unused
+cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+{
+        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+}

 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
         .matches = is_affected_midr_range, \
...
         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
         CAP_MIDR_RANGE_LIST(midr_list)

-/* Track overall mitigation state. We are only mitigated if all cores are ok */
-static bool __hardenbp_enab = true;
-static bool __spectrev2_safe = true;
-
-/*
- * Generic helper for handling capabilities with multiple (match,enable) pairs
- * of callbacks, sharing the same capability bit.
- * Iterate over each entry to see if at least one matches.
- */
-static bool __maybe_unused
-multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
-{
-        const struct arm64_cpu_capabilities *caps;
-
-        for (caps = entry->match_list; caps->matches; caps++)
-                if (caps->matches(caps, scope))
-                        return true;
-
-        return false;
-}
-
-/*
- * Take appropriate action for all matching entries in the shared capability
- * entry.
- */
-static void __maybe_unused
-multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
-{
-        const struct arm64_cpu_capabilities *caps;
-
-        for (caps = entry->match_list; caps->matches; caps++)
-                if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
-                    caps->cpu_enable)
-                        caps->cpu_enable(caps);
-}
-
-/*
- * List of CPUs that do not need any Spectre-v2 mitigation at all.
- */
-static const struct midr_range spectre_v2_safe_list[] = {
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
-        { /* sentinel */ }
-};
-
-/*
- * Track overall bp hardening for all heterogeneous cores in the machine.
- * We are only considered "safe" if all booted cores are known safe.
- */
-static bool __maybe_unused
-check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
-{
-        int need_wa;
-
-        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-
-        /* If the CPU has CSV2 set, we're safe */
-        if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
-                                                 ID_AA64PFR0_CSV2_SHIFT))
-                return false;
-
-        /* Alternatively, we have a list of unaffected CPUs */
-        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
-                return false;
-
-        /* Fallback to firmware detection */
-        need_wa = detect_harden_bp_fw();
-        if (!need_wa)
-                return false;
-
-        __spectrev2_safe = false;
-
-        if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-                pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-                __hardenbp_enab = false;
-                return false;
-        }
-
-        /* forced off */
-        if (__nospectre_v2 || cpu_mitigations_off()) {
-                pr_info_once("spectrev2 mitigation disabled by command line option\n");
-                __hardenbp_enab = false;
-                return false;
-        }
-
-        if (need_wa < 0) {
-                pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
-                __hardenbp_enab = false;
-        }
-
-        return (need_wa > 0);
-}
-
-static void
-cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
-{
-        cap->matches(cap, SCOPE_LOCAL_CPU);
-}
-
 static const __maybe_unused struct midr_range tx2_family_cpus[] = {
         MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
         MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
...
         return is_midr_in_range(midr, &range) && has_dic;
 }

-#ifdef CONFIG_HARDEN_EL2_VECTORS
-
-static const struct midr_range arm64_harden_el2_vectors[] = {
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
+static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+        {
+                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
+        },
+        {
+                .midr_range.model = MIDR_QCOM_KRYO,
+                .matches = is_kryo_midr,
+        },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1286807
+        {
+                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+        },
+        {
+                /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
+                ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
+        },
+#endif
         {},
 };
-
 #endif

-const struct arm64_cpu_capabilities arm64_errata[] = {
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+const struct midr_range cavium_erratum_27456_cpus[] = {
+        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
+        /* Cavium ThunderX, T81 pass 1.0 */
+        MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+        {},
+};
+#endif
+
+#ifdef CONFIG_CAVIUM_ERRATUM_30115
+static const struct midr_range cavium_erratum_30115_cpus[] = {
+        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
+        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
+        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
+        MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
+        /* Cavium ThunderX, T83 pass 1.0 */
+        MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+        {},
+};
+#endif
+
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
+        {
+                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+        },
+        {
+                .midr_range.model = MIDR_QCOM_KRYO,
+                .matches = is_kryo_midr,
+        },
+        {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+static const struct midr_range workaround_clean_cache[] = {
 #if defined(CONFIG_ARM64_ERRATUM_826319) || \
     defined(CONFIG_ARM64_ERRATUM_827319) || \
     defined(CONFIG_ARM64_ERRATUM_824069)
-        {
-                /* Cortex-A53 r0p[012] */
-                .desc = "ARM errata 826319, 827319, 824069",
-                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
-                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
-                .cpu_enable = cpu_enable_cache_maint_trap,
-        },
+        /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
+        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
 #endif
-#ifdef CONFIG_ARM64_ERRATUM_819472
+#ifdef CONFIG_ARM64_ERRATUM_819472
+        /* Cortex-A53 r0p[01]: ARM errata 819472 */
+        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+#endif
+        {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+/*
+ * - 1188873 affects r0p0 to r2p0
+ * - 1418040 affects r0p0 to r3p1
+ */
+static const struct midr_range erratum_1418040_list[] = {
+        /* Cortex-A76 r0p0 to r3p1 */
+        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+        /* Neoverse-N1 r0p0 to r3p1 */
+        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+        {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+static const struct midr_range erratum_845719_list[] = {
+        /* Cortex-A53 r0p[01234] */
+        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+        /* Brahma-B53 r0p[0] */
+        MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+        /* Kryo2XX Silver rAp4 */
+        MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
+        {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+static const struct arm64_cpu_capabilities erratum_843419_list[] = {
         {
-                /* Cortex-A53 r0p[01] */
-                .desc = "ARM errata 819472",
+                /* Cortex-A53 r0p[01234] */
+                .matches = is_affected_midr_range,
+                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+                MIDR_FIXED(0x4, BIT(8)),
+        },
+        {
+                /* Brahma-B53 r0p[0] */
+                .matches = is_affected_midr_range,
+                ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+        },
+        {},
+};
+#endif
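
The MIDR_FIXED(0x4, BIT(8)) line above encodes a revision-dependent hardware
fix: on Cortex-A53 r0p4 parts, a set bit 8 in REVIDR_EL1 indicates that
erratum 843419 has been fixed in that part, so the matcher treats such CPUs
as unaffected. A paraphrase of how the fixed_revs list is consumed by the
is_affected_midr_range() helper (which the elided hunks above hide; details
may differ between kernel versions):

    midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
    revidr = read_cpuid(REVIDR_EL1);
    for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
            if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                    return false;   /* erratum fixed in this part */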
+
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
+static const struct midr_range erratum_speculative_at_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+        /* Cortex A76 r0p0 to r2p0 */
+        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1319367
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1530923
+        /* Cortex A55 r0p0 to r2p0 */
+        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+        /* Kryo4xx Silver (rdpe => r1p0) */
+        MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
+#endif
+        {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+        /* Cortex-A76 r0p0 - r3p1 */
+        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+        {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+static const struct midr_range tsb_flush_fail_cpus[] = {
+#ifdef CONFIG_ARM64_ERRATUM_2067961
+        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2054223
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+#endif
+        {},
+};
+#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
+
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+static struct midr_range broken_aarch32_aes[] = {
+        MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
+        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+        {},
+};
+#endif
+
+const struct arm64_cpu_capabilities arm64_errata[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+        {
+                .desc = "ARM errata 826319, 827319, 824069, or 819472",
                 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
-                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                 .cpu_enable = cpu_enable_cache_maint_trap,
         },
 #endif
...
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_843419
         {
-                /* Cortex-A53 r0p[01234] */
                 .desc = "ARM erratum 843419",
                 .capability = ARM64_WORKAROUND_843419,
-                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
-                MIDR_FIXED(0x4, BIT(8)),
+                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+                .matches = cpucap_multi_entry_cap_matches,
+                .match_list = erratum_843419_list,
         },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
         {
-                /* Cortex-A53 r0p[01234] */
                 .desc = "ARM erratum 845719",
                 .capability = ARM64_WORKAROUND_845719,
-                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+                ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
         },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_23154
...
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
         {
-                /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                 .desc = "Cavium erratum 27456",
                 .capability = ARM64_WORKAROUND_CAVIUM_27456,
-                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
-                                  0, 0,
-                                  1, 1),
-        },
-        {
-                /* Cavium ThunderX, T81 pass 1.0 */
-                .desc = "Cavium erratum 27456",
-                .capability = ARM64_WORKAROUND_CAVIUM_27456,
-                ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+                ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
         },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_30115
         {
-                /* Cavium ThunderX, T88 pass 1.x - 2.2 */
                 .desc = "Cavium erratum 30115",
                 .capability = ARM64_WORKAROUND_CAVIUM_30115,
-                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
-                                  0, 0,
-                                  1, 2),
-        },
-        {
-                /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
-                .desc = "Cavium erratum 30115",
-                .capability = ARM64_WORKAROUND_CAVIUM_30115,
-                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
-        },
-        {
-                /* Cavium ThunderX, T83 pass 1.0 */
-                .desc = "Cavium erratum 30115",
-                .capability = ARM64_WORKAROUND_CAVIUM_30115,
-                ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+                ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
         },
 #endif
         {
-                .desc = "Mismatched cache line size",
-                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
-                .matches = has_mismatched_cache_type,
-                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-                .cpu_enable = cpu_enable_trap_ctr_access,
-        },
-        {
-                .desc = "Mismatched cache type",
+                .desc = "Mismatched cache type (CTR_EL0)",
                 .capability = ARM64_MISMATCHED_CACHE_TYPE,
                 .matches = has_mismatched_cache_type,
                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
...
         },
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
         {
-                .desc = "Qualcomm Technologies Falkor erratum 1003",
-                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
-                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
-        },
-        {
-                .desc = "Qualcomm Technologies Kryo erratum 1003",
+                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-                .midr_range.model = MIDR_QCOM_KRYO,
-                .matches = is_kryo_midr,
+                .matches = cpucap_multi_entry_cap_matches,
+                .match_list = qcom_erratum_1003_list,
         },
 #endif
-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
         {
-                .desc = "Qualcomm Technologies Falkor erratum 1009",
+                .desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
                 .capability = ARM64_WORKAROUND_REPEAT_TLBI,
-                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+                .matches = cpucap_multi_entry_cap_matches,
+                .match_list = arm64_repeat_tlbi_list,
         },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
...
         },
 #endif
         {
-                .desc = "Branch predictor hardening",
-                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+                .desc = "Spectre-v2",
+                .capability = ARM64_SPECTRE_V2,
                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-                .matches = check_branch_predictor,
-                .cpu_enable = cpu_enable_branch_predictor_hardening,
+                .matches = has_spectre_v2,
+                .cpu_enable = spectre_v2_enable_mitigation,
         },
-#ifdef CONFIG_HARDEN_EL2_VECTORS
+#ifdef CONFIG_RANDOMIZE_BASE
         {
-                .desc = "EL2 vector hardening",
-                .capability = ARM64_HARDEN_EL2_VECTORS,
-                ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
+                /* Must come after the Spectre-v2 entry */
+                .desc = "Spectre-v3a",
+                .capability = ARM64_SPECTRE_V3A,
+                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+                .matches = has_spectre_v3a,
+                .cpu_enable = spectre_v3a_enable_mitigation,
         },
 #endif
         {
-                .desc = "Speculative Store Bypass Disable",
-                .capability = ARM64_SSBD,
+                .desc = "Spectre-v4",
+                .capability = ARM64_SPECTRE_V4,
                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-                .matches = has_ssbd_mitigation,
-                .midr_range_list = arm64_ssb_cpus,
+                .matches = has_spectre_v4,
+                .cpu_enable = spectre_v4_enable_mitigation,
         },
+        {
+                .desc = "Spectre-BHB",
+                .capability = ARM64_SPECTRE_BHB,
+                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+                .matches = is_spectre_bhb_affected,
+                .cpu_enable = spectre_bhb_enable_mitigation,
+        },
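
The has_spectre_v2()/has_spectre_v3a()/has_spectre_v4() matchers and their
*_enable_mitigation() callbacks referenced in the entries above are not
defined in this file; upstream they live in the dedicated Spectre mitigation
code (arch/arm64/kernel/proton-pack.c), which replaced the open-coded
detect_harden_bp_fw()/has_ssbd_mitigation() logic removed earlier in this
diff.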
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+        {
+                .desc = "ARM erratum 1418040",
+                .capability = ARM64_WORKAROUND_1418040,
+                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+                /*
+                 * We need to allow affected CPUs to come in late, but
+                 * also need the non-affected CPUs to be able to come
+                 * in at any point in time. Wonderful.
+                 */
+                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+        },
+#endif
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
+        {
+                .desc = "ARM errata 1165522, 1319367, or 1530923",
+                .capability = ARM64_WORKAROUND_SPECULATIVE_AT,
+                ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
+        },
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
         {
                 .desc = "ARM erratum 1463225",
                 .capability = ARM64_WORKAROUND_1463225,
                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                 .matches = has_cortex_a76_erratum_1463225,
+                .midr_range_list = erratum_1463225,
         },
 #endif
 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
...
                 .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
                 ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                 .matches = needs_tx2_tvm_workaround,
+        },
+        {
+                .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
         },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1542419
...
                 .cpu_enable = cpu_enable_trap_ctr_access,
         },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1508412
+        {
+                /* we depend on the firmware portion for correctness */
+                .desc = "ARM erratum 1508412 (kernel portion)",
+                .capability = ARM64_WORKAROUND_1508412,
+                ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
+                                  0, 0,
+                                  1, 0),
+        },
+#endif
+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+        {
+                .desc = "ARM erratum 2067961 or 2054223",
+                .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
+                ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
+        },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2457168
+        {
+                .desc = "ARM erratum 2457168",
+                .capability = ARM64_WORKAROUND_2457168,
+                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+                /* Cortex-A510 r0p0-r1p1 */
+                CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
+        },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+        {
+                .desc = "ARM erratum 1742098",
+                .capability = ARM64_WORKAROUND_1742098,
+                CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
+                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+        },
+#endif
         {
         }
 };
-
-ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-        if (__spectrev2_safe)
-                return sprintf(buf, "Not affected\n");
-
-        if (__hardenbp_enab)
-                return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-
-        return sprintf(buf, "Vulnerable\n");
-}
-
-ssize_t cpu_show_spec_store_bypass(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-        if (__ssb_safe)
-                return sprintf(buf, "Not affected\n");
-
-        switch (ssbd_state) {
-        case ARM64_SSBD_KERNEL:
-        case ARM64_SSBD_FORCE_ENABLE:
-                if (IS_ENABLED(CONFIG_ARM64_SSBD))
-                        return sprintf(buf,
-                                       "Mitigation: Speculative Store Bypass disabled via prctl\n");
-        }
-
-        return sprintf(buf, "Vulnerable\n");
-}
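
The cpu_show_spectre_v1(), cpu_show_spectre_v2() and
cpu_show_spec_store_bypass() sysfs handlers removed at the end of this diff
reported mitigation state through /sys/devices/system/cpu/vulnerabilities/;
they move out of this file together with the state they read, and upstream
their replacements sit alongside the new Spectre code in proton-pack.c.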
|---|