.. | .. |
---|
6 | 6 | #include <asm/cp15.h> |
---|
7 | 7 | #include <asm/cputype.h> |
---|
8 | 8 | #include <asm/proc-fns.h> |
---|
| 9 | +#include <asm/spectre.h> |
---|
9 | 10 | #include <asm/system_misc.h> |
---|
| 11 | + |
---|
| 12 | +#ifdef CONFIG_ARM_PSCI |
---|
| 13 | +static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) |
---|
| 14 | +{ |
---|
| 15 | + struct arm_smccc_res res; |
---|
| 16 | + |
---|
| 17 | + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
---|
| 18 | + ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
---|
| 19 | + |
---|
| 20 | + switch ((int)res.a0) { |
---|
| 21 | + case SMCCC_RET_SUCCESS: |
---|
| 22 | + return SPECTRE_MITIGATED; |
---|
| 23 | + |
---|
| 24 | + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: |
---|
| 25 | + return SPECTRE_UNAFFECTED; |
---|
| 26 | + |
---|
| 27 | + default: |
---|
| 28 | + return SPECTRE_VULNERABLE; |
---|
| 29 | + } |
---|
| 30 | +} |
---|
| 31 | +#else |
---|
| 32 | +static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) |
---|
| 33 | +{ |
---|
| 34 | + return SPECTRE_VULNERABLE; |
---|
| 35 | +} |
---|
| 36 | +#endif |
---|
10 | 37 | |
---|
11 | 38 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR |
---|
12 | 39 | DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); |
---|
.. | .. |
---|
36 | 63 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); |
---|
37 | 64 | } |
---|
38 | 65 | |
---|
39 | | -static void cpu_v7_spectre_init(void) |
---|
| 66 | +static unsigned int spectre_v2_install_workaround(unsigned int method) |
---|
40 | 67 | { |
---|
41 | 68 | const char *spectre_v2_method = NULL; |
---|
42 | 69 | int cpu = smp_processor_id(); |
---|
43 | 70 | |
---|
44 | 71 | if (per_cpu(harden_branch_predictor_fn, cpu)) |
---|
45 | | - return; |
---|
| 72 | + return SPECTRE_MITIGATED; |
---|
| 73 | + |
---|
| 74 | + switch (method) { |
---|
| 75 | + case SPECTRE_V2_METHOD_BPIALL: |
---|
| 76 | + per_cpu(harden_branch_predictor_fn, cpu) = |
---|
| 77 | + harden_branch_predictor_bpiall; |
---|
| 78 | + spectre_v2_method = "BPIALL"; |
---|
| 79 | + break; |
---|
| 80 | + |
---|
| 81 | + case SPECTRE_V2_METHOD_ICIALLU: |
---|
| 82 | + per_cpu(harden_branch_predictor_fn, cpu) = |
---|
| 83 | + harden_branch_predictor_iciallu; |
---|
| 84 | + spectre_v2_method = "ICIALLU"; |
---|
| 85 | + break; |
---|
| 86 | + |
---|
| 87 | + case SPECTRE_V2_METHOD_HVC: |
---|
| 88 | + per_cpu(harden_branch_predictor_fn, cpu) = |
---|
| 89 | + call_hvc_arch_workaround_1; |
---|
| 90 | + cpu_do_switch_mm = cpu_v7_hvc_switch_mm; |
---|
| 91 | + spectre_v2_method = "hypervisor"; |
---|
| 92 | + break; |
---|
| 93 | + |
---|
| 94 | + case SPECTRE_V2_METHOD_SMC: |
---|
| 95 | + per_cpu(harden_branch_predictor_fn, cpu) = |
---|
| 96 | + call_smc_arch_workaround_1; |
---|
| 97 | + cpu_do_switch_mm = cpu_v7_smc_switch_mm; |
---|
| 98 | + spectre_v2_method = "firmware"; |
---|
| 99 | + break; |
---|
| 100 | + } |
---|
| 101 | + |
---|
| 102 | + if (spectre_v2_method) |
---|
| 103 | + pr_info("CPU%u: Spectre v2: using %s workaround\n", |
---|
| 104 | + smp_processor_id(), spectre_v2_method); |
---|
| 105 | + |
---|
| 106 | + return SPECTRE_MITIGATED; |
---|
| 107 | +} |
---|
| 108 | +#else |
---|
| 109 | +static unsigned int spectre_v2_install_workaround(unsigned int method) |
---|
| 110 | +{ |
---|
| 111 | + pr_info_once("Spectre V2: workarounds disabled by configuration\n"); |
---|
| 112 | + |
---|
| 113 | + return SPECTRE_VULNERABLE; |
---|
| 114 | +} |
---|
| 115 | +#endif |
---|
| 116 | + |
---|
| 117 | +static void cpu_v7_spectre_v2_init(void) |
---|
| 118 | +{ |
---|
| 119 | + unsigned int state, method = 0; |
---|
46 | 120 | |
---|
47 | 121 | switch (read_cpuid_part()) { |
---|
48 | 122 | case ARM_CPU_PART_CORTEX_A8: |
---|
.. | .. |
---|
51 | 125 | case ARM_CPU_PART_CORTEX_A17: |
---|
52 | 126 | case ARM_CPU_PART_CORTEX_A73: |
---|
53 | 127 | case ARM_CPU_PART_CORTEX_A75: |
---|
54 | | - per_cpu(harden_branch_predictor_fn, cpu) = |
---|
55 | | - harden_branch_predictor_bpiall; |
---|
56 | | - spectre_v2_method = "BPIALL"; |
---|
| 128 | + state = SPECTRE_MITIGATED; |
---|
| 129 | + method = SPECTRE_V2_METHOD_BPIALL; |
---|
57 | 130 | break; |
---|
58 | 131 | |
---|
59 | 132 | case ARM_CPU_PART_CORTEX_A15: |
---|
60 | 133 | case ARM_CPU_PART_BRAHMA_B15: |
---|
61 | | - per_cpu(harden_branch_predictor_fn, cpu) = |
---|
62 | | - harden_branch_predictor_iciallu; |
---|
63 | | - spectre_v2_method = "ICIALLU"; |
---|
| 134 | + state = SPECTRE_MITIGATED; |
---|
| 135 | + method = SPECTRE_V2_METHOD_ICIALLU; |
---|
64 | 136 | break; |
---|
65 | 137 | |
---|
66 | | -#ifdef CONFIG_ARM_PSCI |
---|
67 | 138 | case ARM_CPU_PART_BRAHMA_B53: |
---|
68 | 139 | /* Requires no workaround */ |
---|
| 140 | + state = SPECTRE_UNAFFECTED; |
---|
69 | 141 | break; |
---|
| 142 | + |
---|
70 | 143 | default: |
---|
71 | 144 | /* Other ARM CPUs require no workaround */ |
---|
72 | | - if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) |
---|
| 145 | + if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) { |
---|
| 146 | + state = SPECTRE_UNAFFECTED; |
---|
73 | 147 | break; |
---|
74 | | - /* fallthrough */ |
---|
75 | | - /* Cortex A57/A72 require firmware workaround */ |
---|
| 148 | + } |
---|
| 149 | + |
---|
| 150 | + fallthrough; |
---|
| 151 | + |
---|
| 152 | + /* Cortex A57/A72 require firmware workaround */ |
---|
76 | 153 | case ARM_CPU_PART_CORTEX_A57: |
---|
77 | | - case ARM_CPU_PART_CORTEX_A72: { |
---|
78 | | - struct arm_smccc_res res; |
---|
| 154 | + case ARM_CPU_PART_CORTEX_A72: |
---|
| 155 | + state = spectre_v2_get_cpu_fw_mitigation_state(); |
---|
| 156 | + if (state != SPECTRE_MITIGATED) |
---|
| 157 | + break; |
---|
79 | 158 | |
---|
80 | 159 | switch (arm_smccc_1_1_get_conduit()) { |
---|
81 | 160 | case SMCCC_CONDUIT_HVC: |
---|
82 | | - arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
---|
83 | | - ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
---|
84 | | - if ((int)res.a0 != 0) |
---|
85 | | - break; |
---|
86 | | - per_cpu(harden_branch_predictor_fn, cpu) = |
---|
87 | | - call_hvc_arch_workaround_1; |
---|
88 | | - cpu_do_switch_mm = cpu_v7_hvc_switch_mm; |
---|
89 | | - spectre_v2_method = "hypervisor"; |
---|
| 161 | + method = SPECTRE_V2_METHOD_HVC; |
---|
90 | 162 | break; |
---|
91 | 163 | |
---|
92 | 164 | case SMCCC_CONDUIT_SMC: |
---|
93 | | - arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
---|
94 | | - ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
---|
95 | | - if ((int)res.a0 != 0) |
---|
96 | | - break; |
---|
97 | | - per_cpu(harden_branch_predictor_fn, cpu) = |
---|
98 | | - call_smc_arch_workaround_1; |
---|
99 | | - cpu_do_switch_mm = cpu_v7_smc_switch_mm; |
---|
100 | | - spectre_v2_method = "firmware"; |
---|
| 165 | + method = SPECTRE_V2_METHOD_SMC; |
---|
101 | 166 | break; |
---|
102 | 167 | |
---|
103 | 168 | default: |
---|
| 169 | + state = SPECTRE_VULNERABLE; |
---|
104 | 170 | break; |
---|
105 | 171 | } |
---|
106 | 172 | } |
---|
107 | | -#endif |
---|
| 173 | + |
---|
| 174 | + if (state == SPECTRE_MITIGATED) |
---|
| 175 | + state = spectre_v2_install_workaround(method); |
---|
| 176 | + |
---|
| 177 | + spectre_v2_update_state(state, method); |
---|
| 178 | +} |
---|
| 179 | + |
---|
| 180 | +#ifdef CONFIG_HARDEN_BRANCH_HISTORY |
---|
| 181 | +static int spectre_bhb_method; |
---|
| 182 | + |
---|
| 183 | +static const char *spectre_bhb_method_name(int method) |
---|
| 184 | +{ |
---|
| 185 | + switch (method) { |
---|
| 186 | + case SPECTRE_V2_METHOD_LOOP8: |
---|
| 187 | + return "loop"; |
---|
| 188 | + |
---|
| 189 | + case SPECTRE_V2_METHOD_BPIALL: |
---|
| 190 | + return "BPIALL"; |
---|
| 191 | + |
---|
| 192 | + default: |
---|
| 193 | + return "unknown"; |
---|
| 194 | + } |
---|
| 195 | +} |
---|
| 196 | + |
---|
/*
 * Adopt @method as the system-wide Spectre-BHB mitigation.
 *
 * All CPUs must agree on a single method because the (shared) exception
 * vectors are rewritten once; the first CPU to get here picks the method
 * and later CPUs merely confirm it.
 *
 * Returns SPECTRE_MITIGATED on success, SPECTRE_VULNERABLE when the CPUs
 * disagree or the vectors could not be updated.
 */
static int spectre_bhb_install_workaround(int method)
{
	if (spectre_bhb_method != method) {
		if (spectre_bhb_method) {
			/* A different CPU already committed to another method. */
			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
			       smp_processor_id());

			return SPECTRE_VULNERABLE;
		}

		/* Patch the vectors first; only record the method on success. */
		if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
			return SPECTRE_VULNERABLE;

		spectre_bhb_method = method;

		pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
			smp_processor_id(), spectre_bhb_method_name(method));
	}

	return SPECTRE_MITIGATED;
}
#else
/* CONFIG_HARDEN_BRANCH_HISTORY disabled: no BHB mitigation is available. */
static int spectre_bhb_install_workaround(int method)
{
	return SPECTRE_VULNERABLE;
}
#endif
---|
| 224 | + |
---|
| 225 | +static void cpu_v7_spectre_bhb_init(void) |
---|
| 226 | +{ |
---|
| 227 | + unsigned int state, method = 0; |
---|
| 228 | + |
---|
| 229 | + switch (read_cpuid_part()) { |
---|
| 230 | + case ARM_CPU_PART_CORTEX_A15: |
---|
| 231 | + case ARM_CPU_PART_BRAHMA_B15: |
---|
| 232 | + case ARM_CPU_PART_CORTEX_A57: |
---|
| 233 | + case ARM_CPU_PART_CORTEX_A72: |
---|
| 234 | + state = SPECTRE_MITIGATED; |
---|
| 235 | + method = SPECTRE_V2_METHOD_LOOP8; |
---|
| 236 | + break; |
---|
| 237 | + |
---|
| 238 | + case ARM_CPU_PART_CORTEX_A73: |
---|
| 239 | + case ARM_CPU_PART_CORTEX_A75: |
---|
| 240 | + state = SPECTRE_MITIGATED; |
---|
| 241 | + method = SPECTRE_V2_METHOD_BPIALL; |
---|
| 242 | + break; |
---|
| 243 | + |
---|
| 244 | + default: |
---|
| 245 | + state = SPECTRE_UNAFFECTED; |
---|
| 246 | + break; |
---|
| 247 | + } |
---|
| 248 | + |
---|
| 249 | + if (state == SPECTRE_MITIGATED) |
---|
| 250 | + state = spectre_bhb_install_workaround(method); |
---|
| 251 | + |
---|
| 252 | + spectre_v2_update_state(state, method); |
---|
| 253 | +} |
---|
119 | 254 | |
---|
120 | 255 | static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, |
---|
121 | 256 | u32 mask, const char *msg) |
---|
.. | .. |
---|
145 | 280 | void cpu_v7_ca8_ibe(void) |
---|
146 | 281 | { |
---|
147 | 282 | if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) |
---|
148 | | - cpu_v7_spectre_init(); |
---|
| 283 | + cpu_v7_spectre_v2_init(); |
---|
149 | 284 | } |
---|
150 | 285 | |
---|
/*
 * Cortex-A15 entry point.
 *
 * The Spectre v2 hardening is only installed when the auxiliary control
 * register check for BIT(0) passes (presumably the IBE bit — firmware
 * must have enabled it; TODO confirm against cpu_v7_check_auxcr_set).
 * NOTE: the Spectre-BHB init deliberately runs unconditionally — it does
 * not depend on the auxcr check above.
 */
void cpu_v7_ca15_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
		cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
---|
156 | 292 | |
---|
/*
 * Generic bug-workaround init for ARMv7 CPUs: probe and install both the
 * Spectre v2 branch-predictor hardening and the Spectre-BHB mitigation.
 */
void cpu_v7_bugs_init(void)
{
	cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
---|