forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/x86/kernel/cpu/amd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/export.h>
 #include <linux/bitops.h>
 #include <linux/elf.h>
@@ -7,14 +8,18 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/random.h>
+#include <linux/topology.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cacheinfo.h>
 #include <asm/cpu.h>
 #include <asm/spec-ctrl.h>
 #include <asm/smp.h>
+#include <asm/numa.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
+#include <asm/debugreg.h>
+#include <asm/resctrl.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/mmconfig.h>
@@ -23,17 +28,89 @@
 
 #include "cpu.h"
 
-static const int amd_erratum_383[];
-static const int amd_erratum_400[];
-static const int amd_erratum_1054[];
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
-
 /*
  * nodes_per_socket: Stores the number of nodes per socket.
  * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
  * Node Identifiers[10:8]
  */
 static u32 nodes_per_socket = 1;
+
+/*
+ * AMD errata checking
+ *
+ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+ * have an OSVW id assigned, which it takes as first argument. Both take a
+ * variable number of family-specific model-stepping ranges created by
+ * AMD_MODEL_RANGE().
+ *
+ * Example:
+ *
+ * const int amd_erratum_319[] =
+ *        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+ *                           AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+ *                           AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+ */
+
+#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
+#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
+#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
+#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
+#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)
+
+static const int amd_erratum_400[] =
+        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+
+static const int amd_erratum_383[] =
+        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+
+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+static const int amd_erratum_1054[] =
+        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+
+static const int amd_zenbleed[] =
+        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
+                           AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+                           AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
+                           AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
+
+static const int amd_div0[] =
+        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
+                           AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+{
+        int osvw_id = *erratum++;
+        u32 range;
+        u32 ms;
+
+        if (osvw_id >= 0 && osvw_id < 65536 &&
+            cpu_has(cpu, X86_FEATURE_OSVW)) {
+                u64 osvw_len;
+
+                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+                if (osvw_id < osvw_len) {
+                        u64 osvw_bits;
+
+                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+                               osvw_bits);
+                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
+                }
+        }
+
+        /* OSVW unavailable or ID unknown, match family-model-stepping range */
+        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+        while ((range = *erratum++))
+                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+                    (ms >= AMD_MODEL_RANGE_START(range)) &&
+                    (ms <= AMD_MODEL_RANGE_END(range)))
+                        return true;
+
+        return false;
+}
 
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
@@ -82,11 +159,14 @@
  * performance at the same time..
  */
 
+#ifdef CONFIG_X86_32
 extern __visible void vide(void);
-__asm__(".globl vide\n"
+__asm__(".text\n"
+        ".globl vide\n"
         ".type vide, @function\n"
         ".align 4\n"
         "vide: ret\n");
+#endif
 
 static void init_amd_k5(struct cpuinfo_x86 *c)
 {
@@ -314,13 +394,6 @@
         c->cpu_core_id %= cus_per_node;
 }
 
-
-static void amd_get_topology_early(struct cpuinfo_x86 *c)
-{
-        if (cpu_has(c, X86_FEATURE_TOPOEXT))
-                smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
-}
-
 /*
  * Fixup core topology information for
  * (1) AMD multi-node processors
@@ -329,7 +402,6 @@
  */
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
-        u8 node_id;
         int cpu = smp_processor_id();
 
         /* get information required for multi-node processors */
@@ -339,7 +411,7 @@
 
                 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
 
-                node_id = ecx & 0xff;
+                c->cpu_die_id = ecx & 0xff;
 
                 if (c->x86 == 0x15)
                         c->cu_id = ebx & 0xff;
@@ -359,15 +431,15 @@
                 if (!err)
                         c->x86_coreid_bits = get_count_order(c->x86_max_cores);
 
-                cacheinfo_amd_init_llc_id(c, cpu, node_id);
+                cacheinfo_amd_init_llc_id(c, cpu);
 
         } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                 u64 value;
 
                 rdmsrl(MSR_FAM10H_NODE_ID, value);
-                node_id = value & 7;
+                c->cpu_die_id = value & 7;
 
-                per_cpu(cpu_llc_id, cpu) = node_id;
+                per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
         } else
                 return;
 
@@ -392,7 +464,36 @@
         /* Convert the initial APIC ID into the socket ID */
         c->phys_proc_id = c->initial_apicid >> bits;
         /* use socket ID also for last level cache */
-        per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+        per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
+}
+
+static void amd_detect_ppin(struct cpuinfo_x86 *c)
+{
+        unsigned long long val;
+
+        if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
+                return;
+
+        /* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
+        if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
+                goto clear_ppin;
+
+        /* PPIN is locked in disabled mode, clear feature bit */
+        if ((val & 3UL) == 1UL)
+                goto clear_ppin;
+
+        /* If PPIN is disabled, try to enable it */
+        if (!(val & 2UL)) {
+                wrmsrl_safe(MSR_AMD_PPIN_CTL, val | 2UL);
+                rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
+        }
+
+        /* If PPIN_EN bit is 1, return from here; otherwise fall through */
+        if (val & 2UL)
+                return;
+
+clear_ppin:
+        clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
 }
 
 u16 amd_get_nb_id(int cpu)
@@ -540,12 +641,12 @@
                 u32 ecx;
 
                 ecx = cpuid_ecx(0x8000001e);
-                nodes_per_socket = ((ecx >> 8) & 7) + 1;
+                __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
         } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                 u64 value;
 
                 rdmsrl(MSR_FAM10H_NODE_ID, value);
-                nodes_per_socket = ((value >> 3) & 7) + 1;
+                __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
         }
 
         if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
@@ -569,6 +670,8 @@
                         x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                 }
         }
+
+        resctrl_cpu_detect(c);
 }
 
 static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
@@ -582,7 +685,7 @@
          * If BIOS has not enabled SME then don't advertise the
          * SME feature (set in scattered.c).
          * For SEV: If BIOS has not enabled SEV then don't advertise the
-         * SEV feature (set in scattered.c).
+         * SEV and SEV_ES feature (set in scattered.c).
          *
          * In all cases, since support for SME and SEV requires long mode,
          * don't advertise the feature under CONFIG_X86_32.
@@ -613,6 +716,7 @@
                 setup_clear_cpu_cap(X86_FEATURE_SME);
 clear_sev:
                 setup_clear_cpu_cap(X86_FEATURE_SEV);
+                setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
         }
 }
 
@@ -712,7 +816,8 @@
                 }
         }
 
-        amd_get_topology_early(c);
+        if (cpu_has(c, X86_FEATURE_TOPOEXT))
+                smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
 }
 
 static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -788,8 +893,6 @@
         if (cpu_has_amd_erratum(c, amd_erratum_383))
                 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 }
-
-#define MSR_AMD64_DE_CFG        0xC0011029
 
 static void init_amd_ln(struct cpuinfo_x86 *c)
 {
@@ -881,16 +984,101 @@
         clear_rdrand_cpuid_bit(c);
 }
 
+void init_spectral_chicken(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_CPU_UNRET_ENTRY
+        u64 value;
+
+        /*
+         * On Zen2 we offer this chicken (bit) on the altar of Speculation.
+         *
+         * This suppresses speculation from the middle of a basic block, i.e. it
+         * suppresses non-branch predictions.
+         *
+         * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H
+         */
+        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+                if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
+                        value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
+                        wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
+                }
+        }
+#endif
+        /*
+         * Work around Erratum 1386. The XSAVES instruction malfunctions in
+         * certain circumstances on Zen1/2 uarch, and not all parts have had
+         * updated microcode at the time of writing (March 2023).
+         *
+         * Affected parts all have no supervisor XSAVE states, meaning that
+         * the XSAVEC instruction (which works fine) is equivalent.
+         */
+        clear_cpu_cap(c, X86_FEATURE_XSAVES);
+}
+
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
         set_cpu_cap(c, X86_FEATURE_ZEN);
 
-        /*
-         * Fix erratum 1076: CPB feature bit not being set in CPUID.
-         * Always set it, except when running under a hypervisor.
-         */
-        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
-                set_cpu_cap(c, X86_FEATURE_CPB);
+#ifdef CONFIG_NUMA
+        node_reclaim_distance = 32;
+#endif
+
+        /* Fix up CPUID bits, but only if not virtualised. */
+        if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
+
+                /* Erratum 1076: CPB feature bit not being set in CPUID. */
+                if (!cpu_has(c, X86_FEATURE_CPB))
+                        set_cpu_cap(c, X86_FEATURE_CPB);
+
+                /*
+                 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
+                 * Branch Type Confusion, but predate the allocation of the
+                 * BTC_NO bit.
+                 */
+                if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
+                        set_cpu_cap(c, X86_FEATURE_BTC_NO);
+        }
+}
+
+static bool cpu_has_zenbleed_microcode(void)
+{
+        u32 good_rev = 0;
+
+        switch (boot_cpu_data.x86_model) {
+        case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
+        case 0x60 ... 0x67: good_rev = 0x0860010b; break;
+        case 0x68 ... 0x6f: good_rev = 0x08608105; break;
+        case 0x70 ... 0x7f: good_rev = 0x08701032; break;
+        case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
+
+        default:
+                return false;
+                break;
+        }
+
+        if (boot_cpu_data.microcode < good_rev)
+                return false;
+
+        return true;
+}
+
+static void zenbleed_check(struct cpuinfo_x86 *c)
+{
+        if (!cpu_has_amd_erratum(c, amd_zenbleed))
+                return;
+
+        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+                return;
+
+        if (!cpu_has(c, X86_FEATURE_AVX))
+                return;
+
+        if (!cpu_has_zenbleed_microcode()) {
+                pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
+                msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
+        } else {
+                msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
+        }
 }
 
 static void init_amd(struct cpuinfo_x86 *c)
@@ -922,7 +1110,9 @@
         case 0x12: init_amd_ln(c); break;
         case 0x15: init_amd_bd(c); break;
         case 0x16: init_amd_jg(c); break;
-        case 0x17: init_amd_zn(c); break;
+        case 0x17: init_spectral_chicken(c);
+                   fallthrough;
+        case 0x19: init_amd_zn(c); break;
         }
 
         /*
@@ -937,36 +1127,22 @@
         amd_detect_cmp(c);
         amd_get_topology(c);
         srat_detect_node(c);
+        amd_detect_ppin(c);
 
         init_amd_cacheinfo(c);
 
         if (cpu_has(c, X86_FEATURE_XMM2)) {
-                unsigned long long val;
-                int ret;
-
                 /*
-                 * A serializing LFENCE has less overhead than MFENCE, so
-                 * use it for execution serialization. On families which
+                 * Use LFENCE for execution serialization. On families which
                  * don't have that MSR, LFENCE is already serializing.
                  * msr_set_bit() uses the safe accessors, too, even if the MSR
                  * is not present.
                  */
-                msr_set_bit(MSR_F10H_DECFG,
-                            MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+                msr_set_bit(MSR_AMD64_DE_CFG,
+                            MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
 
-                /*
-                 * Verify that the MSR write was successful (could be running
-                 * under a hypervisor) and only then assume that LFENCE is
-                 * serializing.
-                 */
-                ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
-                if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
-                        /* A serializing LFENCE stops RDTSC speculation */
-                        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-                } else {
-                        /* MFENCE stops RDTSC speculation */
-                        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
-                }
+                /* A serializing LFENCE stops RDTSC speculation */
+                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
         }
 
         /*
@@ -995,6 +1171,13 @@
         msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
 
         check_null_seg_clears_base(c);
+
+        zenbleed_check(c);
+
+        if (cpu_has_amd_erratum(c, amd_div0)) {
+                pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+                setup_force_cpu_bug(X86_BUG_DIV0);
+        }
 }
 
 #ifdef CONFIG_X86_32
@@ -1090,73 +1273,6 @@
 
 cpu_dev_register(amd_cpu_dev);
 
-/*
- * AMD errata checking
- *
- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
- * have an OSVW id assigned, which it takes as first argument. Both take a
- * variable number of family-specific model-stepping ranges created by
- * AMD_MODEL_RANGE().
- *
- * Example:
- *
- * const int amd_erratum_319[] =
- *        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
- *                           AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
- *                           AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
- */
-
-#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
-#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
-        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
-#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
-#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
-#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)
-
-static const int amd_erratum_400[] =
-        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
-                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
-
-static const int amd_erratum_383[] =
-        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-
-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
-static const int amd_erratum_1054[] =
-        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
-
-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
-{
-        int osvw_id = *erratum++;
-        u32 range;
-        u32 ms;
-
-        if (osvw_id >= 0 && osvw_id < 65536 &&
-            cpu_has(cpu, X86_FEATURE_OSVW)) {
-                u64 osvw_len;
-
-                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
-                if (osvw_id < osvw_len) {
-                        u64 osvw_bits;
-
-                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
-                               osvw_bits);
-                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
-                }
-        }
-
-        /* OSVW unavailable or ID unknown, match family-model-stepping range */
-        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
-        while ((range = *erratum++))
-                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
-                    (ms >= AMD_MODEL_RANGE_START(range)) &&
-                    (ms <= AMD_MODEL_RANGE_END(range)))
-                        return true;
-
-        return false;
-}
-
 void set_dr_addr_mask(unsigned long mask, int dr)
 {
         if (!boot_cpu_has(X86_FEATURE_BPEXT))
@@ -1175,3 +1291,45 @@
                 break;
         }
 }
+
+bool cpu_has_ibpb_brtype_microcode(void)
+{
+        switch (boot_cpu_data.x86) {
+        /* Zen1/2 IBPB flushes branch type predictions too. */
+        case 0x17:
+                return boot_cpu_has(X86_FEATURE_AMD_IBPB);
+        case 0x19:
+                /* Poke the MSR bit on Zen3/4 to check its presence. */
+                if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+                        setup_force_cpu_cap(X86_FEATURE_SBPB);
+                        return true;
+                } else {
+                        return false;
+                }
+        default:
+                return false;
+        }
+}
+
+static void zenbleed_check_cpu(void *unused)
+{
+        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+
+        zenbleed_check(c);
+}
+
+void amd_check_microcode(void)
+{
+        on_each_cpu(zenbleed_check_cpu, NULL, 1);
+}
+
+/*
+ * Issue a DIV 0/1 insn to clear any division data from previous DIV
+ * operations.
+ */
+void noinstr amd_clear_divider(void)
+{
+        asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+                     :: "a" (0), "d" (0), "r" (1));
+}
+EXPORT_SYMBOL_GPL(amd_clear_divider);
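
Note (not part of the patch): the AMD_MODEL_RANGE() macros added above pack a CPU family plus a model/stepping window into a single int, and cpu_has_amd_erratum() compares the running CPU's (model << 4) | stepping against that window when OSVW is unavailable. Below is a minimal standalone sketch of that encoding only, mirroring the macros from the patch; the chosen CPU values (family 0x17, model 0x31, stepping 0x0) are hypothetical demo inputs, and the file name is made up.

/* amd_model_range_demo.c - illustrative only; compiles with any hosted C compiler. */
#include <stdio.h>
#include <stdbool.h>

/* Same packing/unpacking macros as in the patch above. */
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

int main(void)
{
        /* First entry of the amd_zenbleed table: family 0x17, models 0x30-0x4f. */
        const unsigned int range = AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf);

        /* Hypothetical CPU for the demo: family 0x17, model 0x31, stepping 0x0. */
        unsigned int family = 0x17, model = 0x31, stepping = 0x0;

        /* Same comparison cpu_has_amd_erratum() performs against the packed range. */
        unsigned int ms = (model << 4) | stepping;
        bool affected = family == AMD_MODEL_RANGE_FAMILY(range) &&
                        ms >= AMD_MODEL_RANGE_START(range) &&
                        ms <= AMD_MODEL_RANGE_END(range);

        printf("range=%#x start=%#x end=%#x ms=%#x -> %s\n",
               range, AMD_MODEL_RANGE_START(range), AMD_MODEL_RANGE_END(range),
               ms, affected ? "in range (erratum applies)" : "out of range");
        return 0;
}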