2024-02-19 1c055e55a242a33e574e48be530e06770a210dcd
kernel/arch/x86/include/asm/debugreg.h
@@ -8,7 +8,7 @@
 
 DECLARE_PER_CPU(unsigned long, cpu_dr7);
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
 /*
  * These special macros can be used to get or set a debugging register
  */
@@ -18,7 +18,7 @@
 	native_set_debugreg(register, value)
 #endif
 
-static inline unsigned long native_get_debugreg(int regno)
+static __always_inline unsigned long native_get_debugreg(int regno)
 {
 	unsigned long val = 0;	/* Damn you, gcc! */
 
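
For context on the hunks above: under !CONFIG_PARAVIRT_XXL the get_debugreg()/set_debugreg() macros mentioned in the comment simply forward to the native helpers being annotated here. A minimal sketch of those wrappers, inferred from the visible native_set_debugreg(register, value) context line rather than copied verbatim from the header, assuming that argument order:

/*
 * Sketch only (not verbatim from the tree): non-paravirt wrappers around
 * the native debug-register accessors.  get_debugreg() stores the contents
 * of DR<register> into @var, set_debugreg() writes @value into DR<register>.
 */
#define get_debugreg(var, register)				\
	((var) = native_get_debugreg(register))
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
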
@@ -39,7 +39,20 @@
 		asm("mov %%db6, %0" :"=r" (val));
 		break;
 	case 7:
-		asm("mov %%db7, %0" :"=r" (val));
+		/*
+		 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
+		 * with other code.
+		 *
+		 * This is needed because a DR7 access can cause a #VC exception
+		 * when running under SEV-ES. Taking a #VC exception is not a
+		 * safe thing to do just anywhere in the entry code and
+		 * re-ordering might place the access into an unsafe location.
+		 *
+		 * This happened in the NMI handler, where the DR7 read was
+		 * re-ordered to happen before the call to sev_es_ist_enter(),
+		 * causing stack recursion.
+		 */
+		asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
 		break;
 	default:
 		BUG();
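
__FORCE_ORDER itself is not part of this hunk; it comes from <asm/special_insns.h>. The idiom it stands for is a dummy memory input operand shared by volatile asm statements, which gives the compiler an artificial dependency and so discourages it from floating the DR7 access across surrounding code. A stand-alone sketch of that idiom (the macro name and the placeholder address below are illustrative, not the kernel's definition):

/*
 * Illustrative only: a dummy "m" input gives the asm a (fake) memory
 * dependency.  The asm never touches the location; the address is a
 * placeholder.  Combined with "volatile", this makes it much harder for
 * the optimizer to move the DR7 access, which matters under SEV-ES as
 * explained in the comment added above.
 */
#define FORCE_ORDER_SKETCH	"m" (*(unsigned int *)0x1000UL)

static inline unsigned long sketch_read_dr7(void)
{
	unsigned long val;

	asm volatile("mov %%db7, %0" : "=r" (val) : FORCE_ORDER_SKETCH);
	return val;
}
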
@@ -47,7 +60,7 @@
 	return val;
 }
 
-static inline void native_set_debugreg(int regno, unsigned long value)
+static __always_inline void native_set_debugreg(int regno, unsigned long value)
 {
 	switch (regno) {
 	case 0:
@@ -66,7 +79,16 @@
 		asm("mov %0, %%db6" ::"r" (value));
 		break;
 	case 7:
-		asm("mov %0, %%db7" ::"r" (value));
+		/*
+		 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
+		 * with other code.
+		 *
+		 * While it didn't happen with a DR7 write (see the DR7 read
+		 * comment above which explains where it happened), add the
+		 * __FORCE_ORDER here too to avoid similar problems in the
+		 * future.
+		 */
+		asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
 		break;
 	default:
 		BUG();
@@ -85,35 +107,45 @@
 	set_debugreg(0UL, 3);
 }
 
-static inline int hw_breakpoint_active(void)
+static __always_inline bool hw_breakpoint_active(void)
 {
 	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
 }
 
-extern void aout_dump_debugregs(struct user *dump);
-
 extern void hw_breakpoint_restore(void);
 
-#ifdef CONFIG_X86_64
-DECLARE_PER_CPU(int, debug_stack_usage);
-static inline void debug_stack_usage_inc(void)
+static __always_inline unsigned long local_db_save(void)
 {
-	__this_cpu_inc(debug_stack_usage);
+	unsigned long dr7;
+
+	if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
+		return 0;
+
+	get_debugreg(dr7, 7);
+	dr7 &= ~0x400; /* architecturally set bit */
+	if (dr7)
+		set_debugreg(0, 7);
+	/*
+	 * Ensure the compiler doesn't lower the above statements into
+	 * the critical section; disabling breakpoints late would not
+	 * be good.
+	 */
+	barrier();
+
+	return dr7;
 }
-static inline void debug_stack_usage_dec(void)
+
+static __always_inline void local_db_restore(unsigned long dr7)
 {
-	__this_cpu_dec(debug_stack_usage);
+	/*
+	 * Ensure the compiler doesn't raise this statement into
+	 * the critical section; enabling breakpoints early would
+	 * not be good.
+	 */
+	barrier();
+	if (dr7)
+		set_debugreg(dr7, 7);
 }
-int is_debug_stack(unsigned long addr);
-void debug_stack_set_zero(void);
-void debug_stack_reset(void);
-#else /* !X86_64 */
-static inline int is_debug_stack(unsigned long addr) { return 0; }
-static inline void debug_stack_set_zero(void) { }
-static inline void debug_stack_reset(void) { }
-static inline void debug_stack_usage_inc(void) { }
-static inline void debug_stack_usage_dec(void) { }
-#endif /* X86_64 */
 
 #ifdef CONFIG_CPU_SUP_AMD
 extern void set_dr_addr_mask(unsigned long mask, int dr);
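
The new local_db_save()/local_db_restore() pair brackets a region in which #DB must not trigger: save reads DR7, masks off the architecturally set bit (0x400), clears the register if any breakpoint was armed, and the barrier() calls pin the disable before and the re-enable after the protected region. A hypothetical caller (the function name is a placeholder, not taken from the tree) would look like this:

/*
 * Hypothetical usage sketch: run some entry-side work with hardware
 * breakpoints disabled, then restore whatever was armed before.
 */
static __always_inline void sketch_protected_section(void)
{
	unsigned long dr7 = local_db_save();	/* breakpoints now off */

	/* ... work that must not take a #DB, e.g. fragile entry code ... */

	local_db_restore(dr7);			/* previous DR7 re-armed */
}

Note that local_db_save() returns 0 early on hypervisor guests with no active breakpoints, so in that common case the restore path skips the DR7 write entirely.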