.. | .. |
---|
60 | 60 | #define EAX_EDX_RET(val, low, high) "=A" (val) |
---|
61 | 61 | #endif |
---|
62 | 62 | |
---|
63 | | -#ifdef CONFIG_TRACEPOINTS |
---|
64 | 63 | /* |
---|
65 | 64 | * Be very careful with includes. This header is prone to include loops. |
---|
66 | 65 | */ |
---|
67 | 66 | #include <asm/atomic.h> |
---|
68 | 67 | #include <linux/tracepoint-defs.h> |
---|
69 | 68 | |
---|
70 | | -extern struct tracepoint __tracepoint_read_msr; |
---|
71 | | -extern struct tracepoint __tracepoint_write_msr; |
---|
72 | | -extern struct tracepoint __tracepoint_rdpmc; |
---|
73 | | -#define msr_tracepoint_active(t) static_key_false(&(t).key) |
---|
| 69 | +#ifdef CONFIG_TRACEPOINTS |
---|
| 70 | +DECLARE_TRACEPOINT(read_msr); |
---|
| 71 | +DECLARE_TRACEPOINT(write_msr); |
---|
| 72 | +DECLARE_TRACEPOINT(rdpmc); |
---|
74 | 73 | extern void do_trace_write_msr(unsigned int msr, u64 val, int failed); |
---|
75 | 74 | extern void do_trace_read_msr(unsigned int msr, u64 val, int failed); |
---|
76 | 75 | extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed); |
---|
77 | 76 | #else |
---|
78 | | -#define msr_tracepoint_active(t) false |
---|
79 | 77 | static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {} |
---|
80 | 78 | static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {} |
---|
81 | 79 | static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {} |
---|
.. | .. |
---|
128 | 126 | |
---|
129 | 127 | val = __rdmsr(msr); |
---|
130 | 128 | |
---|
131 | | - if (msr_tracepoint_active(__tracepoint_read_msr)) |
---|
| 129 | + if (tracepoint_enabled(read_msr)) |
---|
132 | 130 | do_trace_read_msr(msr, val, 0); |
---|
133 | 131 | |
---|
134 | 132 | return val; |
---|
.. | .. |
---|
150 | 148 | _ASM_EXTABLE(2b, 3b) |
---|
151 | 149 | : [err] "=r" (*err), EAX_EDX_RET(val, low, high) |
---|
152 | 150 | : "c" (msr), [fault] "i" (-EIO)); |
---|
153 | | - if (msr_tracepoint_active(__tracepoint_read_msr)) |
---|
| 151 | + if (tracepoint_enabled(read_msr)) |
---|
154 | 152 | do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err); |
---|
155 | 153 | return EAX_EDX_VAL(val, low, high); |
---|
156 | 154 | } |
---|
.. | .. |
---|
161 | 159 | { |
---|
162 | 160 | __wrmsr(msr, low, high); |
---|
163 | 161 | |
---|
164 | | - if (msr_tracepoint_active(__tracepoint_write_msr)) |
---|
| 162 | + if (tracepoint_enabled(write_msr)) |
---|
165 | 163 | do_trace_write_msr(msr, ((u64)high << 32 | low), 0); |
---|
166 | 164 | } |
---|
167 | 165 | |
---|
.. | .. |
---|
181 | 179 | : "c" (msr), "0" (low), "d" (high), |
---|
182 | 180 | [fault] "i" (-EIO) |
---|
183 | 181 | : "memory"); |
---|
184 | | - if (msr_tracepoint_active(__tracepoint_write_msr)) |
---|
| 182 | + if (tracepoint_enabled(write_msr)) |
---|
185 | 183 | do_trace_write_msr(msr, ((u64)high << 32 | low), err); |
---|
186 | 184 | return err; |
---|
187 | 185 | } |
---|
.. | .. |
---|
217 | 215 | */ |
---|
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
---|
234 | 243 | |
---|
/*
 * Read performance-monitoring counter @counter via RDPMC.
 * Fires the rdpmc tracepoint (do_trace_rdpmc) when it is enabled.
 */
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	/* RDPMC takes the counter index in ECX; result comes back in EDX:EAX. */
	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}
---|
244 | 253 | |
---|
245 | | -#ifdef CONFIG_PARAVIRT |
---|
| 254 | +#ifdef CONFIG_PARAVIRT_XXL |
---|
246 | 255 | #include <asm/paravirt.h> |
---|
247 | 256 | #else |
---|
248 | 257 | #include <linux/errno.h> |
---|
.. | .. |
---|
305 | 314 | |
---|
306 | 315 | #define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) |
---|
307 | 316 | |
---|
308 | | -#endif /* !CONFIG_PARAVIRT */ |
---|
| 317 | +#endif /* !CONFIG_PARAVIRT_XXL */ |
---|
309 | 318 | |
---|
310 | 319 | /* |
---|
311 | 320 | * 64-bit version of wrmsr_safe(): |
---|