.. | .. |
---|
20 | 20 | |
---|
21 | 21 | #define MWAIT_ECX_INTERRUPT_BREAK 0x1 |
---|
22 | 22 | #define MWAITX_ECX_TIMER_ENABLE BIT(1) |
---|
23 | | -#define MWAITX_MAX_LOOPS ((u32)-1) |
---|
| 23 | +#define MWAITX_MAX_WAIT_CYCLES UINT_MAX |
---|
24 | 24 | #define MWAITX_DISABLE_CSTATES 0xf0 |
---|
| 25 | +#define TPAUSE_C01_STATE 1 |
---|
| 26 | +#define TPAUSE_C02_STATE 0 |
---|
25 | 27 | |
---|
26 | 28 | static inline void __monitor(const void *eax, unsigned long ecx, |
---|
27 | 29 | unsigned long edx) |
---|
.. | .. |
---|
87 | 89 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) |
---|
88 | 90 | { |
---|
89 | 91 | mds_idle_clear_cpu_buffers(); |
---|
90 | | - |
---|
91 | | - trace_hardirqs_on(); |
---|
92 | 92 | /* "mwait %eax, %ecx;" */ |
---|
93 | 93 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" |
---|
94 | 94 | :: "a" (eax), "c" (ecx)); |
---|
.. | .. |
---|
120 | 120 | current_clr_polling(); |
---|
121 | 121 | } |
---|
122 | 122 | |
---|
/*
 * Wait using the TPAUSE instruction (part of the WAITPKG feature set).
 *
 * Caller can specify whether to enter C0.1 (low latency, less
 * power saving) or C0.2 state (saves more power, but longer wakeup
 * latency). This may be overridden by the IA32_UMWAIT_CONTROL MSR
 * which can force requests for C0.2 to be downgraded to C0.1.
 *
 * @ecx: requested sub-state — TPAUSE_C01_STATE or TPAUSE_C02_STATE
 *       (defined above).
 * @edx: high 32 bits of the TSC deadline at which the wait ends.
 * @eax: low 32 bits of the TSC deadline.
 *       (edx:eax deadline semantics per the Intel SDM TPAUSE entry —
 *       not derivable from this file alone.)
 */
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
	/* "tpause %ecx, %edx, %eax;" */
	#ifdef CONFIG_AS_TPAUSE
	/* Assembler understands the TPAUSE mnemonic: emit it directly. */
	asm volatile("tpause %%ecx\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#else
	/*
	 * Older assemblers don't know TPAUSE; emit the raw opcode
	 * bytes for "tpause %ecx" (66 0F AE F1) instead. The register
	 * constraints below still place the operands in ecx/edx/eax.
	 */
	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#endif
}
---|
| 142 | + |
---|
123 | 143 | #endif /* _ASM_X86_MWAIT_H */ |
---|