2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/mwait.h
@@ -20,8 +20,10 @@
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
 #define MWAITX_ECX_TIMER_ENABLE		BIT(1)
-#define MWAITX_MAX_LOOPS		((u32)-1)
+#define MWAITX_MAX_WAIT_CYCLES		UINT_MAX
 #define MWAITX_DISABLE_CSTATES		0xf0
+#define TPAUSE_C01_STATE		1
+#define TPAUSE_C02_STATE		0
 
 static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
@@ -87,8 +89,6 @@
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
 	mds_idle_clear_cpu_buffers();
-
-	trace_hardirqs_on();
 	/* "mwait %eax, %ecx;" */
 	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
@@ -120,4 +120,24 @@
 	current_clr_polling();
 }
 
+/*
+ * Caller can specify whether to enter C0.1 (low latency, less
+ * power saving) or C0.2 state (saves more power, but longer wakeup
+ * latency). This may be overridden by the IA32_UMWAIT_CONTROL MSR
+ * which can force requests for C0.2 to be downgraded to C0.1.
+ */
+static inline void __tpause(u32 ecx, u32 edx, u32 eax)
+{
+	/* "tpause %ecx, %edx, %eax;" */
+	#ifdef CONFIG_AS_TPAUSE
+	asm volatile("tpause %%ecx\n"
+		     :
+		     : "c"(ecx), "d"(edx), "a"(eax));
+	#else
+	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
+		     :
+		     : "c"(ecx), "d"(edx), "a"(eax));
+	#endif
+}
+
 #endif /* _ASM_X86_MWAIT_H */
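
For context: TPAUSE takes its wakeup deadline as a 64-bit TSC value in EDX:EAX and the requested power state in ECX, where bit 0 set requests C0.1 and bit 0 clear requests C0.2, which is why TPAUSE_C01_STATE is 1 and TPAUSE_C02_STATE is 0 above. The sketch below shows how a caller might drive __tpause() to wait until a TSC deadline. It is modeled loosely on the kernel's TPAUSE-based delay path, but the function name is illustrative and not part of this patch.

/*
 * Illustrative only (not in the patch): wait until the TSC reaches
 * start + cycles. lower_32_bits()/upper_32_bits() are the usual
 * kernel helpers. The deeper C0.2 state is requested because its
 * exit latency is small relative to a microsecond-scale delay.
 */
static void tpause_delay(u64 start, u64 cycles)
{
	u64 until = start + cycles;

	/*
	 * TPAUSE returns at the deadline, on an interrupt, or early if
	 * the wait exceeds the IA32_UMWAIT_CONTROL limit, so a real
	 * delay loop would re-read the TSC and retry until it passes
	 * the deadline.
	 */
	__tpause(TPAUSE_C02_STATE, upper_32_bits(until),
		 lower_32_bits(until));
}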