2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/kernel/sched/idle.c
@@ -80,6 +80,7 @@
 void __weak arch_cpu_idle(void)
 {
 	cpu_idle_force_poll = 1;
+	hard_local_irq_enable();
 	raw_local_irq_enable();
 }
 
@@ -87,13 +88,18 @@
  * default_idle_call - Default CPU idle routine.
  *
  * To use when the cpuidle framework cannot be used.
+ *
+ * When interrupts are pipelined, this call is entered with hard irqs
+ * on and the in-band stage is stalled. Returns with hard irqs on,
+ * in-band stage stalled. irq_cpuidle_enter() then turns off hard irqs
+ * before synchronizing irqs, making sure we have no event lingering
+ * in the interrupt log as we go for a nap.
  */
 void __cpuidle default_idle_call(void)
 {
 	if (current_clr_polling_and_test()) {
-		local_irq_enable();
-	} else {
-
+		local_irq_enable_full();
+	} else if (irq_cpuidle_enter(NULL, NULL)) { /* hard irqs off now */
 		trace_cpu_idle(1, smp_processor_id());
 		stop_critical_timings();
 
@@ -127,6 +133,8 @@
 
 		start_critical_timings();
 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	} else {
+		local_irq_enable_full();
 	}
 }
 
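
The two hunks above rework default_idle_call() for pipelined interrupts. As a sketch only, assembled from the hunks and their unchanged context (the nap itself, i.e. the arch_cpu_idle() call and its surrounding bookkeeping, is elided because this diff does not touch it), the function now reads roughly as follows:

void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable_full();
	} else if (irq_cpuidle_enter(NULL, NULL)) { /* hard irqs off now */
		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();
		/* ... arch_cpu_idle() and related bookkeeping elided ... */
		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		/*
		 * irq_cpuidle_enter() declined the nap, presumably because
		 * an interrupt event is pending; back out with irqs on.
		 */
		local_irq_enable_full();
	}
}

Going by its name and by the comment added above, local_irq_enable_full() appears to enable both the in-band (virtual) and the hard interrupt state, which would be why it replaces the plain local_irq_enable() call on the paths that skip the nap.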
@@ -249,6 +257,13 @@
 	__current_set_polling();
 
 	/*
+	 * Catch mishandling of the CPU's interrupt disable flag when
+	 * pipelining IRQs.
+	 */
+	if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()))
+		hard_local_irq_enable();
+
+	/*
 	 * It is up to the idle functions to reenable local interrupts
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
@@ -300,6 +315,7 @@
 		cpu_idle_poll();
 	} else {
 		cpuidle_idle_call();
+		WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
 	}
 	arch_cpu_idle_exit();
 }
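
The last two hunks add matching sanity checks to the idle loop: they mirror the existing WARN_ON_ONCE(irqs_disabled()) test on the (virtualized) in-band flag, but for the CPU's real interrupt disable flag, which matters once IRQs are pipelined. The first site also force-enables hard irqs to recover; the second only warns after cpuidle_idle_call() returns. A minimal sketch of the invariant being enforced, using a hypothetical helper name (the patch open-codes the test at both sites):

static inline void idle_check_hard_irqs(void)
{
	/*
	 * Only meaningful when pipeline debugging is enabled: warn if
	 * hard irqs were left disabled on the idle path, then re-enable
	 * them so wakeup events can still reach this CPU.
	 */
	if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()))
		hard_local_irq_enable();
}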