```diff
@@ -80,6 +80,7 @@
 void __weak arch_cpu_idle(void)
 {
         cpu_idle_force_poll = 1;
+        hard_local_irq_enable();
         raw_local_irq_enable();
 }
 
```
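These hunks appear to come from `kernel/sched/idle.c`, patched for IRQ pipelining (the dual-stage interrupt model used by Dovetail-style kernels). The first change makes `arch_cpu_idle()` enable interrupts at both levels: `hard_local_irq_enable()` flips the real CPU interrupt flag, while `raw_local_irq_enable()` here only clears the virtual disable state that in-band code observes. A toy user-space model of that split; every name below is invented for illustration and is not part of the patch:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of pipelined interrupt state: the real CPU flag versus
 * the virtual "stall" flag seen by the in-band stage. Illustrative
 * only; not how the kernel implements it.
 */
static bool hard_irqs_off;   /* real CPU interrupt-disable flag */
static bool inband_stalled;  /* virtual flag for the in-band stage */

static void model_hard_local_irq_enable(void)
{
        hard_irqs_off = false;   /* device IRQs can be taken again */
}

static void model_raw_local_irq_enable(void)
{
        inband_stalled = false;  /* in-band handlers may run again */
}

int main(void)
{
        /*
         * Mirror the patched arch_cpu_idle(): open both levels before
         * polling, so neither the CPU nor the in-band stage blocks IRQs.
         */
        model_hard_local_irq_enable();
        model_raw_local_irq_enable();
        printf("hard off=%d, stalled=%d\n", hard_irqs_off, inband_stalled);
        return 0;
}
```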
```diff
@@ -87,13 +88,18 @@
  * default_idle_call - Default CPU idle routine.
  *
  * To use when the cpuidle framework cannot be used.
+ *
+ * When interrupts are pipelined, this call is entered with hard irqs
+ * on and the in-band stage is stalled. Returns with hard irqs on,
+ * in-band stage stalled. irq_cpuidle_enter() then turns off hard irqs
+ * before synchronizing irqs, making sure we have no event lingering
+ * in the interrupt log as we go for a nap.
  */
 void __cpuidle default_idle_call(void)
 {
         if (current_clr_polling_and_test()) {
-                local_irq_enable();
-        } else {
-
+                local_irq_enable_full();
+        } else if (irq_cpuidle_enter(NULL, NULL)) { /* hard irqs off now */
                 trace_cpu_idle(1, smp_processor_id());
                 stop_critical_timings();
 
```
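`local_irq_enable_full()` is not defined anywhere in this diff; judging by the naming convention above, it plausibly enables interrupts at both the hard (CPU) and in-band (virtual) levels. A sketch of that assumption, under a hypothetical name to avoid claiming it is the patch's actual definition:

```c
/*
 * Assumption from naming only: a "full" enable opens both interrupt
 * levels. The real local_irq_enable_full() may differ in order or
 * detail.
 */
static inline void sketch_local_irq_enable_full(void)
{
        hard_local_irq_enable();  /* real CPU interrupt flag */
        local_irq_enable();       /* in-band (virtual) stage */
}
```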
```diff
@@ -127,6 +133,8 @@
 
                 start_critical_timings();
                 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+        } else {
+                local_irq_enable_full();
         }
 }
 
```
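Splicing the two hunks above together, the patched `default_idle_call()` takes a three-way shape: bail out with everything re-enabled when polling was requested, actually sleep only if `irq_cpuidle_enter()` confirms the interrupt log is clean (hard irqs are off in that arm), and otherwise abort the nap and re-enable both levels. Condensed from the diff itself, with the untouched middle elided:

```c
void __cpuidle default_idle_call(void)
{
        if (current_clr_polling_and_test()) {
                local_irq_enable_full();
        } else if (irq_cpuidle_enter(NULL, NULL)) { /* hard irqs off now */
                trace_cpu_idle(1, smp_processor_id());
                stop_critical_timings();
                /* ... arch-specific idle entry, unchanged and elided ... */
                start_critical_timings();
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable_full();
        }
}
```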
```diff
@@ -249,6 +257,13 @@
         __current_set_polling();
 
         /*
+         * Catch mishandling of the CPU's interrupt disable flag when
+         * pipelining IRQs.
+         */
+        if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()))
+                hard_local_irq_enable();
+
+        /*
          * It is up to the idle functions to reenable local interrupts
          */
         if (WARN_ON_ONCE(irqs_disabled()))
```
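The check added ahead of the existing `irqs_disabled()` warning follows a warn-once-then-repair idiom: if pipeline debugging is enabled and the idle loop was reached with the hard interrupt flag off, complain a single time, then fix the flag so the in-band check that follows still runs under the expected state. The same idiom as a hypothetical helper (the name is ours, not the patch's):

```c
/*
 * Hypothetical helper, not in the patch: warn once when the real CPU
 * interrupt flag was mishandled under pipelining, then self-repair so
 * later invariant checks remain meaningful.
 */
static inline void debug_expect_hard_irqs_on(void)
{
        if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()))
                hard_local_irq_enable();
}
```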
```diff
@@ -300,6 +315,7 @@
                         cpu_idle_poll();
                 } else {
                         cpuidle_idle_call();
+                        WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
                 }
                 arch_cpu_idle_exit();
         }
```
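Taken together, the two debug checks bracket one pass of the idle loop: hard irqs must be on when the pass begins, and `cpuidle_idle_call()` must not return with them off. As a compact contract sketch (illustrative, not code from the patch):

```c
/* Illustrative contract around one idle pass under IRQ pipelining. */
static void idle_pass_contract(void)
{
        /* entry: hard irqs on, else warn once and repair */
        if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()))
                hard_local_irq_enable();

        cpuidle_idle_call();

        /* exit: the idle path must not leak a hard-irqs-off state */
        WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
}
```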