From 7e970c18f85f99acc678d90128b6e01dce1bf273 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:40:12 +0000
Subject: [PATCH] gmac: get mac from eeprom
---
kernel/kernel/sched/idle.c | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/kernel/kernel/sched/idle.c b/kernel/kernel/sched/idle.c
index 6dc7d9a..cc2710e 100644
--- a/kernel/kernel/sched/idle.c
+++ b/kernel/kernel/sched/idle.c
@@ -80,6 +80,7 @@
void __weak arch_cpu_idle(void)
{
cpu_idle_force_poll = 1;
+ hard_local_irq_enable();
raw_local_irq_enable();
}
@@ -87,13 +88,18 @@
* default_idle_call - Default CPU idle routine.
*
* To use when the cpuidle framework cannot be used.
+ *
+ * When interrupts are pipelined, this call is entered with hard irqs
+ * on and the in-band stage is stalled. Returns with hard irqs on,
+ * in-band stage stalled. irq_cpuidle_enter() then turns off hard irqs
+ * before synchronizing irqs, making sure we have no event lingering
+ * in the interrupt log as we go for a nap.
*/
void __cpuidle default_idle_call(void)
{
if (current_clr_polling_and_test()) {
- local_irq_enable();
- } else {
-
+ local_irq_enable_full();
+ } else if (irq_cpuidle_enter(NULL, NULL)) { /* hard irqs off now */
trace_cpu_idle(1, smp_processor_id());
stop_critical_timings();
@@ -127,6 +133,8 @@
start_critical_timings();
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+ } else {
+ local_irq_enable_full();
}
}
@@ -249,6 +257,13 @@
__current_set_polling();
/*
+ * Catch mishandling of the CPU's interrupt disable flag when
+ * pipelining IRQs.
+ */
+ if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()))
+ hard_local_irq_enable();
+
+ /*
* It is up to the idle functions to reenable local interrupts
*/
if (WARN_ON_ONCE(irqs_disabled()))
@@ -300,6 +315,7 @@
cpu_idle_poll();
} else {
cpuidle_idle_call();
+ WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
}
arch_cpu_idle_exit();
}
--
Gitblit v1.6.2