From 7e970c18f85f99acc678d90128b6e01dce1bf273 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:40:12 +0000
Subject: [PATCH] gmac get mac from eeprom
---
kernel/kernel/irq/chip.c | 269 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 210 insertions(+), 59 deletions(-)
diff --git a/kernel/kernel/irq/chip.c b/kernel/kernel/irq/chip.c
index 520b9fa..13edfa8 100644
--- a/kernel/kernel/irq/chip.c
+++ b/kernel/kernel/irq/chip.c
@@ -15,6 +15,7 @@
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
#include <linux/wakeup_reason.h>
+#include <linux/irq_pipeline.h>
#include <trace/events/irq.h>
@@ -49,6 +50,10 @@
if (!chip)
chip = &no_irq_chip;
+ else
+ WARN_ONCE(irqs_pipelined() &&
+ (chip->flags & IRQCHIP_PIPELINE_SAFE) == 0,
+ "irqchip %s is not pipeline-safe!", chip->name);
desc->irq_data.chip = chip;
irq_put_desc_unlock(desc, flags);
@@ -155,14 +160,6 @@
return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
-
-struct irq_data *irq_get_irq_data(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- return desc ? &desc->irq_data : NULL;
-}
-EXPORT_SYMBOL_GPL(irq_get_irq_data);
static void irq_state_clr_disabled(struct irq_desc *desc)
{
@@ -386,7 +383,8 @@
*/
void irq_disable(struct irq_desc *desc)
{
- __irq_disable(desc, irq_settings_disable_unlazy(desc));
+ __irq_disable(desc,
+ irq_settings_disable_unlazy(desc) || irqs_pipelined());
}
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
@@ -532,8 +530,22 @@
* If the interrupt is an armed wakeup source, mark it pending
* and suspended, disable it and notify the pm core about the
* event.
+ *
+ * When pipelining, the logic is as follows:
+ *
+ * - from a pipeline entry context, we might have preempted
+ * the oob stage, or irqs might be [virtually] off, so we may
+ * not run the in-band PM code. Just make sure any wakeup
+ * interrupt is detected later on when the flow handler
+ * re-runs from the in-band stage.
+ *
+ * - from the in-band context, run the PM wakeup check.
*/
- if (irq_pm_check_wakeup(desc))
+ if (irqs_pipelined()) {
+ WARN_ON_ONCE(irq_pipeline_debug() && !in_pipeline());
+ if (irqd_is_wakeup_armed(&desc->irq_data))
+ return true;
+ } else if (irq_pm_check_wakeup(desc))
return false;
/*
@@ -557,8 +569,13 @@
{
raw_spin_lock(&desc->lock);
- if (!irq_may_run(desc))
+ if (start_irq_flow() && !irq_may_run(desc))
goto out_unlock;
+
+ if (on_pipeline_entry()) {
+ handle_oob_irq(desc);
+ goto out_unlock;
+ }
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -594,8 +611,13 @@
raw_spin_lock(&desc->lock);
- if (!irq_may_run(desc))
+ if (start_irq_flow() && !irq_may_run(desc))
goto out_unlock;
+
+ if (on_pipeline_entry()) {
+ handle_oob_irq(desc);
+ goto out_unlock;
+ }
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -617,6 +639,20 @@
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);
+
+static inline void cond_eoi_irq(struct irq_desc *desc)
+{
+ struct irq_chip *chip = desc->irq_data.chip;
+
+ if (!(chip->flags & IRQCHIP_EOI_THREADED))
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static inline void mask_cond_eoi_irq(struct irq_desc *desc)
+{
+ mask_irq(desc);
+ cond_eoi_irq(desc);
+}
/*
* Called unconditionally from handle_level_irq() and only for oneshot
@@ -648,10 +684,19 @@
void handle_level_irq(struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
- mask_ack_irq(desc);
- if (!irq_may_run(desc))
+ if (start_irq_flow()) {
+ mask_ack_irq(desc);
+
+ if (!irq_may_run(desc))
+ goto out_unlock;
+ }
+
+ if (on_pipeline_entry()) {
+ if (handle_oob_irq(desc))
+ goto out_unmask;
goto out_unlock;
+ }
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -666,7 +711,7 @@
kstat_incr_irqs_this_cpu(desc);
handle_irq_event(desc);
-
+out_unmask:
cond_unmask_irq(desc);
out_unlock:
@@ -677,7 +722,10 @@
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
if (!(desc->istate & IRQS_ONESHOT)) {
- chip->irq_eoi(&desc->irq_data);
+ if (!irqs_pipelined())
+ chip->irq_eoi(&desc->irq_data);
+ else if (!irqd_irq_disabled(&desc->irq_data))
+ unmask_irq(desc);
return;
}
/*
@@ -688,9 +736,11 @@
*/
if (!irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
- chip->irq_eoi(&desc->irq_data);
+ if (!irqs_pipelined())
+ chip->irq_eoi(&desc->irq_data);
unmask_irq(desc);
- } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
+ } else if (!irqs_pipelined() &&
+ !(chip->flags & IRQCHIP_EOI_THREADED)) {
chip->irq_eoi(&desc->irq_data);
}
}
@@ -710,8 +760,16 @@
raw_spin_lock(&desc->lock);
- if (!irq_may_run(desc))
+ if (start_irq_flow() && !irq_may_run(desc))
goto out;
+
+ if (on_pipeline_entry()) {
+ if (handle_oob_irq(desc))
+ chip->irq_eoi(&desc->irq_data);
+ else
+ mask_cond_eoi_irq(desc);
+ goto out_unlock;
+ }
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -726,13 +784,13 @@
}
kstat_incr_irqs_this_cpu(desc);
- if (desc->istate & IRQS_ONESHOT)
+ if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT))
mask_irq(desc);
handle_irq_event(desc);
cond_unmask_eoi_irq(desc, chip);
-
+out_unlock:
raw_spin_unlock(&desc->lock);
return;
out:
@@ -792,30 +850,42 @@
*/
void handle_edge_irq(struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
raw_spin_lock(&desc->lock);
- desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ if (start_irq_flow()) {
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- if (!irq_may_run(desc)) {
- desc->istate |= IRQS_PENDING;
- mask_ack_irq(desc);
- goto out_unlock;
+ if (!irq_may_run(desc)) {
+ desc->istate |= IRQS_PENDING;
+ mask_ack_irq(desc);
+ goto out_unlock;
+ }
+
+ /*
+ * If its disabled or no action available then mask it
+ * and get out of here.
+ */
+ if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+ desc->istate |= IRQS_PENDING;
+ mask_ack_irq(desc);
+ goto out_unlock;
+ }
}
- /*
- * If its disabled or no action available then mask it and get
- * out of here.
- */
- if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
- desc->istate |= IRQS_PENDING;
- mask_ack_irq(desc);
+ if (on_pipeline_entry()) {
+ chip->irq_ack(&desc->irq_data);
+ desc->istate |= IRQS_EDGE;
+ handle_oob_irq(desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(desc);
/* Start handling the irq */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ if (!irqs_pipelined())
+ chip->irq_ack(&desc->irq_data);
do {
if (unlikely(!desc->action)) {
@@ -840,6 +910,8 @@
!irqd_irq_disabled(&desc->irq_data));
out_unlock:
+ if (on_pipeline_entry())
+ desc->istate &= ~IRQS_EDGE;
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
@@ -858,11 +930,20 @@
raw_spin_lock(&desc->lock);
- desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+ if (start_irq_flow()) {
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- if (!irq_may_run(desc)) {
- desc->istate |= IRQS_PENDING;
- goto out_eoi;
+ if (!irq_may_run(desc)) {
+ desc->istate |= IRQS_PENDING;
+ goto out_eoi;
+ }
+ }
+
+ if (on_pipeline_entry()) {
+ desc->istate |= IRQS_EDGE;
+ if (handle_oob_irq(desc))
+ goto out_eoi;
+ goto out;
}
/*
@@ -887,6 +968,9 @@
out_eoi:
chip->irq_eoi(&desc->irq_data);
+out:
+ if (on_pipeline_entry())
+ desc->istate &= ~IRQS_EDGE;
raw_spin_unlock(&desc->lock);
}
#endif
@@ -900,6 +984,18 @@
void handle_percpu_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ bool handled;
+
+ if (on_pipeline_entry()) {
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+ handled = handle_oob_irq(desc);
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+ if (!handled && chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+ return;
+ }
/*
* PER CPU interrupts are not serialized. Do not touch
@@ -907,13 +1003,17 @@
*/
__kstat_incr_irqs_this_cpu(desc);
- if (chip->irq_ack)
- chip->irq_ack(&desc->irq_data);
-
- handle_irq_event_percpu(desc);
-
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
+ if (irqs_pipelined()) {
+ handle_irq_event_percpu(desc);
+ if (chip->irq_unmask)
+ chip->irq_unmask(&desc->irq_data);
+ } else {
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+ handle_irq_event_percpu(desc);
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+ }
}
/**
@@ -933,6 +1033,18 @@
struct irqaction *action = desc->action;
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;
+ bool handled;
+
+ if (on_pipeline_entry()) {
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+ handled = handle_oob_irq(desc);
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+ if (!handled && chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+ return;
+ }
/*
* PER CPU interrupts are not serialized. Do not touch
@@ -940,7 +1052,7 @@
*/
__kstat_incr_irqs_this_cpu(desc);
- if (chip->irq_ack)
+ if (!irqs_pipelined() && chip->irq_ack)
chip->irq_ack(&desc->irq_data);
if (likely(action)) {
@@ -958,8 +1070,11 @@
enabled ? " and unmasked" : "", irq, cpu);
}
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
+ if (irqs_pipelined()) {
+ if (chip->irq_unmask)
+ chip->irq_unmask(&desc->irq_data);
+ } else if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
}
/**
@@ -979,10 +1094,21 @@
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;
- __kstat_incr_irqs_this_cpu(desc);
-
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+
+ if (on_pipeline_entry()) {
+ handle_oob_irq(desc);
+ return;
+ }
+
+ /* Trap spurious IPIs if pipelined. */
+ if (irqs_pipelined() && !action) {
+ print_irq_desc(irq, desc);
+ return;
+ }
+
+ __kstat_incr_irqs_this_cpu(desc);
trace_irq_handler_entry(irq, action);
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
@@ -1076,6 +1202,7 @@
desc->handle_irq = handle;
}
+ irq_settings_set_chained(desc);
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
@@ -1251,8 +1378,17 @@
raw_spin_lock(&desc->lock);
- if (!irq_may_run(desc))
+ if (start_irq_flow() && !irq_may_run(desc))
goto out;
+
+ if (on_pipeline_entry()) {
+ chip->irq_ack(&desc->irq_data);
+ if (handle_oob_irq(desc))
+ chip->irq_eoi(&desc->irq_data);
+ else
+ mask_cond_eoi_irq(desc);
+ goto out_unlock;
+ }
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -1267,11 +1403,13 @@
}
kstat_incr_irqs_this_cpu(desc);
- if (desc->istate & IRQS_ONESHOT)
- mask_irq(desc);
+ if (!irqs_pipelined()) {
+ if (desc->istate & IRQS_ONESHOT)
+ mask_irq(desc);
- /* Start handling the irq */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ /* Start handling the irq */
+ chip->irq_ack(&desc->irq_data);
+ }
handle_irq_event(desc);
@@ -1282,6 +1420,7 @@
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
+out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
@@ -1301,10 +1440,21 @@
struct irq_chip *chip = desc->irq_data.chip;
raw_spin_lock(&desc->lock);
- mask_ack_irq(desc);
- if (!irq_may_run(desc))
- goto out;
+ if (start_irq_flow()) {
+ mask_ack_irq(desc);
+
+ if (!irq_may_run(desc))
+ goto out;
+ }
+
+ if (on_pipeline_entry()) {
+ if (handle_oob_irq(desc))
+ chip->irq_eoi(&desc->irq_data);
+ else
+ cond_eoi_irq(desc);
+ goto out_unlock;
+ }
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -1319,7 +1469,7 @@
}
kstat_incr_irqs_this_cpu(desc);
- if (desc->istate & IRQS_ONESHOT)
+ if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT))
mask_irq(desc);
handle_irq_event(desc);
@@ -1331,6 +1481,7 @@
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
+out_unlock:
raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
--
Gitblit v1.6.2