From a46a1ad097419aeea7350987dd95230f50d90392 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 15 Nov 2024 08:53:41 +0000
Subject: [PATCH] x86/apic: add IRQ pipeline support (hard IRQ protection, out-of-band LAPIC timer)
---
kernel/arch/x86/kernel/apic/apic.c | 70 +++++++++++++++++++++++++++++-----
1 file changed, 59 insertions(+), 11 deletions(-)
diff --git a/kernel/arch/x86/kernel/apic/apic.c b/kernel/arch/x86/kernel/apic/apic.c
index 1c96f24..8984c79 100644
--- a/kernel/arch/x86/kernel/apic/apic.c
+++ b/kernel/arch/x86/kernel/apic/apic.c
@@ -31,6 +31,7 @@
#include <linux/i8253.h>
#include <linux/dmar.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/smp.h>
@@ -272,10 +273,10 @@
{
unsigned long flags;
- local_irq_save(flags);
+ flags = hard_local_irq_save();
apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
apic_write(APIC_ICR, low);
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
}
u64 native_apic_icr_read(void)
@@ -331,6 +332,9 @@
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
unsigned int lvtt_value, tmp_value;
+ unsigned long flags;
+
+ flags = hard_cond_local_irq_save();
lvtt_value = LOCAL_TIMER_VECTOR;
if (!oneshot)
@@ -353,6 +357,8 @@
* According to Intel, MFENCE can do the serialization here.
*/
asm volatile("mfence" : : : "memory");
+ hard_cond_local_irq_restore(flags);
+ printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
@@ -366,6 +372,8 @@
if (!oneshot)
apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
+
+ hard_cond_local_irq_restore(flags);
}
/*
@@ -471,28 +479,34 @@
static int lapic_next_deadline(unsigned long delta,
struct clock_event_device *evt)
{
+ unsigned long flags;
u64 tsc;
/* This MSR is special and need a special fence: */
weak_wrmsr_fence();
+ flags = hard_local_irq_save();
tsc = rdtsc();
wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+ hard_local_irq_restore(flags);
return 0;
}
static int lapic_timer_shutdown(struct clock_event_device *evt)
{
+ unsigned long flags;
unsigned int v;
/* Lapic used as dummy for broadcast ? */
if (evt->features & CLOCK_EVT_FEAT_DUMMY)
return 0;
+ flags = hard_local_irq_save();
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
apic_write(APIC_TMICT, 0);
+ hard_local_irq_restore(flags);
return 0;
}
@@ -527,6 +541,32 @@
#endif
}
+static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#define LAPIC_TIMER_IRQ apicm_vector_irq(LOCAL_TIMER_VECTOR)
+
+static irqreturn_t lapic_oob_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
+
+ trace_local_timer_entry(LOCAL_TIMER_VECTOR);
+ clockevents_handle_event(evt);
+ trace_local_timer_exit(LOCAL_TIMER_VECTOR);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction lapic_oob_action = {
+ .handler = lapic_oob_handler,
+ .name = "Out-of-band LAPIC timer interrupt",
+ .flags = IRQF_TIMER | IRQF_PERCPU,
+};
+
+#else
+#define LAPIC_TIMER_IRQ -1
+#endif
/*
* The local apic timer can be used for any function which is CPU local.
@@ -534,8 +574,8 @@
static struct clock_event_device lapic_clockevent = {
.name = "lapic",
.features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
- | CLOCK_EVT_FEAT_DUMMY,
+ CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PIPELINE | CLOCK_EVT_FEAT_DUMMY,
.shift = 32,
.set_state_shutdown = lapic_timer_shutdown,
.set_state_periodic = lapic_timer_set_periodic,
@@ -544,9 +584,8 @@
.set_next_event = lapic_next_event,
.broadcast = lapic_timer_broadcast,
.rating = 100,
- .irq = -1,
+ .irq = LAPIC_TIMER_IRQ,
};
-static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
static const struct x86_cpu_id deadline_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
@@ -1042,6 +1081,9 @@
/* Setup the lapic or request the broadcast */
setup_APIC_timer();
amd_e400_c1e_apic_setup();
+#ifdef CONFIG_IRQ_PIPELINE
+ setup_percpu_irq(LAPIC_TIMER_IRQ, &lapic_oob_action);
+#endif
}
void setup_secondary_APIC_clock(void)
@@ -1092,7 +1134,8 @@
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
-DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR,
+ sysvec_apic_timer_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1513,7 +1556,7 @@
* per set bit.
*/
for_each_set_bit(bit, isr->map, APIC_IR_BITS)
- ack_APIC_irq();
+ __ack_APIC_irq();
return true;
}
@@ -2131,7 +2174,7 @@
*
* Also called from sysvec_spurious_apic_interrupt().
*/
-DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+DEFINE_IDTENTRY_IRQ_PIPELINED(spurious_interrupt)
{
u32 v;
@@ -2157,7 +2200,7 @@
if (v & (1 << (vector & 0x1f))) {
pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
vector, smp_processor_id());
- ack_APIC_irq();
+ __ack_APIC_irq();
} else {
pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
vector, smp_processor_id());
@@ -2166,13 +2209,18 @@
trace_spurious_apic_exit(vector);
}
-DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(SPURIOUS_APIC_VECTOR,
+ sysvec_spurious_apic_interrupt)
{
__spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
}
/*
* This interrupt should never happen with our APIC/SMP architecture
+ *
+ * irq_pipeline: like spurious_interrupt, this runs directly out of
+ * the IDT with no deferral via the interrupt log, which means that
+ * only the hardware IRQ state is considered for masking.
*/
DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
{
--
Gitblit v1.6.2