From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] ARM: smp: add IRQ pipeline IPI support for Xenomai
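
This converts the ARM SMP IPI code to run under the interrupt
pipeline (Dovetail) that Xenomai sits on:

- With pipelining enabled, in-band IPI messages are multiplexed over
  SGI0 through a per-CPU bitmask, leaving the remaining SGIs free for
  out-of-band use.
- Per-IPI delivery counts are tracked in ipi_counts[] so that
  show_ipi_list() keeps reporting per-type numbers for the
  multiplexed IPIs.
- The CPU bring-up and tear-down paths use the _full() variants of
  the IRQ helpers, which also affect the hardware interrupt state
  when pipelining.
- irq_send_oob_ipi() is added and exported so out-of-band code can
  raise SGI1/SGI2 directly.

A rough usage sketch of the exported helper; TIMER_OOB_IPI here only
stands for an IRQ in the out-of-band SGI range (ipi_irq_base +
OOB_IPI_OFFSET) and is not defined by this patch:

	/*
	 * Raise an out-of-band IPI on the CPUs in "mask". The IRQ is
	 * assumed to map to SGI1 or SGI2; the helper warns and bails
	 * out otherwise when pipeline debugging is enabled.
	 */
	static inline void kick_oob_timer(const struct cpumask *mask)
	{
		irq_send_oob_ipi(TIMER_OOB_IPI, mask);
	}

The call ends up in __smp_cross_call(), bypassing the in-band
multiplexing done by smp_cross_call().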

---
 kernel/arch/arm/kernel/smp.c |  121 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 108 insertions(+), 13 deletions(-)
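
Reviewer note (this text sits below the "---" marker, so git am drops
it): the sanity check in irq_send_oob_ipi() relies on the SGI layout
described by the comments in the hunks below, i.e. SGI0 carries the
multiplexed in-band IPIs and the next OOB_NR_IPI SGIs are reserved
for out-of-band use. With the values implied by the "SGI1, SGI2"
comments, that amounts to:

	/* Assumed values, shown for illustration only. */
	#define OOB_IPI_OFFSET	1	/* SGI0 = in-band multiplexer */
	#define OOB_NR_IPI	2	/* SGI1, SGI2 = out-of-band */

so irq_send_oob_ipi() warns and returns early for any IRQ that does
not map to SGI1 or SGI2 when irq_pipeline_debug() is enabled.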

diff --git a/kernel/arch/arm/kernel/smp.c b/kernel/arch/arm/kernel/smp.c
index 123432b..bdb4f7e 100644
--- a/kernel/arch/arm/kernel/smp.c
+++ b/kernel/arch/arm/kernel/smp.c
@@ -84,7 +84,7 @@
 	MAX_IPI
 };
 
-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
 
@@ -329,7 +329,7 @@
 
 	idle_task_exit();
 
-	local_irq_disable();
+	local_irq_disable_full();
 
 	/*
 	 * Flush the data out of the L1 cache for this CPU.  This must be
@@ -421,6 +421,13 @@
 	local_flush_tlb_all();
 
 	/*
+	 * irq_pipeline: debug_smp_processor_id() accesses percpu
+	 * data.
+	 */
+	if (irqs_pipelined())
+		set_my_cpu_offset(per_cpu_offset(raw_smp_processor_id()));
+
+	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
@@ -463,7 +470,7 @@
 
 	complete(&cpu_running);
 
-	local_irq_enable();
+	local_irq_enable_full();
 	local_fiq_enable();
 	local_abt_enable();
 
@@ -539,6 +546,8 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu);
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -553,7 +562,7 @@
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+			seq_printf(p, "%10u ", get_ipi_count(irq, cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
@@ -606,7 +615,7 @@
 	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
-	local_irq_disable();
+	local_irq_disable_full();
 
 	while (1) {
 		cpu_relax();
@@ -695,12 +704,85 @@
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
+	/*
+	 * We don't support legacy IPI delivery when pipelining
+	 * interrupts.
+	 */
+	WARN_ON_ONCE(irqs_pipelined());
+
 	irq_enter();
 	do_handle_IPI(ipinr);
 	irq_exit();
 
 	set_irq_regs(old_regs);
 }
+
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [MAX_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..MAX_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;
+
+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else
 
 static irqreturn_t ipi_handler(int irq, void *data)
 {
@@ -710,9 +792,15 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
 }
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return kstat_irqs_cpu(irq, cpu);
+}
+
+#endif /* CONFIG_IRQ_PIPELINE */
 
 static void ipi_setup(int cpu)
 {
@@ -727,18 +815,25 @@
 
 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;
 
 	WARN_ON(n < MAX_IPI);
 	nr_ipi = min(n, MAX_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are multiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;
 
 	for (i = 0; i < nr_ipi; i++) {
-		int err;
+		if (i < inband_nr_ipi) {
+			int err;
 
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &irq_stat);
-		WARN_ON(err);
-
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						"IPI", &irq_stat);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 	}

--
Gitblit v1.6.2