From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] arm64: entry: add IRQ pipeline support for Xenomai
---
kernel/arch/arm64/kernel/entry-common.c | 152 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 135 insertions(+), 17 deletions(-)
diff --git a/kernel/arch/arm64/kernel/entry-common.c b/kernel/arch/arm64/kernel/entry-common.c
index 64cfe4a..120cff8 100644
--- a/kernel/arch/arm64/kernel/entry-common.c
+++ b/kernel/arch/arm64/kernel/entry-common.c
@@ -8,6 +8,7 @@
#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
+#include <linux/irqstage.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
@@ -21,7 +22,7 @@
* This is intended to match the logic in irqentry_enter(), handling the kernel
* mode transitions only.
*/
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static void noinstr __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

@@ -41,11 +42,50 @@
mte_check_tfsr_entry();
}

+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_IRQ_PIPELINE
+ /*
+ * CAUTION: we may switch in-band as a result of handling a
+ * trap, so if we are running out-of-band, we must make sure
+ * not to perform the RCU exit since we did not enter it in
+ * the first place.
+ */
+ regs->oob_on_entry = running_oob();
+ if (regs->oob_on_entry) {
+ regs->exit_rcu = false;
+ return;
+ }
+
+ /*
+	 * We trapped from kernel space while running in-band: record
+	 * the virtual interrupt state into the current register frame
+	 * (regs->stalled_on_entry) so that exit_to_kernel_mode() can
+	 * reinstate it. Next, stall the in-band stage in order to
+	 * mirror the current hardware state (i.e. hardirqs are off).
+ */
+ regs->stalled_on_entry = test_and_stall_inband_nocheck();
+#endif
+
+ __enter_from_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+ /*
+ * Our caller is going to inherit the hardware interrupt state
+ * from the trapped context once we have returned: if running
+ * in-band, align the stall bit on the upcoming state.
+ */
+ if (running_inband() && interrupts_enabled(regs))
+ unstall_inband_nocheck();
+#endif
+}
+
/*
* This is intended to match the logic in irqentry_exit(), handling the kernel
* mode transitions only, and with preemption handled elsewhere.
*/
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static void noinstr __exit_to_kernel_mode(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
@@ -67,8 +107,35 @@
}
}

+/*
+ * Pipeline-aware wrapper around __exit_to_kernel_mode(), restoring
+ * the virtual interrupt state which was in effect on entry to the
+ * trap.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
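+	/*
+	 * Still running out-of-band when leaving the trap: no in-band
+	 * exit logic applies, bail out.
+	 */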
+ if (running_oob())
+ return;
+
+ __exit_to_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+ /*
+ * Reinstate the virtual interrupt state which was in effect
+ * on entry to the trap.
+ */
+ if (!regs->oob_on_entry) {
+ if (regs->stalled_on_entry)
+ stall_inband_nocheck();
+ else
+ unstall_inband_nocheck();
+ }
+#endif
+}
+
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
+ /* irq_pipeline: running this code oob is ok. */
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
__nmi_enter();
@@ -99,18 +166,57 @@
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
- if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ /*
+ * IRQ pipeline: the interrupt entry is special in that we may
+ * run the lockdep and RCU prologue/epilogue only if the IRQ
+ * is going to be dispatched to its handler on behalf of the
+ * current context, i.e. only if running in-band and
+ * unstalled. If so, we also have to reconcile the hardware
+ * and virtual interrupt states temporarily in order to run
+ * such prologue.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
arm64_enter_nmi(regs);
- else
- enter_from_kernel_mode(regs);
+ } else {
+#ifdef CONFIG_IRQ_PIPELINE
+ if (running_inband()) {
+ regs->stalled_on_entry = test_inband_stall();
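+			/*
+			 * If the in-band stage was stalled on entry,
+			 * the IRQ is not going to be dispatched from
+			 * this context: skip the RCU and lockdep
+			 * prologue, exit_el1_irq_or_nmi() will skip
+			 * the epilogue likewise.
+			 */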
+ if (!regs->stalled_on_entry) {
+ stall_inband_nocheck();
+ __enter_from_kernel_mode(regs);
+ unstall_inband_nocheck();
+ }
+ }
+#else
+ __enter_from_kernel_mode(regs);
+#endif
+ }
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
- if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+ if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
arm64_exit_nmi(regs);
- else
- exit_to_kernel_mode(regs);
+ } else {
+#ifdef CONFIG_IRQ_PIPELINE
+ /*
+ * See enter_el1_irq_or_nmi() for details. UGLY: we
+ * also have to tell the tracer that irqs are off,
+ * since sync_current_irq_stage() did the opposite on
+		 * exit. Hopefully, arm64 will convert to the generic
+		 * entry code at some point, which has less convoluted
+		 * logic.
+ */
+ if (running_inband() && !regs->stalled_on_entry) {
+ stall_inband_nocheck();
+ trace_hardirqs_off();
+ __exit_to_kernel_mode(regs);
+ unstall_inband_nocheck();
+ }
+#else
+ __exit_to_kernel_mode(regs);
+#endif
+ }
}

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
@@ -231,20 +337,32 @@
asmlinkage void noinstr enter_from_user_mode(void)
{
- lockdep_hardirqs_off(CALLER_ADDR0);
- CT_WARN_ON(ct_state() != CONTEXT_USER);
- user_exit_irqoff();
- trace_hardirqs_off_finish();
+ if (running_inband()) {
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
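+		/*
+		 * Stall the in-band stage around the context tracking
+		 * exit so that the virtual interrupt state mirrors the
+		 * hardware state, i.e. hard irqs are off.
+		 */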
+ stall_inband_nocheck();
+ user_exit_irqoff();
+ unstall_inband_nocheck();
+ trace_hardirqs_off_finish();
+ }
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	mte_check_tfsr_exit();

+	if (running_inband()) {
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ user_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
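+		/*
+		 * Hard irqs will be on once we return to user mode:
+		 * align the in-band stall bit on the upcoming state.
+		 */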
+ unstall_inband_nocheck();
+ }
+}

- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- user_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
+asmlinkage void noinstr enter_el0_irq(void)
+{
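+	/*
+	 * Run the usual user entry prologue only if the IRQ is going
+	 * to be dispatched to its handler from this context, i.e.
+	 * when running in-band and unstalled.
+	 */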
+ if (running_inband() && !test_inband_stall())
+ enter_from_user_mode();
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
--
Gitblit v1.6.2