From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] lib/dump_stack: add IRQ pipeline (xenomai) support

When CONFIG_IRQ_PIPELINE is enabled, print the current IRQ stage in the
dump_stack() header, and only disable in-band IRQs around the dump lock
when running on the in-band stage outside of pipeline entry, so that
dumping a backtrace does not add latency to the out-of-band stage.
---
kernel/lib/dump_stack.c | 36 +++++++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/kernel/lib/dump_stack.c b/kernel/lib/dump_stack.c
index b9acd9c..aed74e3 100644
--- a/kernel/lib/dump_stack.c
+++ b/kernel/lib/dump_stack.c
@@ -9,9 +9,11 @@
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
+#include <linux/irqstage.h>
#include <linux/atomic.h>
#include <linux/kexec.h>
#include <linux/utsname.h>
+#include <linux/hardirq.h>
static char dump_stack_arch_desc_str[128];
@@ -56,6 +58,11 @@
printk("%sHardware name: %s\n",
log_lvl, dump_stack_arch_desc_str);
+#ifdef CONFIG_IRQ_PIPELINE
+ printk("%sIRQ stage: %s\n",
+ log_lvl, current_irq_stage->name);
+#endif
+
print_worker_info(log_lvl, current);
}
@@ -85,6 +92,29 @@
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);
+static unsigned long disable_local_irqs(void)
+{
+	unsigned long flags = 0; /* only to trick the UMR detection */
+
+	/*
+	 * We neither need nor want to disable in-band IRQs over the
+	 * oob stage, where CPU migration can't happen. Conversely, we
+	 * neither need nor want to disable hard IRQs from the oob
+	 * stage, so that latency won't skyrocket as a result of
+	 * dumping the stack backtrace.
+	 */
+	if (running_inband() && !on_pipeline_entry())
+		local_irq_save(flags);
+
+	return flags;
+}
+
+static void restore_local_irqs(unsigned long flags)
+{
+	if (running_inband() && !on_pipeline_entry())
+		local_irq_restore(flags);
+}
+
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
	unsigned long flags;
@@ -97,7 +127,7 @@
	 * against other CPUs
	 */
retry:
-	local_irq_save(flags);
+	flags = disable_local_irqs();
	cpu = smp_processor_id();
	old = atomic_cmpxchg(&dump_lock, -1, cpu);
	if (old == -1) {
@@ -105,7 +135,7 @@
	} else if (old == cpu) {
		was_locked = 1;
	} else {
-		local_irq_restore(flags);
+		restore_local_irqs(flags);
		/*
		 * Wait for the lock to release before jumping to
		 * atomic_cmpxchg() in order to mitigate the thundering herd
@@ -120,7 +150,7 @@
	if (!was_locked)
		atomic_set(&dump_lock, -1);
-	local_irq_restore(flags);
+	restore_local_irqs(flags);
}
#else
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
--
Gitblit v1.6.2
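
For reference, the SMP variant of dump_stack_lvl() reads roughly as
follows once this patch is applied, showing the new helpers in use. This
is a sketch only: the lines that do not appear in the hunks above (the
__dump_stack(log_lvl) call and the cpu_relax() wait loop) are assumed
from mainline lib/dump_stack.c and may differ slightly in this tree.

asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
	unsigned long flags;
	int was_locked;
	int old;
	int cpu;

	/*
	 * Permit this cpu to perform nested stack dumps while serialising
	 * against other CPUs
	 */
retry:
	/*
	 * Masks in-band IRQs only when called from the in-band stage
	 * outside of pipeline entry; a no-op otherwise.
	 */
	flags = disable_local_irqs();
	cpu = smp_processor_id();
	old = atomic_cmpxchg(&dump_lock, -1, cpu);
	if (old == -1) {
		was_locked = 0;
	} else if (old == cpu) {
		was_locked = 1;
	} else {
		restore_local_irqs(flags);
		/*
		 * Wait for the lock to release before jumping to
		 * atomic_cmpxchg() in order to mitigate the thundering
		 * herd problem.
		 */
		do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
		goto retry;
	}

	__dump_stack(log_lvl);	/* assumed from mainline */

	if (!was_locked)
		atomic_set(&dump_lock, -1);

	restore_local_irqs(flags);
}

The serialisation logic is unchanged; only the IRQ masking becomes
stage-aware, so an out-of-band caller dumps its backtrace without
touching the hard IRQ state.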