2025-02-14 bbb9540dc49f70f6b703d1c8d1b85fa5f602d86e
kernel/lib/dump_stack.c
@@ -9,9 +9,11 @@
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/smp.h>
+#include <linux/irqstage.h>
 #include <linux/atomic.h>
 #include <linux/kexec.h>
 #include <linux/utsname.h>
+#include <linux/hardirq.h>
 
 static char dump_stack_arch_desc_str[128];
 
@@ -56,6 +58,11 @@
 		printk("%sHardware name: %s\n",
 		       log_lvl, dump_stack_arch_desc_str);
 
+#ifdef CONFIG_IRQ_PIPELINE
+	printk("%sIRQ stage: %s\n",
+	       log_lvl, current_irq_stage->name);
+#endif
+
 	print_worker_info(log_lvl, current);
 }
 
@@ -85,6 +92,29 @@
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);
 
+static unsigned long disable_local_irqs(void)
+{
+	unsigned long flags = 0; /* only to trick the UMR detection */
+
+	/*
+	 * We neither need nor want to disable in-band IRQs over the
+	 * oob stage, where CPU migration can't happen. Conversely, we
+	 * neither need nor want to disable hard IRQs from the oob
+	 * stage, so that latency won't skyrocket as a result of
+	 * dumping the stack backtrace.
+	 */
+	if (running_inband() && !on_pipeline_entry())
+		local_irq_save(flags);
+
+	return flags;
+}
+
+static void restore_local_irqs(unsigned long flags)
+{
+	if (running_inband() && !on_pipeline_entry())
+		local_irq_restore(flags);
+}
+
 asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
 {
 	unsigned long flags;
@@ -97,7 +127,7 @@
 	 * against other CPUs
 	 */
 retry:
-	local_irq_save(flags);
+	flags = disable_local_irqs();
 	cpu = smp_processor_id();
 	old = atomic_cmpxchg(&dump_lock, -1, cpu);
 	if (old == -1) {
@@ -105,7 +135,7 @@
 	} else if (old == cpu) {
 		was_locked = 1;
 	} else {
-		local_irq_restore(flags);
+		restore_local_irqs(flags);
 		/*
 		 * Wait for the lock to release before jumping to
 		 * atomic_cmpxchg() in order to mitigate the thundering herd
@@ -120,7 +150,7 @@
 	if (!was_locked)
 		atomic_set(&dump_lock, -1);
 
-	local_irq_restore(flags);
+	restore_local_irqs(flags);
 }
 #else
 asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
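
Usage note (not part of the patch above): with CONFIG_IRQ_PIPELINE, the new
disable_local_irqs()/restore_local_irqs() helpers replace the unconditional
local_irq_save()/local_irq_restore() pair so that a backtrace dumped from the
out-of-band stage never hard-disables interrupts. A minimal caller-side sketch
of the same pattern follows; emit_backtrace_safely() is a hypothetical name,
and the running_inband()/on_pipeline_entry() predicates are assumed to come
from the <linux/irqstage.h> include added in the first hunk.

	/* Sketch only -- mirrors how dump_stack_lvl() brackets its dump_lock section. */
	static void emit_backtrace_safely(void)
	{
		unsigned long flags;

		flags = disable_local_irqs();	/* no-op when running on the oob stage */
		/* ... take dump_lock, print the backtrace ... */
		restore_local_irqs(flags);	/* paired, same condition */
	}

On the oob stage nothing is masked, so the dump cannot inflate worst-case
latency; on the in-band stage the behavior is unchanged from the original code.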