@@ -9,9 +9,11 @@
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/smp.h>
+#include <linux/irqstage.h>
 #include <linux/atomic.h>
 #include <linux/kexec.h>
 #include <linux/utsname.h>
+#include <linux/hardirq.h>

 static char dump_stack_arch_desc_str[128];

@@ -56,6 +58,11 @@
                 printk("%sHardware name: %s\n",
                        log_lvl, dump_stack_arch_desc_str);

+#ifdef CONFIG_IRQ_PIPELINE
+        printk("%sIRQ stage: %s\n",
+               log_lvl, current_irq_stage->name);
+#endif
+
         print_worker_info(log_lvl, current);
 }

@@ -85,6 +92,29 @@
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);

+static unsigned long disable_local_irqs(void)
+{
+        unsigned long flags = 0;        /* only to trick the UMR detection */
+
+        /*
+         * We neither need nor want to disable in-band IRQs over the
+         * oob stage, where CPU migration can't happen. Conversely, we
+         * neither need nor want to disable hard IRQs from the oob
+         * stage, so that latency won't skyrocket as a result of
+         * dumping the stack backtrace.
+         */
+        if (running_inband() && !on_pipeline_entry())
+                local_irq_save(flags);
+
+        return flags;
+}
+
+static void restore_local_irqs(unsigned long flags)
+{
+        if (running_inband() && !on_pipeline_entry())
+                local_irq_restore(flags);
+}
+
 asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
 {
         unsigned long flags;
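Before the call-site hunks below, here is a minimal standalone sketch of the policy that disable_local_irqs()/restore_local_irqs() implement. The pipeline predicates and masking macros are stubs built on illustrative assumptions (a plain irqs_masked flag, an in-band caller outside the pipeline entry path), so the sketch compiles in isolation; only the decision logic mirrors the patch.

/* Standalone analog: every kernel symbol below is a stub. */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_masked;                                /* models the local IRQ mask */
static bool running_inband(void)    { return true;  }   /* stub: in-band caller */
static bool on_pipeline_entry(void) { return false; }   /* stub: not on entry path */
#define local_irq_save(flags)    do { (flags) = irqs_masked; irqs_masked = true; } while (0)
#define local_irq_restore(flags) do { irqs_masked = (flags); } while (0)

/* Same decision as the patch: mask only when running in-band,
 * outside of the pipeline entry path. */
static unsigned long disable_local_irqs(void)
{
        unsigned long flags = 0;        /* pre-initialized, as in the patch */

        if (running_inband() && !on_pipeline_entry())
                local_irq_save(flags);

        return flags;
}

static void restore_local_irqs(unsigned long flags)
{
        if (running_inband() && !on_pipeline_entry())
                local_irq_restore(flags);
}

int main(void)
{
        unsigned long flags = disable_local_irqs();

        printf("masked inside section: %d\n", irqs_masked);     /* 1 */
        restore_local_irqs(flags);
        printf("masked after restore:  %d\n", irqs_masked);     /* 0 */
        return 0;
}

Note that the restore path re-evaluates the same predicates as the save path rather than encoding the outcome in flags, keeping the pair symmetric.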
@@ -97,7 +127,7 @@
          * against other CPUs
          */
 retry:
-        local_irq_save(flags);
+        flags = disable_local_irqs();
         cpu = smp_processor_id();
         old = atomic_cmpxchg(&dump_lock, -1, cpu);
         if (old == -1) {
@@ -105,7 +135,7 @@
         } else if (old == cpu) {
                 was_locked = 1;
         } else {
-                local_irq_restore(flags);
+                restore_local_irqs(flags);
                 /*
                  * Wait for the lock to release before jumping to
                  * atomic_cmpxchg() in order to mitigate the thundering herd
@@ -120,7 +150,7 @@
         if (!was_locked)
                 atomic_set(&dump_lock, -1);

-        local_irq_restore(flags);
+        restore_local_irqs(flags);
 }
 #else
 asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
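The dump_lock scheme these helpers slot into is otherwise unchanged by the patch. For context, a hypothetical userspace rendition of it using C11 atomics follows, with thread ids standing in for CPU ids and all names illustrative; the conditional IRQ masking shown earlier is elided here, though the two compose in dump_stack_lvl().

/* Hypothetical userspace analog of the dump_lock scheme (C11). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_lock = -1;       /* -1: free, otherwise owner id */
static _Thread_local int self_id;       /* stand-in for smp_processor_id() */

static void dump_stack_analog(void)
{
        int old, was_locked = 0;

retry:
        old = -1;
        /* Try to take the lock; permit recursion by the current owner. */
        if (!atomic_compare_exchange_strong(&dump_lock, &old, self_id)) {
                if (old == self_id) {
                        was_locked = 1;         /* nested on the owner: proceed */
                } else {
                        /* Mitigate the thundering herd: spin until the lock
                         * is released, then race for it again. */
                        while (atomic_load(&dump_lock) != -1)
                                ;
                        goto retry;
                }
        }

        printf("dumping from %d\n", self_id);   /* the serialized section */

        if (!was_locked)
                atomic_store(&dump_lock, -1);   /* only the outermost owner releases */
}

int main(void)
{
        self_id = 0;
        dump_stack_analog();
        return 0;
}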