@@ -27,6 +27,7 @@
 #include <linux/sysrq.h>
 #include <linux/init.h>
 #include <linux/nmi.h>
+#include <linux/irq_pipeline.h>
 #include <linux/console.h>
 #include <linux/bug.h>
 #include <linux/ratelimit.h>
@@ -49,7 +50,7 @@
 	IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
 static int pause_on_oops;
 static int pause_on_oops_flag;
-static DEFINE_SPINLOCK(pause_on_oops_lock);
+static DEFINE_HARD_SPINLOCK(pause_on_oops_lock);
 bool crash_kexec_post_notifiers;
 int panic_on_warn __read_mostly;
 unsigned long panic_on_taint;
@@ -189,8 +190,9 @@
 	 * there is nothing to prevent an interrupt handler (that runs
 	 * after setting panic_cpu) from invoking panic() again.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	preempt_disable_notrace();
+	irq_pipeline_oops();
 
 	/*
 	 * It's possible to come here directly from a panic-assertion and
@@ -267,9 +269,12 @@
 
 	/*
 	 * Run any panic handlers, including those that might need to
-	 * add information to the kmsg dump output.
+	 * add information to the kmsg dump output. Skip panic
+	 * handlers if running over the oob stage, as they would most
+	 * likely break.
 	 */
-	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+	if (running_inband())
+		atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
 	/* Call flush even twice. It tries harder with a single online CPU */
 	printk_safe_flush_on_panic();
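
running_inband() reports whether the current CPU is executing on the in-band stage. Panic notifiers are ordinary kernel code: they may take regular spinlocks, allocate, or call into drivers, all of which assume in-band context, hence the guard when the panic originates from the out-of-band stage. The same pattern applies to any callback chain that is unsafe out-of-band; a sketch, assuming the stage predicate from <linux/irq_pipeline.h> and a hypothetical chain (demo_notifier_list):

```c
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(demo_notifier_list);	/* hypothetical chain */

static void stage_aware_notify(void *data)
{
	/* Only walk the chain from in-band context; the callbacks
	 * may rely on services that are invalid on the oob stage. */
	if (running_inband())
		atomic_notifier_call_chain(&demo_notifier_list, 0, data);
}
```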
@@ -474,7 +479,7 @@
 	if (!pause_on_oops)
 		return;
 
-	spin_lock_irqsave(&pause_on_oops_lock, flags);
+	raw_spin_lock_irqsave(&pause_on_oops_lock, flags);
 	if (pause_on_oops_flag == 0) {
 		/* This CPU may now print the oops message */
 		pause_on_oops_flag = 1;
@@ -484,21 +489,21 @@
 			/* This CPU gets to do the counting */
 			spin_counter = pause_on_oops;
 			do {
-				spin_unlock(&pause_on_oops_lock);
+				raw_spin_unlock(&pause_on_oops_lock);
 				spin_msec(MSEC_PER_SEC);
-				spin_lock(&pause_on_oops_lock);
+				raw_spin_lock(&pause_on_oops_lock);
 			} while (--spin_counter);
 			pause_on_oops_flag = 0;
 		} else {
 			/* This CPU waits for a different one */
 			while (spin_counter) {
-				spin_unlock(&pause_on_oops_lock);
+				raw_spin_unlock(&pause_on_oops_lock);
 				spin_msec(1);
-				spin_lock(&pause_on_oops_lock);
+				raw_spin_lock(&pause_on_oops_lock);
 			}
 		}
 	}
-	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
+	raw_spin_unlock_irqrestore(&pause_on_oops_lock, flags);
 }
@@ -528,6 +533,7 @@
 {
 	tracing_off();
 	/* can't trust the integrity of the kernel anymore: */
+	irq_pipeline_oops();
 	debug_locks_off();
 	do_oops_enter_exit();
 
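
oops_enter() receives the same hook as panic(): irq_pipeline_oops() runs as soon as the kernel declares its own state untrustworthy, before lockdep is switched off and before the pause_on_oops handshake. The common shape of the two call sites in this patch could be summarized as below; this helper is purely illustrative and not part of the patch:

```c
/* Illustrative only: the panic path additionally hard-disables IRQs,
 * while the oops path just tells the pipeline about the emergency. */
static inline void pipeline_emergency_enter(bool is_panic)
{
	if (is_panic)
		hard_local_irq_disable();
	irq_pipeline_oops();
}
```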