...
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 #include <linux/rcupdate.h>
+#include <linux/irq.h>
+#include <linux/security.h>
 
 #include <asm/cacheflush.h>
 #include <asm/byteorder.h>
...
 
 struct debuggerinfo_struct kgdb_info[NR_CPUS];
 
-/**
- * kgdb_connected - Is a host GDB connected to us?
- */
+/* kgdb_connected - Is a host GDB connected to us? */
 int kgdb_connected;
 EXPORT_SYMBOL_GPL(kgdb_connected);
 
...
 struct kgdb_io *dbg_io_ops;
 static DEFINE_SPINLOCK(kgdb_registration_lock);
 
-/* Action for the reboot notifiter, a global allow kdb to change it */
+/* Action for the reboot notifier, a global allow kdb to change it */
 static int kgdbreboot;
 /* kgdb console driver is loaded */
 static int kgdb_con_registered;
...
 
 /*
  * Weak aliases for breakpoint management,
- * can be overriden by architectures when needed:
+ * can be overridden by architectures when needed:
  */
 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
 	int err;
 
-	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+	err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
 				BREAK_INSTR_SIZE);
 	if (err)
 		return err;
-	err = probe_kernel_write((char *)bpt->bpt_addr,
+	err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
 				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
 	return err;
 }
+NOKPROBE_SYMBOL(kgdb_arch_set_breakpoint);
 
 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-	return probe_kernel_write((char *)bpt->bpt_addr,
+	return copy_to_kernel_nofault((char *)bpt->bpt_addr,
 				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
+NOKPROBE_SYMBOL(kgdb_arch_remove_breakpoint);
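
The two helpers above were switched from probe_kernel_read()/probe_kernel_write() to copy_from_kernel_nofault()/copy_to_kernel_nofault(); the semantics are the same (copies to and from kernel addresses that fail gracefully instead of oopsing on a bad pointer). A minimal sketch of the save-then-patch pattern they enable is below; patch_text_word() is a hypothetical illustrative wrapper, not something defined in this file:

#include <linux/uaccess.h>

static long patch_text_word(void *addr, const void *new_insn,
			    void *saved, size_t len)
{
	long err;

	/* Save the original bytes so the caller can restore them later. */
	err = copy_from_kernel_nofault(saved, addr, len);
	if (err)
		return err;

	/* Install the new bytes; this can still fail, e.g. on read-only text. */
	return copy_to_kernel_nofault(addr, new_insn, len);
}
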
 
 int __weak kgdb_validate_break_address(unsigned long addr)
 {
 	struct kgdb_bkpt tmp;
 	int err;
+
+	if (kgdb_within_blocklist(addr))
+		return -EINVAL;
+
 	/* Validate setting the breakpoint and then removing it.  If the
 	 * remove fails, the kernel needs to emit a bad message because we
 	 * are deep trouble not being able to put things back the way we
...
 {
 	return instruction_pointer(regs);
 }
+NOKPROBE_SYMBOL(kgdb_arch_pc);
 
 int __weak kgdb_arch_init(void)
 {
...
 {
 	return 0;
 }
+NOKPROBE_SYMBOL(kgdb_skipexception);
+
+#ifdef CONFIG_SMP
+
+/*
+ * Default (weak) implementation for kgdb_roundup_cpus
+ */
+
+static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);
+
+void __weak kgdb_call_nmi_hook(void *ignored)
+{
+	/*
+	 * NOTE: get_irq_regs() is supposed to get the registers from
+	 * before the IPI interrupt happened and so is supposed to
+	 * show where the processor was.  In some situations it's
+	 * possible we might be called without an IPI, so it might be
+	 * safer to figure out how to make kgdb_breakpoint() work
+	 * properly here.
+	 */
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
+
+void __weak kgdb_roundup_cpus(void)
+{
+	call_single_data_t *csd;
+	int this_cpu = raw_smp_processor_id();
+	int cpu;
+	int ret;
+
+	for_each_online_cpu(cpu) {
+		/* No need to roundup ourselves */
+		if (cpu == this_cpu)
+			continue;
+
+		csd = &per_cpu(kgdb_roundup_csd, cpu);
+
+		/*
+		 * If it didn't round up last time, don't try again
+		 * since smp_call_function_single_async() will block.
+		 *
+		 * If rounding_up is false then we know that the
+		 * previous call must have at least started and that
+		 * means smp_call_function_single_async() won't block.
+		 */
+		if (kgdb_info[cpu].rounding_up)
+			continue;
+		kgdb_info[cpu].rounding_up = true;
+
+		csd->func = kgdb_call_nmi_hook;
+		ret = smp_call_function_single_async(cpu, csd);
+		if (ret)
+			kgdb_info[cpu].rounding_up = false;
+	}
+}
+NOKPROBE_SYMBOL(kgdb_roundup_cpus);
+
+#endif
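
The default kgdb_roundup_cpus() above herds every other online CPU into the debugger with an asynchronous smp_call_function IPI. The per-CPU rounding_up flag guards against re-queueing a csd that is still in flight from a previous, unfinished round-up, since (as the comment notes) calling smp_call_function_single_async() again in that state would block. Because the symbol stays weak, an architecture with a stronger mechanism (for instance a true NMI IPI that can reach a CPU spinning with interrupts off) can still override it. A hypothetical sketch of such an override, where arch_send_debug_ipi() is an assumed arch-specific helper and each receiving CPU is expected to call kgdb_nmicallback() just as kgdb_call_nmi_hook() does:

void kgdb_roundup_cpus(void)
{
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(raw_smp_processor_id(), &mask);

	/* arch_send_debug_ipi() is illustrative, not an existing API. */
	arch_send_debug_ipi(&mask);
}
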
 
 /*
  * Some architectures need cache flushes when we set/clear a
...
 	/* Force flush instruction cache if it was outside the mm */
 	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
 }
+NOKPROBE_SYMBOL(kgdb_flush_swbreak_addr);
 
 /*
  * SW breakpoint management:
...
 	}
 	return ret;
 }
+NOKPROBE_SYMBOL(dbg_activate_sw_breakpoints);
 
 int dbg_set_sw_break(unsigned long addr)
 {
...
 	}
 	return ret;
 }
+NOKPROBE_SYMBOL(dbg_deactivate_sw_breakpoints);
 
 int dbg_remove_sw_break(unsigned long addr)
 {
...
 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
 		if ((kgdb_break[i].state == BP_REMOVED) &&
 					(kgdb_break[i].bpt_addr == addr))
+			return 1;
+	}
+	return 0;
+}
+
+int kgdb_has_hit_break(unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (kgdb_break[i].state == BP_ACTIVE &&
+		    kgdb_break[i].bpt_addr == addr)
 			return 1;
 	}
 	return 0;
...
 	return 0;
 }
 
+void kgdb_free_init_mem(void)
+{
+	int i;
+
+	/* Clear init memory breakpoints. */
+	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+		if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
+			kgdb_break[i].state = BP_UNDEFINED;
+	}
+}
+
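
kgdb_free_init_mem() marks any breakpoint whose address falls in an init section as BP_UNDEFINED, so the slot simply becomes reusable and no attempt is made later to unpatch text that is going away. It is meant to be called from the init-memory teardown path; the exact call site is outside this excerpt, so the line below is only an illustration of intent:

	/* Illustrative caller: invalidate kgdb breakpoints in __init text
	 * when init memory is released. */
	kgdb_free_init_mem();
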
+#ifdef CONFIG_KGDB_KDB
+void kdb_dump_stack_on_cpu(int cpu)
+{
+	if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
+		dump_stack();
+		return;
+	}
+
+	if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
+		kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
+			   cpu);
+		return;
+	}
+
+	/*
+	 * In general, architectures don't support dumping the stack of a
+	 * "running" process that's not the current one.  From the point of
+	 * view of the Linux, kernel processes that are looping in the kgdb
+	 * slave loop are still "running".  There's also no API (that actually
+	 * works across all architectures) that can do a stack crawl based
+	 * on registers passed as a parameter.
+	 *
+	 * Solve this conundrum by asking slave CPUs to do the backtrace
+	 * themselves.
+	 */
+	kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
+	while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
+		cpu_relax();
+}
+#endif
+
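
kdb_dump_stack_on_cpu() implements the handshake described in the comment: the requesting CPU sets DCPU_WANT_BT and spins, while the target CPU, parked in kgdb_cpu_enter() as a slave (see the DCPU_WANT_BT branch in a later hunk), notices the flag, runs dump_stack() itself and then clears it. A hedged sketch of how a kdb-style "backtrace the other CPUs" command could drive it; kdb_bt_all_cpus() is an illustrative name, not the real kdb command implementation:

static void kdb_bt_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		kdb_printf("Stack traceback for CPU %d\n", cpu);
		kdb_dump_stack_on_cpu(cpu);
	}
}
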
 /*
  * Return true if there is a valid kgdb I/O module.  Also if no
  * debugger is attached a message can be printed to the console about
...
 	}
 	return 1;
 }
+NOKPROBE_SYMBOL(kgdb_io_ready);
 
 static int kgdb_reenter_check(struct kgdb_state *ks)
 {
...
 
 	return 1;
 }
+NOKPROBE_SYMBOL(kgdb_reenter_check);
 
 static void dbg_touch_watchdogs(void)
 {
...
 	clocksource_touch_watchdog();
 	rcu_cpu_stall_reset();
 }
+NOKPROBE_SYMBOL(dbg_touch_watchdogs);
 
 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 			  int exception_state)
...
 				atomic_xchg(&kgdb_active, cpu);
 				break;
 			}
+		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
+			dump_stack();
+			kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
 		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
 			if (!raw_spin_is_locked(&dbg_slave_lock))
 				goto return_normal;
...
 
 	/* Signal the other CPUs to enter kgdb_wait() */
 	else if ((!kgdb_single_step) && kgdb_do_roundup)
-		kgdb_roundup_cpus(flags);
+		kgdb_roundup_cpus();
 #endif
 
 	/*
...
 				continue;
 			kgdb_connected = 0;
 		} else {
+			/*
+			 * This is a brutal way to interfere with the debugger
+			 * and prevent gdb being used to poke at kernel memory.
+			 * This could cause trouble if lockdown is applied when
+			 * there is already an active gdb session. For now the
+			 * answer is simply "don't do that". Typically lockdown
+			 * *will* be applied before the debug core gets started
+			 * so only developers using kgdb for fairly advanced
+			 * early kernel debug can be biten by this. Hopefully
+			 * they are sophisticated enough to take care of
+			 * themselves, especially with help from the lockdown
+			 * message printed on the console!
+			 */
+			if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
+				if (IS_ENABLED(CONFIG_KGDB_KDB)) {
+					/* Switch back to kdb if possible... */
+					dbg_kdb_mode = 1;
+					continue;
+				} else {
+					/* ... otherwise just bail */
+					break;
+				}
+			}
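
The check above keeps the raw gdb stub from running once the kernel is locked down for LOCKDOWN_DBG_WRITE_KERNEL: it falls back to kdb when that is built in, and bails out of the debug loop otherwise. The same gate can protect any other path that would let a debugger modify kernel memory; a hypothetical helper combining it with the nofault copy API seen earlier (dbg_write_mem() is not part of this patch):

static long dbg_write_mem(void *addr, const void *buf, size_t len)
{
	/* security_locked_down() returns 0 when the operation is allowed. */
	if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL))
		return -EPERM;

	return copy_to_kernel_nofault(addr, buf, len);
}
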
 			error = gdb_serial_stub(ks);
 		}
 
...
 			break;
 		}
 	}
+
+	dbg_activate_sw_breakpoints();
 
 	/* Call the I/O driver's post_exception routine */
 	if (dbg_io_ops->post_exception)
...
 
 	return kgdb_info[cpu].ret_state;
 }
+NOKPROBE_SYMBOL(kgdb_cpu_enter);
 
 /*
  * kgdb_handle_exception() - main entry point from a kernel exception
...
 		arch_kgdb_ops.enable_nmi(1);
 	return ret;
 }
+NOKPROBE_SYMBOL(kgdb_handle_exception);
 
 /*
- * GDB places a breakpoint at this function to know dynamically
- * loaded objects. It's not defined static so that only one instance with this
- * name exists in the kernel.
+ * GDB places a breakpoint at this function to know dynamically loaded objects.
  */
-
 static int module_event(struct notifier_block *self, unsigned long val,
 	void *data)
 {
...
 	struct kgdb_state kgdb_var;
 	struct kgdb_state *ks = &kgdb_var;
 
+	kgdb_info[cpu].rounding_up = false;
+
 	memset(ks, 0, sizeof(struct kgdb_state));
 	ks->cpu = cpu;
 	ks->linux_regs = regs;
...
 #endif
 	return 1;
 }
+NOKPROBE_SYMBOL(kgdb_nmicallback);
 
 int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
 		   atomic_t *send_ready)
...
 #endif
 	return 1;
 }
+NOKPROBE_SYMBOL(kgdb_nmicallin);
 
 static void kgdb_console_write(struct console *co, const char *s,
 				unsigned count)
...
 	kgdb_breakpoint();
 }
 
-static struct sysrq_key_op sysrq_dbg_op = {
+static const struct sysrq_key_op sysrq_dbg_op = {
 	.handler	= sysrq_handle_dbg,
 	.help_msg	= "debug(g)",
 	.action_msg	= "DEBUG",
 };
 #endif
 
-static int kgdb_panic_event(struct notifier_block *self,
-			    unsigned long val,
-			    void *data)
+void kgdb_panic(const char *msg)
 {
+	if (!kgdb_io_module_registered)
+		return;
+
 	/*
-	 * Avoid entering the debugger if we were triggered due to a panic
-	 * We don't want to get stuck waiting for input from user in such case.
-	 * panic_timeout indicates the system should automatically
+	 * We don't want to get stuck waiting for input from user if
+	 * "panic_timeout" indicates the system should automatically
 	 * reboot on panic.
 	 */
 	if (panic_timeout)
-		return NOTIFY_DONE;
+		return;
 
 	if (dbg_kdb_mode)
-		kdb_printf("PANIC: %s\n", (char *)data);
+		kdb_printf("PANIC: %s\n", msg);
+
 	kgdb_breakpoint();
-	return NOTIFY_DONE;
 }
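
kgdb_panic() replaces the old kgdb_panic_event() notifier: rather than hanging off panic_notifier_list (that registration is removed further down), the panic path is expected to call it directly with the panic message. The function returns immediately when no I/O module is registered or when panic_timeout asks for an automatic reboot, so a caller does not need its own guards. A hedged sketch of such a caller; example_panic_path() is purely illustrative and not the real panic() implementation:

void example_panic_path(const char *msg)
{
	/* Returns straight away if kgdb has no I/O module registered. */
	kgdb_panic(msg);

	/* ... the rest of the panic handling would continue here ... */
}
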
 
-static struct notifier_block kgdb_panic_event_nb = {
-	.notifier_call	= kgdb_panic_event,
-	.priority	= INT_MAX,
-};
+static void kgdb_initial_breakpoint(void)
+{
+	kgdb_break_asap = 0;
+
+	pr_crit("Waiting for connection from remote gdb...\n");
+	kgdb_breakpoint();
+}
 
 void __weak kgdb_arch_late(void)
 {
...
 	if (kgdb_io_module_registered)
 		kgdb_arch_late();
 	kdb_init(KDB_INIT_FULL);
+
+	if (kgdb_io_module_registered && kgdb_break_asap)
+		kgdb_initial_breakpoint();
 }
 
 static int
...
 			kgdb_arch_late();
 		register_module_notifier(&dbg_module_load_nb);
 		register_reboot_notifier(&dbg_reboot_notifier);
-		atomic_notifier_chain_register(&panic_notifier_list,
-					       &kgdb_panic_event_nb);
 #ifdef CONFIG_MAGIC_SYSRQ
 		register_sysrq_key('g', &sysrq_dbg_op);
 #endif
...
 static void kgdb_unregister_callbacks(void)
 {
 	/*
-	 * When this routine is called KGDB should unregister from the
-	 * panic handler and clean up, making sure it is not handling any
+	 * When this routine is called KGDB should unregister from
+	 * handlers and clean up, making sure it is not handling any
 	 * break exceptions at the time.
 	 */
 	if (kgdb_io_module_registered) {
 		kgdb_io_module_registered = 0;
 		unregister_reboot_notifier(&dbg_reboot_notifier);
 		unregister_module_notifier(&dbg_module_load_nb);
-		atomic_notifier_chain_unregister(&panic_notifier_list,
-						 &kgdb_panic_event_nb);
 		kgdb_arch_exit();
 #ifdef CONFIG_MAGIC_SYSRQ
 		unregister_sysrq_key('g', &sysrq_dbg_op);
...
 	atomic_set(&kgdb_break_tasklet_var, 0);
 }
 
-static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+static DECLARE_TASKLET_OLD(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt);
 
 void kgdb_schedule_breakpoint(void)
 {
...
 }
 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
 
-static void kgdb_initial_breakpoint(void)
-{
-	kgdb_break_asap = 0;
-
-	pr_crit("Waiting for connection from remote gdb...\n");
-	kgdb_breakpoint();
-}
-
 /**
  * kgdb_register_io_module - register KGDB IO module
  * @new_dbg_io_ops: the io ops vector
...
  */
 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
 {
+	struct kgdb_io *old_dbg_io_ops;
 	int err;
 
 	spin_lock(&kgdb_registration_lock);
 
-	if (dbg_io_ops) {
-		spin_unlock(&kgdb_registration_lock);
+	old_dbg_io_ops = dbg_io_ops;
+	if (old_dbg_io_ops) {
+		if (!old_dbg_io_ops->deinit) {
+			spin_unlock(&kgdb_registration_lock);
 
-		pr_err("Another I/O driver is already registered with KGDB\n");
-		return -EBUSY;
+			pr_err("KGDB I/O driver %s can't replace %s.\n",
+			       new_dbg_io_ops->name, old_dbg_io_ops->name);
+			return -EBUSY;
+		}
+		pr_info("Replacing I/O driver %s with %s\n",
+			old_dbg_io_ops->name, new_dbg_io_ops->name);
 	}
 
 	if (new_dbg_io_ops->init) {
...
 
 	spin_unlock(&kgdb_registration_lock);
 
+	if (old_dbg_io_ops) {
+		old_dbg_io_ops->deinit();
+		return 0;
+	}
+
 	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
 
 	/* Arm KGDB now. */
 	kgdb_register_callbacks();
 
-	if (kgdb_break_asap)
+	if (kgdb_break_asap &&
+	    (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
 		kgdb_initial_breakpoint();
 
 	return 0;
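
With this change kgdb_register_io_module() can replace an already-registered I/O driver, but only if the old driver supplies a deinit hook so it can be torn down after the handover; otherwise registration still fails with -EBUSY as before. A hedged sketch of an early boot-time driver that opts in to being replaced later; the early_dbg_* helpers are hypothetical names and only the fields relevant here are shown:

static struct kgdb_io early_dbg_io_ops = {
	.name		= "early_dbg",
	.read_char	= early_dbg_get_char,	/* hypothetical */
	.write_char	= early_dbg_put_char,	/* hypothetical */
	.deinit		= early_dbg_deinit,	/* makes the driver replaceable */
};

A later, full-featured driver can then simply call kgdb_register_io_module() with its own struct kgdb_io, and the early driver's deinit() is invoked once the registration lock has been dropped.
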
...
 	dbg_io_ops = NULL;
 
 	spin_unlock(&kgdb_registration_lock);
+
+	if (old_dbg_io_ops->deinit)
+		old_dbg_io_ops->deinit();
 
 	pr_info("Unregistered I/O driver %s, debugger disabled\n",
 		old_dbg_io_ops->name);
...
 	kgdb_break_asap = 1;
 
 	kdb_init(KDB_INIT_EARLY);
-	if (kgdb_io_module_registered)
+	if (kgdb_io_module_registered &&
+	    IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
 		kgdb_initial_breakpoint();
 
 	return 0;
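
This last hunk sits in the handler for the kgdbwait boot parameter (kgdb_break_asap is set just above): the very early breakpoint taken right after kdb_init(KDB_INIT_EARLY) is now only allowed when the architecture selects CONFIG_ARCH_HAS_EARLY_DEBUG; otherwise it is deferred until dbg_late_init() or until the I/O driver registers, as in the earlier hunks touching kgdb_initial_breakpoint(). A typical boot-time configuration that exercises this path, shown purely as an illustration:

	kgdboc=ttyS0,115200 kgdbwait
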