2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/x86/kernel/kgdb.c
@@ -1,14 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
  */
 
 /*
@@ -127,14 +118,6 @@
 
 #ifdef CONFIG_X86_32
 	switch (regno) {
-	case GDB_SS:
-		if (!user_mode(regs))
-			*(unsigned long *)mem = __KERNEL_DS;
-		break;
-	case GDB_SP:
-		if (!user_mode(regs))
-			*(unsigned long *)mem = kernel_stack_pointer(regs);
-		break;
 	case GDB_GS:
 	case GDB_FS:
 		*(unsigned long *)mem = 0xFFFF;
@@ -422,23 +405,18 @@
 #ifdef CONFIG_SMP
 /**
  * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
  *
  * On SMP systems, we need to get the attention of the other CPUs
  * and get them be in a known state. This should do what is needed
  * to get the other CPUs to call kgdb_wait(). Note that on some arches,
  * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * in case of MIPS, smp_call_function() is used to roundup CPUs.
  *
  * On non-SMP systems, this is not called.
  */
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
 {
-	apic->send_IPI_allbutself(NMI_VECTOR);
+	apic_send_IPI_allbutself(NMI_VECTOR);
 }
 #endif
 
@@ -472,6 +450,7 @@
 		ptr = &remcomInBuffer[1];
 		if (kgdb_hex2long(&ptr, &addr))
 			linux_regs->ip = addr;
+		fallthrough;
 	case 'D':
 	case 'k':
 		/* clear the trace bit */
@@ -560,7 +539,7 @@
 		 * a system call which should be ignored
 		 */
 		return NOTIFY_DONE;
-		/* fall through */
+		fallthrough;
 	default:
 		if (user_mode(regs))
 			return NOTIFY_DONE;
@@ -650,9 +629,10 @@
 	struct task_struct *tsk = current;
 	int i;
 
-	for (i = 0; i < 4; i++)
+	for (i = 0; i < 4; i++) {
 		if (breakinfo[i].enabled)
-			tsk->thread.debugreg6 |= (DR_TRAP0 << i);
+			tsk->thread.virtual_dr6 |= (DR_TRAP0 << i);
+	}
 }
 
 void kgdb_arch_late(void)
@@ -751,60 +731,49 @@
 int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
 	int err;
-	char opc[BREAK_INSTR_SIZE];
 
 	bpt->type = BP_BREAKPOINT;
-	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+	err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
 				BREAK_INSTR_SIZE);
 	if (err)
 		return err;
-	err = probe_kernel_write((char *)bpt->bpt_addr,
+	err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
 				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
 	if (!err)
 		return err;
 	/*
-	 * It is safe to call text_poke() because normal kernel execution
+	 * It is safe to call text_poke_kgdb() because normal kernel execution
 	 * is stopped on all cores, so long as the text_mutex is not locked.
 	 */
 	if (mutex_is_locked(&text_mutex))
 		return -EBUSY;
-	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
-		  BREAK_INSTR_SIZE);
-	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
-	if (err)
-		return err;
-	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
-		return -EINVAL;
+	text_poke_kgdb((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+		       BREAK_INSTR_SIZE);
 	bpt->type = BP_POKE_BREAKPOINT;
 
-	return err;
+	return 0;
 }
 
 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-	int err;
-	char opc[BREAK_INSTR_SIZE];
-
 	if (bpt->type != BP_POKE_BREAKPOINT)
 		goto knl_write;
 	/*
-	 * It is safe to call text_poke() because normal kernel execution
+	 * It is safe to call text_poke_kgdb() because normal kernel execution
 	 * is stopped on all cores, so long as the text_mutex is not locked.
 	 */
 	if (mutex_is_locked(&text_mutex))
 		goto knl_write;
-	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
-	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
-	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
-		goto knl_write;
-	return err;
+	text_poke_kgdb((void *)bpt->bpt_addr, bpt->saved_instr,
+		       BREAK_INSTR_SIZE);
+	return 0;
 
 knl_write:
-	return probe_kernel_write((char *)bpt->bpt_addr,
+	return copy_to_kernel_nofault((char *)bpt->bpt_addr,
 			(char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: */
 	.gdb_bpt_instr		= { 0xcc },
 	.flags			= KGDB_HW_BREAKPOINT,