2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/soc/rockchip/minidump/minidump_log.c
@@ -36,7 +36,6 @@
 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
 #include <linux/bits.h>
 #include <linux/sched/prio.h>
-#include <asm/memory.h>
 
 #include "../../../kernel/sched/sched.h"
 
@@ -49,6 +48,7 @@
 #include <linux/module.h>
 #include <linux/cma.h>
 #include <linux/dma-map-ops.h>
+#include <asm-generic/irq_regs.h>
 #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
 #include <trace/hooks/debug.h>
 #endif
@@ -149,6 +149,7 @@
 
 static struct md_region note_md_entry;
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct elf_prstatus *, cpu_epr);
+static struct elf_prstatus *epr_hang_task[8];
 
 static int register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size)
 {
@@ -594,14 +595,15 @@
 
 static void register_note_section(void)
 {
-	int ret = 0, i = 0;
+	int ret = 0, i = 0, j = 0;
 	size_t data_len;
 	Elf_Word *buf;
 	void *buffer_start;
 	struct elf_prstatus *epr;
+	struct user_pt_regs *regs;
 	struct md_region *mdr = &note_md_entry;
 
-	buffer_start = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	buffer_start = kzalloc(PAGE_SIZE * 2, GFP_KERNEL);
 	if (!buffer_start)
 		return;
 
@@ -611,11 +613,26 @@
 
 	buf = (Elf_Word *)mdr->virt_addr;
 	data_len = sizeof(struct elf_prstatus);
+
 	for_each_possible_cpu(i) {
 		buf = append_elf_note(buf, "CORE", NT_PRSTATUS, data_len);
 		epr = (struct elf_prstatus *)buf;
 		epr->pr_pid = i;
 		per_cpu(cpu_epr, i) = epr;
+		regs = (struct user_pt_regs *)&epr->pr_reg;
+		regs->pc = (u64)register_note_section; /* placeholder PC, overwritten once real registers are captured */
+
+		buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
+	}
+
+	j = i;
+	for (; i < j + 8; i++) { /* one note per epr_hang_task slot */
+		buf = append_elf_note(buf, "TASK", NT_PRSTATUS, data_len);
+		epr = (struct elf_prstatus *)buf;
+		epr->pr_pid = i;
+		epr_hang_task[i - j] = epr;
+		regs = (struct user_pt_regs *)&epr->pr_reg;
+		regs->pc = (u64)register_note_section; /* placeholder PC, overwritten once real registers are captured */
 		buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
 	}
 
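The note buffer now has to hold up to 16 NT_PRSTATUS notes (one per possible CPU plus the eight epr_hang_task slots), which no longer fits in one page; hence the switch to kzalloc(PAGE_SIZE * 2, ...) above. A standalone sizing sketch, not driver code: append_elf_note() lays a note out as a 12-byte header followed by the name and the payload, each padded to a 4-byte boundary. The 392-byte elf_prstatus is an assumption for arm64; check sizeof(struct elf_prstatus) in your tree.

	#include <stdio.h>

	#define ALIGN4(x) (((x) + 3UL) & ~3UL)

	/* bytes one ELF note occupies: Elf_Nhdr + padded name + padded payload */
	static unsigned long note_bytes(unsigned long namesz, unsigned long descsz)
	{
		return 12 + ALIGN4(namesz) + ALIGN4(descsz);
	}

	int main(void)
	{
		unsigned long total = 16 * note_bytes(sizeof("CORE"), 392);

		/* prints 6592: more than one 4 KiB page, comfortably inside two */
		printf("16 prstatus notes: %lu bytes\n", total);
		return 0;
	}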
@@ -626,17 +643,165 @@
 		pr_err("Failed to add %s entry in Minidump\n", mdr->name);
 }
 
+static int md_register_minidump_entry(char *name, u64 virt_addr,
+				      u64 phys_addr, u64 size)
+{
+	struct md_region md_entry;
+	int ret;
+
+	strscpy(md_entry.name, name, sizeof(md_entry.name));
+	md_entry.virt_addr = virt_addr;
+	md_entry.phys_addr = phys_addr;
+	md_entry.size = size;
+	ret = rk_minidump_add_region(&md_entry);
+	if (ret < 0)
+		pr_err("Failed to add %s entry in Minidump\n", name);
+	return ret;
+}
+
+static struct page *md_vmalloc_to_page(const void *vmalloc_addr)
+{
+	unsigned long addr = (unsigned long)vmalloc_addr;
+	struct page *page = NULL;
+	pgd_t *pgd = pgd_offset_k(addr);
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+
+	if (pgd_none(*pgd))
+		return NULL;
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d))
+		return NULL;
+	pud = pud_offset(p4d, addr);
+
+	if (pud_none(*pud) || pud_bad(*pud))
+		return NULL;
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return NULL;
+
+	ptep = pte_offset_map(pmd, addr);
+	pte = *ptep;
+	if (pte_present(pte))
+		page = pte_page(pte);
+	pte_unmap(ptep);
+	return page;
+}
+
+static bool md_is_kernel_address(u64 addr)
+{
+	u32 data;
+	u64 phys_addr = 0;
+	struct page *page;
+
+	if (!is_ttbr1_addr(addr))
+		return false;
+
+	if (addr >= (u64)_text && addr < (u64)_end)
+		return false;
+
+	if (__is_lm_address(addr)) {
+		phys_addr = virt_to_phys((void *)addr);
+	} else if (is_vmalloc_or_module_addr((const void *)addr)) {
+		page = md_vmalloc_to_page((const void *)addr);
+		if (page)
+			phys_addr = page_to_phys(page);
+		else
+			return false;
+	} else {
+		return false;
+	}
+
+	if (!md_is_ddr_address(phys_addr))
+		return false;
+
+	if (aarch64_insn_read((void *)addr, &data))
+		return false;
+	else
+		return true;
+}
+
+static int md_save_page(u64 addr, bool flush)
+{
+	u64 phys_addr, virt_addr;
+	struct page *page;
+	char buf[32];
+	int ret;
+
+	if (md_is_kernel_address(addr)) {
+		if (!md_is_in_the_region(addr)) {
+			virt_addr = addr & PAGE_MASK;
+			sprintf(buf, "%x", (u32)(virt_addr >> 12));
+
+			if (__is_lm_address(virt_addr)) {
+				phys_addr = virt_to_phys((void *)virt_addr);
+			} else if (is_vmalloc_or_module_addr((const void *)virt_addr)) {
+				page = md_vmalloc_to_page((const void *)virt_addr);
+				phys_addr = page_to_phys(page);
+			} else {
+				return -1;
+			}
+
+			ret = md_register_minidump_entry(buf, (uintptr_t)virt_addr,
+							 phys_addr, PAGE_SIZE);
+			if (ret > 0 && flush)
+				rk_md_flush_dcache_area((void *)virt_addr, PAGE_SIZE);
+		} else {
+			if (flush)
+				rk_md_flush_dcache_area((void *)(addr & PAGE_MASK), PAGE_SIZE);
+		}
+		return 0;
+	}
+	return -1;
+}
+
+static void md_save_pages(u64 addr, bool flush)
+{
+	u64 *p, *end;
+
+	if (!md_save_page(addr, flush)) {
+		addr &= ~0x7;
+		p = (u64 *)addr;
+		end = (u64 *)((addr & ~(PAGE_SIZE - 1)) + PAGE_SIZE);
+		while (p < end) {
+			if (!md_is_kernel_address((u64)p))
+				break;
+			md_save_page(*p++, flush);
+		}
+	}
+}
+
 void rk_minidump_update_cpu_regs(struct pt_regs *regs)
 {
 	int cpu = raw_smp_processor_id();
+	struct user_pt_regs *old_regs;
+	int i = 0;
+
 	struct elf_prstatus *epr = per_cpu(cpu_epr, cpu);
 
 	if (!epr)
 		return;
 
+	if (system_state == SYSTEM_RESTART)
+		return;
+
+	old_regs = (struct user_pt_regs *)&epr->pr_reg;
+	/* if epr has already been saved, don't save it again in the panic notifier */
+	if (old_regs->sp != 0)
+		return;
+
 	memcpy((void *)&epr->pr_reg, (void *)regs, sizeof(elf_gregset_t));
 	rk_md_flush_dcache_area((void *)&epr->pr_reg, sizeof(elf_gregset_t));
 	rk_md_flush_dcache_area((void *)(regs->sp & ~(PAGE_SIZE - 1)), PAGE_SIZE);
+
+	/* dump the page the stack pointer sits in */
+	md_save_pages(regs->sp, true);
+
+	/* dump x0-x28 (x29 is the frame pointer, x30 the link register) */
+	for (i = 0; i < 29; i++)
+		md_save_pages(regs->regs[i], true);
 }
 EXPORT_SYMBOL(rk_minidump_update_cpu_regs);
 
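md_save_page() turns an arbitrary kernel pointer into a dump region: md_is_kernel_address() validates it (TTBR1 range, outside the kernel image, resolvable to a DDR physical page, readable via aarch64_insn_read()), the physical address comes from the linear map or the open-coded md_vmalloc_to_page() walk, and the surrounding page is registered under a name derived from the address. md_save_pages() then goes one level deeper: it treats the rest of the page as an array of u64 and saves every page those values point to, so the dump captures not only each stack page but also what the stack references. One caveat when post-processing: the region name keeps only the low 32 bits of the virtual page frame number, so the full address is not recoverable from the name alone. A hypothetical helper (name and intent mine, not the driver's):

	#include <stdint.h>
	#include <stdlib.h>

	/*
	 * md_save_page() names each region with
	 *     sprintf(buf, "%x", (u32)(virt_addr >> 12));
	 * i.e. the low 32 bits of the virtual PFN.  This inverts that naming
	 * as far as possible: only bits [43:12] of the original address come
	 * back; the high bits are gone, so distinct pages can collide.
	 */
	static uint64_t md_region_name_to_vaddr_bits(const char *name)
	{
		return strtoull(name, NULL, 16) << 12;
	}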
@@ -1028,7 +1193,10 @@
 
 	seq_buf_printf(md_cntxt_seq_buf, "PANIC CPU : %d\n",
 		       raw_smp_processor_id());
-	md_reg_context_data(&regs);
+	if (in_interrupt())
+		md_reg_context_data(get_irq_regs());
+	else
+		md_reg_context_data(&regs);
 }
 
 static int md_die_context_notify(struct notifier_block *self,
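When the panic happens in interrupt context, the locally captured regs describe the notifier's own frame rather than the interrupted code, so the hunk above prefers get_irq_regs(), which is why asm-generic/irq_regs.h is now included. That helper returns the pt_regs the architecture's IRQ entry path stashed in a per-CPU slot. A minimal sketch of the mechanism, assuming only the standard set_irq_regs()/get_irq_regs() pair (illustrative, not driver code):

	#include <asm/irq_regs.h>

	/* The arch IRQ entry code does the equivalent of this around every
	 * handler: publish the interrupted context, restore it on exit. */
	static void irq_entry_like(struct pt_regs *regs)
	{
		struct pt_regs *old_regs = set_irq_regs(regs);

		/* ... handlers run; get_irq_regs() now returns 'regs' ... */

		set_irq_regs(old_regs);
	}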
@@ -1055,6 +1223,43 @@
 	.priority = INT_MAX - 2, /* < rk watchdog die notifier */
 };
 #endif
+
+static int rk_minidump_collect_hang_task(void)
+{
+	struct task_struct *g, *p;
+	struct elf_prstatus *epr;
+	struct user_pt_regs *regs;
+	int idx = 0, i = 0;
+
+	for_each_process_thread(g, p) {
+		touch_nmi_watchdog();
+		touch_all_softlockup_watchdogs();
+		if (p->state == TASK_UNINTERRUPTIBLE && p->state != TASK_IDLE) {
+			epr = epr_hang_task[idx++];
+			regs = (struct user_pt_regs *)&epr->pr_reg;
+			regs->regs[19] = (unsigned long)(p->thread.cpu_context.x19);
+			regs->regs[20] = (unsigned long)(p->thread.cpu_context.x20);
+			regs->regs[21] = (unsigned long)(p->thread.cpu_context.x21);
+			regs->regs[22] = (unsigned long)(p->thread.cpu_context.x22);
+			regs->regs[23] = (unsigned long)(p->thread.cpu_context.x23);
+			regs->regs[24] = (unsigned long)(p->thread.cpu_context.x24);
+			regs->regs[25] = (unsigned long)(p->thread.cpu_context.x25);
+			regs->regs[26] = (unsigned long)(p->thread.cpu_context.x26);
+			regs->regs[27] = (unsigned long)(p->thread.cpu_context.x27);
+			regs->regs[28] = (unsigned long)(p->thread.cpu_context.x28);
+			regs->regs[29] = (unsigned long)(p->thread.cpu_context.fp);
+			regs->sp = (unsigned long)(p->thread.cpu_context.sp);
+			regs->pc = (unsigned long)p->thread.cpu_context.pc;
+			md_save_pages(regs->sp, true);
+			for (i = 19; i < 29; i++)
+				md_save_pages(regs->regs[i], true);
+			rk_md_flush_dcache_area((void *)epr, sizeof(struct elf_prstatus));
+		}
+		if (idx >= 8)
+			return 0;
+	}
+	return 0;
+}
 
 static int md_panic_handler(struct notifier_block *this,
 			    unsigned long event, void *ptr)
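A task blocked in the scheduler has no pt_regs snapshot to copy; all that survives a context switch on arm64 is the callee-saved state that cpu_switch_to() writes into p->thread.cpu_context. That is why the loop above can only reconstruct x19-x28, fp, sp and pc for up to eight D-state tasks, which is still enough for a frame-pointer unwind. Note also that TASK_IDLE is defined as TASK_UNINTERRUPTIBLE | TASK_NOLOAD, so the equality test against TASK_UNINTERRUPTIBLE already excludes idle kthreads and the second comparison is redundant but harmless. For reference, the switch frame being copied, as defined in mainline arch/arm64/include/asm/processor.h (vendor trees may differ):

	struct cpu_context {
		unsigned long x19;
		unsigned long x20;
		unsigned long x21;
		unsigned long x22;
		unsigned long x23;
		unsigned long x24;
		unsigned long x25;
		unsigned long x26;
		unsigned long x27;
		unsigned long x28;
		unsigned long fp;
		unsigned long sp;
		unsigned long pc;
	};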
@@ -1093,6 +1298,8 @@
 	if (md_dma_buf_procs_addr)
 		md_dma_buf_procs(md_dma_buf_procs_addr, md_dma_buf_procs_size);
 
+	rk_minidump_collect_hang_task();
+
 	rk_minidump_flush_elfheader();
 	md_in_oops_handler = false;
 	return NOTIFY_DONE;
@@ -1102,22 +1309,6 @@
 	.notifier_call = md_panic_handler,
 	.priority = INT_MAX - 2,
 };
-
-static int md_register_minidump_entry(char *name, u64 virt_addr,
-				      u64 phys_addr, u64 size)
-{
-	struct md_region md_entry;
-	int ret;
-
-	strscpy(md_entry.name, name, sizeof(md_entry.name));
-	md_entry.virt_addr = virt_addr;
-	md_entry.phys_addr = phys_addr;
-	md_entry.size = size;
-	ret = rk_minidump_add_region(&md_entry);
-	if (ret < 0)
-		pr_err("Failed to add %s entry in Minidump\n", name);
-	return ret;
-}
 
 static int md_register_panic_entries(int num_pages, char *name,
 				     struct seq_buf **global_buf)
@@ -1249,6 +1440,43 @@
 }
 #endif /* CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP */
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+int rk_minidump_hardlock_notify(struct notifier_block *nb, unsigned long event,
+				void *p)
+{
+	struct elf_prstatus *epr;
+	struct user_pt_regs *regs;
+	unsigned long hardlock_cpu = event;
+#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
+	int i = 0;
+	struct md_stack_cpu_data *md_stack_cpu_d;
+	struct md_region *mdr;
+#endif
+
+	if (hardlock_cpu >= num_possible_cpus())
+		return NOTIFY_DONE;
+
+#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
+	md_stack_cpu_d = &per_cpu(md_stack_data, hardlock_cpu);
+	for (i = 0; i < STACK_NUM_PAGES; i++) {
+		mdr = &md_stack_cpu_d->stack_mdr[i];
+		if (md_is_kernel_address(mdr->virt_addr))
+			rk_md_flush_dcache_area((void *)mdr->virt_addr, mdr->size);
+	}
+#endif
+	epr = per_cpu(cpu_epr, hardlock_cpu);
+	if (!epr)
+		return NOTIFY_DONE;
+	regs = (struct user_pt_regs *)&epr->pr_reg;
+	regs->pc = (u64)p;
+#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
+	regs->sp = mdr->virt_addr + mdr->size;
+#endif
+	rk_md_flush_dcache_area((void *)epr, sizeof(struct elf_prstatus));
+	return NOTIFY_OK;
+}
+#endif
+
 int rk_minidump_log_init(void)
 {
 	is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);
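rk_minidump_hardlock_notify() receives the locked-up CPU number in event and an instruction pointer through p, patches that CPU's prstatus note, and, under CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK, flushes its stack pages; after that flush loop, mdr is left pointing at the last per-CPU stack page, so regs->sp is set to that page's upper end. The caller is expected to be a notifier chain in the vendor lockup-detector code. A registration sketch, with the chain name invented for illustration:

	/* 'rk_hardlock_notifier_list' is a made-up chain name; the real one
	 * lives in the vendor hardlockup-detector code. */
	static struct notifier_block md_hardlock_nb = {
		.notifier_call = rk_minidump_hardlock_notify,
	};

	static int __init md_hardlock_notifier_init(void)
	{
		return atomic_notifier_chain_register(&rk_hardlock_notifier_list,
						      &md_hardlock_nb);
	}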