.. | .. |
36 | 36 | #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP
37 | 37 | #include <linux/bits.h>
38 | 38 | #include <linux/sched/prio.h>
39 | | -#include <asm/memory.h>
40 | 39 |
41 | 40 | #include "../../../kernel/sched/sched.h"
42 | 41 |
.. | .. |
49 | 48 | #include <linux/module.h>
50 | 49 | #include <linux/cma.h>
51 | 50 | #include <linux/dma-map-ops.h>
| 51 | +#include <asm-generic/irq_regs.h>
52 | 52 | #ifdef CONFIG_ROCKCHIP_MINIDUMP_PANIC_CPU_CONTEXT
53 | 53 | #include <trace/hooks/debug.h>
54 | 54 | #endif
.. | .. |
149 | 149 |
150 | 150 | static struct md_region note_md_entry;
151 | 151 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct elf_prstatus *, cpu_epr);
| 152 | +static struct elf_prstatus *epr_hang_task[8];
152 | 153 |
153 | 154 | static int register_stack_entry(struct md_region *ksp_entry, u64 sp, u64 size)
154 | 155 | {
.. | .. |
594 | 595 |
595 | 596 | static void register_note_section(void)
596 | 597 | {
597 | | - int ret = 0, i = 0;
| 598 | + int ret = 0, i = 0, j = 0;
598 | 599 | size_t data_len;
599 | 600 | Elf_Word *buf;
600 | 601 | void *buffer_start;
601 | 602 | struct elf_prstatus *epr;
| 603 | + struct user_pt_regs *regs;
602 | 604 | struct md_region *mdr = &note_md_entry;
603 | 605 |
604 | | - buffer_start = kzalloc(PAGE_SIZE, GFP_KERNEL);
| 606 | + buffer_start = kzalloc(PAGE_SIZE * 2, GFP_KERNEL);
605 | 607 | if (!buffer_start)
606 | 608 | return;
607 | 609 |
.. | .. |
611 | 613 |
612 | 614 | buf = (Elf_Word *)mdr->virt_addr;
613 | 615 | data_len = sizeof(struct elf_prstatus);
| 616 | +
614 | 617 | for_each_possible_cpu(i) {
615 | 618 | buf = append_elf_note(buf, "CORE", NT_PRSTATUS, data_len);
616 | 619 | epr = (struct elf_prstatus *)buf;
617 | 620 | epr->pr_pid = i;
618 | 621 | per_cpu(cpu_epr, i) = epr;
| 622 | + regs = (struct user_pt_regs *)&epr->pr_reg;
| 623 | + regs->pc = (u64)register_note_section; /* placeholder PC: any resolvable kernel symbol */
| 624 | +
| 625 | + buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
| 626 | + }
| 627 | +
| 628 | + j = i;
| 629 | + for (; i < j + ARRAY_SIZE(epr_hang_task); i++) {
| 630 | + buf = append_elf_note(buf, "TASK", NT_PRSTATUS, data_len);
| 631 | + epr = (struct elf_prstatus *)buf;
| 632 | + epr->pr_pid = i;
| 633 | + epr_hang_task[i - j] = epr;
| 634 | + regs = (struct user_pt_regs *)&epr->pr_reg;
| 635 | + regs->pc = (u64)register_note_section; /* placeholder PC: any resolvable kernel symbol */
619 | 636 | buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
620 | 637 | }
621 | 638 | |
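
Note: the two loops above pack one NT_PRSTATUS note per possible CPU ("CORE") plus eight spare slots for hung tasks ("TASK") into one contiguous buffer. The pointer arithmetic only works if append_elf_note() returns a pointer to the start of the descriptor area, which the caller then fills in place. A minimal sketch of such a helper, assuming the usual kexec-style note packing (the driver's real implementation is not shown in this hunk):

	static Elf_Word *append_elf_note(Elf_Word *buf, const char *name,
					 unsigned int type, size_t data_len)
	{
		/* Elf_Nhdr, then the NUL-terminated name, each padded to
		 * a 4-byte (Elf_Word) boundary; the descriptor follows. */
		Elf_Nhdr *note = (Elf_Nhdr *)buf;
		size_t namesz = strlen(name) + 1;

		note->n_namesz = namesz;
		note->n_descsz = data_len;
		note->n_type = type;
		buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf_Word));
		memcpy(buf, name, namesz);
		buf += DIV_ROUND_UP(namesz, sizeof(Elf_Word));
		return buf; /* caller writes data_len bytes here */
	}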
.. | .. |
626 | 643 | pr_err("Failed to add %s entry in Minidump\n", mdr->name);
627 | 644 | }
628 | 645 |
| 646 | +static int md_register_minidump_entry(char *name, u64 virt_addr,
| 647 | + u64 phys_addr, u64 size)
| 648 | +{
| 649 | + struct md_region md_entry;
| 650 | + int ret;
| 651 | +
| 652 | + strscpy(md_entry.name, name, sizeof(md_entry.name));
| 653 | + md_entry.virt_addr = virt_addr;
| 654 | + md_entry.phys_addr = phys_addr;
| 655 | + md_entry.size = size;
| 656 | + ret = rk_minidump_add_region(&md_entry);
| 657 | + if (ret < 0)
| 658 | + pr_err("Failed to add %s entry in Minidump\n", name);
| 659 | + return ret;
| 660 | +}
| 661 | +
| 662 | +static struct page *md_vmalloc_to_page(const void *vmalloc_addr)
| 663 | +{
| 664 | + unsigned long addr = (unsigned long) vmalloc_addr;
| 665 | + struct page *page = NULL;
| 666 | + pgd_t *pgd = pgd_offset_k(addr);
| 667 | + p4d_t *p4d;
| 668 | + pud_t *pud;
| 669 | + pmd_t *pmd;
| 670 | + pte_t *ptep, pte;
| 671 | +
| 672 | + if (pgd_none(*pgd))
| 673 | + return NULL;
| 674 | + p4d = p4d_offset(pgd, addr);
| 675 | + if (p4d_none(*p4d))
| 676 | + return NULL;
| 677 | + pud = pud_offset(p4d, addr);
| 678 | +
| 679 | + if (pud_none(*pud) || pud_bad(*pud))
| 680 | + return NULL;
| 681 | + pmd = pmd_offset(pud, addr);
| 682 | + if (pmd_none(*pmd) || pmd_bad(*pmd))
| 683 | + return NULL;
| 684 | +
| 685 | + ptep = pte_offset_map(pmd, addr);
| 686 | + pte = *ptep;
| 687 | + if (pte_present(pte))
| 688 | + page = pte_page(pte);
| 689 | + pte_unmap(ptep);
| 690 | + return page;
| 691 | +}
| 692 | + |
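
md_vmalloc_to_page() is an open-coded copy of the generic vmalloc_to_page(), presumably so the panic/lockup path avoids the VIRTUAL_BUG_ON()/WARN paths of the generic helper; note that pud_bad()/pmd_bad() make the copy return NULL for huge (pud/pmd-level) mappings rather than resolving them. Hypothetical usage, resolving the physical address behind a vmalloc'd buffer:

	struct page *pg = md_vmalloc_to_page(vaddr);
	u64 pa;

	if (pg)
		pa = page_to_phys(pg) + offset_in_page(vaddr);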
| 693 | +static bool md_is_kernel_address(u64 addr)
| 694 | +{
| 695 | + u32 data;
| 696 | + u64 phys_addr = 0;
| 697 | + struct page *page;
| 698 | +
| 699 | + if (!is_ttbr1_addr(addr))
| 700 | + return false;
| 701 | +
| 702 | + if (addr >= (u64)_text && addr < (u64)_end)
| 703 | + return false;
| 704 | +
| 705 | + if (__is_lm_address(addr)) {
| 706 | + phys_addr = virt_to_phys((void *)addr);
| 707 | + } else if (is_vmalloc_or_module_addr((const void *)addr)) {
| 708 | + page = md_vmalloc_to_page((const void *) addr);
| 709 | + if (page)
| 710 | + phys_addr = page_to_phys(page);
| 711 | + else
| 712 | + return false;
| 713 | + } else {
| 714 | + return false;
| 715 | + }
| 716 | +
| 717 | + if (!md_is_ddr_address(phys_addr))
| 718 | + return false;
| 719 | +
| 720 | + if (aarch64_insn_read((void *)addr, &data))
| 721 | + return false;
| 722 | + else
| 723 | + return true;
| 724 | +}
| 725 | + |
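
md_is_kernel_address() is the gatekeeper for the pointer chase: the value must sit in the kernel half of the address space (TTBR1), must not point into the kernel image itself (presumably already covered by a dedicated region), must translate to a DDR-backed physical page, and must survive a fault-safe read. That last check leans on an assumption worth stating: aarch64_insn_read() is used purely as a safe 4-byte probe, since it reads through a nofault copy (copy_from_kernel_nofault() on recent kernels) and reports failure instead of faulting on unmapped or device memory:

	u32 probe;

	/* returns 0 only for readable kernel memory */
	bool readable = !aarch64_insn_read((void *)addr, &probe);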
| 726 | +static int md_save_page(u64 addr, bool flush)
| 727 | +{
| 728 | + u64 phys_addr, virt_addr;
| 729 | + struct page *page;
| 730 | + char buf[32];
| 731 | + int ret;
| 732 | +
| 733 | + if (md_is_kernel_address(addr)) {
| 734 | + if (!md_is_in_the_region(addr)) {
| 735 | + virt_addr = addr & PAGE_MASK;
| 736 | + sprintf(buf, "%x", (u32)(virt_addr >> 12));
| 737 | +
| 738 | + if (__is_lm_address(virt_addr)) {
| 739 | + phys_addr = virt_to_phys((void *)virt_addr);
| 740 | + } else if (is_vmalloc_or_module_addr((const void *)virt_addr)) {
| 741 | + page = md_vmalloc_to_page((const void *) virt_addr);
| 742 | + phys_addr = page_to_phys(page);
| 743 | + } else {
| 744 | + return -1;
| 745 | + }
| 746 | +
| 747 | + ret = md_register_minidump_entry(buf, (uintptr_t)virt_addr,
| 748 | + phys_addr, PAGE_SIZE);
| 749 | + if (ret > 0 && flush)
| 750 | + rk_md_flush_dcache_area((void *)virt_addr, PAGE_SIZE);
| 751 | + } else {
| 752 | + if (flush)
| 753 | + rk_md_flush_dcache_area((void *)(addr & PAGE_MASK), PAGE_SIZE);
| 754 | + }
| 755 | + return 0;
| 756 | + }
| 757 | + return -1;
| 758 | +}
| 759 | +
| 760 | +static void md_save_pages(u64 addr, bool flush)
| 761 | +{
| 762 | + u64 *p, *end;
| 763 | +
| 764 | + if (!md_save_page(addr, flush)) {
| 765 | + addr &= ~0x7;
| 766 | + p = (u64 *)addr;
| 767 | + end = (u64 *)((addr & ~(PAGE_SIZE - 1)) + PAGE_SIZE);
| 768 | + while (p < end) {
| 769 | + if (!md_is_kernel_address((u64)p))
| 770 | + break;
| 771 | + md_save_page(*p++, flush);
| 772 | + }
| 773 | + }
| 774 | +}
| 775 | + |
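
md_save_pages() first saves the page containing addr; if that page was a valid kernel page, it then walks the rest of the page in 8-byte steps and saves whatever each slot points at, i.e. a pointer chase exactly one level deep. Each captured page becomes its own minidump region named after its PFN (the sprintf("%x", virt_addr >> 12) above), and md_is_in_the_region() keeps duplicates out. This is how the register paths below capture a stack page plus one hop of whatever it references:

	/* as used in rk_minidump_update_cpu_regs() below */
	md_save_pages(regs->sp, true);	/* stack page + one hop of pointees */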
629 | 776 | void rk_minidump_update_cpu_regs(struct pt_regs *regs)
630 | 777 | {
631 | 778 | int cpu = raw_smp_processor_id();
| 779 | + struct user_pt_regs *old_regs;
| 780 | + int i = 0;
| 781 | +
632 | 782 | struct elf_prstatus *epr = per_cpu(cpu_epr, cpu);
633 | 783 |
634 | 784 | if (!epr)
635 | 785 | return;
636 | 786 |
| 787 | + if (system_state == SYSTEM_RESTART)
| 788 | + return;
| 789 | +
| 790 | + old_regs = (struct user_pt_regs *)&epr->pr_reg;
| 791 | + /* if epr has already been saved, don't save it again in the panic notifier */
| 792 | + if (old_regs->sp != 0)
| 793 | + return;
| 794 | +
637 | 795 | memcpy((void *)&epr->pr_reg, (void *)regs, sizeof(elf_gregset_t));
638 | 796 | rk_md_flush_dcache_area((void *)&epr->pr_reg, sizeof(elf_gregset_t));
639 | 797 | rk_md_flush_dcache_area((void *)(regs->sp & ~(PAGE_SIZE - 1)), PAGE_SIZE);
| 798 | +
| 799 | + /* dump sp */
| 800 | + md_save_pages(regs->sp, true);
| 801 | +
| 802 | + /* chase pages referenced by x0-x28 (x29 is fp, x30 is lr) */
| 803 | + for (i = 0; i < 29; i++)
| 804 | + md_save_pages(regs->regs[i], true);
640 | 805 | }
641 | 806 | EXPORT_SYMBOL(rk_minidump_update_cpu_regs);
642 | 807 |
.. | .. |
1028 | 1193 |
1029 | 1194 | seq_buf_printf(md_cntxt_seq_buf, "PANIC CPU : %d\n",
1030 | 1195 | raw_smp_processor_id());
1031 | | - md_reg_context_data(&regs);
| 1196 | + if (in_interrupt())
| 1197 | + md_reg_context_data(get_irq_regs());
| 1198 | + else
| 1199 | + md_reg_context_data(&regs);
1032 | 1200 | }
1033 | 1201 | |
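
The in_interrupt() branch is what the new <asm-generic/irq_regs.h> include at the top of the file is for: get_irq_regs() returns the per-CPU register snapshot taken at IRQ entry, i.e. the frame that was interrupted, whereas the locally captured regs would only describe the panic notifier itself. The generic implementation is essentially:

	DECLARE_PER_CPU(struct pt_regs *, __irq_regs);

	static inline struct pt_regs *get_irq_regs(void)
	{
		return __this_cpu_read(__irq_regs);
	}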
1034 | 1202 | static int md_die_context_notify(struct notifier_block *self,
.. | .. |
1055 | 1223 | .priority = INT_MAX - 2, /* < rk watchdog die notifier */
1056 | 1224 | };
1057 | 1225 | #endif
| 1226 | +
| 1227 | +static int rk_minidump_collect_hang_task(void)
| 1228 | +{
| 1229 | + struct task_struct *g, *p;
| 1230 | + struct elf_prstatus *epr;
| 1231 | + struct user_pt_regs *regs;
| 1232 | + int idx = 0, i = 0;
| 1233 | +
| 1234 | + for_each_process_thread(g, p) {
| 1235 | + touch_nmi_watchdog();
| 1236 | + touch_all_softlockup_watchdogs();
| 1237 | + if (p->state == TASK_UNINTERRUPTIBLE && p->state != TASK_IDLE) {
| 1238 | + epr = epr_hang_task[idx++];
| 1239 | + regs = (struct user_pt_regs *)&epr->pr_reg;
| 1240 | + regs->regs[19] = (unsigned long)(p->thread.cpu_context.x19);
| 1241 | + regs->regs[20] = (unsigned long)(p->thread.cpu_context.x20);
| 1242 | + regs->regs[21] = (unsigned long)(p->thread.cpu_context.x21);
| 1243 | + regs->regs[22] = (unsigned long)(p->thread.cpu_context.x22);
| 1244 | + regs->regs[23] = (unsigned long)(p->thread.cpu_context.x23);
| 1245 | + regs->regs[24] = (unsigned long)(p->thread.cpu_context.x24);
| 1246 | + regs->regs[25] = (unsigned long)(p->thread.cpu_context.x25);
| 1247 | + regs->regs[26] = (unsigned long)(p->thread.cpu_context.x26);
| 1248 | + regs->regs[27] = (unsigned long)(p->thread.cpu_context.x27);
| 1249 | + regs->regs[28] = (unsigned long)(p->thread.cpu_context.x28);
| 1250 | + regs->regs[29] = (unsigned long)(p->thread.cpu_context.fp);
| 1251 | + regs->sp = (unsigned long)(p->thread.cpu_context.sp);
| 1252 | + regs->pc = (unsigned long)p->thread.cpu_context.pc;
| 1253 | + md_save_pages(regs->sp, true);
| 1254 | + for (i = 19; i < 29; i++)
| 1255 | + md_save_pages(regs->regs[i], true);
| 1256 | + rk_md_flush_dcache_area((void *)epr, sizeof(struct elf_prstatus));
| 1257 | + }
| 1258 | + if (idx >= ARRAY_SIZE(epr_hang_task))
| 1259 | + return 0;
| 1260 | + }
| 1261 | + return 0;
| 1262 | +}
1058 | 1263 | |
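
Only x19-x28, fp, sp and pc can be reconstructed for a sleeping task: arm64's cpu_switch_to() saves exactly the callee-saved state into task->thread.cpu_context, and everything else is dead across a context switch. For reference, the structure being copied from (arch/arm64/include/asm/processor.h):

	struct cpu_context {
		unsigned long x19;
		unsigned long x20;
		unsigned long x21;
		unsigned long x22;
		unsigned long x23;
		unsigned long x24;
		unsigned long x25;
		unsigned long x26;
		unsigned long x27;
		unsigned long x28;
		unsigned long fp;
		unsigned long sp;
		unsigned long pc;
	};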
1059 | 1264 | static int md_panic_handler(struct notifier_block *this,
1060 | 1265 | unsigned long event, void *ptr)
.. | .. |
1093 | 1298 | if (md_dma_buf_procs_addr)
1094 | 1299 | md_dma_buf_procs(md_dma_buf_procs_addr, md_dma_buf_procs_size);
1095 | 1300 |
| 1301 | + rk_minidump_collect_hang_task();
| 1302 | +
1096 | 1303 | rk_minidump_flush_elfheader();
1097 | 1304 | md_in_oops_handler = false;
1098 | 1305 | return NOTIFY_DONE;
.. | .. |
1102 | 1309 | .notifier_call = md_panic_handler,
1103 | 1310 | .priority = INT_MAX - 2,
1104 | 1311 | };
1105 | | -
1106 | | -static int md_register_minidump_entry(char *name, u64 virt_addr,
1107 | | - u64 phys_addr, u64 size)
1108 | | -{
1109 | | - struct md_region md_entry;
1110 | | - int ret;
1111 | | -
1112 | | - strscpy(md_entry.name, name, sizeof(md_entry.name));
1113 | | - md_entry.virt_addr = virt_addr;
1114 | | - md_entry.phys_addr = phys_addr;
1115 | | - md_entry.size = size;
1116 | | - ret = rk_minidump_add_region(&md_entry);
1117 | | - if (ret < 0)
1118 | | - pr_err("Failed to add %s entry in Minidump\n", name);
1119 | | - return ret;
1120 | | -}
1121 | 1312 |
1122 | 1313 | static int md_register_panic_entries(int num_pages, char *name,
1123 | 1314 | struct seq_buf **global_buf)
.. | .. |
1249 | 1440 | }
1250 | 1441 | #endif /* CONFIG_ROCKCHIP_MINIDUMP_PANIC_DUMP */
1251 | 1442 |
| 1443 | +#ifdef CONFIG_HARDLOCKUP_DETECTOR
| 1444 | +int rk_minidump_hardlock_notify(struct notifier_block *nb, unsigned long event,
| 1445 | + void *p)
| 1446 | +{
| 1447 | + struct elf_prstatus *epr;
| 1448 | + struct user_pt_regs *regs;
| 1449 | + unsigned long hardlock_cpu = event;
| 1450 | +#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
| 1451 | + int i = 0;
| 1452 | + struct md_stack_cpu_data *md_stack_cpu_d;
| 1453 | + struct md_region *mdr;
| 1454 | +#endif
| 1455 | +
| 1456 | + if (hardlock_cpu >= num_possible_cpus())
| 1457 | + return NOTIFY_DONE;
| 1458 | +
| 1459 | +#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
| 1460 | + md_stack_cpu_d = &per_cpu(md_stack_data, hardlock_cpu);
| 1461 | + for (i = 0; i < STACK_NUM_PAGES; i++) {
| 1462 | + mdr = &md_stack_cpu_d->stack_mdr[i];
| 1463 | + if (md_is_kernel_address(mdr->virt_addr))
| 1464 | + rk_md_flush_dcache_area((void *)mdr->virt_addr, mdr->size);
| 1465 | + }
| 1466 | +#endif
| 1467 | + epr = per_cpu(cpu_epr, hardlock_cpu);
| 1468 | + if (!epr)
| 1469 | + return NOTIFY_DONE;
| 1470 | + regs = (struct user_pt_regs *)&epr->pr_reg;
| 1471 | + regs->pc = (u64)p;
| 1472 | +#ifdef CONFIG_ROCKCHIP_DYN_MINIDUMP_STACK
| 1473 | + regs->sp = mdr->virt_addr + mdr->size;
| 1474 | +#endif
| 1475 | + rk_md_flush_dcache_area((void *)epr, sizeof(struct elf_prstatus));
| 1476 | + return NOTIFY_OK;
| 1477 | +}
| 1478 | +#endif
| 1479 | + |
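
The registration of rk_minidump_hardlock_notify() is outside this hunk. A sketch of the expected wiring, assuming the hardlockup detector exposes an atomic notifier chain that passes the locked-up CPU as event and the breaking PC as the void * payload; the chain symbol named here is hypothetical:

	static struct notifier_block rk_md_hardlock_nb = {
		.notifier_call = rk_minidump_hardlock_notify,
	};

	/* hardlock_notify_list is a placeholder name, not a known export */
	atomic_notifier_chain_register(&hardlock_notify_list, &rk_md_hardlock_nb);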
1252 | 1480 | int rk_minidump_log_init(void)
1253 | 1481 | {
1254 | 1482 | is_vmap_stack = IS_ENABLED(CONFIG_VMAP_STACK);