.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 |  * File: mca.c
3 | 4 |  * Purpose: Generic MCA handling layer
.. | .. |
77 | 78 | #include <linux/sched/task.h>
78 | 79 | #include <linux/interrupt.h>
79 | 80 | #include <linux/irq.h>
80 | | -#include <linux/bootmem.h>
| 81 | +#include <linux/memblock.h>
81 | 82 | #include <linux/acpi.h>
82 | 83 | #include <linux/timer.h>
83 | 84 | #include <linux/module.h>
.. | .. |
90 | 91 | #include <linux/gfp.h>
91 | 92 |
92 | 93 | #include <asm/delay.h>
93 | | -#include <asm/machvec.h>
94 | 94 | #include <asm/meminit.h>
95 | 95 | #include <asm/page.h>
96 | 96 | #include <asm/ptrace.h>
.. | .. |
104 | 104 |
105 | 105 | #include "mca_drv.h"
106 | 106 | #include "entry.h"
| 107 | +#include "irq.h"
107 | 108 |
108 | 109 | #if defined(IA64_MCA_DEBUG_INFO)
109 | 110 | # define IA64_MCA_DEBUG(fmt...) printk(fmt)
.. | .. |
148 | 149 | #define CPE_HISTORY_LENGTH 5
149 | 150 | #define CMC_HISTORY_LENGTH 5
150 | 151 |
151 | | -#ifdef CONFIG_ACPI
152 | 152 | static struct timer_list cpe_poll_timer;
153 | | -#endif
154 | 153 | static struct timer_list cmc_poll_timer;
155 | 154 | /*
156 | 155 |  * This variable tells whether we are currently in polling mode.
.. | .. |
359 | 358 |
360 | 359 | static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
361 | 360 |
362 | | -#define IA64_LOG_ALLOCATE(it, size) \
363 | | -        {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
364 | | -        (ia64_err_rec_t *)alloc_bootmem(size); \
365 | | -        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
366 | | -        (ia64_err_rec_t *)alloc_bootmem(size);}
367 | 361 | #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
368 | 362 | #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
369 | 363 | #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
.. | .. |
377 | 371 | #define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
378 | 372 | #define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
379 | 373 | #define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
| 374 | +
| 375 | +static inline void ia64_log_allocate(int it, u64 size)
| 376 | +{
| 377 | +        ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
| 378 | +                (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
| 379 | +        if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
| 380 | +                panic("%s: Failed to allocate %llu bytes\n", __func__, size);
| 381 | +
| 382 | +        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
| 383 | +                (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
| 384 | +        if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
| 385 | +                panic("%s: Failed to allocate %llu bytes\n", __func__, size);
| 386 | +}
380 | 387 |
381 | 388 | /*
382 | 389 |  * ia64_log_init
.. | .. |
399 | 406 |                 return;
400 | 407 |
401 | 408 |         // set up OS data structures to hold error info
402 | | -        IA64_LOG_ALLOCATE(sal_info_type, max_size);
403 | | -        memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
404 | | -        memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
| 409 | +        ia64_log_allocate(sal_info_type, max_size);
405 | 410 | }
406 | 411 |
407 | 412 | /*
.. | .. |
525 | 530 | }
526 | 531 | EXPORT_SYMBOL_GPL(mca_recover_range);
527 | 532 |
528 | | -#ifdef CONFIG_ACPI
529 | | -
530 | 533 | int cpe_vector = -1;
531 | 534 | int ia64_cpe_irq = -1;
532 | 535 |
.. | .. |
588 | 591 |         return IRQ_HANDLED;
589 | 592 | }
590 | 593 |
591 | | -#endif /* CONFIG_ACPI */
592 | | -
593 | | -#ifdef CONFIG_ACPI
594 | 594 | /*
595 | 595 |  * ia64_mca_register_cpev
596 | 596 |  *
.. | .. |
618 | 618 |         IA64_MCA_DEBUG("%s: corrected platform error "
619 | 619 |                        "vector %#x registered\n", __func__, cpev);
620 | 620 | }
621 | | -#endif /* CONFIG_ACPI */
622 | 621 |
623 | 622 | /*
624 | 623 |  * ia64_mca_cmc_vector_setup
.. | .. |
737 | 736 | static void
738 | 737 | ia64_mca_wakeup(int cpu)
739 | 738 | {
740 | | -        platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
| 739 | +        ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
741 | 740 | }
742 | 741 |
743 | 742 | /*
.. | .. |
1483 | 1482 |                 cpuid = cpumask_next(cpuid+1, cpu_online_mask);
1484 | 1483 |
1485 | 1484 |         if (cpuid < nr_cpu_ids) {
1486 | | -                platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
| 1485 | +                ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1487 | 1486 |         } else {
1488 | 1487 |                 /* If no log record, switch out of polling mode */
1489 | 1488 |                 if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
.. | .. |
1516 | 1515 | ia64_mca_cmc_poll (struct timer_list *unused)
1517 | 1516 | {
1518 | 1517 |         /* Trigger a CMC interrupt cascade */
1519 | | -        platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
| 1518 | +        ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
1520 | 1519 |                 IA64_IPI_DM_INT, 0);
1521 | 1520 | }
1522 | 1521 |
.. | .. |
1533 | 1532 |  * Outputs
1534 | 1533 |  *      handled
1535 | 1534 |  */
1536 | | -#ifdef CONFIG_ACPI
1537 | | -
1538 | 1535 | static irqreturn_t
1539 | 1536 | ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
1540 | 1537 | {
.. | .. |
1553 | 1550 |                 cpuid = cpumask_next(cpuid+1, cpu_online_mask);
1554 | 1551 |
1555 | 1552 |         if (cpuid < NR_CPUS) {
1556 | | -                platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
| 1553 | +                ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1557 | 1554 |         } else {
1558 | 1555 |                 /*
1559 | 1556 |                  * If a log was recorded, increase our polling frequency,
.. | .. |
1593 | 1590 | ia64_mca_cpe_poll (struct timer_list *unused)
1594 | 1591 | {
1595 | 1592 |         /* Trigger a CPE interrupt cascade */
1596 | | -        platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
| 1593 | +        ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
1597 | 1594 |                 IA64_IPI_DM_INT, 0);
1598 | 1595 | }
1599 | | -
1600 | | -#endif /* CONFIG_ACPI */
1601 | 1596 |
1602 | 1597 | static int
1603 | 1598 | default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
.. | .. |
1636 | 1631 |         if (read_trylock(&tasklist_lock)) {
1637 | 1632 |                 do_each_thread (g, t) {
1638 | 1633 |                         printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
1639 | | -                        show_stack(t, NULL);
| 1634 | +                        show_stack(t, NULL, KERN_DEFAULT);
1640 | 1635 |                 } while_each_thread (g, t);
1641 | 1636 |                 read_unlock(&tasklist_lock);
1642 | 1637 |         }
.. | .. |
1772 | 1767 |
1773 | 1768 | __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
1774 | 1769 |
1775 | | -static struct irqaction cmci_irqaction = {
1776 | | -        .handler = ia64_mca_cmc_int_handler,
1777 | | -        .name = "cmc_hndlr"
1778 | | -};
1779 | | -
1780 | | -static struct irqaction cmcp_irqaction = {
1781 | | -        .handler = ia64_mca_cmc_int_caller,
1782 | | -        .name = "cmc_poll"
1783 | | -};
1784 | | -
1785 | | -static struct irqaction mca_rdzv_irqaction = {
1786 | | -        .handler = ia64_mca_rendez_int_handler,
1787 | | -        .name = "mca_rdzv"
1788 | | -};
1789 | | -
1790 | | -static struct irqaction mca_wkup_irqaction = {
1791 | | -        .handler = ia64_mca_wakeup_int_handler,
1792 | | -        .name = "mca_wkup"
1793 | | -};
1794 | | -
1795 | | -#ifdef CONFIG_ACPI
1796 | | -static struct irqaction mca_cpe_irqaction = {
1797 | | -        .handler = ia64_mca_cpe_int_handler,
1798 | | -        .name = "cpe_hndlr"
1799 | | -};
1800 | | -
1801 | | -static struct irqaction mca_cpep_irqaction = {
1802 | | -        .handler = ia64_mca_cpe_int_caller,
1803 | | -        .name = "cpe_poll"
1804 | | -};
1805 | | -#endif /* CONFIG_ACPI */
1806 | | -
1807 | 1770 | /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on
1808 | 1771 |  * these stacks can never sleep, they cannot return from the kernel to user
1809 | 1772 |  * space, they do not appear in a normal ps listing. So there is no need to
.. | .. |
1835 | 1798 | /* Caller prevents this from being called after init */
1836 | 1799 | static void * __ref mca_bootmem(void)
1837 | 1800 | {
1838 | | -        return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
1839 | | -                        KERNEL_STACK_SIZE, 0);
| 1801 | +        return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
1840 | 1802 | }
1841 | 1803 |
1842 | 1804 | /* Do per-CPU MCA-related initialization. */
.. | .. |
2065 | 2027 |          * Configure the CMCI/P vector and handler. Interrupts for CMC are
2066 | 2028 |          * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
2067 | 2029 |          */
2068 | | -        register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
2069 | | -        register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
| 2030 | +        register_percpu_irq(IA64_CMC_VECTOR, ia64_mca_cmc_int_handler, 0,
| 2031 | +                            "cmc_hndlr");
| 2032 | +        register_percpu_irq(IA64_CMCP_VECTOR, ia64_mca_cmc_int_caller, 0,
| 2033 | +                            "cmc_poll");
2070 | 2034 |         ia64_mca_cmc_vector_setup();    /* Setup vector on BSP */
2071 | 2035 |
2072 | 2036 |         /* Setup the MCA rendezvous interrupt vector */
2073 | | -        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
| 2037 | +        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, ia64_mca_rendez_int_handler,
| 2038 | +                            0, "mca_rdzv");
2074 | 2039 |
2075 | 2040 |         /* Setup the MCA wakeup interrupt vector */
2076 | | -        register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
| 2041 | +        register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, ia64_mca_wakeup_int_handler,
| 2042 | +                            0, "mca_wkup");
2077 | 2043 |
2078 | | -#ifdef CONFIG_ACPI
2079 | 2044 |         /* Setup the CPEI/P handler */
2080 | | -        register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2081 | | -#endif
| 2045 | +        register_percpu_irq(IA64_CPEP_VECTOR, ia64_mca_cpe_int_caller, 0,
| 2046 | +                            "cpe_poll");
2082 | 2047 | }
2083 | 2048 |
2084 | 2049 | /*
.. | .. |
2106 | 2071 |                                   ia64_mca_cpu_online, NULL);
2107 | 2072 |         IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
2108 | 2073 |
2109 | | -#ifdef CONFIG_ACPI
2110 | 2074 |         /* Setup the CPEI/P vector and handler */
2111 | 2075 |         cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
2112 | 2076 |         timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0);
.. | .. |
2120 | 2084 |                 if (irq > 0) {
2121 | 2085 |                         cpe_poll_enabled = 0;
2122 | 2086 |                         irq_set_status_flags(irq, IRQ_PER_CPU);
2123 | | -                        setup_irq(irq, &mca_cpe_irqaction);
| 2087 | +                        if (request_irq(irq, ia64_mca_cpe_int_handler,
| 2088 | +                                        0, "cpe_hndlr", NULL))
| 2089 | +                                pr_err("Failed to register cpe_hndlr interrupt\n");
2124 | 2090 |                         ia64_cpe_irq = irq;
2125 | 2091 |                         ia64_mca_register_cpev(cpe_vector);
2126 | 2092 |                         IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
.. | .. |
2137 | 2103 |                         IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
2138 | 2104 |                 }
2139 | 2105 |         }
2140 | | -#endif
2141 | 2106 |
2142 | 2107 |         return 0;
2143 | 2108 | }