forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/arch/ia64/kernel/mca.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * File:	mca.c
  * Purpose:	Generic MCA handling layer
@@ -77,7 +78,7 @@
 #include <linux/sched/task.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/acpi.h>
 #include <linux/timer.h>
 #include <linux/module.h>
@@ -90,7 +91,6 @@
 #include <linux/gfp.h>
 
 #include <asm/delay.h>
-#include <asm/machvec.h>
 #include <asm/meminit.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -104,6 +104,7 @@
 
 #include "mca_drv.h"
 #include "entry.h"
+#include "irq.h"
 
 #if defined(IA64_MCA_DEBUG_INFO)
 # define IA64_MCA_DEBUG(fmt...)	printk(fmt)
@@ -148,9 +149,7 @@
 #define CPE_HISTORY_LENGTH    5
 #define CMC_HISTORY_LENGTH    5
 
-#ifdef CONFIG_ACPI
 static struct timer_list cpe_poll_timer;
-#endif
 static struct timer_list cmc_poll_timer;
 /*
  * This variable tells whether we are currently in polling mode.
....@@ -359,11 +358,6 @@
359358
360359 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
361360
362
-#define IA64_LOG_ALLOCATE(it, size) \
363
- {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
364
- (ia64_err_rec_t *)alloc_bootmem(size); \
365
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
366
- (ia64_err_rec_t *)alloc_bootmem(size);}
367361 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
368362 #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
369363 #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -377,6 +371,19 @@
 #define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
 #define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 #define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
+
+static inline void ia64_log_allocate(int it, u64 size)
+{
+	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+
+	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
+		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+	if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
+		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+}
 
 /*
  * ia64_log_init
@@ -399,9 +406,7 @@
 		return;
 
 	// set up OS data structures to hold error info
-	IA64_LOG_ALLOCATE(sal_info_type, max_size);
-	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
-	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
+	ia64_log_allocate(sal_info_type, max_size);
 }
 
 /*
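Note on this hunk: the new ia64_log_allocate() drops the two memset() calls that followed IA64_LOG_ALLOCATE because memblock_alloc() hands back zeroed memory, and it adds NULL checks that the old path never needed (alloc_bootmem() panicked internally on failure, while memblock_alloc() returns NULL). A minimal sketch of the migration pattern; example_slot and example_init are hypothetical names, not from this file:

    #include <linux/memblock.h>

    static void *example_slot;

    static void __init example_init(unsigned long size)
    {
            /* memblock_alloc() returns zeroed, SMP_CACHE_BYTES-aligned
             * memory, so no memset() is needed afterwards. */
            example_slot = memblock_alloc(size, SMP_CACHE_BYTES);

            /* Unlike alloc_bootmem(), memblock_alloc() can return NULL;
             * early-boot callers typically panic, as the new helper does. */
            if (!example_slot)
                    panic("%s: Failed to allocate %lu bytes\n", __func__, size);
    }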
@@ -525,8 +530,6 @@
 }
 EXPORT_SYMBOL_GPL(mca_recover_range);
 
-#ifdef CONFIG_ACPI
-
 int cpe_vector = -1;
 int ia64_cpe_irq = -1;
 
@@ -588,9 +591,6 @@
 	return IRQ_HANDLED;
 }
 
-#endif /* CONFIG_ACPI */
-
-#ifdef CONFIG_ACPI
 /*
  * ia64_mca_register_cpev
  *
@@ -618,7 +618,6 @@
 	IA64_MCA_DEBUG("%s: corrected platform error "
 		       "vector %#x registered\n", __func__, cpev);
 }
-#endif /* CONFIG_ACPI */
 
 /*
  * ia64_mca_cmc_vector_setup
@@ -737,7 +736,7 @@
 static void
 ia64_mca_wakeup(int cpu)
 {
-	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
+	ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
 /*
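platform_send_ipi() was an indirection through the ia64 machine-vector layer, which this series removes (hence the dropped #include <asm/machvec.h> above); callers now use the generic IPI primitive directly. The same one-line substitution repeats in the CMC/CPE cascade and poll paths below. Sketch of the assumed prototype, per arch/ia64/kernel/smp.c:

    /* Send an inter-processor interrupt: target CPU, interrupt vector,
     * delivery mode, and a redirect flag (0 here, i.e. no redirection). */
    void ia64_send_ipi(int cpu, int vector, int delivery_mode, int redirect);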
@@ -1483,7 +1482,7 @@
 	cpuid = cpumask_next(cpuid+1, cpu_online_mask);
 
 	if (cpuid < nr_cpu_ids) {
-		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+		ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 	} else {
 		/* If no log record, switch out of polling mode */
 		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
@@ -1516,7 +1515,7 @@
 ia64_mca_cmc_poll (struct timer_list *unused)
 {
 	/* Trigger a CMC interrupt cascade */
-	platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
+	ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
 							IA64_IPI_DM_INT, 0);
 }
 
@@ -1533,8 +1532,6 @@
  * Outputs
  *	handled
  */
-#ifdef CONFIG_ACPI
-
 static irqreturn_t
 ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
 {
@@ -1553,7 +1550,7 @@
 	cpuid = cpumask_next(cpuid+1, cpu_online_mask);
 
 	if (cpuid < NR_CPUS) {
-		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+		ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 	} else {
 		/*
 		 * If a log was recorded, increase our polling frequency,
@@ -1593,11 +1590,9 @@
 ia64_mca_cpe_poll (struct timer_list *unused)
 {
 	/* Trigger a CPE interrupt cascade */
-	platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
+	ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
 							IA64_IPI_DM_INT, 0);
 }
-
-#endif /* CONFIG_ACPI */
 
 static int
 default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
@@ -1636,7 +1631,7 @@
 	if (read_trylock(&tasklist_lock)) {
 		do_each_thread (g, t) {
 			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
-			show_stack(t, NULL);
+			show_stack(t, NULL, KERN_DEFAULT);
 		} while_each_thread (g, t);
 		read_unlock(&tasklist_lock);
 	}
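show_stack() gained a log-level argument so backtraces can be printed at a caller-chosen severity; passing KERN_DEFAULT preserves the previous behaviour. Assumed current prototype (as declared in <linux/sched/debug.h>):

    /* The third argument is a printk severity prefix such as
     * KERN_DEFAULT or KERN_EMERG. */
    void show_stack(struct task_struct *task, unsigned long *sp,
                    const char *loglevel);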
@@ -1772,38 +1767,6 @@
 
 __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
 
-static struct irqaction cmci_irqaction = {
-	.handler =	ia64_mca_cmc_int_handler,
-	.name =		"cmc_hndlr"
-};
-
-static struct irqaction cmcp_irqaction = {
-	.handler =	ia64_mca_cmc_int_caller,
-	.name =		"cmc_poll"
-};
-
-static struct irqaction mca_rdzv_irqaction = {
-	.handler =	ia64_mca_rendez_int_handler,
-	.name =		"mca_rdzv"
-};
-
-static struct irqaction mca_wkup_irqaction = {
-	.handler =	ia64_mca_wakeup_int_handler,
-	.name =		"mca_wkup"
-};
-
-#ifdef CONFIG_ACPI
-static struct irqaction mca_cpe_irqaction = {
-	.handler =	ia64_mca_cpe_int_handler,
-	.name =		"cpe_hndlr"
-};
-
-static struct irqaction mca_cpep_irqaction = {
-	.handler =	ia64_mca_cpe_int_caller,
-	.name =		"cpe_poll"
-};
-#endif /* CONFIG_ACPI */
-
 /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on
  * these stacks can never sleep, they cannot return from the kernel to user
  * space, they do not appear in a normal ps listing. So there is no need to
@@ -1835,8 +1798,7 @@
 /* Caller prevents this from being called after init */
 static void * __ref mca_bootmem(void)
 {
-	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
-			    KERNEL_STACK_SIZE, 0);
+	return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
 }
 
 /* Do per-CPU MCA-related initialization. */
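Same bootmem-to-memblock substitution as above, but note the argument change: __alloc_bootmem() took a third "goal" parameter (preferred minimum physical address, 0 meaning anywhere), which memblock_alloc() does not have. Assumed prototypes, for comparison:

    /* Old bootmem form: size, alignment, goal. */
    void *__alloc_bootmem(unsigned long size, unsigned long align,
                          unsigned long goal);

    /* memblock replacement: size and alignment only; here the
     * KERNEL_STACK_SIZE alignment preserves the old placement. */
    void *memblock_alloc(phys_addr_t size, phys_addr_t align);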
@@ -2065,20 +2027,23 @@
 	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
 	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
 	 */
-	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
-	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
+	register_percpu_irq(IA64_CMC_VECTOR, ia64_mca_cmc_int_handler, 0,
+			    "cmc_hndlr");
+	register_percpu_irq(IA64_CMCP_VECTOR, ia64_mca_cmc_int_caller, 0,
+			    "cmc_poll");
 	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */
 
 	/* Setup the MCA rendezvous interrupt vector */
-	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
+	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, ia64_mca_rendez_int_handler,
+			    0, "mca_rdzv");
 
 	/* Setup the MCA wakeup interrupt vector */
-	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
+	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, ia64_mca_wakeup_int_handler,
+			    0, "mca_wkup");
 
-#ifdef CONFIG_ACPI
 	/* Setup the CPEI/P handler */
-	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
-#endif
+	register_percpu_irq(IA64_CPEP_VECTOR, ia64_mca_cpe_int_caller, 0,
+			    "cpe_poll");
 }
 
 /*
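With the static struct irqaction tables deleted further up, register_percpu_irq() now takes the handler, flags, and name directly instead of a pre-built irqaction. Sketch of the signature these call sites imply (assumed, matching arch/ia64/kernel/irq_ia64.c):

    /* Binds a handler to a per-CPU interrupt vector; the fields that
     * used to be carried in struct irqaction become plain arguments. */
    void register_percpu_irq(ia64_vector vec, irq_handler_t handler,
                             unsigned long flags, const char *name);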
@@ -2106,7 +2071,6 @@
 			  ia64_mca_cpu_online, NULL);
 	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
 
-#ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
 	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
 	timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0);
@@ -2120,7 +2084,9 @@
 		if (irq > 0) {
 			cpe_poll_enabled = 0;
 			irq_set_status_flags(irq, IRQ_PER_CPU);
-			setup_irq(irq, &mca_cpe_irqaction);
+			if (request_irq(irq, ia64_mca_cpe_int_handler,
+					0, "cpe_hndlr", NULL))
+				pr_err("Failed to register cpe_hndlr interrupt\n");
 			ia64_cpe_irq = irq;
 			ia64_mca_register_cpev(cpe_vector);
 			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
@@ -2137,7 +2103,6 @@
 			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
 		}
 	}
-#endif
 
 	return 0;
 }