@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
@@ -24,49 +13,80 @@
 #include <asm/kvm_arm.h>
 #include <asm/sysreg.h>
 
-#define NR_IPI 7
-
 typedef struct {
         unsigned int __softirq_pending;
-        unsigned int ipi_irqs[NR_IPI];
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#define arch_irq_stat_cpu smp_irq_stat_cpu
 
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
 
 struct nmi_ctx {
         u64 hcr;
+        unsigned int cnt;
 };
 
 DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
 
-#define arch_nmi_enter() \
-        do { \
-                if (is_kernel_in_hyp_mode()) { \
-                        struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
-                        nmi_ctx->hcr = read_sysreg(hcr_el2); \
-                        if (!(nmi_ctx->hcr & HCR_TGE)) { \
-                                write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
-                                isb(); \
-                        } \
-                } \
-        } while (0)
+#define arch_nmi_enter() \
+do { \
+        struct nmi_ctx *___ctx; \
+        u64 ___hcr; \
+ \
+        if (!is_kernel_in_hyp_mode()) \
+                break; \
+ \
+        ___ctx = this_cpu_ptr(&nmi_contexts); \
+        if (___ctx->cnt) { \
+                ___ctx->cnt++; \
+                break; \
+        } \
+ \
+        ___hcr = read_sysreg(hcr_el2); \
+        if (!(___hcr & HCR_TGE)) { \
+                write_sysreg(___hcr | HCR_TGE, hcr_el2); \
+                isb(); \
+        } \
+        /* \
+         * Make sure the sysreg write is performed before ___ctx->cnt \
+         * is set to 1. NMIs that see cnt == 1 will rely on us. \
+         */ \
+        barrier(); \
+        ___ctx->cnt = 1; \
+        /* \
+         * Make sure ___ctx->cnt is set before we save ___hcr. We \
+         * don't want ___ctx->hcr to be overwritten. \
+         */ \
+        barrier(); \
+        ___ctx->hcr = ___hcr; \
+} while (0)
 
-#define arch_nmi_exit() \
-        do { \
-                if (is_kernel_in_hyp_mode()) { \
-                        struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
-                        if (!(nmi_ctx->hcr & HCR_TGE)) \
-                                write_sysreg(nmi_ctx->hcr, hcr_el2); \
-                } \
-        } while (0)
+#define arch_nmi_exit() \
+do { \
+        struct nmi_ctx *___ctx; \
+        u64 ___hcr; \
+ \
+        if (!is_kernel_in_hyp_mode()) \
+                break; \
+ \
+        ___ctx = this_cpu_ptr(&nmi_contexts); \
+        ___hcr = ___ctx->hcr; \
+        /* \
+         * Make sure we read ___ctx->hcr before we release \
+         * ___ctx->cnt as it makes ___ctx->hcr updatable again. \
+         */ \
+        barrier(); \
+        ___ctx->cnt--; \
+        /* \
+         * Make sure ___ctx->cnt release is visible before we \
+         * restore the sysreg. Otherwise a new NMI occurring \
+         * right after write_sysreg() can be fooled and think \
+         * we secured things for it. \
+         */ \
+        barrier(); \
+        if (!___ctx->cnt && !(___hcr & HCR_TGE)) \
+                write_sysreg(___hcr, hcr_el2); \
+} while (0)
 
 static inline void ack_bad_irq(unsigned int irq)
 {