forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/arm64/include/asm/hardirq.h
@@ -1,17 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
@@ -24,49 +13,77 @@
 #include <asm/kvm_arm.h>
 #include <asm/sysreg.h>
 
-#define NR_IPI	7
-
-typedef struct {
-	unsigned int __softirq_pending;
-	unsigned int ipi_irqs[NR_IPI];
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
-#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
-
-u64 smp_irq_stat_cpu(unsigned int cpu);
-#define arch_irq_stat_cpu	smp_irq_stat_cpu
+#define ack_bad_irq ack_bad_irq
+#include <asm-generic/hardirq.h>
 
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
 struct nmi_ctx {
 	u64 hcr;
+	unsigned int cnt;
 };
 
 DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
 
-#define arch_nmi_enter()						\
-	do {								\
-		if (is_kernel_in_hyp_mode()) {				\
-			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
-			nmi_ctx->hcr = read_sysreg(hcr_el2);		\
-			if (!(nmi_ctx->hcr & HCR_TGE)) {		\
-				write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2);	\
-				isb();					\
-			}						\
-		}							\
-	} while (0)
+#define arch_nmi_enter()						\
+do {									\
+	struct nmi_ctx *___ctx;						\
+	u64 ___hcr;							\
+									\
+	if (!is_kernel_in_hyp_mode())					\
+		break;							\
+									\
+	___ctx = this_cpu_ptr(&nmi_contexts);				\
+	if (___ctx->cnt) {						\
+		___ctx->cnt++;						\
+		break;							\
+	}								\
+									\
+	___hcr = read_sysreg(hcr_el2);					\
+	if (!(___hcr & HCR_TGE)) {					\
+		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
+		isb();							\
+	}								\
+	/*								\
+	 * Make sure the sysreg write is performed before ___ctx->cnt	\
+	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
+	 */								\
+	barrier();							\
+	___ctx->cnt = 1;						\
+	/*								\
+	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
+	 * don't want ___ctx->hcr to be overwritten.			\
+	 */								\
+	barrier();							\
+	___ctx->hcr = ___hcr;						\
+} while (0)
 
-#define arch_nmi_exit()							\
-	do {								\
-		if (is_kernel_in_hyp_mode()) {				\
-			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts);	\
-			if (!(nmi_ctx->hcr & HCR_TGE))			\
-				write_sysreg(nmi_ctx->hcr, hcr_el2);	\
-		}							\
-	} while (0)
+#define arch_nmi_exit()							\
+do {									\
+	struct nmi_ctx *___ctx;						\
+	u64 ___hcr;							\
+									\
+	if (!is_kernel_in_hyp_mode())					\
+		break;							\
+									\
+	___ctx = this_cpu_ptr(&nmi_contexts);				\
+	___hcr = ___ctx->hcr;						\
+	/*								\
+	 * Make sure we read ___ctx->hcr before we release		\
+	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.	\
+	 */								\
+	barrier();							\
+	___ctx->cnt--;							\
+	/*								\
+	 * Make sure ___ctx->cnt release is visible before we		\
+	 * restore the sysreg. Otherwise a new NMI occurring		\
+	 * right after write_sysreg() can be fooled and think		\
+	 * we secured things for it.					\
+	 */								\
+	barrier();							\
+	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
+		write_sysreg(___hcr, hcr_el2);				\
+} while (0)
 
 static inline void ack_bad_irq(unsigned int irq)
 {
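
Note: the rewritten macros make NMI entry/exit nesting-safe. Only the outermost arch_nmi_enter() reads hcr_el2 and sets HCR_TGE, and only the outermost arch_nmi_exit() restores the saved value; the new cnt field tracks the nesting depth, and the barrier()s order the sysreg write and the saved ___hcr against the counter updates so a re-entrant NMI cannot observe a half-initialized context. The following stand-alone user-space sketch illustrates only the save-once/restore-once counting pattern; the fake_* names are hypothetical stand-ins for hcr_el2, HCR_TGE and the per-CPU nmi_ctx, and the compiler barriers that guard against re-entrancy are deliberately omitted.

/*
 * Minimal single-threaded model of the nesting-safe pattern above.
 * fake_hcr models the hcr_el2 register; struct fake_ctx models the
 * per-CPU struct nmi_ctx. Not kernel code.
 */
#include <stdio.h>

#define FAKE_TGE	(1ULL << 27)	/* stand-in for HCR_TGE */

static unsigned long long fake_hcr;	/* stand-in for hcr_el2 */

struct fake_ctx {
	unsigned long long hcr;	/* value saved by the outermost enter */
	unsigned int cnt;	/* nesting depth, as in struct nmi_ctx */
};

static struct fake_ctx ctx;

static void fake_nmi_enter(void)
{
	unsigned long long hcr;

	if (ctx.cnt) {		/* nested entry: only bump the depth */
		ctx.cnt++;
		return;
	}

	hcr = fake_hcr;		/* models read_sysreg(hcr_el2) */
	if (!(hcr & FAKE_TGE))
		fake_hcr = hcr | FAKE_TGE;	/* models write_sysreg() */
	ctx.cnt = 1;
	ctx.hcr = hcr;		/* saved for the outermost exit */
}

static void fake_nmi_exit(void)
{
	unsigned long long hcr = ctx.hcr;

	/* Only the outermost exit restores the saved value. */
	if (!--ctx.cnt && !(hcr & FAKE_TGE))
		fake_hcr = hcr;
}

int main(void)
{
	fake_nmi_enter();
	fake_nmi_enter();	/* a nested "NMI" */
	fake_nmi_exit();
	fake_nmi_exit();
	printf("hcr after nested enter/exit: %#llx\n", fake_hcr);	/* 0 */
	return 0;
}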