2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/arch/arm64/include/asm/irqflags.h
@@ -10,6 +10,10 @@
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
+#define IRQMASK_I_BIT	PSR_I_BIT
+#define IRQMASK_I_POS	7
+#define IRQMASK_i_POS	31
+
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
  * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
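
The three new macros describe where interrupt-mask state sits in a saved flags word. IRQMASK_I_POS (7) matches PSR_I_BIT, i.e. PSTATE.I; the lower-case IRQMASK_i_POS (31) is presumably a software-defined position used when PMR-based masking stands in for DAIF (the hunk itself does not say). A minimal sketch of how such positions could be tested, under that assumption; the helper name is hypothetical, not from the patch:

	/* Illustration only: treat either position as "IRQs masked". */
	static inline int flags_irq_masked(unsigned long flags)
	{
		return !!(flags & ((1UL << IRQMASK_I_POS) |
				   (1UL << IRQMASK_i_POS)));
	}
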
@@ -26,7 +30,7 @@
 /*
  * CPU interrupt mask handling.
  */
-static inline void arch_local_irq_enable(void)
+static inline void native_irq_enable(void)
 {
 	if (system_has_prio_mask_debugging()) {
 		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
@@ -35,7 +39,7 @@
 	}
 
 	asm volatile(ALTERNATIVE(
-		"msr	daifclr, #2		// arch_local_irq_enable",
+		"msr	daifclr, #2		// native_irq_enable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
@@ -45,7 +49,7 @@
 	pmr_sync();
 }
 
-static inline void arch_local_irq_disable(void)
+static inline void native_irq_disable(void)
 {
 	if (system_has_prio_mask_debugging()) {
 		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
@@ -54,7 +58,7 @@
 	}
 
 	asm volatile(ALTERNATIVE(
-		"msr	daifset, #2		// arch_local_irq_disable",
+		"msr	daifset, #2		// native_irq_disable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
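
On CPUs that advertise ARM64_HAS_IRQ_PRIO_MASKING, the ALTERNATIVE() patches the DAIF write into a write of ICC_PMR_EL1, so "masking IRQs" becomes raising the GIC priority mask while PSTATE.I stays clear (the pseudo-NMI scheme). The same boot-time patching pattern in isolation, as a usage sketch:

	/* ALTERNATIVE(): the first instruction runs unless the capability
	 * is detected at boot, in which case the second is patched in. */
	asm volatile(ALTERNATIVE("nop", "isb", ARM64_HAS_IRQ_PRIO_MASKING)
		     : : : "memory");
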
@@ -62,10 +66,17 @@
 		: "memory");
 }
 
+static inline void native_irq_sync(void)
+{
+	native_irq_enable();
+	isb();
+	native_irq_disable();
+}
+
 /*
  * Save the current interrupt enable state.
  */
-static inline unsigned long arch_local_save_flags(void)
+static inline unsigned long native_save_flags(void)
 {
 	unsigned long flags;
 
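
native_irq_sync() briefly opens the interrupt window: the isb() after the unmask guarantees it is architecturally visible, so any IRQ pended while masked is actually taken before IRQs are masked again. A hypothetical caller draining pending interrupts in the middle of a long masked section:

	/* Hypothetical usage sketch of native_irq_sync(). */
	static void masked_work_with_drain(void)
	{
		native_irq_disable();
		/* ... first chunk of work with IRQs masked ... */
		native_irq_sync();	/* service anything now pending */
		/* ... second chunk; IRQs are masked again on return ... */
		native_irq_enable();
	}
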
@@ -80,7 +91,7 @@
 	return flags;
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline int native_irqs_disabled_flags(unsigned long flags)
 {
 	int res;
 
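
Note that the saved flags are not a plain PSTATE copy on every system: with priority masking enabled they reflect the PMR value rather than DAIF, which is why callers should decode them through native_irqs_disabled_flags() instead of open-coding a PSR_I_BIT test. For instance:

	/* Sketch: always decode saved flags with the matching predicate. */
	static void report_irq_state(void)
	{
		unsigned long flags = native_save_flags();

		/* flags may hold DAIF bits or a PMR value. */
		if (native_irqs_disabled_flags(flags))
			pr_debug("IRQs were masked (DAIF.I or PMR)\n");
	}
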
@@ -95,23 +106,18 @@
 	return res;
 }
 
-static inline int arch_irqs_disabled(void)
-{
-	return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-static inline unsigned long arch_local_irq_save(void)
+static inline unsigned long native_irq_save(void)
 {
 	unsigned long flags;
 
-	flags = arch_local_save_flags();
+	flags = native_save_flags();
 
 	/*
 	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
-	if (!arch_irqs_disabled_flags(flags))
-		arch_local_irq_disable();
+	if (!native_irqs_disabled_flags(flags))
+		native_irq_disable();
 
 	return flags;
 }
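
native_irq_save() keeps the usual save-then-disable contract, with the subtlety the comment calls out: if IRQs are already masked, the current flags are returned unchanged rather than forced to one canonical "disabled" value, since several distinct flag values can all mean "masked". Typical critical-section usage:

	/* Usage sketch: nestable hard-IRQ critical section. */
	static void update_shared(int *counter)
	{
		unsigned long flags;

		flags = native_irq_save();
		(*counter)++;			/* hard IRQs masked here */
		native_irq_restore(flags);	/* back to the prior state */
	}
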
@@ -119,7 +125,7 @@
 /*
  * restore saved IRQ state
  */
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline void native_irq_restore(unsigned long flags)
 {
 	asm volatile(ALTERNATIVE(
 		"msr	daif, %0",
@@ -132,4 +138,12 @@
 	pmr_sync();
 }
 
+static inline bool native_irqs_disabled(void)
+{
+	unsigned long flags = native_save_flags();
+	return native_irqs_disabled_flags(flags);
+}
+
+#include <asm/irq_pipeline.h>
+
 #endif /* __ASM_IRQFLAGS_H */
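
The closing hunk shows the overall shape of the patch: asm/irq_pipeline.h is included after the native_* helpers, and, given the renames above, it presumably supplies the generic arch_local_irq_* entry points, typically by redirecting them to a per-CPU virtual "stall" flag so most of the kernel only virtually masks IRQs while the native_* helpers remain the one place that touches real CPU masking. A much-simplified sketch of that layering; every name beyond the native_* ones is hypothetical:

	/* Hypothetical sketch of what an IRQ-pipeline header could layer
	 * on top of the native_* helpers; this is NOT the actual
	 * asm/irq_pipeline.h from the patch. */
	static DEFINE_PER_CPU(unsigned long, virq_stall);	/* hypothetical */

	static inline void pipeline_irq_disable_sketch(void)
	{
		/* Record the mask in software; hardware IRQs stay enabled
		 * so the pipeline can still log incoming interrupts. */
		this_cpu_write(virq_stall, 1);
	}

	static inline void pipeline_irq_enable_sketch(void)
	{
		this_cpu_write(virq_stall, 0);
		/* A real pipeline would replay IRQs logged while stalled. */
	}
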