2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
--- a/kernel/arch/arm64/kernel/entry-common.c
+++ b/kernel/arch/arm64/kernel/entry-common.c
@@ -8,6 +8,7 @@
 #include <linux/context_tracking.h>
 #include <linux/ptrace.h>
 #include <linux/thread_info.h>
+#include <linux/irqstage.h>
 
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
@@ -21,7 +22,7 @@
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
  */
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static void noinstr __enter_from_kernel_mode(struct pt_regs *regs)
 {
 	regs->exit_rcu = false;
 
@@ -41,11 +42,50 @@
 	mte_check_tfsr_entry();
 }
 
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * CAUTION: we may switch in-band as a result of handling a
+	 * trap, so if we are running out-of-band, we must make sure
+	 * not to perform the RCU exit since we did not enter it in
+	 * the first place.
+	 */
+	regs->oob_on_entry = running_oob();
+	if (regs->oob_on_entry) {
+		regs->exit_rcu = false;
+		return;
+	}
+
+	/*
+	 * We trapped from kernel space running in-band, we need to
+	 * record the virtual interrupt state into the current
+	 * register frame (regs->stalled_on_entry) in order to
+	 * reinstate it from exit_to_kernel_mode(). Next we stall the
+	 * in-band stage in order to mirror the current hardware state
+	 * (i.e. hardirqs are off).
+	 */
+	regs->stalled_on_entry = test_and_stall_inband_nocheck();
+#endif
+
+	__enter_from_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * Our caller is going to inherit the hardware interrupt state
+	 * from the trapped context once we have returned: if running
+	 * in-band, align the stall bit on the upcoming state.
+	 */
+	if (running_inband() && interrupts_enabled(regs))
+		unstall_inband_nocheck();
+#endif
+}
+
 /*
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
  */
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static void noinstr __exit_to_kernel_mode(struct pt_regs *regs)
 {
 	lockdep_assert_irqs_disabled();
 
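Note: regs->oob_on_entry and regs->stalled_on_entry are not fields of the mainline arm64 struct pt_regs; the wider IRQ pipeline series presumably extends the register frame so the entry code can stash the stage and stall state across the trap. A rough sketch of such an extension, reusing the field names from the hunk above (the exact types and placement are an assumption, not taken from the patch):

	#ifdef CONFIG_IRQ_PIPELINE
		u64	oob_on_entry;		/* trapped from the out-of-band stage? */
		u64	stalled_on_entry;	/* in-band stall bit captured on entry */
	#endif

By contrast, exit_rcu (and lockdep_hardirqs, used further down) already exist in the mainline arm64 pt_regs for this kind of entry/exit bookkeeping.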
@@ -67,8 +107,35 @@
 	}
 }
 
+/*
+ * This is intended to match the logic in irqentry_exit(), handling the kernel
+ * mode transitions only, and with preemption handled elsewhere.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+	if (running_oob())
+		return;
+
+	__exit_to_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * Reinstate the virtual interrupt state which was in effect
+	 * on entry to the trap.
+	 */
+	if (!regs->oob_on_entry) {
+		if (regs->stalled_on_entry)
+			stall_inband_nocheck();
+		else
+			unstall_inband_nocheck();
+	}
+#endif
+	return;
+}
+
 void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
+	/* irq_pipeline: running this code oob is ok. */
 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
 
 	__nmi_enter();
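Note: the *_inband_nocheck() helpers come from the IRQ pipeline core (see the <linux/irqstage.h> include added above) and operate on the virtual interrupt disable flag, the "stall bit", of the in-band stage. As a simplified mental model only (the real implementation also has to synchronize the log of deferred interrupts; the per-CPU variable below is purely illustrative, not the actual API internals):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, inband_stall_bit);

	static inline void stall_inband_nocheck(void)
	{
		raw_cpu_write(inband_stall_bit, 1);	/* virtual IRQs "off" */
	}

	static inline void unstall_inband_nocheck(void)
	{
		raw_cpu_write(inband_stall_bit, 0);	/* virtual IRQs "on" */
	}

	static inline bool test_inband_stall(void)
	{
		return raw_cpu_read(inband_stall_bit) != 0;
	}

	static inline bool test_and_stall_inband_nocheck(void)
	{
		bool stalled = test_inband_stall();

		stall_inband_nocheck();
		return stalled;
	}

With that model, enter_from_kernel_mode() captures the bit and forces it to the stalled state so the virtual state mirrors the hardware hardirqs-off condition, and exit_to_kernel_mode() replays the captured value on the way out.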
@@ -99,18 +166,57 @@
 
 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+	/*
+	 * IRQ pipeline: the interrupt entry is special in that we may
+	 * run the lockdep and RCU prologue/epilogue only if the IRQ
+	 * is going to be dispatched to its handler on behalf of the
+	 * current context, i.e. only if running in-band and
+	 * unstalled. If so, we also have to reconcile the hardware
+	 * and virtual interrupt states temporarily in order to run
+	 * such prologue.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
 		arm64_enter_nmi(regs);
-	else
-		enter_from_kernel_mode(regs);
+	} else {
+#ifdef CONFIG_IRQ_PIPELINE
+		if (running_inband()) {
+			regs->stalled_on_entry = test_inband_stall();
+			if (!regs->stalled_on_entry) {
+				stall_inband_nocheck();
+				__enter_from_kernel_mode(regs);
+				unstall_inband_nocheck();
+			}
+		}
+#else
+		__enter_from_kernel_mode(regs);
+#endif
+	}
 }
 
 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 {
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
 		arm64_exit_nmi(regs);
-	else
-		exit_to_kernel_mode(regs);
+	} else {
+#ifdef CONFIG_IRQ_PIPELINE
+		/*
+		 * See enter_el1_irq_or_nmi() for details. UGLY: we
+		 * also have to tell the tracer that irqs are off,
+		 * since sync_current_irq_stage() did the opposite on
+		 * exit. Hopefully, at some point arm64 will convert
+		 * to the generic entry code which exhibits a less
+		 * convoluted logic.
+		 */
+		if (running_inband() && !regs->stalled_on_entry) {
+			stall_inband_nocheck();
+			trace_hardirqs_off();
+			__exit_to_kernel_mode(regs);
+			unstall_inband_nocheck();
+		}
+#else
+		__exit_to_kernel_mode(regs);
+#endif
+	}
 }
 
 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
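Note: the comment in enter_el1_irq_or_nmi() condenses the dispatch rule: the lockdep/RCU kernel-entry prologue may only run when the IRQ is actually delivered to its handler on behalf of the current context. In outline (illustrative pseudo-flow, not code from the patch):

	if (running_oob() || test_inband_stall()) {
		/*
		 * The IRQ is merely logged by the pipeline and will be
		 * played back once the in-band stage is unstalled, so
		 * running the RCU/lockdep prologue here would account
		 * for an entry that never reaches its handler in this
		 * context.
		 */
	} else {
		/*
		 * The handler runs right away for the in-band context:
		 * stall the in-band stage so the virtual state matches
		 * hardirqs-off, run __enter_from_kernel_mode(), then
		 * unstall again before dispatching.
		 */
	}

exit_el1_irq_or_nmi() mirrors this, with the extra trace_hardirqs_off() call working around the state left by sync_current_irq_stage(), as the UGLY comment explains.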
@@ -231,20 +337,32 @@
 
 asmlinkage void noinstr enter_from_user_mode(void)
 {
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
-	trace_hardirqs_off_finish();
+	if (running_inband()) {
+		lockdep_hardirqs_off(CALLER_ADDR0);
+		WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+		CT_WARN_ON(ct_state() != CONTEXT_USER);
+		stall_inband_nocheck();
+		user_exit_irqoff();
+		unstall_inband_nocheck();
+		trace_hardirqs_off_finish();
+	}
 }
 
 asmlinkage void noinstr exit_to_user_mode(void)
 {
-	mte_check_tfsr_exit();
+	if (running_inband()) {
+		trace_hardirqs_on_prepare();
+		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+		user_enter_irqoff();
+		lockdep_hardirqs_on(CALLER_ADDR0);
+		unstall_inband_nocheck();
+	}
+}
 
-	trace_hardirqs_on_prepare();
-	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	user_enter_irqoff();
-	lockdep_hardirqs_on(CALLER_ADDR0);
+asmlinkage void noinstr enter_el0_irq(void)
+{
+	if (running_inband() && !test_inband_stall())
+		enter_from_user_mode();
 }
 
 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
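Note: enter_el0_irq() is a new entry point with no caller in this file; the rest of the series presumably invokes it from the EL0 interrupt path in place of an unconditional enter_from_user_mode(), so that user-entry accounting (lockdep, context tracking, tracing) is skipped when the interrupt arrives out-of-band or with the in-band stage stalled. A hypothetical call site, only to show the intended pairing; el0_irq_handler() and handle_arch_irq_pipelined() are made-up names, not part of the patch:

	asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs)
	{
		enter_el0_irq();			/* accounting only if in-band and unstalled */
		handle_arch_irq_pipelined(regs);	/* hand the IRQ to the pipeline */
	}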