@@ -8,6 +8,7 @@
 #include <linux/context_tracking.h>
 #include <linux/ptrace.h>
 #include <linux/thread_info.h>
+#include <linux/irqstage.h>
 
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
@@ -21,7 +22,7 @@
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
  */
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static void noinstr __enter_from_kernel_mode(struct pt_regs *regs)
 {
         regs->exit_rcu = false;
 
@@ -41,11 +42,50 @@
         mte_check_tfsr_entry();
 }
 
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_IRQ_PIPELINE
+        /*
+         * CAUTION: we may switch in-band as a result of handling a
+         * trap, so if we are running out-of-band, we must make sure
+         * not to perform the RCU exit since we did not enter it in
+         * the first place.
+         */
+        regs->oob_on_entry = running_oob();
+        if (regs->oob_on_entry) {
+                regs->exit_rcu = false;
+                return;
+        }
+
+        /*
+         * We trapped from kernel space while running in-band, so we
+         * need to record the virtual interrupt state into the current
+         * register frame (regs->stalled_on_entry) in order to
+         * reinstate it from exit_to_kernel_mode(). Next we stall the
+         * in-band stage in order to mirror the current hardware state
+         * (i.e. hardirqs are off).
+         */
+        regs->stalled_on_entry = test_and_stall_inband_nocheck();
+#endif
+
+        __enter_from_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+        /*
+         * Our caller is going to inherit the hardware interrupt state
+         * from the trapped context once we have returned: if running
+         * in-band, align the stall bit on the upcoming state.
+         */
+        if (running_inband() && interrupts_enabled(regs))
+                unstall_inband_nocheck();
+#endif
+}
+
 /*
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
  */
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static void noinstr __exit_to_kernel_mode(struct pt_regs *regs)
 {
         lockdep_assert_irqs_disabled();
 
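A side note on the pattern above, not part of the patch: test_and_stall_inband_nocheck() acts as a save-and-disable operation on the in-band stage's virtual interrupt flag, which the exit path later restores from regs->stalled_on_entry. Below is a minimal sketch of that idiom, using only the <linux/irqstage.h> helpers this patch already includes; the enclosing function is hypothetical.

/* Illustration only, not patch code. */
static void stall_save_restore_example(void)
{
        /*
         * Atomically set the in-band stall bit and remember its
         * prior value; virtual IRQs are now off for the in-band
         * stage, mirroring a hardirqs-off hardware state.
         */
        bool stalled = test_and_stall_inband_nocheck();

        /* ... do work with the in-band stage stalled ... */

        /* Restore the flag to its state on entry. */
        if (!stalled)
                unstall_inband_nocheck();
}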
@@ -67,8 +107,34 @@
         }
 }
 
+/*
+ * This is intended to match the logic in irqentry_exit(), handling the kernel
+ * mode transitions only, and with preemption handled elsewhere.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+        if (running_oob())
+                return;
+
+        __exit_to_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+        /*
+         * Reinstate the virtual interrupt state which was in effect
+         * on entry to the trap.
+         */
+        if (!regs->oob_on_entry) {
+                if (regs->stalled_on_entry)
+                        stall_inband_nocheck();
+                else
+                        unstall_inband_nocheck();
+        }
+#endif
+}
+
 void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
+        /* irq_pipeline: running this code oob is ok. */
         regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
 
         __nmi_enter();
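Taken together, the two wrappers bracket an EL1 trap roughly as sketched below; the handler shown is hypothetical and only illustrates the intended pairing.

/* Illustration only, not patch code. */
static void el1_trap_pairing_example(struct pt_regs *regs)
{
        enter_from_kernel_mode(regs);   /* records oob/stall state in regs */

        /* ... dispatch and handle the exception ... */

        exit_to_kernel_mode(regs);      /* reinstates the recorded state */
}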
@@ -99,18 +165,57 @@
 
 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
-        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+        /*
+         * IRQ pipeline: the interrupt entry is special in that we may
+         * run the lockdep and RCU prologue/epilogue only if the IRQ
+         * is going to be dispatched to its handler on behalf of the
+         * current context, i.e. only if running in-band and
+         * unstalled. If so, we also have to reconcile the hardware
+         * and virtual interrupt states temporarily in order to run
+         * such a prologue.
+         */
+        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
                 arm64_enter_nmi(regs);
-        else
-                enter_from_kernel_mode(regs);
+        } else {
+#ifdef CONFIG_IRQ_PIPELINE
+                if (running_inband()) {
+                        regs->stalled_on_entry = test_inband_stall();
+                        if (!regs->stalled_on_entry) {
+                                stall_inband_nocheck();
+                                __enter_from_kernel_mode(regs);
+                                unstall_inband_nocheck();
+                        }
+                }
+#else
+                __enter_from_kernel_mode(regs);
+#endif
+        }
 }
 
 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 {
-        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
                 arm64_exit_nmi(regs);
-        else
-                exit_to_kernel_mode(regs);
+        } else {
+#ifdef CONFIG_IRQ_PIPELINE
+                /*
+                 * See enter_el1_irq_or_nmi() for details. UGLY: we
+                 * also have to tell the tracer that irqs are off,
+                 * since sync_current_irq_stage() did the opposite on
+                 * exit. Hopefully, at some point arm64 will convert
+                 * to the generic entry code, which has less
+                 * convoluted logic.
+                 */
+                if (running_inband() && !regs->stalled_on_entry) {
+                        stall_inband_nocheck();
+                        trace_hardirqs_off();
+                        __exit_to_kernel_mode(regs);
+                        unstall_inband_nocheck();
+                }
+#else
+                __exit_to_kernel_mode(regs);
+#endif
+        }
 }
 
 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
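For a regular (non-NMI) IRQ, the gate in enter_el1_irq_or_nmi() reduces to three cases; the sketch below restates them, assuming the pipeline semantics the comment describes (with the in-band stage stalled, the IRQ is only logged for later replay, so no RCU/lockdep prologue is wanted).

/* Illustration only, not patch code. */
static void irq_prologue_gate_example(struct pt_regs *regs)
{
        if (!running_inband())
                return;         /* oob: the pipeline core dispatches */

        if (test_inband_stall())
                return;         /* in-band but stalled: IRQ deferred */

        /*
         * In-band and unstalled: the handler runs in this context,
         * so run the prologue with the stall bit temporarily set to
         * mirror the hardirqs-off hardware state.
         */
        stall_inband_nocheck();
        __enter_from_kernel_mode(regs);
        unstall_inband_nocheck();
}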
@@ -231,20 +336,32 @@
 
 asmlinkage void noinstr enter_from_user_mode(void)
 {
-        lockdep_hardirqs_off(CALLER_ADDR0);
-        CT_WARN_ON(ct_state() != CONTEXT_USER);
-        user_exit_irqoff();
-        trace_hardirqs_off_finish();
+        if (running_inband()) {
+                lockdep_hardirqs_off(CALLER_ADDR0);
+                WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+                CT_WARN_ON(ct_state() != CONTEXT_USER);
+                stall_inband_nocheck();
+                user_exit_irqoff();
+                unstall_inband_nocheck();
+                trace_hardirqs_off_finish();
+        }
 }
 
 asmlinkage void noinstr exit_to_user_mode(void)
 {
-        mte_check_tfsr_exit();
+        if (running_inband()) {
+                trace_hardirqs_on_prepare();
+                lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+                user_enter_irqoff();
+                lockdep_hardirqs_on(CALLER_ADDR0);
+                unstall_inband_nocheck();
+        }
+}
 
-        trace_hardirqs_on_prepare();
-        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-        user_enter_irqoff();
-        lockdep_hardirqs_on(CALLER_ADDR0);
+asmlinkage void noinstr enter_el0_irq(void)
+{
+        if (running_inband() && !test_inband_stall())
+                enter_from_user_mode();
 }
 
 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
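The same in-band-only gating applies at the user-mode boundary; as a usage sketch (the enclosing handler is hypothetical), an EL0 exception path pairs the two helpers like this:

/* Illustration only, not patch code. */
static void el0_trap_pairing_example(struct pt_regs *regs)
{
        enter_from_user_mode();         /* lockdep + context tracking, in-band only */

        /* ... handle the exception on behalf of the task ... */

        exit_to_user_mode();            /* tracing/lockdep back on, then unstall */
}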
---|