forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 10ebd8556b7990499c896a550e3d416b444211e6
kernel/arch/arm64/kernel/entry.S
@@ -1,21 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors: Catalin Marinas <catalin.marinas@arm.com>
 * Will Deacon <will.deacon@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
 #include <linux/arm-smccc.h>
@@ -25,6 +14,8 @@
 #include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
+#include <asm/asm_pointer_auth.h>
+#include <asm/bug.h>
 #include <asm/cpufeature.h>
 #include <asm/errno.h>
 #include <asm/esr.h>
@@ -33,23 +24,24 @@
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/scs.h>
 #include <asm/thread_info.h>
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
 /*
- * Context tracking subsystem. Used to instrument transitions
- * between user and kernel mode.
+ * Context tracking and irqflag tracing need to instrument transitions between
+ * user and kernel mode.
 */
- .macro ct_user_exit
-#ifdef CONFIG_CONTEXT_TRACKING
- bl context_tracking_user_exit
+ .macro user_exit_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+ bl enter_from_user_mode
 #endif
 .endm
 
- .macro ct_user_enter
-#ifdef CONFIG_CONTEXT_TRACKING
- bl context_tracking_user_enter
+ .macro user_enter_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+ bl exit_to_user_mode
 #endif
 .endm
 
@@ -70,24 +62,28 @@
 
 .macro kernel_ventry, el, label, regsize = 64
 .align 7
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+.Lventry_start\@:
 .if \el == 0
+ /*
+ * This must be the first instruction of the EL0 vector entries. It is
+ * skipped by the trampoline vectors, to trigger the cleanup.
+ */
+ b .Lskip_tramp_vectors_cleanup\@
 .if \regsize == 64
 mrs x30, tpidrro_el0
 msr tpidrro_el0, xzr
 .else
 mov x30, xzr
 .endif
+.Lskip_tramp_vectors_cleanup\@:
 .endif
-alternative_else_nop_endif
-#endif
 
 sub sp, sp, #S_FRAME_SIZE
 #ifdef CONFIG_VMAP_STACK
 /*
 * Test whether the SP has overflowed, without corrupting a GPR.
- * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
+ * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
+ * should always be zero.
 */
 add sp, sp, x0 // sp' = sp + x0
 sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
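The hunk above only shows the register-preserving recovery of SP; the branch that actually reacts to an overflow sits in the lines elided between this hunk and the next. As a reader aid, here is a small C sketch of the check the new comment describes, assuming (as in mainline) that stacks are THREAD_SIZE bytes aligned to twice that; the names and the THREAD_SHIFT value are illustrative, not taken from this tree:

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SHIFT 14UL                       /* assumed for the sketch */

static bool stack_overflowed(uintptr_t sp, uintptr_t x0)
{
        uintptr_t sp_prime = sp + x0;           /* add sp, sp, x0 : frees x0 as scratch */
        uintptr_t sp_orig  = sp_prime - x0;     /* sub x0, sp, x0 : recovers the original SP */

        /*
         * A stack of size (1 << THREAD_SHIFT), aligned to twice that, keeps
         * bit THREAD_SHIFT of every in-range SP clear; running off the
         * bottom of the stack sets it.
         */
        return sp_orig & (1UL << THREAD_SHIFT);
}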
@@ -127,19 +123,24 @@
 mrs x0, tpidrro_el0
 #endif
 b el\()\el\()_\label
+.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
 .endm
 
- .macro tramp_alias, dst, sym
+ .macro tramp_alias, dst, sym, tmp
 mov_q \dst, TRAMP_VALIAS
- add \dst, \dst, #(\sym - .entry.tramp.text)
+ adr_l \tmp, \sym
+ add \dst, \dst, \tmp
+ adr_l \tmp, .entry.tramp.text
+ sub \dst, \dst, \tmp
 .endm
 
- // This macro corrupts x0-x3. It is the caller's duty
- // to save/restore them if required.
+ /*
+ * This macro corrupts x0-x3. It is the caller's duty to save/restore
+ * them if required.
+ */
 .macro apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb arm64_enable_wa2_handling
- b .L__asm_ssbd_skip\@
+alternative_cb spectre_v4_patch_fw_mitigation_enable
+ b .L__asm_ssbd_skip\@ // Patched to NOP
 alternative_cb_end
 ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
 cbz \tmp2, .L__asm_ssbd_skip\@
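The tramp_alias rework in the hunk above stops treating `\sym - .entry.tramp.text` as an assemble-time constant and instead computes the offset at run time with two adr_l loads, which is why the macro now needs a scratch register. The resulting address arithmetic, as a hedged C sketch (function and parameter names are illustrative only):

#include <stdint.h>

/* dst = TRAMP_VALIAS + (sym - start of .entry.tramp.text) */
static inline uintptr_t tramp_alias_addr(uintptr_t tramp_valias,
                                         uintptr_t sym,
                                         uintptr_t entry_tramp_text_start)
{
        /* mov_q \dst, TRAMP_VALIAS; then adr_l/add and adr_l/sub as above */
        return tramp_valias + (sym - entry_tramp_text_start);
}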
@@ -147,10 +148,76 @@
 tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 mov w1, #\state
-alternative_cb arm64_update_smccc_conduit
+alternative_cb smccc_patch_fw_mitigation_conduit
 nop // Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
+ .endm
+
+ /* Check for MTE asynchronous tag check faults */
+ .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
+#ifdef CONFIG_ARM64_MTE
+ .arch_extension lse
+alternative_if_not ARM64_MTE
+ b 1f
+alternative_else_nop_endif
+ /*
+ * Asynchronous tag check faults are only possible in ASYNC (2) or
+ * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
+ * set, so skip the check if it is unset.
+ */
+ tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
+ mrs_s \tmp, SYS_TFSRE0_EL1
+ tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+ /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+ mov \tmp, #_TIF_MTE_ASYNC_FAULT
+ add \ti_flags, tsk, #TSK_TI_FLAGS
+ stset \tmp, [\ti_flags]
+1:
+#endif
+ .endm
+
+ /* Clear the MTE asynchronous tag check faults */
+ .macro clear_mte_async_tcf thread_sctlr
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ /* See comment in check_mte_async_tcf above. */
+ tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
+ dsb ish
+ msr_s SYS_TFSRE0_EL1, xzr
+1:
+alternative_else_nop_endif
+#endif
+ .endm
+
+ .macro mte_set_gcr, mte_ctrl, tmp
+#ifdef CONFIG_ARM64_MTE
+ ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
+ orr \tmp, \tmp, #SYS_GCR_EL1_RRND
+ msr_s SYS_GCR_EL1, \tmp
+#endif
+ .endm
+
+ .macro mte_set_kernel_gcr, tmp, tmp2
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_cb kasan_hw_tags_enable
+ b 1f
+alternative_cb_end
+ mov \tmp, KERNEL_GCR_EL1
+ msr_s SYS_GCR_EL1, \tmp
+1:
+#endif
+ .endm
+
+ .macro mte_set_user_gcr, tsk, tmp, tmp2
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_cb kasan_hw_tags_enable
+ b 1f
+alternative_cb_end
+ ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
+
+ mte_set_gcr \tmp, \tmp2
+1:
 #endif
 .endm
 
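The mte_set_gcr macro added above builds the GCR_EL1 value from the task's mte_ctrl word: a 16-bit tag-exclude mask for userspace plus the RRND (hardware-random tag generation) bit. A minimal C sketch of that computation, with the shift and bit position written out as assumptions rather than values taken from this tree:

#include <stdint.h>

#define MTE_CTRL_GCR_USER_EXCL_SHIFT    0               /* assumed */
#define SYS_GCR_EL1_RRND                (1UL << 16)     /* assumed */

static inline uint64_t mte_user_gcr_value(uint64_t mte_ctrl)
{
        /* ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16 */
        uint64_t excl = (mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & 0xffff;

        /* orr \tmp, \tmp, #SYS_GCR_EL1_RRND : always request random tags */
        return excl | SYS_GCR_EL1_RRND;
}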
@@ -177,19 +244,63 @@
 .if \el == 0
 clear_gp_regs
 mrs x21, sp_el0
- ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
- ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
- disable_step_tsk x19, x20 // exceptions when scheduling.
+ ldr_this_cpu tsk, __entry_task, x20
+ msr sp_el0, tsk
+
+ /*
+ * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+ * when scheduling.
+ */
+ ldr x19, [tsk, #TSK_TI_FLAGS]
+ disable_step_tsk x19, x20
+
+ /* Check for asynchronous tag check faults in user space */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ check_mte_async_tcf x22, x23, x0
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ /*
+ * Enable IA for in-kernel PAC if the task had it disabled. Although
+ * this could be implemented with an unconditional MRS which would avoid
+ * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+ *
+ * Install the kernel IA key only if IA was enabled in the task. If IA
+ * was disabled on kernel exit then we would have left the kernel IA
+ * installed so there is no need to install it again.
+ */
+ tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+ __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ orr x0, x0, SCTLR_ELx_ENIA
+ msr sctlr_el1, x0
+2:
+alternative_else_nop_endif
+#endif
 
 apply_ssbd 1, x22, x23
 
-#ifdef CONFIG_SHADOW_CALL_STACK
- ldr x18, [tsk, #TSK_TI_SCS] // Restore shadow call stack
- str xzr, [tsk, #TSK_TI_SCS] // Limit visibility of saved SCS
-#endif
+ mte_set_kernel_gcr x22, x23
+
+ /*
+ * Any non-self-synchronizing system register updates required for
+ * kernel entry should be placed before this point.
+ */
+alternative_if ARM64_MTE
+ isb
+ b 1f
+alternative_else_nop_endif
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ isb
+alternative_else_nop_endif
+1:
+
+ scs_load_current
 .else
 add x21, sp, #S_FRAME_SIZE
- get_thread_info tsk
+ get_current_task tsk
 /* Save the task's original addr_limit and set USER_DS */
 ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
 str x20, [sp, #S_ORIG_ADDR_LIMIT]
@@ -214,28 +325,9 @@
 add x29, sp, #S_STACKFRAME
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
- /*
- * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
- * EL0, there is no need to check the state of TTBR0_EL1 since
- * accesses are always enabled.
- * Note that the meaning of this bit differs from the ARMv8.1 PAN
- * feature as all TTBR0_EL1 accesses are disabled, not just those to
- * user mappings.
- */
-alternative_if ARM64_HAS_PAN
- b 1f // skip TTBR0 PAN
+alternative_if_not ARM64_HAS_PAN
+ bl __swpan_entry_el\el
 alternative_else_nop_endif
-
- .if \el != 0
- mrs x21, ttbr0_el1
- tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
- orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
- b.eq 1f // TTBR0 access already disabled
- and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
- .endif
-
- __uaccess_ttbr0_disable x21
-1:
 #endif
 
 stp x22, x23, [sp, #S_PC]
@@ -246,16 +338,25 @@
 str w21, [sp, #S_SYSCALLNO]
 .endif
 
- /*
- * Set sp_el0 to current thread_info.
- */
- .if \el == 0
- msr sp_el0, tsk
- .endif
+ /* Save pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mrs_s x20, SYS_ICC_PMR_EL1
+ str x20, [sp, #S_PMR_SAVE]
+ mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
+ msr_s SYS_ICC_PMR_EL1, x20
+alternative_else_nop_endif
+
+ /* Re-enable tag checking (TCO set on exception entry) */
+#ifdef CONFIG_ARM64_MTE
+alternative_if ARM64_MTE
+ SET_PSTATE_TCO(0)
+alternative_else_nop_endif
+#endif
 
 /*
 * Registers that may be useful after this macro is invoked:
 *
+ * x20 - ICC_PMR_EL1
 * x21 - aborted SP
 * x22 - aborted PC
 * x23 - aborted PSTATE
....@@ -273,46 +374,22 @@
273374 /* No need to restore UAO, it will be restored from SPSR_EL1 */
274375 .endif
275376
276
- ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
277
- .if \el == 0
278
- ct_user_enter
279
- .endif
280
-
281
-#ifdef CONFIG_SHADOW_CALL_STACK
282
- .if \el == 0
283
- str x18, [tsk, #TSK_TI_SCS] // Save shadow call stack
284
- .endif
285
-#endif
286
-
287
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
288
- /*
289
- * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
290
- * PAN bit checking.
291
- */
292
-alternative_if ARM64_HAS_PAN
293
- b 2f // skip TTBR0 PAN
377
+ /* Restore pmr */
378
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
379
+ ldr x20, [sp, #S_PMR_SAVE]
380
+ msr_s SYS_ICC_PMR_EL1, x20
381
+ mrs_s x21, SYS_ICC_CTLR_EL1
382
+ tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE
383
+ dsb sy // Ensure priority change is seen by redistributor
384
+.L__skip_pmr_sync\@:
294385 alternative_else_nop_endif
295386
296
- .if \el != 0
297
- tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
298
- .endif
387
+ ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
299388
300
- __uaccess_ttbr0_enable x0, x1
301
-
302
- .if \el == 0
303
- /*
304
- * Enable errata workarounds only if returning to user. The only
305
- * workaround currently required for TTBR0_EL1 changes are for the
306
- * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
307
- * corruption).
308
- */
309
- bl post_ttbr_update_workaround
310
- .endif
311
-1:
312
- .if \el != 0
313
- and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit
314
- .endif
315
-2:
389
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
390
+alternative_if_not ARM64_HAS_PAN
391
+ bl __swpan_exit_el\el
392
+alternative_else_nop_endif
316393 #endif
317394
318395 .if \el == 0
....@@ -332,6 +409,34 @@
332409 alternative_else_nop_endif
333410 #endif
334411 3:
412
+ scs_save tsk, x0
413
+
414
+ /* Ignore asynchronous tag check faults in the uaccess routines */
415
+ ldr x0, [tsk, THREAD_SCTLR_USER]
416
+ clear_mte_async_tcf x0
417
+
418
+#ifdef CONFIG_ARM64_PTR_AUTH
419
+alternative_if ARM64_HAS_ADDRESS_AUTH
420
+ /*
421
+ * IA was enabled for in-kernel PAC. Disable it now if needed, or
422
+ * alternatively install the user's IA. All other per-task keys and
423
+ * SCTLR bits were updated on task switch.
424
+ *
425
+ * No kernel C function calls after this.
426
+ */
427
+ tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
428
+ __ptrauth_keys_install_user tsk, x0, x1, x2
429
+ b 2f
430
+1:
431
+ mrs x0, sctlr_el1
432
+ bic x0, x0, SCTLR_ELx_ENIA
433
+ msr sctlr_el1, x0
434
+2:
435
+alternative_else_nop_endif
436
+#endif
437
+
438
+ mte_set_user_gcr tsk, x0, x1
439
+
335440 apply_ssbd 0, x0, x1
336441 .endif
337442
....@@ -352,34 +457,81 @@
352457 ldp x24, x25, [sp, #16 * 12]
353458 ldp x26, x27, [sp, #16 * 13]
354459 ldp x28, x29, [sp, #16 * 14]
355
- ldr lr, [sp, #S_LR]
356
- add sp, sp, #S_FRAME_SIZE // restore sp
357
- /*
358
- * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization
359
- * when returning from IPI handler, and when returning to user-space.
360
- */
361460
362461 .if \el == 0
363
-alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
462
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
463
+ ldr lr, [sp, #S_LR]
464
+ add sp, sp, #S_FRAME_SIZE // restore sp
465
+ eret
466
+alternative_else_nop_endif
364467 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
365468 bne 4f
366
- msr far_el1, x30
367
- tramp_alias x30, tramp_exit_native
469
+ msr far_el1, x29
470
+ tramp_alias x30, tramp_exit_native, x29
368471 br x30
369472 4:
370
- tramp_alias x30, tramp_exit_compat
473
+ tramp_alias x30, tramp_exit_compat, x29
371474 br x30
372475 #endif
373476 .else
477
+ ldr lr, [sp, #S_LR]
478
+ add sp, sp, #S_FRAME_SIZE // restore sp
479
+
480
+ /* Ensure any device/NC reads complete */
481
+ alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
482
+
374483 eret
375484 .endif
376485 sb
377486 .endm
378487
488
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
489
+ /*
490
+ * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
491
+ * EL0, there is no need to check the state of TTBR0_EL1 since
492
+ * accesses are always enabled.
493
+ * Note that the meaning of this bit differs from the ARMv8.1 PAN
494
+ * feature as all TTBR0_EL1 accesses are disabled, not just those to
495
+ * user mappings.
496
+ */
497
+SYM_CODE_START_LOCAL(__swpan_entry_el1)
498
+ mrs x21, ttbr0_el1
499
+ tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
500
+ orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
501
+ b.eq 1f // TTBR0 access already disabled
502
+ and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
503
+SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
504
+ __uaccess_ttbr0_disable x21
505
+1: ret
506
+SYM_CODE_END(__swpan_entry_el1)
507
+
508
+ /*
509
+ * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
510
+ * PAN bit checking.
511
+ */
512
+SYM_CODE_START_LOCAL(__swpan_exit_el1)
513
+ tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
514
+ __uaccess_ttbr0_enable x0, x1
515
+1: and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit
516
+ ret
517
+SYM_CODE_END(__swpan_exit_el1)
518
+
519
+SYM_CODE_START_LOCAL(__swpan_exit_el0)
520
+ __uaccess_ttbr0_enable x0, x1
521
+ /*
522
+ * Enable errata workarounds only if returning to user. The only
523
+ * workaround currently required for TTBR0_EL1 changes are for the
524
+ * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
525
+ * corruption).
526
+ */
527
+ b post_ttbr_update_workaround
528
+SYM_CODE_END(__swpan_exit_el0)
529
+#endif
530
+
379531 .macro irq_stack_entry
380532 mov x19, sp // preserve the original sp
381533 #ifdef CONFIG_SHADOW_CALL_STACK
382
- mov x20, x18 // preserve the original shadow stack
534
+ mov x24, scs_sp // preserve the original shadow stack
383535 #endif
384536
385537 /*
....@@ -401,7 +553,7 @@
401553
402554 #ifdef CONFIG_SHADOW_CALL_STACK
403555 /* also switch to the irq shadow stack */
404
- ldr_this_cpu x18, irq_shadow_call_stack_ptr, x26
556
+ ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
405557 #endif
406558
407559 9998:
....@@ -409,37 +561,88 @@
409561
410562 /*
411563 * The callee-saved regs (x19-x29) should be preserved between
412
- * irq_stack_entry and irq_stack_exit.
564
+ * irq_stack_entry and irq_stack_exit, but note that kernel_entry
565
+ * uses x20-x23 to store data for later use.
413566 */
414567 .macro irq_stack_exit
415568 mov sp, x19
416569 #ifdef CONFIG_SHADOW_CALL_STACK
417
- mov x18, x20
570
+ mov scs_sp, x24
418571 #endif
419572 .endm
420573
421
-/*
422
- * These are the registers used in the syscall handler, and allow us to
423
- * have in theory up to 7 arguments to a function - x0 to x6.
424
- *
425
- * x7 is reserved for the system call number in 32-bit mode.
426
- */
427
-wsc_nr .req w25 // number of system calls
428
-xsc_nr .req x25 // number of system calls (zero-extended)
429
-wscno .req w26 // syscall number
430
-xscno .req x26 // syscall number (zero-extended)
431
-stbl .req x27 // syscall table pointer
574
+/* GPRs used by entry code */
432575 tsk .req x28 // current thread_info
433576
434577 /*
435578 * Interrupt handling.
436579 */
437
- .macro irq_handler
438
- ldr_l x1, handle_arch_irq
580
+ .macro irq_handler, handler:req
581
+ ldr_l x1, \handler
439582 mov x0, sp
440583 irq_stack_entry
441584 blr x1
442585 irq_stack_exit
586
+ .endm
587
+
588
+#ifdef CONFIG_ARM64_PSEUDO_NMI
589
+ /*
590
+ * Set res to 0 if irqs were unmasked in interrupted context.
591
+ * Otherwise set res to non-0 value.
592
+ */
593
+ .macro test_irqs_unmasked res:req, pmr:req
594
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
595
+ sub \res, \pmr, #GIC_PRIO_IRQON
596
+alternative_else
597
+ mov \res, xzr
598
+alternative_endif
599
+ .endm
600
+#endif
601
+
602
+ .macro gic_prio_kentry_setup, tmp:req
603
+#ifdef CONFIG_ARM64_PSEUDO_NMI
604
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
605
+ mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
606
+ msr_s SYS_ICC_PMR_EL1, \tmp
607
+ alternative_else_nop_endif
608
+#endif
609
+ .endm
610
+
611
+ .macro el1_interrupt_handler, handler:req
612
+ enable_da_f
613
+
614
+ mov x0, sp
615
+ bl enter_el1_irq_or_nmi
616
+
617
+ irq_handler \handler
618
+
619
+#ifdef CONFIG_PREEMPTION
620
+ ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
621
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
622
+ /*
623
+ * DA_F were cleared at start of handling. If anything is set in DAIF,
624
+ * we come back from an NMI, so skip preemption
625
+ */
626
+ mrs x0, daif
627
+ orr x24, x24, x0
628
+alternative_else_nop_endif
629
+ cbnz x24, 1f // preempt count != 0 || NMI return path
630
+ bl arm64_preempt_schedule_irq // irq en/disable is done inside
631
+1:
632
+#endif
633
+
634
+ mov x0, sp
635
+ bl exit_el1_irq_or_nmi
636
+ .endm
637
+
638
+ .macro el0_interrupt_handler, handler:req
639
+ user_exit_irqoff
640
+ enable_da_f
641
+
642
+ tbz x22, #55, 1f
643
+ bl do_el0_irq_bp_hardening
644
+1:
645
+ irq_handler \handler
443646 .endm
444647
445648 .text
....@@ -450,7 +653,7 @@
450653 .pushsection ".entry.text", "ax"
451654
452655 .align 11
453
-ENTRY(vectors)
656
+SYM_CODE_START(vectors)
454657 kernel_ventry 1, sync_invalid // Synchronous EL1t
455658 kernel_ventry 1, irq_invalid // IRQ EL1t
456659 kernel_ventry 1, fiq_invalid // FIQ EL1t
....@@ -477,7 +680,7 @@
477680 kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
478681 kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
479682 #endif
480
-END(vectors)
683
+SYM_CODE_END(vectors)
481684
482685 #ifdef CONFIG_VMAP_STACK
483686 /*
....@@ -519,445 +722,161 @@
519722 ASM_BUG()
520723 .endm
521724
522
-el0_sync_invalid:
725
+SYM_CODE_START_LOCAL(el0_sync_invalid)
523726 inv_entry 0, BAD_SYNC
524
-ENDPROC(el0_sync_invalid)
727
+SYM_CODE_END(el0_sync_invalid)
525728
526
-el0_irq_invalid:
729
+SYM_CODE_START_LOCAL(el0_irq_invalid)
527730 inv_entry 0, BAD_IRQ
528
-ENDPROC(el0_irq_invalid)
731
+SYM_CODE_END(el0_irq_invalid)
529732
530
-el0_fiq_invalid:
733
+SYM_CODE_START_LOCAL(el0_fiq_invalid)
531734 inv_entry 0, BAD_FIQ
532
-ENDPROC(el0_fiq_invalid)
735
+SYM_CODE_END(el0_fiq_invalid)
533736
534
-el0_error_invalid:
737
+SYM_CODE_START_LOCAL(el0_error_invalid)
535738 inv_entry 0, BAD_ERROR
536
-ENDPROC(el0_error_invalid)
739
+SYM_CODE_END(el0_error_invalid)
537740
538741 #ifdef CONFIG_COMPAT
539
-el0_fiq_invalid_compat:
742
+SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
540743 inv_entry 0, BAD_FIQ, 32
541
-ENDPROC(el0_fiq_invalid_compat)
744
+SYM_CODE_END(el0_fiq_invalid_compat)
542745 #endif
543746
544
-el1_sync_invalid:
747
+SYM_CODE_START_LOCAL(el1_sync_invalid)
545748 inv_entry 1, BAD_SYNC
546
-ENDPROC(el1_sync_invalid)
749
+SYM_CODE_END(el1_sync_invalid)
547750
548
-el1_irq_invalid:
751
+SYM_CODE_START_LOCAL(el1_irq_invalid)
549752 inv_entry 1, BAD_IRQ
550
-ENDPROC(el1_irq_invalid)
753
+SYM_CODE_END(el1_irq_invalid)
551754
552
-el1_fiq_invalid:
755
+SYM_CODE_START_LOCAL(el1_fiq_invalid)
553756 inv_entry 1, BAD_FIQ
554
-ENDPROC(el1_fiq_invalid)
757
+SYM_CODE_END(el1_fiq_invalid)
555758
556
-el1_error_invalid:
759
+SYM_CODE_START_LOCAL(el1_error_invalid)
557760 inv_entry 1, BAD_ERROR
558
-ENDPROC(el1_error_invalid)
761
+SYM_CODE_END(el1_error_invalid)
559762
560763 /*
561764 * EL1 mode handlers.
562765 */
563766 .align 6
564
-el1_sync:
767
+SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
565768 kernel_entry 1
566
- mrs x1, esr_el1 // read the syndrome register
567
- lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
568
- cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
569
- b.eq el1_da
570
- cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
571
- b.eq el1_ia
572
- cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
573
- b.eq el1_undef
574
- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
575
- b.eq el1_sp_pc
576
- cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
577
- b.eq el1_sp_pc
578
- cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
579
- b.eq el1_undef
580
- cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
581
- b.ge el1_dbg
582
- b el1_inv
583
-
584
-el1_ia:
585
- /*
586
- * Fall through to the Data abort case
587
- */
588
-el1_da:
589
- /*
590
- * Data abort handling
591
- */
592
- mrs x3, far_el1
593
- inherit_daif pstate=x23, tmp=x2
594
- untagged_addr x0, x3
595
- mov x2, sp // struct pt_regs
596
- bl do_mem_abort
597
-
598
- kernel_exit 1
599
-el1_sp_pc:
600
- /*
601
- * Stack or PC alignment exception handling
602
- */
603
- mrs x0, far_el1
604
- inherit_daif pstate=x23, tmp=x2
605
- mov x2, sp
606
- bl do_sp_pc_abort
607
- ASM_BUG()
608
-el1_undef:
609
- /*
610
- * Undefined instruction
611
- */
612
- inherit_daif pstate=x23, tmp=x2
613769 mov x0, sp
614
- bl do_undefinstr
770
+ bl el1_sync_handler
615771 kernel_exit 1
616
-el1_dbg:
617
- /*
618
- * Debug exception handling
619
- */
620
- cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
621
- cinc x24, x24, eq // set bit '0'
622
- tbz x24, #0, el1_inv // EL1 only
623
- mrs x0, far_el1
624
- mov x2, sp // struct pt_regs
625
- bl do_debug_exception
626
- kernel_exit 1
627
-el1_inv:
628
- // TODO: add support for undefined instructions in kernel mode
629
- inherit_daif pstate=x23, tmp=x2
630
- mov x0, sp
631
- mov x2, x1
632
- mov x1, #BAD_SYNC
633
- bl bad_mode
634
- ASM_BUG()
635
-ENDPROC(el1_sync)
772
+SYM_CODE_END(el1_sync)
636773
637774 .align 6
638
-el1_irq:
775
+SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
639776 kernel_entry 1
640
- enable_da_f
641
-#ifdef CONFIG_TRACE_IRQFLAGS
642
- bl trace_hardirqs_off
643
-#endif
644
-
645
- irq_handler
646
-
647
-#ifdef CONFIG_PREEMPT
648
- ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
649
- cbnz w24, 2f // preempt count != 0
650
- ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
651
- tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
652
-
653
- ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
654
- cbnz w24, 2f // preempt lazy count != 0
655
- tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
656
-1:
657
- bl el1_preempt
658
-2:
659
-#endif
660
-#ifdef CONFIG_TRACE_IRQFLAGS
661
- bl trace_hardirqs_on
662
-#endif
777
+ el1_interrupt_handler handle_arch_irq
663778 kernel_exit 1
664
-ENDPROC(el1_irq)
665
-
666
-#ifdef CONFIG_PREEMPT
667
-el1_preempt:
668
- mov x24, lr
669
-1: bl preempt_schedule_irq // irq en/disable is done inside
670
- ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
671
- tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
672
- tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
673
- ret x24
674
-#endif
779
+SYM_CODE_END(el1_irq)
675780
676781 /*
677782 * EL0 mode handlers.
678783 */
679784 .align 6
680
-el0_sync:
785
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
681786 kernel_entry 0
682
- mrs x25, esr_el1 // read the syndrome register
683
- lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
684
- cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state
685
- b.eq el0_svc
686
- cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
687
- b.eq el0_da
688
- cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
689
- b.eq el0_ia
690
- cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
691
- b.eq el0_fpsimd_acc
692
- cmp x24, #ESR_ELx_EC_SVE // SVE access
693
- b.eq el0_sve_acc
694
- cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
695
- b.eq el0_fpsimd_exc
696
- cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
697
- b.eq el0_sys
698
- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
699
- b.eq el0_sp_pc
700
- cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
701
- b.eq el0_sp_pc
702
- cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
703
- b.eq el0_undef
704
- cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
705
- b.ge el0_dbg
706
- b el0_inv
787
+ mov x0, sp
788
+ bl el0_sync_handler
789
+ b ret_to_user
790
+SYM_CODE_END(el0_sync)
707791
708792 #ifdef CONFIG_COMPAT
709793 .align 6
710
-el0_sync_compat:
794
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
711795 kernel_entry 0, 32
712
- mrs x25, esr_el1 // read the syndrome register
713
- lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
714
- cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state
715
- b.eq el0_svc_compat
716
- cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
717
- b.eq el0_da
718
- cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
719
- b.eq el0_ia
720
- cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
721
- b.eq el0_fpsimd_acc
722
- cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
723
- b.eq el0_fpsimd_exc
724
- cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
725
- b.eq el0_sp_pc
726
- cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
727
- b.eq el0_undef
728
- cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
729
- b.eq el0_undef
730
- cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
731
- b.eq el0_undef
732
- cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
733
- b.eq el0_undef
734
- cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
735
- b.eq el0_undef
736
- cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap
737
- b.eq el0_undef
738
- cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
739
- b.ge el0_dbg
740
- b el0_inv
741
-el0_svc_compat:
742796 mov x0, sp
743
- bl el0_svc_compat_handler
797
+ bl el0_sync_compat_handler
744798 b ret_to_user
799
+SYM_CODE_END(el0_sync_compat)
745800
746801 .align 6
747
-el0_irq_compat:
802
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
748803 kernel_entry 0, 32
749804 b el0_irq_naked
805
+SYM_CODE_END(el0_irq_compat)
750806
751
-el0_error_compat:
807
+SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
752808 kernel_entry 0, 32
753809 b el0_error_naked
810
+SYM_CODE_END(el0_error_compat)
754811 #endif
755
-
756
-el0_da:
757
- /*
758
- * Data abort handling
759
- */
760
- mrs x26, far_el1
761
- enable_daif
762
- ct_user_exit
763
- untagged_addr x0, x26
764
- mov x1, x25
765
- mov x2, sp
766
- bl do_mem_abort
767
- b ret_to_user
768
-el0_ia:
769
- /*
770
- * Instruction abort handling
771
- */
772
- mrs x26, far_el1
773
- enable_da_f
774
-#ifdef CONFIG_TRACE_IRQFLAGS
775
- bl trace_hardirqs_off
776
-#endif
777
- ct_user_exit
778
- mov x0, x26
779
- mov x1, x25
780
- mov x2, sp
781
- bl do_el0_ia_bp_hardening
782
- b ret_to_user
783
-el0_fpsimd_acc:
784
- /*
785
- * Floating Point or Advanced SIMD access
786
- */
787
- enable_daif
788
- ct_user_exit
789
- mov x0, x25
790
- mov x1, sp
791
- bl do_fpsimd_acc
792
- b ret_to_user
793
-el0_sve_acc:
794
- /*
795
- * Scalable Vector Extension access
796
- */
797
- enable_daif
798
- ct_user_exit
799
- mov x0, x25
800
- mov x1, sp
801
- bl do_sve_acc
802
- b ret_to_user
803
-el0_fpsimd_exc:
804
- /*
805
- * Floating Point, Advanced SIMD or SVE exception
806
- */
807
- enable_daif
808
- ct_user_exit
809
- mov x0, x25
810
- mov x1, sp
811
- bl do_fpsimd_exc
812
- b ret_to_user
813
-el0_sp_pc:
814
- /*
815
- * Stack or PC alignment exception handling
816
- */
817
- mrs x26, far_el1
818
- enable_da_f
819
-#ifdef CONFIG_TRACE_IRQFLAGS
820
- bl trace_hardirqs_off
821
-#endif
822
- ct_user_exit
823
- mov x0, x26
824
- mov x1, x25
825
- mov x2, sp
826
- bl do_sp_pc_abort
827
- b ret_to_user
828
-el0_undef:
829
- /*
830
- * Undefined instruction
831
- */
832
- enable_daif
833
- ct_user_exit
834
- mov x0, sp
835
- bl do_undefinstr
836
- b ret_to_user
837
-el0_sys:
838
- /*
839
- * System instructions, for trapped cache maintenance instructions
840
- */
841
- enable_daif
842
- ct_user_exit
843
- mov x0, x25
844
- mov x1, sp
845
- bl do_sysinstr
846
- b ret_to_user
847
-el0_dbg:
848
- /*
849
- * Debug exception handling
850
- */
851
- tbnz x24, #0, el0_inv // EL0 only
852
- mrs x0, far_el1
853
- mov x1, x25
854
- mov x2, sp
855
- bl do_debug_exception
856
- enable_da_f
857
- ct_user_exit
858
- b ret_to_user
859
-el0_inv:
860
- enable_daif
861
- ct_user_exit
862
- mov x0, sp
863
- mov x1, #BAD_SYNC
864
- mov x2, x25
865
- bl bad_el0_sync
866
- b ret_to_user
867
-ENDPROC(el0_sync)
868812
869813 .align 6
870
-el0_irq:
814
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
871815 kernel_entry 0
872816 el0_irq_naked:
873
- enable_da_f
874
-#ifdef CONFIG_TRACE_IRQFLAGS
875
- bl trace_hardirqs_off
876
-#endif
877
-
878
- ct_user_exit
879
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
880
- tbz x22, #55, 1f
881
- bl do_el0_irq_bp_hardening
882
-1:
883
-#endif
884
- irq_handler
885
-
886
-#ifdef CONFIG_TRACE_IRQFLAGS
887
- bl trace_hardirqs_on
888
-#endif
817
+ el0_interrupt_handler handle_arch_irq
889818 b ret_to_user
890
-ENDPROC(el0_irq)
819
+SYM_CODE_END(el0_irq)
891820
892
-el1_error:
821
+SYM_CODE_START_LOCAL(el1_error)
893822 kernel_entry 1
894823 mrs x1, esr_el1
895824 enable_dbg
896825 mov x0, sp
897826 bl do_serror
898827 kernel_exit 1
899
-ENDPROC(el1_error)
828
+SYM_CODE_END(el1_error)
900829
901
-el0_error:
830
+SYM_CODE_START_LOCAL(el0_error)
902831 kernel_entry 0
903832 el0_error_naked:
904
- mrs x1, esr_el1
833
+ mrs x25, esr_el1
834
+ user_exit_irqoff
905835 enable_dbg
906836 mov x0, sp
837
+ mov x1, x25
907838 bl do_serror
908839 enable_da_f
909
- ct_user_exit
910840 b ret_to_user
911
-ENDPROC(el0_error)
841
+SYM_CODE_END(el0_error)
842
+
843
+/*
844
+ * "slow" syscall return path.
845
+ */
846
+SYM_CODE_START_LOCAL(ret_to_user)
847
+ disable_daif
848
+ gic_prio_kentry_setup tmp=x3
849
+#ifdef CONFIG_TRACE_IRQFLAGS
850
+ bl trace_hardirqs_off
851
+#endif
852
+ ldr x19, [tsk, #TSK_TI_FLAGS]
853
+ and x2, x19, #_TIF_WORK_MASK
854
+ cbnz x2, work_pending
855
+finish_ret_to_user:
856
+ user_enter_irqoff
857
+ enable_step_tsk x19, x2
858
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
859
+ bl stackleak_erase
860
+#endif
861
+ kernel_exit 0
912862
913863 /*
914864 * Ok, we need to do extra processing, enter the slow path.
915865 */
916866 work_pending:
917867 mov x0, sp // 'regs'
868
+ mov x1, x19
918869 bl do_notify_resume
919
-#ifdef CONFIG_TRACE_IRQFLAGS
920
- bl trace_hardirqs_on // enabled while in userspace
921
-#endif
922
- ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
870
+ ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
923871 b finish_ret_to_user
924
-/*
925
- * "slow" syscall return path.
926
- */
927
-ret_to_user:
928
- disable_daif
929
- ldr x1, [tsk, #TSK_TI_FLAGS]
930
- and x2, x1, #_TIF_WORK_MASK
931
- cbnz x2, work_pending
932
-finish_ret_to_user:
933
- enable_step_tsk x1, x2
934
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
935
- bl stackleak_erase
936
-#endif
937
- kernel_exit 0
938
-ENDPROC(ret_to_user)
939
-
940
-/*
941
- * SVC handler.
942
- */
943
- .align 6
944
-el0_svc:
945
- mov x0, sp
946
- bl el0_svc_handler
947
- b ret_to_user
948
-ENDPROC(el0_svc)
872
+SYM_CODE_END(ret_to_user)
949873
950874 .popsection // .entry.text
951875
952
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
953
-/*
954
- * Exception vectors trampoline.
955
- */
956
- .pushsection ".entry.tramp.text", "ax"
957
-
876
+ // Move from tramp_pg_dir to swapper_pg_dir
958877 .macro tramp_map_kernel, tmp
959878 mrs \tmp, ttbr1_el1
960
- add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
879
+ add \tmp, \tmp, #(2 * PAGE_SIZE)
961880 bic \tmp, \tmp, #USER_ASID_FLAG
962881 msr ttbr1_el1, \tmp
963882 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
....@@ -974,9 +893,10 @@
974893 #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
975894 .endm
976895
896
+ // Move from swapper_pg_dir to tramp_pg_dir
977897 .macro tramp_unmap_kernel, tmp
978898 mrs \tmp, ttbr1_el1
979
- sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
899
+ sub \tmp, \tmp, #(2 * PAGE_SIZE)
980900 orr \tmp, \tmp, #USER_ASID_FLAG
981901 msr ttbr1_el1, \tmp
982902 /*
....@@ -986,12 +906,47 @@
986906 */
987907 .endm
988908
989
- .macro tramp_ventry, regsize = 64
909
+ .macro tramp_data_page dst
910
+ adr_l \dst, .entry.tramp.text
911
+ sub \dst, \dst, PAGE_SIZE
912
+ .endm
913
+
914
+ .macro tramp_data_read_var dst, var
915
+#ifdef CONFIG_RANDOMIZE_BASE
916
+ tramp_data_page \dst
917
+ add \dst, \dst, #:lo12:__entry_tramp_data_\var
918
+ ldr \dst, [\dst]
919
+#else
920
+ ldr \dst, =\var
921
+#endif
922
+ .endm
923
+
924
+#define BHB_MITIGATION_NONE 0
925
+#define BHB_MITIGATION_LOOP 1
926
+#define BHB_MITIGATION_FW 2
927
+#define BHB_MITIGATION_INSN 3
928
+
929
+ .macro tramp_ventry, vector_start, regsize, kpti, bhb
990930 .align 7
991931 1:
992932 .if \regsize == 64
993933 msr tpidrro_el0, x30 // Restored in kernel_ventry
994934 .endif
935
+
936
+ .if \bhb == BHB_MITIGATION_LOOP
937
+ /*
938
+ * This sequence must appear before the first indirect branch. i.e. the
939
+ * ret out of tramp_ventry. It appears here because x30 is free.
940
+ */
941
+ __mitigate_spectre_bhb_loop x30
942
+ .endif // \bhb == BHB_MITIGATION_LOOP
943
+
944
+ .if \bhb == BHB_MITIGATION_INSN
945
+ clearbhb
946
+ isb
947
+ .endif // \bhb == BHB_MITIGATION_INSN
948
+
949
+ .if \kpti == 1
995950 /*
996951 * Defend against branch aliasing attacks by pushing a dummy
997952 * entry onto the return stack and using a RET instruction to
....@@ -1001,65 +956,140 @@
1001956 b .
1002957 2:
1003958 tramp_map_kernel x30
1004
-#ifdef CONFIG_RANDOMIZE_BASE
1005
- adr x30, tramp_vectors + PAGE_SIZE
1006959 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1007
- ldr x30, [x30]
1008
-#else
1009
- ldr x30, =vectors
1010
-#endif
1011
- prfm plil1strm, [x30, #(1b - tramp_vectors)]
960
+ tramp_data_read_var x30, vectors
961
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
962
+ prfm plil1strm, [x30, #(1b - \vector_start)]
963
+alternative_else_nop_endif
964
+
1012965 msr vbar_el1, x30
1013
- add x30, x30, #(1b - tramp_vectors)
1014966 isb
967
+ .else
968
+ ldr x30, =vectors
969
+ .endif // \kpti == 1
970
+
971
+ .if \bhb == BHB_MITIGATION_FW
972
+ /*
973
+ * The firmware sequence must appear before the first indirect branch.
974
+ * i.e. the ret out of tramp_ventry. But it also needs the stack to be
975
+ * mapped to save/restore the registers the SMC clobbers.
976
+ */
977
+ __mitigate_spectre_bhb_fw
978
+ .endif // \bhb == BHB_MITIGATION_FW
979
+
980
+ add x30, x30, #(1b - \vector_start + 4)
1015981 ret
982
+.org 1b + 128 // Did we overflow the ventry slot?
1016983 .endm
1017984
1018985 .macro tramp_exit, regsize = 64
1019
- adr x30, tramp_vectors
986
+ tramp_data_read_var x30, this_cpu_vector
987
+ this_cpu_offset x29
988
+ ldr x30, [x30, x29]
989
+
1020990 msr vbar_el1, x30
1021
- tramp_unmap_kernel x30
991
+ ldr lr, [sp, #S_LR]
992
+ tramp_unmap_kernel x29
1022993 .if \regsize == 64
1023
- mrs x30, far_el1
994
+ mrs x29, far_el1
1024995 .endif
996
+ add sp, sp, #S_FRAME_SIZE // restore sp
1025997 eret
1026998 sb
1027999 .endm
10281000
1029
- .align 11
1030
-ENTRY(tramp_vectors)
1001
+ .macro generate_tramp_vector, kpti, bhb
1002
+.Lvector_start\@:
10311003 .space 0x400
10321004
1033
- tramp_ventry
1034
- tramp_ventry
1035
- tramp_ventry
1036
- tramp_ventry
1005
+ .rept 4
1006
+ tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
1007
+ .endr
1008
+ .rept 4
1009
+ tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
1010
+ .endr
1011
+ .endm
10371012
1038
- tramp_ventry 32
1039
- tramp_ventry 32
1040
- tramp_ventry 32
1041
- tramp_ventry 32
1042
-END(tramp_vectors)
1013
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1014
+/*
1015
+ * Exception vectors trampoline.
1016
+ * The order must match __bp_harden_el1_vectors and the
1017
+ * arm64_bp_harden_el1_vectors enum.
1018
+ */
1019
+ .pushsection ".entry.tramp.text", "ax"
1020
+ .align 11
1021
+SYM_CODE_START_NOALIGN(tramp_vectors)
1022
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
1023
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
1024
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
1025
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
1026
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
1027
+ generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
1028
+SYM_CODE_END(tramp_vectors)
10431029
1044
-ENTRY(tramp_exit_native)
1030
+SYM_CODE_START(tramp_exit_native)
10451031 tramp_exit
1046
-END(tramp_exit_native)
1032
+SYM_CODE_END(tramp_exit_native)
10471033
1048
-ENTRY(tramp_exit_compat)
1034
+SYM_CODE_START(tramp_exit_compat)
10491035 tramp_exit 32
1050
-END(tramp_exit_compat)
1036
+SYM_CODE_END(tramp_exit_compat)
10511037
10521038 .ltorg
10531039 .popsection // .entry.tramp.text
10541040 #ifdef CONFIG_RANDOMIZE_BASE
10551041 .pushsection ".rodata", "a"
10561042 .align PAGE_SHIFT
1057
- .globl __entry_tramp_data_start
1058
-__entry_tramp_data_start:
1043
+SYM_DATA_START(__entry_tramp_data_start)
1044
+__entry_tramp_data_vectors:
10591045 .quad vectors
1046
+#ifdef CONFIG_ARM_SDE_INTERFACE
1047
+__entry_tramp_data___sdei_asm_handler:
1048
+ .quad __sdei_asm_handler
1049
+#endif /* CONFIG_ARM_SDE_INTERFACE */
1050
+__entry_tramp_data_this_cpu_vector:
1051
+ .quad this_cpu_vector
1052
+SYM_DATA_END(__entry_tramp_data_start)
10601053 .popsection // .rodata
10611054 #endif /* CONFIG_RANDOMIZE_BASE */
10621055 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1056
+
1057
+/*
1058
+ * Exception vectors for spectre mitigations on entry from EL1 when
1059
+ * kpti is not in use.
1060
+ */
1061
+ .macro generate_el1_vector, bhb
1062
+.Lvector_start\@:
1063
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
1064
+ kernel_ventry 1, irq_invalid // IRQ EL1t
1065
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
1066
+ kernel_ventry 1, error_invalid // Error EL1t
1067
+
1068
+ kernel_ventry 1, sync // Synchronous EL1h
1069
+ kernel_ventry 1, irq // IRQ EL1h
1070
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
1071
+ kernel_ventry 1, error // Error EL1h
1072
+
1073
+ .rept 4
1074
+ tramp_ventry .Lvector_start\@, 64, 0, \bhb
1075
+ .endr
1076
+ .rept 4
1077
+ tramp_ventry .Lvector_start\@, 32, 0, \bhb
1078
+ .endr
1079
+ .endm
1080
+
1081
+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
1082
+ .pushsection ".entry.text", "ax"
1083
+ .align 11
1084
+SYM_CODE_START(__bp_harden_el1_vectors)
1085
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
1086
+ generate_el1_vector bhb=BHB_MITIGATION_LOOP
1087
+ generate_el1_vector bhb=BHB_MITIGATION_FW
1088
+ generate_el1_vector bhb=BHB_MITIGATION_INSN
1089
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
1090
+SYM_CODE_END(__bp_harden_el1_vectors)
1091
+ .popsection
1092
+
10631093
10641094 /*
10651095 * Register switch for AArch64. The callee-saved registers need to be saved
....@@ -1069,7 +1099,7 @@
10691099 * Previous and next are guaranteed not to be the same.
10701100 *
10711101 */
1072
-ENTRY(cpu_switch_to)
1102
+SYM_FUNC_START(cpu_switch_to)
10731103 mov x10, #THREAD_CPU_CONTEXT
10741104 add x8, x0, x10
10751105 mov x9, sp
....@@ -1090,26 +1120,24 @@
10901120 ldr lr, [x8]
10911121 mov sp, x9
10921122 msr sp_el0, x1
1093
-#ifdef CONFIG_SHADOW_CALL_STACK
1094
- str x18, [x0, #TSK_TI_SCS]
1095
- ldr x18, [x1, #TSK_TI_SCS]
1096
- str xzr, [x1, #TSK_TI_SCS] // limit visibility of saved SCS
1097
-#endif
1123
+ ptrauth_keys_install_kernel x1, x8, x9, x10
1124
+ scs_save x0, x8
1125
+ scs_load_current
10981126 ret
1099
-ENDPROC(cpu_switch_to)
1127
+SYM_FUNC_END(cpu_switch_to)
11001128 NOKPROBE(cpu_switch_to)
11011129
11021130 /*
11031131 * This is how we return from a fork.
11041132 */
1105
-ENTRY(ret_from_fork)
1133
+SYM_CODE_START(ret_from_fork)
11061134 bl schedule_tail
11071135 cbz x19, 1f // not a kernel thread
11081136 mov x0, x20
11091137 blr x19
1110
-1: get_thread_info tsk
1138
+1: get_current_task tsk
11111139 b ret_to_user
1112
-ENDPROC(ret_from_fork)
1140
+SYM_CODE_END(ret_from_fork)
11131141 NOKPROBE(ret_from_fork)
11141142
11151143 #ifdef CONFIG_ARM_SDE_INTERFACE
....@@ -1138,7 +1166,7 @@
11381166 */
11391167 .ltorg
11401168 .pushsection ".entry.tramp.text", "ax"
1141
-ENTRY(__sdei_asm_entry_trampoline)
1169
+SYM_CODE_START(__sdei_asm_entry_trampoline)
11421170 mrs x4, ttbr1_el1
11431171 tbz x4, #USER_ASID_BIT, 1f
11441172
....@@ -1152,15 +1180,9 @@
11521180 */
11531181 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
11541182
1155
-#ifdef CONFIG_RANDOMIZE_BASE
1156
- adr x4, tramp_vectors + PAGE_SIZE
1157
- add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1158
- ldr x4, [x4]
1159
-#else
1160
- ldr x4, =__sdei_asm_handler
1161
-#endif
1183
+ tramp_data_read_var x4, __sdei_asm_handler
11621184 br x4
1163
-ENDPROC(__sdei_asm_entry_trampoline)
1185
+SYM_CODE_END(__sdei_asm_entry_trampoline)
11641186 NOKPROBE(__sdei_asm_entry_trampoline)
11651187
11661188 /*
....@@ -1170,23 +1192,17 @@
11701192 * x2: exit_mode
11711193 * x4: struct sdei_registered_event argument from registration time.
11721194 */
1173
-ENTRY(__sdei_asm_exit_trampoline)
1195
+SYM_CODE_START(__sdei_asm_exit_trampoline)
11741196 ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
11751197 cbnz x4, 1f
11761198
11771199 tramp_unmap_kernel tmp=x4
11781200
11791201 1: sdei_handler_exit exit_mode=x2
1180
-ENDPROC(__sdei_asm_exit_trampoline)
1202
+SYM_CODE_END(__sdei_asm_exit_trampoline)
11811203 NOKPROBE(__sdei_asm_exit_trampoline)
11821204 .ltorg
11831205 .popsection // .entry.tramp.text
1184
-#ifdef CONFIG_RANDOMIZE_BASE
1185
-.pushsection ".rodata", "a"
1186
-__sdei_asm_trampoline_next_handler:
1187
- .quad __sdei_asm_handler
1188
-.popsection // .rodata
1189
-#endif /* CONFIG_RANDOMIZE_BASE */
11901206 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
11911207
11921208 /*
....@@ -1202,7 +1218,7 @@
12021218 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
12031219 * want them.
12041220 */
1205
-ENTRY(__sdei_asm_handler)
1221
+SYM_CODE_START(__sdei_asm_handler)
12061222 stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
12071223 stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
12081224 stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
....@@ -1222,13 +1238,20 @@
12221238
12231239 mov x19, x1
12241240
1241
+ /* Store the registered-event for crash_smp_send_stop() */
1242
+ ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
1243
+ cbnz w4, 1f
1244
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
1245
+ b 2f
1246
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
1247
+2: str x19, [x5]
1248
+
12251249 #ifdef CONFIG_VMAP_STACK
12261250 /*
12271251 * entry.S may have been using sp as a scratch register, find whether
12281252 * this is a normal or critical event and switch to the appropriate
12291253 * stack for this CPU.
12301254 */
1231
- ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
12321255 cbnz w4, 1f
12331256 ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
12341257 b 2f
....@@ -1236,6 +1259,15 @@
12361259 2: mov x6, #SDEI_STACK_SIZE
12371260 add x5, x5, x6
12381261 mov sp, x5
1262
+#endif
1263
+
1264
+#ifdef CONFIG_SHADOW_CALL_STACK
1265
+ /* Use a separate shadow call stack for normal and critical events */
1266
+ cbnz w4, 3f
1267
+ ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
1268
+ b 4f
1269
+3: ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
1270
+4:
12391271 #endif
12401272
12411273 /*
....@@ -1277,14 +1309,33 @@
12771309
12781310 ldr_l x2, sdei_exit_mode
12791311
1312
+ /* Clear the registered-event seen by crash_smp_send_stop() */
1313
+ ldrb w3, [x4, #SDEI_EVENT_PRIORITY]
1314
+ cbnz w3, 1f
1315
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
1316
+ b 2f
1317
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
1318
+2: str xzr, [x5]
1319
+
12801320 alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
12811321 sdei_handler_exit exit_mode=x2
12821322 alternative_else_nop_endif
12831323
12841324 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1285
- tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
1325
+ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
12861326 br x5
12871327 #endif
1288
-ENDPROC(__sdei_asm_handler)
1328
+SYM_CODE_END(__sdei_asm_handler)
12891329 NOKPROBE(__sdei_asm_handler)
1330
+
1331
+SYM_CODE_START(__sdei_handler_abort)
1332
+ mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1333
+ adr x1, 1f
1334
+ ldr_l x2, sdei_exit_mode
1335
+ sdei_handler_exit exit_mode=x2
1336
+ // exit the handler and jump to the next instruction.
1337
+ // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
1338
+1: ret
1339
+SYM_CODE_END(__sdei_handler_abort)
1340
+NOKPROBE(__sdei_handler_abort)
12901341 #endif /* CONFIG_ARM_SDE_INTERFACE */