hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/arm/kernel/entry-armv.S
....@@ -1,13 +1,10 @@
1
+/* SPDX-License-Identifier: GPL-2.0-only */
12 /*
23 * linux/arch/arm/kernel/entry-armv.S
34 *
45 * Copyright (C) 1996,1997,1998 Russell King.
56 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
67 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
7
- *
8
- * This program is free software; you can redistribute it and/or modify
9
- * it under the terms of the GNU General Public License version 2 as
10
- * published by the Free Software Foundation.
118 *
129 * Low-level vector interface routines
1310 *
....@@ -207,7 +204,7 @@
207204 svc_entry
208205 irq_handler
209206
210
-#ifdef CONFIG_PREEMPT
207
+#ifdef CONFIG_PREEMPTION
211208 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
212209 ldr r0, [tsk, #TI_FLAGS] @ get flags
213210 teq r8, #0 @ if preempt count != 0
....@@ -222,7 +219,7 @@
222219
223220 .ltorg
224221
225
-#ifdef CONFIG_PREEMPT
222
+#ifdef CONFIG_PREEMPTION
226223 svc_preempt:
227224 mov r8, lr
228225 1: bl preempt_schedule_irq @ irq en/disable is done inside
....@@ -255,31 +252,10 @@
255252 #else
256253 svc_entry
257254 #endif
258
- @
259
- @ call emulation code, which returns using r9 if it has emulated
260
- @ the instruction, or the more conventional lr if we are to treat
261
- @ this as a real undefined instruction
262
- @
263
- @ r0 - instruction
264
- @
265
-#ifndef CONFIG_THUMB2_KERNEL
266
- ldr r0, [r4, #-4]
267
-#else
268
- mov r1, #2
269
- ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
270
- cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
271
- blo __und_svc_fault
272
- ldrh r9, [r4] @ bottom 16 bits
273
- add r4, r4, #2
274
- str r4, [sp, #S_PC]
275
- orr r0, r9, r0, lsl #16
276
-#endif
277
- badr r9, __und_svc_finish
278
- mov r2, r4
279
- bl call_fpe
280255
281256 mov r1, #4 @ PC correction to apply
282
-__und_svc_fault:
257
+ THUMB( tst r5, #PSR_T_BIT ) @ exception taken in Thumb mode?
258
+ THUMB( movne r1, #2 ) @ if so, fix up PC correction
283259 mov r0, sp @ struct pt_regs *regs
284260 bl __und_fault
285261
....@@ -627,7 +603,7 @@
627603 @ Test if we need to give access to iWMMXt coprocessors
628604 ldr r5, [r10, #TI_FLAGS]
629605 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
630
- movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
606
+ movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1)
631607 bcs iwmmxt_task_enable
632608 #endif
633609 ARM( add pc, pc, r8, lsr #6 )
....@@ -820,7 +796,7 @@
820796 * existing ones. This mechanism should be used only for things that are
821797 * really small and justified, and not be abused freely.
822798 *
823
- * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
799
+ * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
824800 */
825801 THUMB( .arm )
826802
....@@ -863,7 +839,7 @@
863839 smp_dmb arm
864840 1: ldrexd r0, r1, [r2] @ load current val
865841 eors r3, r0, r4 @ compare with oldval (1)
866
- eoreqs r3, r1, r5 @ compare with oldval (2)
842
+ eorseq r3, r1, r5 @ compare with oldval (2)
867843 strexdeq r3, r6, r7, [r2] @ store newval if eq
868844 teqeq r3, #1 @ success?
869845 beq 1b @ if no then retry
....@@ -887,8 +863,8 @@
887863 ldmia r1, {r6, lr} @ load new val
888864 1: ldmia r2, {r0, r1} @ load current val
889865 eors r3, r0, r4 @ compare with oldval (1)
890
- eoreqs r3, r1, r5 @ compare with oldval (2)
891
-2: stmeqia r2, {r6, lr} @ store newval if eq
866
+ eorseq r3, r1, r5 @ compare with oldval (2)
867
+2: stmiaeq r2, {r6, lr} @ store newval if eq
892868 rsbs r0, r3, #0 @ set return val and C flag
893869 ldmfd sp!, {r4, r5, r6, pc}
894870
....@@ -902,7 +878,7 @@
902878 mov r7, #0xffff0fff
903879 sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
904880 subs r8, r4, r7
905
- rsbcss r8, r8, #(2b - 1b)
881
+ rsbscs r8, r8, #(2b - 1b)
906882 strcs r7, [sp, #S_PC]
907883 #if __LINUX_ARM_ARCH__ < 6
908884 bcc kuser_cmpxchg32_fixup
....@@ -960,7 +936,7 @@
960936 mov r7, #0xffff0fff
961937 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
962938 subs r8, r4, r7
963
- rsbcss r8, r8, #(2b - 1b)
939
+ rsbscs r8, r8, #(2b - 1b)
964940 strcs r7, [sp, #S_PC]
965941 ret lr
966942 .previous
....@@ -1029,12 +1005,11 @@
10291005 sub lr, lr, #\correction
10301006 .endif
10311007
1032
- @
1033
- @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
1034
- @ (parent CPSR)
1035
- @
1008
+ @ Save r0, lr_<exception> (parent PC)
10361009 stmia sp, {r0, lr} @ save r0, lr
1037
- mrs lr, spsr
1010
+
1011
+ @ Save spsr_<exception> (parent CPSR)
1012
+2: mrs lr, spsr
10381013 str lr, [sp, #8] @ save spsr
10391014
10401015 @
....@@ -1055,6 +1030,44 @@
10551030 movs pc, lr @ branch to handler in SVC mode
10561031 ENDPROC(vector_\name)
10571032
1033
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
1034
+ .subsection 1
1035
+ .align 5
1036
+vector_bhb_loop8_\name:
1037
+ .if \correction
1038
+ sub lr, lr, #\correction
1039
+ .endif
1040
+
1041
+ @ Save r0, lr_<exception> (parent PC)
1042
+ stmia sp, {r0, lr}
1043
+
1044
+ @ bhb workaround
1045
+ mov r0, #8
1046
+3: W(b) . + 4
1047
+ subs r0, r0, #1
1048
+ bne 3b
1049
+ dsb
1050
+ isb
1051
+ b 2b
1052
+ENDPROC(vector_bhb_loop8_\name)
1053
+
1054
+vector_bhb_bpiall_\name:
1055
+ .if \correction
1056
+ sub lr, lr, #\correction
1057
+ .endif
1058
+
1059
+ @ Save r0, lr_<exception> (parent PC)
1060
+ stmia sp, {r0, lr}
1061
+
1062
+ @ bhb workaround
1063
+ mcr p15, 0, r0, c7, c5, 6 @ BPIALL
1064
+ @ isb not needed due to "movs pc, lr" in the vector stub
1065
+ @ which gives a "context synchronisation".
1066
+ b 2b
1067
+ENDPROC(vector_bhb_bpiall_\name)
1068
+ .previous
1069
+#endif
1070
+
10581071 .align 2
10591072 @ handler addresses follow this label
10601073 1:
....@@ -1063,6 +1076,10 @@
10631076 .section .stubs, "ax", %progbits
10641077 @ This must be the first word
10651078 .word vector_swi
1079
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
1080
+ .word vector_bhb_loop8_swi
1081
+ .word vector_bhb_bpiall_swi
1082
+#endif
10661083
10671084 vector_rst:
10681085 ARM( swi SYS_ERROR0 )
....@@ -1177,8 +1194,10 @@
11771194 * FIQ "NMI" handler
11781195 *-----------------------------------------------------------------------------
11791196 * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
1180
- * systems.
1197
+ * systems. This must be the last vector stub, so let's place it in its own
1198
+ * subsection.
11811199 */
1200
+ .subsection 2
11821201 vector_stub fiq, FIQ_MODE, 4
11831202
11841203 .long __fiq_usr @ 0 (USR_26 / USR_32)
....@@ -1211,6 +1230,30 @@
12111230 W(b) vector_irq
12121231 W(b) vector_fiq
12131232
1233
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
1234
+ .section .vectors.bhb.loop8, "ax", %progbits
1235
+.L__vectors_bhb_loop8_start:
1236
+ W(b) vector_rst
1237
+ W(b) vector_bhb_loop8_und
1238
+ W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004
1239
+ W(b) vector_bhb_loop8_pabt
1240
+ W(b) vector_bhb_loop8_dabt
1241
+ W(b) vector_addrexcptn
1242
+ W(b) vector_bhb_loop8_irq
1243
+ W(b) vector_bhb_loop8_fiq
1244
+
1245
+ .section .vectors.bhb.bpiall, "ax", %progbits
1246
+.L__vectors_bhb_bpiall_start:
1247
+ W(b) vector_rst
1248
+ W(b) vector_bhb_bpiall_und
1249
+ W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008
1250
+ W(b) vector_bhb_bpiall_pabt
1251
+ W(b) vector_bhb_bpiall_dabt
1252
+ W(b) vector_addrexcptn
1253
+ W(b) vector_bhb_bpiall_irq
1254
+ W(b) vector_bhb_bpiall_fiq
1255
+#endif
1256
+
12141257 .data
12151258 .align 2
12161259