2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/powerpc/kernel/exceptions-64e.S
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Boot code and exception vectors for Book3E processors
  *
  * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */

 #include <linux/threads.h>
@@ -28,6 +24,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_booke_hv_asm.h>
 #include <asm/feature-fixups.h>
+#include <asm/context_tracking.h>

 /* XXX This will ultimately add space for a special exception save
  * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -76,17 +73,6 @@
 	ld	r3,_MSR(r1)
 	andi.	r3,r3,MSR_PR
 	bnelr
-
-	/* Copy info into temporary exception thread info */
-	ld	r11,PACAKSAVE(r13)
-	CURRENT_THREAD_INFO(r11, r11)
-	CURRENT_THREAD_INFO(r12, r1)
-	ld	r10,TI_FLAGS(r11)
-	std	r10,TI_FLAGS(r12)
-	ld	r10,TI_PREEMPT(r11)
-	std	r10,TI_PREEMPT(r12)
-	ld	r10,TI_TASK(r11)
-	std	r10,TI_TASK(r12)

 	/*
 	 * Advance to the next TLB exception frame for handler
@@ -505,7 +491,7 @@
  * interrupts happen before the wait instruction.
  */
 #define CHECK_NAPPING()						\
-	CURRENT_THREAD_INFO(r11, r1);				\
+	ld	r11, PACA_THREAD_INFO(r13);			\
 	ld	r10,TI_LOCAL_FLAGS(r11);			\
 	andi.	r9,r10,_TLF_NAPPING;				\
 	beq+	1f;						\
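
Note: this hunk and the one above it are the same cleanup. With CONFIG_THREAD_INFO_IN_TASK, thread_info is embedded at offset zero of task_struct, so it no longer lives at the base of the kernel stack and no longer has to be copied into a temporary frame or derived by masking r1 with CURRENT_THREAD_INFO(); a single load from the per-CPU PACA (r13) replaces it. A minimal C sketch of the layout this relies on, with a simplified field set (not the kernel's full definitions):

	struct thread_info {
		unsigned long flags;		/* TI_FLAGS */
		int preempt_count;		/* TI_PREEMPT */
	};

	struct task_struct {
		struct thread_info thread_info;	/* must remain the first member */
		/* ... */
	};

	struct paca_struct {
		struct task_struct *__current;	/* per-CPU pointer to current */
		/* ... */
	};

	/* Old scheme: mask the stack pointer to find thread_info.
	 * New scheme: one load from the paca, as in the asm above. */
	static inline struct thread_info *current_thread_info_sketch(struct paca_struct *paca)
	{
		return &paca->__current->thread_info;
	}

Because thread_info sits at offset zero of task_struct, the asm-offsets constant PACA_THREAD_INFO can simply be the offset of the current-task pointer within the paca, which is what "ld r11, PACA_THREAD_INFO(r13)" reads.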
@@ -765,12 +751,14 @@
 	ld	r15,PACATOC(r13)
 	ld	r14,interrupt_base_book3e@got(r15)
 	ld	r15,__end_interrupts@got(r15)
-#else
-	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
-	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
-#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
+#else
+	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
+	cmpld	cr0, r10, r14
+	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
+	cmpld	cr1, r10, r14
+#endif
 	blt+	cr0,1f
 	bge+	cr1,1f

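
Note: in this hunk and the matching one below, the non-relocatable path switches from LOAD_REG_IMMEDIATE() to LOAD_REG_IMMEDIATE_SYM(), which takes an explicit scratch register (r15 here). Since building each 64-bit address clobbers that scratch, the compares are reordered so each bound is tested as soon as it is materialized, reusing r14 for both symbols. A hedged C sketch of how a 64-bit immediate is assembled from 16-bit pieces, in the spirit of the lis/ori/rldimi sequence such a macro emits (the exact expansion may differ):

	#include <stdint.h>

	/* dst collects the low 32 bits, tmp the high 32 bits, then the
	 * two halves are merged - which is why the macro needs a scratch
	 * register alongside the destination. */
	static uint64_t load_reg_immediate_sym(uint64_t sym)
	{
		uint32_t tmp, dst;

		tmp  = (uint32_t)(sym >> 48) << 16;		/* lis  tmp,sym@highest */
		tmp |= (uint32_t)(sym >> 32) & 0xffff;		/* ori  tmp,tmp,sym@higher */
		dst  = ((uint32_t)(sym >> 16) & 0xffff) << 16;	/* lis  dst,sym@h */
		dst |= (uint32_t)sym & 0xffff;			/* ori  dst,dst,sym@l */
		return ((uint64_t)tmp << 32) | dst;		/* rldimi dst,tmp,32,0 */
	}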
@@ -835,12 +823,14 @@
 	ld	r15,PACATOC(r13)
 	ld	r14,interrupt_base_book3e@got(r15)
 	ld	r15,__end_interrupts@got(r15)
-#else
-	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
-	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
-#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
+#else
+	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
+	cmpld	cr0, r10, r14
+	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
+	cmpld	cr1, r10, r14
+#endif
 	blt+	cr0,1f
 	bge+	cr1,1f

@@ -998,7 +988,6 @@
 .endm

 masked_interrupt_book3e_0x500:
-	// XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
 	masked_interrupt_book3e PACA_IRQ_EE 1

 masked_interrupt_book3e_0x900:
@@ -1012,38 +1001,6 @@
 masked_interrupt_book3e_0x280:
 masked_interrupt_book3e_0x2c0:
 	masked_interrupt_book3e PACA_IRQ_DBELL 0
-
-/*
- * Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
- * to indicate the kind of interrupt. MSR:EE is already off.
- * We generate a stackframe like if a real interrupt had happened.
- *
- * Note: While MSR:EE is off, we need to make sure that _MSR
- * in the generated frame has EE set to 1 or the exception
- * handler will not properly re-enable them.
- */
-_GLOBAL(__replay_interrupt)
-	/* We are going to jump to the exception common code which
-	 * will retrieve various register values from the PACA which
-	 * we don't give a damn about.
-	 */
-	mflr	r10
-	mfmsr	r11
-	mfcr	r4
-	mtspr	SPRN_SPRG_GEN_SCRATCH,r13;
-	std	r1,PACA_EXGEN+EX_R1(r13);
-	stw	r4,PACA_EXGEN+EX_CR(r13);
-	ori	r11,r11,MSR_EE
-	subi	r1,r1,INT_FRAME_SIZE;
-	cmpwi	cr0,r3,0x500
-	beq	exc_0x500_common
-	cmpwi	cr0,r3,0x900
-	beq	exc_0x900_common
-	cmpwi	cr0,r3,0x280
-	beq	exc_0x280_common
-	blr
-

 /*
  * This is called from 0x300 and 0x400 handlers after the prologs with
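
Note: __replay_interrupt goes away because, as the later hunks show, replay of interrupts that arrived while soft-masked is now handled on the exception-return path itself (.Lrestore_check_irq_replay) instead of through a trampoline called from arch_local_irq_enable. A simplified C model of the lazy soft-masking idea, with _model names and illustrative bit values standing in for the real kernel code:

	#define IRQS_ENABLED	0x00		/* illustrative values */
	#define IRQS_DISABLED	0x01
	#define PACA_IRQ_EE	0x04		/* external, vector 0x500 */
	#define PACA_IRQ_DEC	0x08		/* decrementer, vector 0x900 */

	struct paca_model {
		unsigned char irq_soft_mask;	/* PACAIRQSOFTMASK */
		unsigned char irq_happened;	/* PACAIRQHAPPENED */
	};

	/* An interrupt taken while soft-masked only latches a bit ... */
	static void masked_interrupt_model(struct paca_model *p, unsigned char src)
	{
		p->irq_happened |= src;
	}

	/* ... and soft-enabling must replay whatever was latched. */
	static void soft_irq_enable_model(struct paca_model *p, void (*replay)(int vec))
	{
		p->irq_soft_mask = IRQS_ENABLED;
		if (p->irq_happened & PACA_IRQ_EE) {
			p->irq_happened &= (unsigned char)~PACA_IRQ_EE;
			replay(0x500);	/* previously __replay_interrupt's job */
		}
		if (p->irq_happened & PACA_IRQ_DEC) {
			p->irq_happened &= (unsigned char)~PACA_IRQ_DEC;
			replay(0x900);
		}
	}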
@@ -1084,17 +1041,161 @@
 	bl	alignment_exception
 	b	ret_from_except

-/*
- * We branch here from entry_64.S for the last stage of the exception
- * return code path. MSR:EE is expected to be off at that point
- */
-_GLOBAL(exception_return_book3e)
-	b	1f
+	.align	7
+_GLOBAL(ret_from_except)
+	ld	r11,_TRAP(r1)
+	andi.	r0,r11,1
+	bne	ret_from_except_lite
+	REST_NVGPRS(r1)
+
+_GLOBAL(ret_from_except_lite)
+	/*
+	 * Disable interrupts so that current_thread_info()->flags
+	 * can't change between when we test it and when we return
+	 * from the interrupt.
+	 */
+	wrteei	0
+
+	ld	r9, PACA_THREAD_INFO(r13)
+	ld	r3,_MSR(r1)
+	ld	r10,PACACURRENT(r13)
+	ld	r4,TI_FLAGS(r9)
+	andi.	r3,r3,MSR_PR
+	beq	resume_kernel
+	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
+
+	/* Check current_thread_info()->flags */
+	andi.	r0,r4,_TIF_USER_WORK_MASK
+	bne	1f
+	/*
+	 * Check to see if the dbcr0 register is set up to debug.
+	 * Use the internal debug mode bit to do this.
+	 */
+	andis.	r0,r3,DBCR0_IDM@h
+	beq	restore
+	mfmsr	r0
+	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
+	mtmsr	r0
+	mtspr	SPRN_DBCR0,r3
+	li	r10, -1
+	mtspr	SPRN_DBSR,r10
+	b	restore
+1:	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq	2f
+	bl	restore_interrupts
+	SCHEDULE_USER
+	b	ret_from_except_lite
+2:
+	bl	save_nvgprs
+	/*
+	 * Use a non volatile GPR to save and restore our thread_info flags
+	 * across the call to restore_interrupts.
+	 */
+	mr	r30,r4
+	bl	restore_interrupts
+	mr	r4,r30
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	do_notify_resume
+	b	ret_from_except
+
+resume_kernel:
+	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
+	beq+	1f
+
+	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
+
+	ld	r3,GPR1(r1)
+	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
+	mr	r4,r1			/* src: current exception frame */
+	mr	r1,r3			/* Reroute the trampoline frame to r1 */
+
+	/* Copy from the original to the trampoline. */
+	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
+	li	r6,0			/* start offset: 0 */
+	mtctr	r5
+2:	ldx	r0,r6,r4
+	stdx	r0,r6,r3
+	addi	r6,r6,8
+	bdnz	2b
+
+	/* Do real store operation to complete stdu */
+	ld	r5,GPR1(r1)
+	std	r8,0(r5)
+
+	/* Clear _TIF_EMULATE_STACK_STORE flag */
+	lis	r11,_TIF_EMULATE_STACK_STORE@h
+	addi	r5,r9,TI_FLAGS
+0:	ldarx	r4,0,r5
+	andc	r4,r4,r11
+	stdcx.	r4,0,r5
+	bne-	0b
+1:
+
+#ifdef CONFIG_PREEMPT
+	/* Check if we need to preempt */
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	beq+	restore
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	cr0,r8,0
+	bne	restore
+	ld	r0,SOFTE(r1)
+	andi.	r0,r0,IRQS_DISABLED
+	bne	restore
+
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first and reconcile irq state.
+	 */
+	RECONCILE_IRQ_STATE(r3,r4)
+	bl	preempt_schedule_irq
+
+	/*
+	 * arch_local_irq_restore() from preempt_schedule_irq above may
+	 * enable hard interrupt but we really should disable interrupts
+	 * when we return from the interrupt, and so that we don't get
+	 * interrupted after loading SRR0/1.
+	 */
+	wrteei	0
+#endif /* CONFIG_PREEMPT */
+
+restore:
+	/*
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
+	 */
+	ld	r5,SOFTE(r1)
+	lbz	r6,PACAIRQSOFTMASK(r13)
+	andi.	r5,r5,IRQS_DISABLED
+	bne	.Lrestore_irq_off
+
+	/* We are enabling, were we already enabled ? Yes, just return */
+	andi.	r6,r6,IRQS_DISABLED
+	beq	cr0,fast_exception_return
+
+	/*
+	 * We are about to soft-enable interrupts (we are hard disabled
+	 * at this point). We check if there's anything that needs to
+	 * be replayed first.
+	 */
+	lbz	r0,PACAIRQHAPPENED(r13)
+	cmpwi	cr0,r0,0
+	bne-	.Lrestore_check_irq_replay
+
+	/*
+	 * Get here when nothing happened while soft-disabled, just
+	 * soft-enable and move-on. We will hard-enable as a side
+	 * effect of rfi
+	 */
+.Lrestore_no_replay:
+	TRACE_ENABLE_INTS
+	li	r0,IRQS_ENABLED
+	stb	r0,PACAIRQSOFTMASK(r13);

 /* This is the return from load_up_fpu fast path which could do with
  * less GPR restores in fact, but for now we have a single return path
  */
-	.globl	fast_exception_return
 fast_exception_return:
 	wrteei	0
 1:	mr	r0,r13
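
Note: the bulk of the hunk above is the interrupt-exit path (previously shared via entry_64.S, now carried in this file), and its control flow is easier to see in C. A hedged model follows; the _model suffixes mark stand-ins for the asm and kernel helpers, and the bit values are illustrative (not the kernel's actual functions or constants):

	#define _TIF_NEED_RESCHED	(1UL << 1)	/* illustrative bits */
	#define _TIF_USER_WORK_MASK	(_TIF_NEED_RESCHED | (1UL << 2))

	struct pt_regs_model { unsigned long msr; };

	/* Stubs standing in for the asm and helpers used above. */
	static void hard_irq_disable_model(void) { }	/* wrteei 0 */
	static void irq_enable_model(void) { }		/* restore_interrupts */
	static void schedule_model(void) { }		/* SCHEDULE_USER */
	static void do_notify_resume_model(struct pt_regs_model *r) { (void)r; }
	static unsigned long thread_flags_model(void) { return 0; }

	/*
	 * Return to user space: with interrupts off, re-check the work
	 * flags; if anything is pending, re-enable interrupts, do the
	 * work, and loop, because new work may have arrived meanwhile.
	 */
	static void ret_from_except_lite_model(struct pt_regs_model *regs)
	{
		unsigned long flags;

		for (;;) {
			hard_irq_disable_model();
			flags = thread_flags_model();	/* ld r4,TI_FLAGS(r9) */
			if (!(flags & _TIF_USER_WORK_MASK))
				break;		/* go to restore, then rfi */
			irq_enable_model();
			if (flags & _TIF_NEED_RESCHED)
				schedule_model();
			else
				do_notify_resume_model(regs);	/* signals, etc. */
		}
		/* restore: soft-mask bookkeeping / irq replay, then rfi */
	}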
@@ -1134,6 +1235,92 @@
 	ld	r11,PACA_EXGEN+EX_R11(r13)
 	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
 	rfi
+
+	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+.Lrestore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:
+#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
+	/* The interrupt should not have soft enabled. */
+	lbz	r7,PACAIRQSOFTMASK(r13)
+1:	tdeqi	r7,IRQS_ENABLED
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
+	b	fast_exception_return
+
+	/*
+	 * Something did happen, check if a re-emit is needed
+	 * (this also clears paca->irq_happened)
+	 */
+.Lrestore_check_irq_replay:
+	/* XXX: We could implement a fast path here where we check
+	 * for irq_happened being just 0x01, in which case we can
+	 * clear it and return. That means that we would potentially
+	 * miss a decrementer having wrapped all the way around.
+	 *
+	 * Still, this might be useful for things like hash_page
+	 */
+	bl	__check_irq_replay
+	cmpwi	cr0,r3,0
+	beq	.Lrestore_no_replay
+
+	/*
+	 * We need to re-emit an interrupt. We do so by re-using our
+	 * existing exception frame. We first change the trap value,
+	 * but we need to ensure we preserve the low nibble of it
+	 */
+	ld	r4,_TRAP(r1)
+	clrldi	r4,r4,60
+	or	r4,r4,r3
+	std	r4,_TRAP(r1)
+
+	/*
+	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
+	 * to reconcile the IRQ state. Tracing is already accounted for.
+	 */
+	lbz	r4,PACAIRQHAPPENED(r13)
+	ori	r4,r4,PACA_IRQ_HARD_DIS
+	stb	r4,PACAIRQHAPPENED(r13)
+
+	/*
+	 * Then find the right handler and call it. Interrupts are
+	 * still soft-disabled and we keep them that way.
+	 */
+	cmpwi	cr0,r3,0x500
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	do_IRQ
+	b	ret_from_except
+1:	cmpwi	cr0,r3,0x900
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	timer_interrupt
+	b	ret_from_except
+#ifdef CONFIG_PPC_DOORBELL
+1:
+	cmpwi	cr0,r3,0x280
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	doorbell_exception
+#endif /* CONFIG_PPC_DOORBELL */
+1:	b	ret_from_except /* What else to do here ? */
+
+_ASM_NOKPROBE_SYMBOL(ret_from_except);
+_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
+_ASM_NOKPROBE_SYMBOL(resume_kernel);
+_ASM_NOKPROBE_SYMBOL(restore);
+_ASM_NOKPROBE_SYMBOL(fast_exception_return);

 /*
  * Trampolines used when spotting a bad kernel stack pointer in
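
Note: the replay block above reuses the live exception frame: _TRAP keeps its low nibble (used on ppc64 for frame-format flags) and receives the replayed vector number, then control dispatches to the matching handler. A compact C model of that dispatch (names with _model are stand-ins, not kernel functions):

	struct regs_model { unsigned long trap; };

	static void do_IRQ_model(struct regs_model *r) { (void)r; }
	static void timer_interrupt_model(struct regs_model *r) { (void)r; }
	static void doorbell_exception_model(struct regs_model *r) { (void)r; }

	/* vec comes from __check_irq_replay: 0x500, 0x900, 0x280, or 0. */
	static void replay_dispatch_model(struct regs_model *regs, int vec)
	{
		regs->trap = (regs->trap & 0xfUL) | vec; /* clrldi/or/std _TRAP */

		switch (vec) {
		case 0x500:			/* external interrupt */
			do_IRQ_model(regs);
			break;
		case 0x900:			/* decrementer */
			timer_interrupt_model(regs);
			break;
		case 0x280:			/* doorbell (CONFIG_PPC_DOORBELL) */
			doorbell_exception_model(regs);
			break;
		}
	}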
@@ -1357,16 +1544,6 @@
 	sync
 	isync

-/*
- * The mapping only needs to be cache-coherent on SMP, except on
- * Freescale e500mc derivatives where it's also needed for coherent DMA.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
-#define M_IF_NEEDED	MAS2_M
-#else
-#define M_IF_NEEDED	0
-#endif
-
 /* 6. Setup KERNELBASE mapping in TLB[0]
  *
  * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
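
Note: M_IF_NEEDED is not dropped outright; the next hunk uses the same constant under the name MAS2_M_IF_NEEDED, so the definition has evidently moved into a shared header (which header is not visible in this diff). Its shape would match the block removed above:

	/* Same logic as the removed block, under the new name; the
	 * hosting header is an assumption here. */
	#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
	#define MAS2_M_IF_NEEDED	MAS2_M
	#else
	#define MAS2_M_IF_NEEDED	0
	#endif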
@@ -1379,7 +1556,7 @@
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
 	mtspr	SPRN_MAS1,r6

-	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
+	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
 	mtspr	SPRN_MAS2,r6
 	rlwinm	r5,r5,0,0,25

@@ -1464,7 +1641,7 @@
 a2_tlbinit_after_linear_map:

 	/* Now we branch the new virtual address mapped by this entry */
-	LOAD_REG_IMMEDIATE(r3,1f)
+	LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
 	mtctr	r3
 	bctr
