forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/powerpc/kernel/exceptions-64e.S
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Boot code and exception vectors for Book3E processors
  *
  * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */

 #include <linux/threads.h>
@@ -28,6 +24,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_booke_hv_asm.h>
 #include <asm/feature-fixups.h>
+#include <asm/context_tracking.h>

 /* XXX This will ultimately add space for a special exception save
  * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
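
Note: the new include supplies the SCHEDULE_USER macro used by the
ret_from_except_lite code added further down. For reference, upstream's
arch/powerpc/include/asm/context_tracking.h is essentially this one
switch (a sketch; this tree's copy may differ):

    #ifdef CONFIG_CONTEXT_TRACKING
    #define SCHEDULE_USER	bl	schedule_user
    #else
    #define SCHEDULE_USER	bl	schedule
    #endif
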
@@ -76,17 +73,6 @@
 	ld	r3,_MSR(r1)
 	andi.	r3,r3,MSR_PR
 	bnelr
-
-	/* Copy info into temporary exception thread info */
-	ld	r11,PACAKSAVE(r13)
-	CURRENT_THREAD_INFO(r11, r11)
-	CURRENT_THREAD_INFO(r12, r1)
-	ld	r10,TI_FLAGS(r11)
-	std	r10,TI_FLAGS(r12)
-	ld	r10,TI_PREEMPT(r11)
-	std	r10,TI_PREEMPT(r12)
-	ld	r10,TI_TASK(r11)
-	std	r10,TI_TASK(r12)

 	/*
 	 * Advance to the next TLB exception frame for handler
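
Note: this copy becomes unnecessary once thread_info stops living at the
base of the kernel stack. With CONFIG_THREAD_INFO_IN_TASK, thread_info is
embedded at the start of task_struct, so conceptually (a sketch of the
generic definition, not code from this diff):

    /* thread_info is the first member of task_struct, so the current
     * task pointer doubles as the thread_info pointer and there is no
     * per-stack copy to keep in sync. */
    #define current_thread_info() ((struct thread_info *)current)
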
@@ -505,7 +491,7 @@
  * interrupts happen before the wait instruction.
  */
 #define CHECK_NAPPING()						\
-	CURRENT_THREAD_INFO(r11, r1);				\
+	ld	r11, PACA_THREAD_INFO(r13);			\
 	ld	r10,TI_LOCAL_FLAGS(r11);			\
 	andi.	r9,r10,_TLF_NAPPING;				\
 	beq+	1f;						\
@@ -765,12 +751,14 @@
 	ld	r15,PACATOC(r13)
 	ld	r14,interrupt_base_book3e@got(r15)
 	ld	r15,__end_interrupts@got(r15)
-#else
-	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
-	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
-#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
+#else
+	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
+	cmpld	cr0, r10, r14
+	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
+	cmpld	cr1, r10, r14
+#endif
 	blt+	cr0,1f
 	bge+	cr1,1f

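
Note: unlike LOAD_REG_IMMEDIATE, LOAD_REG_IMMEDIATE_SYM needs a scratch
register, and r15 doubles as that scratch here, so each cmpld is issued as
soon as its bound is materialised rather than after both loads. A rough C
model of what the macro computes (a sketch assuming the usual
lis/ori/rldimi expansion):

    #include <stdint.h>

    static uint64_t load_reg_immediate_sym(uint64_t sym)
    {
    	uint64_t tmp, reg;

    	tmp = ((sym >> 48) & 0xffff) << 16;  /* lis  tmp,sym@highest  */
    	tmp |= (sym >> 32) & 0xffff;         /* ori  tmp,tmp,sym@higher */
    	reg = ((sym >> 16) & 0xffff) << 16;  /* lis  reg,sym@h        */
    	reg |= sym & 0xffff;                 /* ori  reg,reg,sym@l    */
    	return (tmp << 32) | reg;            /* rldimi reg,tmp,32,0   */
    }
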
....@@ -835,12 +823,14 @@
835823 ld r15,PACATOC(r13)
836824 ld r14,interrupt_base_book3e@got(r15)
837825 ld r15,__end_interrupts@got(r15)
838
-#else
839
- LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
840
- LOAD_REG_IMMEDIATE(r15,__end_interrupts)
841
-#endif
842826 cmpld cr0,r10,r14
843827 cmpld cr1,r10,r15
828
+#else
829
+ LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
830
+ cmpld cr0, r10, r14
831
+ LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts)
832
+ cmpld cr1, r10, r14
833
+#endif
844834 blt+ cr0,1f
845835 bge+ cr1,1f
846836
@@ -998,7 +988,6 @@
 .endm

 masked_interrupt_book3e_0x500:
-	// XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
 	masked_interrupt_book3e PACA_IRQ_EE 1

 masked_interrupt_book3e_0x900:
@@ -1012,38 +1001,6 @@
 masked_interrupt_book3e_0x280:
 masked_interrupt_book3e_0x2c0:
 	masked_interrupt_book3e PACA_IRQ_DBELL 0
-
-/*
- * Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
- * to indicate the kind of interrupt. MSR:EE is already off.
- * We generate a stackframe like if a real interrupt had happened.
- *
- * Note: While MSR:EE is off, we need to make sure that _MSR
- * in the generated frame has EE set to 1 or the exception
- * handler will not properly re-enable them.
- */
-_GLOBAL(__replay_interrupt)
-	/* We are going to jump to the exception common code which
-	 * will retrieve various register values from the PACA which
-	 * we don't give a damn about.
-	 */
-	mflr	r10
-	mfmsr	r11
-	mfcr	r4
-	mtspr	SPRN_SPRG_GEN_SCRATCH,r13;
-	std	r1,PACA_EXGEN+EX_R1(r13);
-	stw	r4,PACA_EXGEN+EX_CR(r13);
-	ori	r11,r11,MSR_EE
-	subi	r1,r1,INT_FRAME_SIZE;
-	cmpwi	cr0,r3,0x500
-	beq	exc_0x500_common
-	cmpwi	cr0,r3,0x900
-	beq	exc_0x900_common
-	cmpwi	cr0,r3,0x280
-	beq	exc_0x280_common
-	blr
-

 /*
  * This is called from 0x300 and 0x400 handlers after the prologs with
@@ -1084,17 +1041,169 @@
 	bl	alignment_exception
 	b	ret_from_except

-/*
- * We branch here from entry_64.S for the last stage of the exception
- * return code path. MSR:EE is expected to be off at that point
- */
-_GLOBAL(exception_return_book3e)
-	b	1f
+	.align	7
+_GLOBAL(ret_from_except)
+	ld	r11,_TRAP(r1)
+	andi.	r0,r11,1
+	bne	ret_from_except_lite
+	REST_NVGPRS(r1)
+
+_GLOBAL(ret_from_except_lite)
+	/*
+	 * Disable interrupts so that current_thread_info()->flags
+	 * can't change between when we test it and when we return
+	 * from the interrupt.
+	 */
+	wrteei	0
+
+	ld	r9, PACA_THREAD_INFO(r13)
+	ld	r3,_MSR(r1)
+	ld	r10,PACACURRENT(r13)
+	ld	r4,TI_FLAGS(r9)
+	andi.	r3,r3,MSR_PR
+	beq	resume_kernel
+	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
+
+	/* Check current_thread_info()->flags */
+	andi.	r0,r4,_TIF_USER_WORK_MASK
+	bne	1f
+	/*
+	 * Check to see if the dbcr0 register is set up to debug.
+	 * Use the internal debug mode bit to do this.
+	 */
+	andis.	r0,r3,DBCR0_IDM@h
+	beq	restore
+	mfmsr	r0
+	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
+	mtmsr	r0
+	mtspr	SPRN_DBCR0,r3
+	li	r10, -1
+	mtspr	SPRN_DBSR,r10
+	b	restore
+1:	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
+	beq	2f
+	bl	restore_interrupts
+	SCHEDULE_USER
+	b	ret_from_except_lite
+2:
+	bl	save_nvgprs
+	/*
+	 * Use a non volatile GPR to save and restore our thread_info flags
+	 * across the call to restore_interrupts.
+	 */
+	mr	r30,r4
+	bl	restore_interrupts
+	mr	r4,r30
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	do_notify_resume
+	b	ret_from_except
+
+resume_kernel:
+	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
+	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
+	beq+	1f
+
+	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
+
+	ld	r3,GPR1(r1)
+	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
+	mr	r4,r1			/* src: current exception frame */
+	mr	r1,r3			/* Reroute the trampoline frame to r1 */
+
+	/* Copy from the original to the trampoline. */
+	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
+	li	r6,0			/* start offset: 0 */
+	mtctr	r5
+2:	ldx	r0,r6,r4
+	stdx	r0,r6,r3
+	addi	r6,r6,8
+	bdnz	2b
+
+	/* Do real store operation to complete stdu */
+	ld	r5,GPR1(r1)
+	std	r8,0(r5)
+
+	/* Clear _TIF_EMULATE_STACK_STORE flag */
+	lis	r11,_TIF_EMULATE_STACK_STORE@h
+	addi	r5,r9,TI_FLAGS
+0:	ldarx	r4,0,r5
+	andc	r4,r4,r11
+	stdcx.	r4,0,r5
+	bne-	0b
+1:
+
+#ifdef CONFIG_PREEMPTION
+	/* Check if we need to preempt */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	0,r8,0		/* if non-zero, just restore regs and return */
+	bne	restore
+	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne+	check_count
+
+	andi.	r0,r4,_TIF_NEED_RESCHED_LAZY
+	beq+	restore
+	lwz	r8,TI_PREEMPT_LAZY(r9)
+
+	/* Check that preempt_count() == 0 and interrupts are enabled */
+check_count:
+	cmpwi	cr0,r8,0
+	bne	restore
+	ld	r0,SOFTE(r1)
+	andi.	r0,r0,IRQS_DISABLED
+	bne	restore
+
+	/*
+	 * Here we are preempting the current task. We want to make
+	 * sure we are soft-disabled first and reconcile irq state.
+	 */
+	RECONCILE_IRQ_STATE(r3,r4)
+	bl	preempt_schedule_irq
+
+	/*
+	 * arch_local_irq_restore() from preempt_schedule_irq above may
+	 * enable hard interrupt but we really should disable interrupts
+	 * when we return from the interrupt, and so that we don't get
+	 * interrupted after loading SRR0/1.
+	 */
+	wrteei	0
+#endif /* CONFIG_PREEMPTION */
+
+restore:
+	/*
+	 * This is the main kernel exit path. First we check if we
+	 * are about to re-enable interrupts
+	 */
+	ld	r5,SOFTE(r1)
+	lbz	r6,PACAIRQSOFTMASK(r13)
+	andi.	r5,r5,IRQS_DISABLED
+	bne	.Lrestore_irq_off
+
+	/* We are enabling, were we already enabled ? Yes, just return */
+	andi.	r6,r6,IRQS_DISABLED
+	beq	cr0,fast_exception_return
+
+	/*
+	 * We are about to soft-enable interrupts (we are hard disabled
+	 * at this point). We check if there's anything that needs to
+	 * be replayed first.
+	 */
+	lbz	r0,PACAIRQHAPPENED(r13)
+	cmpwi	cr0,r0,0
+	bne-	.Lrestore_check_irq_replay
+
+	/*
+	 * Get here when nothing happened while soft-disabled, just
+	 * soft-enable and move-on. We will hard-enable as a side
+	 * effect of rfi
+	 */
+.Lrestore_no_replay:
+	TRACE_ENABLE_INTS
+	li	r0,IRQS_ENABLED
+	stb	r0,PACAIRQSOFTMASK(r13);

 /* This is the return from load_up_fpu fast path which could do with
  * less GPR restores in fact, but for now we have a single return path
  */
-	.globl fast_exception_return
 fast_exception_return:
 	wrteei	0
 1:	mr	r0,r13
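
Note: the ldarx/andc/stdcx. loop in resume_kernel above is the classic
load-reserve/store-conditional pattern for atomically clearing one flag
bit. In C11 terms it is a fetch-and-AND-NOT; the retry loop is folded
into the atomic (the bit value below is hypothetical, for illustration
only):

    #include <stdatomic.h>
    #include <stdint.h>

    #define TIF_EMULATE_STACK_STORE_BIT	(1UL << 16)	/* hypothetical */

    static void clear_emulate_stack_store(_Atomic uint64_t *ti_flags)
    {
    	/* ldarx / andc / stdcx. / bne- in one C11 operation */
    	atomic_fetch_and(ti_flags, ~TIF_EMULATE_STACK_STORE_BIT);
    }
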
@@ -1134,6 +1243,92 @@
 	ld	r11,PACA_EXGEN+EX_R11(r13)
 	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
 	rfi
+
+	/*
+	 * We are returning to a context with interrupts soft disabled.
+	 *
+	 * However, we may also about to hard enable, so we need to
+	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+	 * or that bit can get out of sync and bad things will happen
+	 */
+.Lrestore_irq_off:
+	ld	r3,_MSR(r1)
+	lbz	r7,PACAIRQHAPPENED(r13)
+	andi.	r0,r3,MSR_EE
+	beq	1f
+	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
+	stb	r7,PACAIRQHAPPENED(r13)
+1:
+#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
+	/* The interrupt should not have soft enabled. */
+	lbz	r7,PACAIRQSOFTMASK(r13)
+1:	tdeqi	r7,IRQS_ENABLED
+	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
+	b	fast_exception_return
+
+	/*
+	 * Something did happen, check if a re-emit is needed
+	 * (this also clears paca->irq_happened)
+	 */
+.Lrestore_check_irq_replay:
+	/* XXX: We could implement a fast path here where we check
+	 * for irq_happened being just 0x01, in which case we can
+	 * clear it and return. That means that we would potentially
+	 * miss a decrementer having wrapped all the way around.
+	 *
+	 * Still, this might be useful for things like hash_page
+	 */
+	bl	__check_irq_replay
+	cmpwi	cr0,r3,0
+	beq	.Lrestore_no_replay
+
+	/*
+	 * We need to re-emit an interrupt. We do so by re-using our
+	 * existing exception frame. We first change the trap value,
+	 * but we need to ensure we preserve the low nibble of it
+	 */
+	ld	r4,_TRAP(r1)
+	clrldi	r4,r4,60
+	or	r4,r4,r3
+	std	r4,_TRAP(r1)
+
+	/*
+	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
+	 * to reconcile the IRQ state. Tracing is already accounted for.
+	 */
+	lbz	r4,PACAIRQHAPPENED(r13)
+	ori	r4,r4,PACA_IRQ_HARD_DIS
+	stb	r4,PACAIRQHAPPENED(r13)
+
+	/*
+	 * Then find the right handler and call it. Interrupts are
+	 * still soft-disabled and we keep them that way.
+	 */
+	cmpwi	cr0,r3,0x500
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	do_IRQ
+	b	ret_from_except
+1:	cmpwi	cr0,r3,0x900
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	timer_interrupt
+	b	ret_from_except
+#ifdef CONFIG_PPC_DOORBELL
+1:
+	cmpwi	cr0,r3,0x280
+	bne	1f
+	addi	r3,r1,STACK_FRAME_OVERHEAD;
+	bl	doorbell_exception
+#endif /* CONFIG_PPC_DOORBELL */
+1:	b	ret_from_except /* What else to do here ? */
+
+_ASM_NOKPROBE_SYMBOL(ret_from_except);
+_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
+_ASM_NOKPROBE_SYMBOL(resume_kernel);
+_ASM_NOKPROBE_SYMBOL(restore);
+_ASM_NOKPROBE_SYMBOL(fast_exception_return);

 /*
  * Trampolines used when spotting a bad kernel stack pointer in
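
Note: .Lrestore_irq_off, .Lrestore_check_irq_replay and .Lrestore_no_replay
above implement the lazy (soft-masked) interrupt exit state machine. The
decision tree, modelled in C with stand-in types (names follow the asm,
not a real kernel API):

    #include <stdint.h>

    #define IRQS_DISABLED		1
    #define IRQS_ENABLED		0
    #define MSR_EE			0x8000
    #define PACA_IRQ_HARD_DIS	0x10

    struct fake_paca {
    	uint8_t irq_soft_mask;	/* PACAIRQSOFTMASK */
    	uint8_t irq_happened;	/* PACAIRQHAPPENED */
    };

    static void restore_sketch(uint64_t softe, uint64_t msr,
    			   struct fake_paca *paca)
    {
    	if (softe & IRQS_DISABLED) {
    		/* .Lrestore_irq_off: returning soft-disabled; if the
    		 * target context runs with EE set we are no longer
    		 * hard-disabled, so clear PACA_IRQ_HARD_DIS to match. */
    		if (msr & MSR_EE)
    			paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
    	} else if (paca->irq_happened) {
    		/* .Lrestore_check_irq_replay: something fired while
    		 * soft-disabled; call its handler directly (do_IRQ,
    		 * timer_interrupt or doorbell_exception) before exit. */
    	} else {
    		/* .Lrestore_no_replay: nothing pending; soft-enable
    		 * and let rfi hard-enable as a side effect. */
    		paca->irq_soft_mask = IRQS_ENABLED;
    	}
    }
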
@@ -1357,16 +1552,6 @@
 	sync
 	isync

-/*
- * The mapping only needs to be cache-coherent on SMP, except on
- * Freescale e500mc derivatives where it's also needed for coherent DMA.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
-#define M_IF_NEEDED	MAS2_M
-#else
-#define M_IF_NEEDED	0
-#endif
-
 /* 6. Setup KERNELBASE mapping in TLB[0]
  *
  * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
@@ -1379,7 +1564,7 @@
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
 	mtspr	SPRN_MAS1,r6

-	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
+	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
 	mtspr	SPRN_MAS2,r6

 	rlwinm	r5,r5,0,0,25
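
Note: the local M_IF_NEEDED definition removed above is replaced by a
shared MAS2_M_IF_NEEDED, which upstream appears to live in
arch/powerpc/include/asm/nohash/mmu-book3e.h with the same SMP/e500mc
coherency rule (a sketch):

    #if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
    #define MAS2_M_IF_NEEDED	MAS2_M
    #else
    #define MAS2_M_IF_NEEDED	0
    #endif
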
@@ -1464,7 +1649,7 @@
 a2_tlbinit_after_linear_map:

 /* Now we branch the new virtual address mapped by this entry */
-	LOAD_REG_IMMEDIATE(r3,1f)
+	LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
 	mtctr	r3
 	bctr
