2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/arch/powerpc/kernel/entry_32.S
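A recurring change below is the removal of the file-local LOAD_MSR_KERNEL() helper (which special-cased MSR values of 0x10000 and above) in favour of the generic LOAD_REG_IMMEDIATE(). A minimal sketch of a macro in the same spirit is shown here; it is illustrative only, the load_imm32 name is made up, and register names like r10 assume -mregnames or kernel-style register defines:

    /* Illustrative only: load a 32-bit immediate, using a single li
     * when the value fits a signed 16-bit field, else a lis/ori pair.
     */
    .macro load_imm32 reg, val
    .if (\val) >= -0x8000 && (\val) < 0x8000
        li      \reg, (\val)
    .else
        lis     \reg, (\val)@h
        ori     \reg, \reg, (\val)@l
    .endif
    .endm

        load_imm32 r10, 0x1032        /* small value: one instruction */
        load_imm32 r10, 0x00029032    /* large value: lis + ori */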
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -11,12 +12,6 @@
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
 */

 #include <linux/errno.h>
@@ -33,18 +28,18 @@
 #include <asm/unistd.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
-#include <asm/asm-405.h>
 #include <asm/feature-fixups.h>
 #include <asm/barrier.h>
+#include <asm/kup.h>
+#include <asm/bug.h>
+
+#include "head_32.h"

 /*
- * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
+ * powerpc relies on return from interrupt/syscall being context synchronising
+ * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
+ * synchronisation instructions.
 */
-#if MSR_KERNEL >= 0x10000
-#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
-#else
-#define LOAD_MSR_KERNEL(r, x) li r,(x)
-#endif

 /*
 * Align to 4k in order to ensure that all functions modyfing srr0/srr1
@@ -61,6 +56,7 @@
 mfspr r0,SPRN_DSRR1
 stw r0,_DSRR1(r11)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)

 .globl debug_transfer_to_handler
 debug_transfer_to_handler:
@@ -69,6 +65,7 @@
 mfspr r0,SPRN_CSRR1
 stw r0,_CSRR1(r11)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)

 .globl crit_transfer_to_handler
 crit_transfer_to_handler:
@@ -97,16 +94,14 @@
 mfspr r0,SPRN_SRR1
 stw r0,_SRR1(r11)

- /* set the stack limit to the current stack
- * and set the limit to protect the thread_info
- * struct
- */
+ /* set the stack limit to the current stack */
 mfspr r8,SPRN_SPRG_THREAD
 lwz r0,KSP_LIMIT(r8)
 stw r0,SAVED_KSP_LIMIT(r11)
- rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
 stw r0,KSP_LIMIT(r8)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif

 #ifdef CONFIG_40x
@@ -121,16 +116,14 @@
 mfspr r0,SPRN_SRR1
 stw r0,crit_srr1@l(0)

- /* set the stack limit to the current stack
- * and set the limit to protect the thread_info
- * struct
- */
+ /* set the stack limit to the current stack */
 mfspr r8,SPRN_SPRG_THREAD
 lwz r0,KSP_LIMIT(r8)
 stw r0,saved_ksp_limit@l(0)
- rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
 stw r0,KSP_LIMIT(r8)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif

 /*
@@ -143,6 +136,7 @@
 .globl transfer_to_handler_full
 transfer_to_handler_full:
 SAVE_NVGPRS(r11)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
 /* fall through */

 .globl transfer_to_handler
@@ -156,9 +150,9 @@
 stw r12,_CTR(r11)
 stw r2,_XER(r11)
 mfspr r12,SPRN_SPRG_THREAD
- addi r2,r12,-THREAD
- tovirt(r2,r2) /* set r2 to current */
+ tovirt_vmstack r12, r12
 beq 2f /* if from user, fix up THREAD.regs */
+ addi r2, r12, -THREAD
 addi r11,r1,STACK_FRAME_OVERHEAD
 stw r11,PT_REGS(r12)
 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
@@ -166,6 +160,12 @@
 internal debug mode bit to do this. */
 lwz r12,THREAD_DBCR0(r12)
 andis. r12,r12,DBCR0_IDM@h
+#endif
+ ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_lock r11, r12
+#endif
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 beq+ 3f
 /* From user and task is ptraced - load up global dbcr0 */
 li r12,-1 /* clear all pending debug events */
@@ -174,8 +174,7 @@
 tophys(r11,r11)
 addi r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_CPU(r9)
+ lwz r9,TASK_CPU(r2)
 slwi r9,r9,3
 add r11,r11,r9
 #endif
@@ -185,107 +184,120 @@
 addi r12,r12,-1
 stw r12,4(r11)
 #endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- CURRENT_THREAD_INFO(r9, r1)
- tophys(r9, r9)
- ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
-#endif

 b 3f

 2: /* if from kernel, check interrupted DOZE/NAP mode and
 * check for stack overflow
 */
+ kuap_save_and_lock r11, r12, r9, r2, r6
+ addi r2, r12, -THREAD
+#ifndef CONFIG_VMAP_STACK
 lwz r9,KSP_LIMIT(r12)
 cmplw r1,r9 /* if r1 <= ksp_limit */
 ble- stack_ovf /* then the kernel stack overflowed */
+#endif
 5:
-#if defined(CONFIG_6xx) || defined(CONFIG_E500)
- CURRENT_THREAD_INFO(r9, r1)
- tophys(r9,r9) /* check local flags */
- lwz r12,TI_LOCAL_FLAGS(r9)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+ lwz r12,TI_LOCAL_FLAGS(r2)
 mtcrf 0x01,r12
 bt- 31-TLF_NAPPING,4f
 bt- 31-TLF_SLEEPING,7f
-#endif /* CONFIG_6xx || CONFIG_E500 */
+#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
 .globl transfer_to_handler_cont
 transfer_to_handler_cont:
 3:
 mflr r9
+ tovirt_novmstack r2, r2 /* set r2 to current */
+ tovirt_vmstack r9, r9
 lwz r11,0(r9) /* virtual address of handler */
 lwz r9,4(r9) /* where to go when done */
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 mtspr SPRN_NRI, r0
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * When tracing IRQ state (lockdep) we enable the MMU before we call
+ * the IRQ tracing functions as they might access vmalloc space or
+ * perform IOs for console output.
+ *
+ * To speed up the syscall path where interrupts stay on, let's check
+ * first if we are changing the MSR value at all.
+ */
+ tophys_novmstack r12, r1
+ lwz r12,_MSR(r12)
+ andi. r12,r12,MSR_EE
+ bne 1f
+
+ /* MSR isn't changing, just transition directly */
+#endif
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r10
+ mtlr r9
+ RFI /* jump to handler, enable MMU */
+
+#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+4: rlwinm r12,r12,0,~_TLF_NAPPING
+ stw r12,TI_LOCAL_FLAGS(r2)
+ b power_save_ppc32_restore
+
+7: rlwinm r12,r12,0,~_TLF_SLEEPING
+ stw r12,TI_LOCAL_FLAGS(r2)
+ lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
+ rlwinm r9,r9,0,~MSR_EE
+ lwz r12,_LINK(r11) /* and return to address in LR */
+ kuap_restore r11, r2, r3, r4, r5
+ lwz r2, GPR2(r11)
+ b fast_exception_return
+#endif
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
+ * keep interrupts disabled at this point otherwise we might risk
+ * taking an interrupt before we tell lockdep they are enabled.
+ */
 lis r12,reenable_mmu@h
 ori r12,r12,reenable_mmu@l
+ LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
 mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r10
- SYNC
+ mtspr SPRN_SRR1,r0
 RFI
-reenable_mmu: /* re-enable mmu so we can */
- mfmsr r10
- lwz r12,_MSR(r1)
- xor r10,r10,r12
- andi. r10,r10,MSR_EE /* Did EE change? */
- beq 1f

+reenable_mmu:
 /*
- * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
- * If from user mode there is only one stack frame on the stack, and
- * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
- * stack frame to make trace_hardirqs_off happy.
- *
- * This is handy because we also need to save a bunch of GPRs,
+ * We save a bunch of GPRs,
 * r3 can be different from GPR3(r1) at this point, r9 and r11
 * contains the old MSR and handler address respectively,
 * r4 & r5 can contain page fault arguments that need to be passed
- * along as well. r12, CCR, CTR, XER etc... are left clobbered as
- * they aren't useful past this point (aren't syscall arguments),
- * the rest is restored from the exception frame.
+ * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
+ * clobbered as they aren't useful past this point.
 */
+
 stwu r1,-32(r1)
 stw r9,8(r1)
 stw r11,12(r1)
 stw r3,16(r1)
 stw r4,20(r1)
 stw r5,24(r1)
- bl trace_hardirqs_off
+
+ /* If we are disabling interrupts (normal case), simply log it with
+ * lockdep
+ */
+1: bl trace_hardirqs_off
 lwz r5,24(r1)
 lwz r4,20(r1)
 lwz r3,16(r1)
 lwz r11,12(r1)
 lwz r9,8(r1)
 addi r1,r1,32
- lwz r0,GPR0(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
-1: mtctr r11
+ mtctr r11
 mtlr r9
 bctr /* jump to handler */
-#else /* CONFIG_TRACE_IRQFLAGS */
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r10
- mtlr r9
- SYNC
- RFI /* jump to handler, enable MMU */
 #endif /* CONFIG_TRACE_IRQFLAGS */

-#if defined (CONFIG_6xx) || defined(CONFIG_E500)
-4: rlwinm r12,r12,0,~_TLF_NAPPING
- stw r12,TI_LOCAL_FLAGS(r9)
- b power_save_ppc32_restore
-
-7: rlwinm r12,r12,0,~_TLF_SLEEPING
- stw r12,TI_LOCAL_FLAGS(r9)
- lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
- rlwinm r9,r9,0,~MSR_EE
- lwz r12,_LINK(r11) /* and return to address in LR */
- b fast_exception_return
-#endif
-
+#ifndef CONFIG_VMAP_STACK
 /*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
@@ -303,14 +315,45 @@
 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 lis r9,StackOverflow@ha
 addi r9,r9,StackOverflow@l
- LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 mtspr SPRN_NRI, r0
 #endif
 mtspr SPRN_SRR0,r9
 mtspr SPRN_SRR1,r10
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(stack_ovf)
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+trace_syscall_entry_irq_off:
+ /*
+ * Syscall shouldn't happen while interrupts are disabled,
+ * so let's do a warning here.
+ */
+0: trap
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+ bl trace_hardirqs_on
+
+ /* Now enable for real */
+ LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
+ mtmsr r10
+
+ REST_GPR(0, r1)
+ REST_4GPRS(3, r1)
+ REST_2GPRS(7, r1)
+ b DoSyscall
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+ .globl transfer_to_syscall
+transfer_to_syscall:
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_lock r11, r12
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ andi. r12,r9,MSR_EE
+ beq- trace_syscall_entry_irq_off
+#endif /* CONFIG_TRACE_IRQFLAGS */

 /*
 * Handle a system call.
@@ -323,36 +366,16 @@
 stw r3,ORIG_GPR3(r1)
 li r12,0
 stw r12,RESULT(r1)
- lwz r11,_CCR(r1) /* Clear SO bit in CR */
- rlwinm r11,r11,0,4,2
- stw r11,_CCR(r1)
 #ifdef CONFIG_TRACE_IRQFLAGS
- /* Return from syscalls can (and generally will) hard enable
- * interrupts. You aren't supposed to call a syscall with
- * interrupts disabled in the first place. However, to ensure
- * that we get it right vs. lockdep if it happens, we force
- * that hard enable here with appropriate tracing if we see
- * that we have been called with interrupts off
- */
+ /* Make sure interrupts are enabled */
 mfmsr r11
 andi. r12,r11,MSR_EE
- bne+ 1f
- /* We came in with interrupts disabled, we enable them now */
- bl trace_hardirqs_on
- mfmsr r11
- lwz r0,GPR0(r1)
- lwz r3,GPR3(r1)
- lwz r4,GPR4(r1)
- ori r11,r11,MSR_EE
- lwz r5,GPR5(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- mtmsr r11
-1:
+ /* We came in with interrupts disabled, we WARN and mark them enabled
+ * for lockdep now */
+0: tweqi r12, 0
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
 #endif /* CONFIG_TRACE_IRQFLAGS */
- CURRENT_THREAD_INFO(r10, r1)
- lwz r11,TI_FLAGS(r10)
+ lwz r11,TI_FLAGS(r2)
 andi. r11,r11,_TIF_SYSCALL_DOTRACE
 bne- syscall_dotrace
 syscall_dotrace_cont:
@@ -385,17 +408,13 @@
 lwz r3,GPR3(r1)
 #endif
 mr r6,r3
- CURRENT_THREAD_INFO(r12, r1)
 /* disable interrupts so current_thread_info()->flags can't change */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */
 /* Note: We don't bother telling lockdep about it */
- SYNC
- MTMSRD(r10)
- lwz r9,TI_FLAGS(r12)
+ mtmsr r10
+ lwz r9,TI_FLAGS(r2)
 li r8,-MAX_ERRNO
- lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
- ori r0,r0, (_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
- and. r0,r9,r0
+ andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 bne- syscall_exit_work
 cmplw 0,r3,r8
 blt+ syscall_exit_cont
@@ -407,8 +426,7 @@
 lwz r8,_MSR(r1)
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* If we are going to return from the syscall with interrupts
- * off, we trace that here. It shouldn't happen though but we
- * want to catch the bugger if it does right ?
+ * off, we trace that here. It shouldn't normally happen.
 */
 andi. r10,r8,MSR_EE
 bne+ 1f
@@ -437,13 +455,11 @@
 lwarx r7,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 stwcx. r0,0,r1 /* to clear the reservation */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- andi. r4,r8,MSR_PR
- beq 3f
- CURRENT_THREAD_INFO(r4, r1)
- ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
-3:
+ ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_unlock r5, r7
 #endif
+ kuap_check r2, r4
 lwz r4,_LINK(r1)
 lwz r5,_CCR(r1)
 mtlr r4
@@ -451,13 +467,14 @@
 lwz r7,_NIP(r1)
 lwz r2,GPR2(r1)
 lwz r1,GPR1(r1)
+syscall_exit_finish:
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 mtspr SPRN_NRI, r0
 #endif
 mtspr SPRN_SRR0,r7
 mtspr SPRN_SRR1,r8
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
 #ifdef CONFIG_44x
 2: li r7,0
 iccci r0,r0
@@ -513,13 +530,13 @@
 b syscall_dotrace_cont

 syscall_exit_work:
- andis. r0,r9,_TIF_RESTOREALL@h
+ andi. r0,r9,_TIF_RESTOREALL
 beq+ 0f
 REST_NVGPRS(r1)
 b 2f
 0: cmplw 0,r3,r8
 blt+ 1f
- andis. r0,r9,_TIF_NOERROR@h
+ andi. r0,r9,_TIF_NOERROR
 bne- 1f
 lwz r11,_CCR(r1) /* Load CR */
 neg r3,r3
@@ -528,21 +545,17 @@

 1: stw r6,RESULT(r1) /* Save result */
 stw r3,GPR3(r1) /* Update return value */
-2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
+2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
 beq 4f

 /* Clear per-syscall TIF flags if any are set. */

- lis r11,_TIF_PERSYSCALL_MASK@h
- addi r12,r12,TI_FLAGS
+ li r11,_TIF_PERSYSCALL_MASK
+ addi r12,r2,TI_FLAGS
 3: lwarx r8,0,r12
 andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r12
-#endif
 stwcx. r8,0,r12
 bne- 3b
- subi r12,r12,TI_FLAGS

 4: /* Anything which requires enabling interrupts? */
 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
@@ -552,8 +565,7 @@
 * lockdep as we are supposed to have IRQs on at this point
 */
 ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10)
+ mtmsr r10

 /* Save NVGPRS if they're not saved already */
 lwz r4,_TRAP(r1)
@@ -566,6 +578,33 @@
 addi r3,r1,STACK_FRAME_OVERHEAD
 bl do_syscall_trace_leave
 b ret_from_except_full
+
+ /*
+ * System call was called from kernel. We get here with SRR1 in r9.
+ * Mark the exception as recoverable once we have retrieved SRR0,
+ * trap a warning and return ENOSYS with CR[SO] set.
+ */
+ .globl ret_from_kernel_syscall
+ret_from_kernel_syscall:
+ mfspr r9, SPRN_SRR0
+ mfspr r10, SPRN_SRR1
+#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
+ mtmsr r11
+#endif
+
+0: trap
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+
+ li r3, ENOSYS
+ crset so
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
+ mtspr SPRN_NRI, r0
+#endif
+ mtspr SPRN_SRR0, r9
+ mtspr SPRN_SRR1, r10
+ RFI
+_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)

 /*
 * The fork/clone functions need to copy the full register set into
@@ -596,6 +635,14 @@
 stw r0,_TRAP(r1) /* register set saved */
 b sys_clone

+ .globl ppc_clone3
+ppc_clone3:
+ SAVE_NVGPRS(r1)
+ lwz r0,_TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,_TRAP(r1) /* register set saved */
+ b sys_clone3
+
 .globl ppc_swapcontext
 ppc_swapcontext:
 SAVE_NVGPRS(r1)
@@ -612,9 +659,8 @@
 */
 .globl handle_page_fault
 handle_page_fault:
- stw r4,_DAR(r1)
 addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
 andis. r0,r5,DSISR_DABRMATCH@h
 bne- handle_dabr_fault
 #endif
@@ -631,7 +677,7 @@
 bl bad_page_fault
 b ret_from_except_full

-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
 /* We have a data breakpoint exception - handle it */
 handle_dabr_fault:
 SAVE_NVGPRS(r1)
@@ -688,13 +734,14 @@
 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
 beq+ 1f
 andc r11,r11,r0
- MTMSRD(r11)
+ mtmsr r11
 isync
 1: stw r11,_MSR(r1)
 mfcr r10
 stw r10,_CCR(r1)
 stw r1,KSP(r3) /* Set old stack pointer */

+ kuap_check r2, r0
 #ifdef CONFIG_SMP
 /* We need a sync somewhere here to make sure that if the
 * previous task gets rescheduled on another CPU, it sees all
@@ -759,8 +806,8 @@
 REST_GPR(9, r11)
 REST_GPR(12, r11)
 lwz r11,GPR11(r11)
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(fast_exception_return)

 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 /* check if the exception happened in a restartable section */
@@ -788,18 +835,14 @@

 /* aargh, a nonrecoverable interrupt, panic */
 /* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
 3:
-BEGIN_FTR_SECTION
- b 2b
-END_FTR_SECTION_IFSET(CPU_FTR_601)
 li r10,-1
 stw r10,_TRAP(r11)
 addi r3,r1,STACK_FRAME_OVERHEAD
 lis r10,MSR_KERNEL@h
 ori r10,r10,MSR_KERNEL@l
 bl transfer_to_handler_full
- .long nonrecoverable_exception
+ .long unrecoverable_exception
 .long ret_from_except
 #endif

@@ -814,9 +857,8 @@
 * can't change between when we test it and when we return
 * from the interrupt. */
 /* Note: We don't bother telling lockdep about it */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL)
- SYNC /* Some chip revs have problems here... */
- MTMSRD(r10) /* disable interrupts */
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
+ mtmsr r10 /* disable interrupts */

 lwz r3,_MSR(r1) /* Returning to user mode? */
 andi. r0,r3,MSR_PR
@@ -824,8 +866,7 @@

 user_exc_return: /* r10 contains MSR_KERNEL here */
 /* Check current_thread_info()->flags */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
+ lwz r9,TI_FLAGS(r2)
 andi. r0,r9,_TIF_USER_WORK_MASK
 bne do_work

@@ -837,9 +878,9 @@
 andis. r10,r0,DBCR0_IDM@h
 bnel- load_dbcr0
 #endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- CURRENT_THREAD_INFO(r9, r1)
- ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
+ ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_unlock r10, r11
 #endif

 b restore
@@ -847,8 +888,7 @@
 /* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r8,TI_FLAGS(r9)
+ lwz r8,TI_FLAGS(r2)
 andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
 beq+ 1f

@@ -874,33 +914,23 @@

 /* Clear _TIF_EMULATE_STACK_STORE flag */
 lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r9,TI_FLAGS
+ addi r5,r2,TI_FLAGS
 0: lwarx r8,0,r5
 andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r5
-#endif
 stwcx. r8,0,r5
 bne- 0b
 1:

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 /* check current_thread_info->preempt_count */
- lwz r0,TI_PREEMPT(r9)
+ lwz r0,TI_PREEMPT(r2)
 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
+ bne restore_kuap
 andi. r8,r8,_TIF_NEED_RESCHED
- bne+ 1f
- lwz r0,TI_PREEMPT_LAZY(r9)
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
- lwz r0,TI_FLAGS(r9)
- andi. r0,r0,_TIF_NEED_RESCHED_LAZY
- beq+ restore
-1:
+ beq+ restore_kuap
 lwz r3,_MSR(r1)
 andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
+ beq restore_kuap /* don't schedule if so */
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* Lockdep thinks irqs are enabled, we need to call
 * preempt_schedule_irq with IRQs off, so we inform lockdep
@@ -908,18 +938,16 @@
 */
 bl trace_hardirqs_off
 #endif
-2: bl preempt_schedule_irq
- CURRENT_THREAD_INFO(r9, r1)
- lwz r3,TI_FLAGS(r9)
- andi. r0,r3,_TIF_NEED_RESCHED_MASK
- bne- 2b
+ bl preempt_schedule_irq
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* And now, to properly rebalance the above, we tell lockdep they
 * are being turned back on, which will happen when we return
 */
 bl trace_hardirqs_on
 #endif
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
+restore_kuap:
+ kuap_restore r1, r2, r9, r10, r0

 /* interrupts are hard-disabled at this point */
 restore:
@@ -943,28 +971,14 @@
 * off in this assembly code while peeking at TI_FLAGS() and such. However
 * we need to inform it if the exception turned interrupts off, and we
 * are about to trun them back on.
- *
- * The problem here sadly is that we don't know whether the exceptions was
- * one that turned interrupts off or not. So we always tell lockdep about
- * turning them on here when we go back to wherever we came from with EE
- * on, even if that may meen some redudant calls being tracked. Maybe later
- * we could encode what the exception did somewhere or test the exception
- * type in the pt_regs but that sounds overkill
 */
 andi. r10,r9,MSR_EE
 beq 1f
- /*
- * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
- * which is the stack frame here, we need to force a stack frame
- * in case we came from user space.
- */
 stwu r1,-32(r1)
 mflr r0
 stw r0,4(r1)
- stwu r1,-32(r1)
 bl trace_hardirqs_on
- lwz r1,0(r1)
- lwz r1,0(r1)
+ addi r1, r1, 32
 lwz r9,_MSR(r1)
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
@@ -979,7 +993,6 @@
 mtspr SPRN_XER,r10
 mtctr r11

- PPC405_ERR77(0,r1)
 BEGIN_FTR_SECTION
 lwarx r11,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
@@ -1006,23 +1019,20 @@
 * can restart the exception exit path at the label
 * exc_exit_restart below. -- paulus
 */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
- SYNC
- MTMSRD(r10) /* clear the RI bit */
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
+ mtmsr r10 /* clear the RI bit */
 .globl exc_exit_restart
 exc_exit_restart:
 lwz r12,_NIP(r1)
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
 mtspr SPRN_SRR0,r12
 mtspr SPRN_SRR1,r9
 REST_4GPRS(9, r1)
 lwz r1,GPR1(r1)
 .globl exc_exit_restart_end
 exc_exit_restart_end:
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)

 #else /* !(CONFIG_4xx || CONFIG_BOOKE) */
 /*
@@ -1044,16 +1054,15 @@
 exc_exit_restart:
 lwz r11,_NIP(r1)
 lwz r12,_MSR(r1)
-exc_exit_start:
 mtspr SPRN_SRR0,r11
 mtspr SPRN_SRR1,r12
 REST_2GPRS(11, r1)
 lwz r1,GPR1(r1)
 .globl exc_exit_restart_end
 exc_exit_restart_end:
- PPC405_ERR77_SYNC
 rfi
 b . /* prevent prefetch past rfi */
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart)

 /*
 * Returning from a critical interrupt in user mode doesn't need
@@ -1084,7 +1093,7 @@
 REST_NVGPRS(r1); \
 lwz r3,_MSR(r1); \
 andi. r3,r3,MSR_PR; \
- LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \
 bne user_exc_return; \
 lwz r0,GPR0(r1); \
 lwz r2,GPR2(r1); \
@@ -1094,7 +1103,6 @@
 lwz r11,_CTR(r1); \
 mtspr SPRN_XER,r10; \
 mtctr r11; \
- PPC405_ERR77(0,r1); \
 stwcx. r0,0,r1; /* to clear the reservation */ \
 lwz r11,_LINK(r1); \
 mtlr r11; \
@@ -1114,7 +1122,6 @@
 lwz r10,GPR10(r1); \
 lwz r11,GPR11(r1); \
 lwz r1,GPR1(r1); \
- PPC405_ERR77_SYNC; \
 exc_lvl_rfi; \
 b .; /* prevent prefetch past exc_lvl_rfi */

@@ -1167,6 +1174,7 @@
 mtspr SPRN_SRR0,r9;
 mtspr SPRN_SRR1,r10;
 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
 #endif /* CONFIG_40x */

 #ifdef CONFIG_BOOKE
@@ -1178,20 +1186,18 @@
 RESTORE_xSRR(SRR0,SRR1);
 RESTORE_MMU_REGS;
 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

 .globl ret_from_debug_exc
 ret_from_debug_exc:
 mfspr r9,SPRN_SPRG_THREAD
 lwz r10,SAVED_KSP_LIMIT(r1)
 stw r10,KSP_LIMIT(r9)
- lwz r9,THREAD_INFO-THREAD(r9)
- CURRENT_THREAD_INFO(r10, r1)
- lwz r10,TI_PREEMPT(r10)
- stw r10,TI_PREEMPT(r9)
 RESTORE_xSRR(SRR0,SRR1);
 RESTORE_xSRR(CSRR0,CSRR1);
 RESTORE_MMU_REGS;
 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
+_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

 .globl ret_from_mcheck_exc
 ret_from_mcheck_exc:
@@ -1203,6 +1209,7 @@
 RESTORE_xSRR(DSRR0,DSRR1);
 RESTORE_MMU_REGS;
 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
 #endif /* CONFIG_BOOKE */

 /*
@@ -1219,8 +1226,7 @@
 lis r11,global_dbcr0@ha
 addi r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_CPU(r9)
+ lwz r9,TASK_CPU(r2)
 slwi r9,r9,3
 add r11,r11,r9
 #endif
@@ -1235,41 +1241,39 @@

 .section .bss
 .align 4
+ .global global_dbcr0
 global_dbcr0:
 .space 8*NR_CPUS
 .previous
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

 do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ andi. r0,r9,_TIF_NEED_RESCHED
 beq do_user_signal

 do_resched: /* r10 contains MSR_KERNEL here */
- /* Note: We don't need to inform lockdep that we are enabling
- * interrupts here. As far as it knows, they are already enabled
- */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on
+ mfmsr r10
+#endif
 ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* hard-enable interrupts */
+ mtmsr r10 /* hard-enable interrupts */
 bl schedule
 recheck:
 /* Note: And we don't tell it we are disabling them again
 * neither. Those disable/enable cycles used to peek at
 * TI_FLAGS aren't advertised.
 */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL)
- SYNC
- MTMSRD(r10) /* disable interrupts */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
- andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
+ mtmsr r10 /* disable interrupts */
+ lwz r9,TI_FLAGS(r2)
+ andi. r0,r9,_TIF_NEED_RESCHED
 bne- do_resched
 andi. r0,r9,_TIF_USER_WORK_MASK
 beq restore_user
 do_user_signal: /* r10 contains MSR_KERNEL here */
 ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* hard-enable interrupts */
+ mtmsr r10 /* hard-enable interrupts */
 /* save r13-r31 in the exception frame, if not already done */
 lwz r3,_TRAP(r1)
 andi. r0,r3,1
@@ -1304,20 +1308,20 @@
 mr r12,r11 /* restart at exc_exit_restart */
 blr
 3: /* OK, we can't recover, kill this process */
- /* but the 601 doesn't implement the RI bit, so assume it's OK */
-BEGIN_FTR_SECTION
- blr
-END_FTR_SECTION_IFSET(CPU_FTR_601)
 lwz r3,_TRAP(r1)
 andi. r0,r3,1
- beq 4f
+ beq 5f
 SAVE_NVGPRS(r1)
 rlwinm r3,r3,0,0,30
 stw r3,_TRAP(r1)
+5: mfspr r2,SPRN_SPRG_THREAD
+ addi r2,r2,-THREAD
+ tovirt(r2,r2) /* set back r2 to current */
 4: addi r3,r1,STACK_FRAME_OVERHEAD
- bl nonrecoverable_exception
+ bl unrecoverable_exception
 /* shouldn't return */
 b 4b
+_ASM_NOKPROBE_SYMBOL(nonrecoverable)

 .section .bss
 .align 2
@@ -1343,33 +1347,33 @@
 lis r6,1f@ha /* physical return address for rtas */
 addi r6,r6,1f@l
 tophys(r6,r6)
- tophys(r7,r1)
+ tophys_novmstack r7, r1
 lwz r8,RTASENTRY(r4)
 lwz r4,RTASBASE(r4)
 mfmsr r9
 stw r9,8(r1)
- LOAD_MSR_KERNEL(r0,MSR_KERNEL)
- SYNC /* disable interrupts so SRR0/1 */
- MTMSRD(r0) /* don't get trashed */
+ LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
+ mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */
 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
 mtlr r6
- mtspr SPRN_SPRG_RTAS,r7
+ stw r7, THREAD + RTAS_SP(r2)
 mtspr SPRN_SRR0,r8
 mtspr SPRN_SRR1,r9
 RFI
-1: tophys(r9,r1)
+1: tophys_novmstack r9, r1
+#ifdef CONFIG_VMAP_STACK
+ li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
+ mtmsr r0
+ isync
+#endif
 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
 lwz r9,8(r9) /* original msr value */
 addi r1,r1,INT_FRAME_SIZE
 li r0,0
- mtspr SPRN_SPRG_RTAS,r0
+ tophys_novmstack r7, r2
+ stw r0, THREAD + RTAS_SP(r7)
 mtspr SPRN_SRR0,r8
 mtspr SPRN_SRR1,r9
 RFI /* return to caller */
-
- .globl machine_check_in_rtas
-machine_check_in_rtas:
- twi 31,0,0
- /* XXX load up BATs and panic */
-
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
 #endif /* CONFIG_PPC_RTAS */
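A note on the rlwimi to rlwinm change in the two crit_transfer_to_handler hunks above: rlwinm rD,rS,0,0,(31 - THREAD_SHIFT) keeps only the upper bits of the stack pointer, rounding r1 down to the THREAD_SIZE-aligned base of the current stack, whereas the old rlwimi merely inserted those bits into whatever low bits the destination register already held. A small worked illustration, assuming THREAD_SHIFT = 13 (8 KB stacks), -mregnames, and a made-up sample value:

        lis     r1, 0xc089
        ori     r1, r1, 0x7f10          /* sample kernel SP: 0xc0897f10 */
        rlwinm  r0, r1, 0, 0, 31 - 13   /* r0 = 0xc0896000, the stack base */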