+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * PowerPC version
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
...
  *
  * This file contains the system call entry code, context switch
  * code, and exception/interrupt return code for PowerPC.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <asm/cache.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
...
 #include <asm/exception-64e.h>
 #endif
 #include <asm/feature-fixups.h>
+#include <asm/kup.h>
 
 /*
  * System calls.
...
 SYS_CALL_TABLE:
 	.tc sys_call_table[TC],sys_call_table
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYS_CALL_TABLE:
+	.tc compat_sys_call_table[TC],compat_sys_call_table
+#endif
+
 /* This value is used to mark exception frames on the stack. */
 exception_marker:
 	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
...
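
This is the heart of the rework: the hand-rolled assembly dispatch that follows (syscall table lookup, tracing hooks, in-assembly error handling) is deleted in favour of saving state and calling into C, both for the new `scv` vectored entry and for the existing `sc` entry. A sketch of the C-side counterparts the new entry code assumes; the names are taken from the `bl` targets in the hunk, but the signatures are only my reading of the register convention the assembly sets up (arguments left in r3-r8, original r0 in r9, pt_regs pointer in r10, and an scv flag in r5 on the exit side), not quoted from the patch:

```c
struct pt_regs;

/*
 * Dispatches the system call; receives the user's r3-r8 unchanged as C
 * arguments, plus the original r0 (syscall number) and the stack frame.
 * Returns the syscall return value in r3.
 */
long system_call_exception(long r3, long r4, long r5, long r6, long r7,
			   long r8, unsigned long r0, struct pt_regs *regs);

/*
 * Exit work (tracing, signals, reschedule). Returns nonzero when the
 * full GPR set must be restored (e.g. ptrace wrote to the regs), which
 * the assembly tests with "cmpdi r3,0" to pick the slow restore path.
 */
unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs,
				   long scv);
```
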
 	.section	".text"
 	.align 7
 
-	.globl system_call_common
-system_call_common:
+#ifdef CONFIG_PPC_BOOK3S
+.macro system_call_vectored name trapnr
+	.globl system_call_vectored_\name
+system_call_vectored_\name:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 BEGIN_FTR_SECTION
 	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
 	bne	.Ltabort_syscall
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-	andi.	r10,r12,MSR_PR
+	SCV_INTERRUPT_TO_KERNEL
 	mr	r10,r1
-	addi	r1,r1,-INT_FRAME_SIZE
-	beq-	1f
 	ld	r1,PACAKSAVE(r13)
-1:	std	r10,0(r1)
+	std	r10,0(r1)
 	std	r11,_NIP(r1)
 	std	r12,_MSR(r1)
 	std	r0,GPR0(r1)
 	std	r10,GPR1(r1)
-	beq	2f			/* if from kernel mode */
-#ifdef CONFIG_PPC_FSL_BOOK3E
-START_BTB_FLUSH_SECTION
-	BTB_FLUSH(r10)
-END_BTB_FLUSH_SECTION
-#endif
-	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
-2:	std	r2,GPR2(r1)
+	std	r2,GPR2(r1)
+	ld	r2,PACATOC(r13)
+	mfcr	r12
+	li	r11,0
+	/* Can we avoid saving r3-r8 in common case? */
 	std	r3,GPR3(r1)
-	mfcr	r2
 	std	r4,GPR4(r1)
 	std	r5,GPR5(r1)
 	std	r6,GPR6(r1)
 	std	r7,GPR7(r1)
 	std	r8,GPR8(r1)
-	li	r11,0
+	/* Zero r9-r12, this should only be required when restoring all GPRs */
 	std	r11,GPR9(r1)
 	std	r11,GPR10(r1)
 	std	r11,GPR11(r1)
 	std	r11,GPR12(r1)
-	std	r11,_XER(r1)
-	std	r11,_CTR(r1)
 	std	r9,GPR13(r1)
-	mflr	r10
-	/*
-	 * This clears CR0.SO (bit 28), which is the error indication on
-	 * return from this system call.
-	 */
-	rldimi	r2,r11,28,(63-28)
-	li	r11,0xc01
-	std	r10,_LINK(r1)
+	SAVE_NVGPRS(r1)
+	std	r11,_XER(r1)
+	std	r11,_LINK(r1)
+	std	r11,_CTR(r1)
+
+	li	r11,\trapnr
 	std	r11,_TRAP(r1)
+	std	r12,_CCR(r1)
 	std	r3,ORIG_GPR3(r1)
-	std	r2,_CCR(r1)
-	ld	r2,PACATOC(r13)
-	addi	r9,r1,STACK_FRAME_OVERHEAD
+	addi	r10,r1,STACK_FRAME_OVERHEAD
 	ld	r11,exception_marker@toc(r2)
-	std	r11,-16(r9)		/* "regshere" marker */
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
-BEGIN_FW_FTR_SECTION
-	beq	33f
-	/* if from user, see if there are any DTL entries to process */
-	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
-	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
-	addi	r10,r10,LPPACA_DTLIDX
-	LDX_BE	r10,0,r10		/* get log write index */
-	cmpd	cr1,r11,r10
-	beq+	cr1,33f
-	bl	accumulate_stolen_time
-	REST_GPR(0,r1)
-	REST_4GPRS(3,r1)
-	REST_2GPRS(7,r1)
-	addi	r9,r1,STACK_FRAME_OVERHEAD
-33:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
+	std	r11,-16(r10)		/* "regshere" marker */
+
+BEGIN_FTR_SECTION
+	HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
 	/*
-	 * A syscall should always be called with interrupts enabled
-	 * so we just unconditionally hard-enable here. When some kind
-	 * of irq tracing is used, we additionally check that condition
-	 * is correct
-	 */
-#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
-	lbz	r10,PACAIRQSOFTMASK(r13)
-1:	tdnei	r10,IRQS_ENABLED
-	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
-
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	li	r11,MSR_RI
-	ori	r11,r11,MSR_EE
-	mtmsrd	r11,1
-#endif /* CONFIG_PPC_BOOK3E */
-
-system_call:			/* label this so stack traces look sane */
-	/* We do need to set SOFTE in the stack frame or the return
-	 * from interrupt will be painful
-	 */
-	li	r10,IRQS_ENABLED
-	std	r10,SOFTE(r1)
-
-	CURRENT_THREAD_INFO(r11, r1)
-	ld	r10,TI_FLAGS(r11)
-	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
-	bne	.Lsyscall_dotrace		/* does not return */
-	cmpldi	0,r0,NR_syscalls
-	bge-	.Lsyscall_enosys
-
-.Lsyscall:
-/*
- * Need to vector to 32 Bit or default sys_call_table here,
- * based on caller's run-mode / personality.
- */
-	ld	r11,SYS_CALL_TABLE@toc(2)
-	andi.	r10,r10,_TIF_32BIT
-	beq	15f
-	addi	r11,r11,8	/* use 32-bit syscall entries */
-	clrldi	r3,r3,32
-	clrldi	r4,r4,32
-	clrldi	r5,r5,32
-	clrldi	r6,r6,32
-	clrldi	r7,r7,32
-	clrldi	r8,r8,32
-15:
-	slwi	r0,r0,4
-
-	barrier_nospec_asm
-	/*
-	 * Prevent the load of the handler below (based on the user-passed
-	 * system call number) being speculatively executed until the test
-	 * against NR_syscalls and branch to .Lsyscall_enosys above has
-	 * committed.
+	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
+	 * would clobber syscall parameters. Also we always enter with IRQs
+	 * enabled and nothing pending. system_call_exception() will call
+	 * trace_hardirqs_off().
+	 *
+	 * scv enters with MSR[EE]=1, so don't set PACA_IRQ_HARD_DIS. The
+	 * entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED.
 	 */
 
-	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
-	mtctr	r12
-	bctrl			/* Call handler */
+	/* Calling convention has r9 = orig r0, r10 = regs */
+	mr	r9,r0
+	bl	system_call_exception
 
-.Lsyscall_exit:
-	std	r3,RESULT(r1)
+.Lsyscall_vectored_\name\()_exit:
+	addi	r4,r1,STACK_FRAME_OVERHEAD
+	li	r5,1 /* scv */
+	bl	syscall_exit_prepare
 
-#ifdef CONFIG_DEBUG_RSEQ
-	/* Check whether the syscall is issued inside a restartable sequence */
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	rseq_syscall
-	ld	r3,RESULT(r1)
-#endif
+	ld	r2,_CCR(r1)
+	ld	r4,_NIP(r1)
+	ld	r5,_MSR(r1)
 
-	CURRENT_THREAD_INFO(r12, r1)
-
-	ld	r8,_MSR(r1)
-#ifdef CONFIG_PPC_BOOK3S
-	/* No MSR:RI on BookE */
-	andi.	r10,r8,MSR_RI
-	beq-	.Lunrecov_restore
-#endif
-
-/*
- * This is a few instructions into the actual syscall exit path (which actually
- * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
- * number of visible symbols for profiling purposes.
- *
- * We can probe from system_call until this point as MSR_RI is set. But once it
- * is cleared below, we won't be able to take a trap.
- *
- * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
- */
-system_call_exit:
-	/*
-	 * Disable interrupts so current_thread_info()->flags can't change,
-	 * and so that we don't get interrupted after loading SRR0/1.
-	 */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	/*
-	 * For performance reasons we clear RI the same time that we
-	 * clear EE. We only need to clear RI just before we restore r13
-	 * below, but batching it with EE saves us one expensive mtmsrd call.
-	 * We have to be careful to restore RI if we branch anywhere from
-	 * here (eg syscall_exit_work).
-	 */
-	li	r11,0
-	mtmsrd	r11,1
-#endif /* CONFIG_PPC_BOOK3E */
-
-	ld	r9,TI_FLAGS(r12)
-	li	r11,-MAX_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
-	bne-	.Lsyscall_exit_work
-
-	andi.	r0,r8,MSR_FP
-	beq 2f
-#ifdef CONFIG_ALTIVEC
-	andis.	r0,r8,MSR_VEC@h
-	bne	3f
-#endif
-2:	addi	r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
-	li	r10,MSR_RI
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	bl	restore_math
-#ifdef CONFIG_PPC_BOOK3S
-	li	r11,0
-	mtmsrd	r11,1
-#endif
-	ld	r8,_MSR(r1)
-	ld	r3,RESULT(r1)
-	li	r11,-MAX_ERRNO
-
-3:	cmpld	r3,r11
-	ld	r5,_CCR(r1)
-	bge-	.Lsyscall_error
-.Lsyscall_error_cont:
-	ld	r7,_NIP(r1)
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1			/* to clear the reservation */
 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-	andi.	r6,r8,MSR_PR
-	ld	r4,_LINK(r1)
-
-	beq-	1f
-	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
 
 BEGIN_FTR_SECTION
 	HMT_MEDIUM_LOW
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
-	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
-	ld	r2,GPR2(r1)
-	ld	r1,GPR1(r1)
+	cmpdi	r3,0
+	bne	.Lsyscall_vectored_\name\()_restore_regs
+
+	/* rfscv returns with LR->NIA and CTR->MSR */
 	mtlr	r4
-	mtcr	r5
-	mtspr	SPRN_SRR0,r7
-	mtspr	SPRN_SRR1,r8
-	RFI_TO_USER
-	b	.	/* prevent speculative execution */
+	mtctr	r5
 
-	/* exit to kernel */
-1:	ld	r2,GPR2(r1)
-	ld	r1,GPR1(r1)
-	mtlr	r4
-	mtcr	r5
-	mtspr	SPRN_SRR0,r7
-	mtspr	SPRN_SRR1,r8
-	RFI_TO_KERNEL
-	b	.	/* prevent speculative execution */
-
-.Lsyscall_error:
-	oris	r5,r5,0x1000	/* Set SO bit in CR */
-	neg	r3,r3
-	std	r5,_CCR(r1)
-	b	.Lsyscall_error_cont
-
-/* Traced system call support */
-.Lsyscall_dotrace:
-	bl	save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	do_syscall_trace_enter
-
-	/*
-	 * We use the return value of do_syscall_trace_enter() as the syscall
-	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
-	 * returns an invalid syscall number and the test below against
-	 * NR_syscalls will fail.
-	 */
-	mr	r0,r3
-
-	/* Restore argument registers just clobbered and/or possibly changed. */
-	ld	r3,GPR3(r1)
+	/* Could zero these as per ABI, but we may consider a stricter ABI
+	 * which preserves these if libc implementations can benefit, so
+	 * restore them for now until further measurement is done. */
+	ld	r0,GPR0(r1)
 	ld	r4,GPR4(r1)
 	ld	r5,GPR5(r1)
 	ld	r6,GPR6(r1)
 	ld	r7,GPR7(r1)
 	ld	r8,GPR8(r1)
+	/* Zero volatile regs that may contain sensitive kernel data */
+	li	r9,0
+	li	r10,0
+	li	r11,0
+	li	r12,0
+	mtspr	SPRN_XER,r0
 
-	/* Repopulate r9 and r10 for the syscall path */
-	addi	r9,r1,STACK_FRAME_OVERHEAD
-	CURRENT_THREAD_INFO(r10, r1)
-	ld	r10,TI_FLAGS(r10)
+	/*
+	 * We don't need to restore AMR on the way back to userspace for KUAP.
+	 * The value of AMR only matters while we're in the kernel.
+	 */
+	mtcr	r2
+	ld	r2,GPR2(r1)
+	ld	r3,GPR3(r1)
+	ld	r13,GPR13(r1)
+	ld	r1,GPR1(r1)
+	RFSCV_TO_USER
+	b	.	/* prevent speculative execution */
 
-	cmpldi	r0,NR_syscalls
-	blt+	.Lsyscall
+.Lsyscall_vectored_\name\()_restore_regs:
+	li	r3,0
+	mtmsrd	r3,1
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r5
 
-	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
-	b	.Lsyscall_exit
+	ld	r3,_CTR(r1)
+	ld	r4,_LINK(r1)
+	ld	r5,_XER(r1)
 
-
-.Lsyscall_enosys:
-	li	r3,-ENOSYS
-	b	.Lsyscall_exit
-
-.Lsyscall_exit_work:
-#ifdef CONFIG_PPC_BOOK3S
-	li	r10,MSR_RI
-	mtmsrd	r10,1		/* Restore RI */
-#endif
-	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
-	 If TIF_NOERROR is set, just save r3 as it is. */
-
-	andi.	r0,r9,_TIF_RESTOREALL
-	beq+	0f
 	REST_NVGPRS(r1)
-	b	2f
-0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
-	blt+	1f
-	andi.	r0,r9,_TIF_NOERROR
-	bne-	1f
-	ld	r5,_CCR(r1)
-	neg	r3,r3
-	oris	r5,r5,0x1000	/* Set SO bit in CR */
-	std	r5,_CCR(r1)
-1:	std	r3,GPR3(r1)
-2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
-	beq	4f
+	ld	r0,GPR0(r1)
+	mtcr	r2
+	mtctr	r3
+	mtlr	r4
+	mtspr	SPRN_XER,r5
+	REST_10GPRS(2, r1)
+	REST_2GPRS(12, r1)
+	ld	r1,GPR1(r1)
+	RFI_TO_USER
+.endm
 
-	/* Clear per-syscall TIF flags if any are set. */
+system_call_vectored common 0x3000
+/*
+ * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
+ * which is tested by system_call_exception when r0 is -1 (as set by vector
+ * entry code).
+ */
+system_call_vectored sigill 0x7ff0
 
-	li	r11,_TIF_PERSYSCALL_MASK
-	addi	r12,r12,TI_FLAGS
-3:	ldarx	r10,0,r12
-	andc	r10,r10,r11
-	stdcx.	r10,0,r12
-	bne-	3b
-	subi	r12,r12,TI_FLAGS
 
-4:	/* Anything else left to do? */
+/*
+ * Entered via kernel return set up by kernel/sstep.c, must match entry regs
+ */
+	.globl system_call_vectored_emulate
+system_call_vectored_emulate:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
+	li	r10,IRQS_ALL_DISABLED
+	stb	r10,PACAIRQSOFTMASK(r13)
+	b	system_call_vectored_common
+#endif
+
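For context on what the two instantiations above service: user code reaches `system_call_vectored_common` by executing `scv 0`, a separate instruction from `sc`. A hypothetical userspace probe follows; it is not part of the patch and assumes the documented powerpc scv calling convention (syscall number in r0, arguments from r3 onward, errors reported as a negative return value in r3 rather than via CR0.SO). A real program should first check that the kernel and CPU support scv (my understanding is that this series advertises it via an AT_HWCAP2 bit); on unsupported hardware the instruction simply traps.

```c
#include <stdio.h>

/* __NR_getpid is 20 on powerpc; scv clobbers more registers than sc,
 * so the clobber list is deliberately generous. */
static long scv_syscall0(long nr)
{
	register long r0 asm("r0") = nr;
	register long r3 asm("r3");

	asm volatile("scv 0"
		     : "=r" (r3)
		     : "r" (r0)
		     : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11",
		       "r12", "lr", "ctr", "cr0", "cr1", "cr5", "cr6",
		       "cr7", "memory");
	return r3;	/* negative value means -errno */
}

int main(void)
{
	printf("getpid via scv: %ld\n", scv_syscall0(20));
	return 0;
}
```
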
+	.balign IFETCH_ALIGN_BYTES
+	.globl system_call_common
+system_call_common:
+_ASM_NOKPROBE_SYMBOL(system_call_common)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 BEGIN_FTR_SECTION
-	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
-	ld	r10,PACACURRENT(r13)
-	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
-	std	r3,TASKTHREADPPR(r10)
+	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
+	bne	.Ltabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+	mr	r10,r1
+	ld	r1,PACAKSAVE(r13)
+	std	r10,0(r1)
+	std	r11,_NIP(r1)
+	std	r12,_MSR(r1)
+	std	r0,GPR0(r1)
+	std	r10,GPR1(r1)
+	std	r2,GPR2(r1)
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+	BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
+	ld	r2,PACATOC(r13)
+	mfcr	r12
+	li	r11,0
+	/* Can we avoid saving r3-r8 in common case? */
+	std	r3,GPR3(r1)
+	std	r4,GPR4(r1)
+	std	r5,GPR5(r1)
+	std	r6,GPR6(r1)
+	std	r7,GPR7(r1)
+	std	r8,GPR8(r1)
+	/* Zero r9-r12, this should only be required when restoring all GPRs */
+	std	r11,GPR9(r1)
+	std	r11,GPR10(r1)
+	std	r11,GPR11(r1)
+	std	r11,GPR12(r1)
+	std	r9,GPR13(r1)
+	SAVE_NVGPRS(r1)
+	std	r11,_XER(r1)
+	std	r11,_CTR(r1)
+	mflr	r10
+
+	/*
+	 * This clears CR0.SO (bit 28), which is the error indication on
+	 * return from this system call.
+	 */
+	rldimi	r12,r11,28,(63-28)
+	li	r11,0xc00
+	std	r10,_LINK(r1)
+	std	r11,_TRAP(r1)
+	std	r12,_CCR(r1)
+	std	r3,ORIG_GPR3(r1)
+	addi	r10,r1,STACK_FRAME_OVERHEAD
+	ld	r11,exception_marker@toc(r2)
+	std	r11,-16(r10)		/* "regshere" marker */
+
+	/*
+	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
+	 * would clobber syscall parameters. Also we always enter with IRQs
+	 * enabled and nothing pending. system_call_exception() will call
+	 * trace_hardirqs_off().
+	 */
+	li	r11,IRQS_ALL_DISABLED
+	li	r12,PACA_IRQ_HARD_DIS
+	stb	r11,PACAIRQSOFTMASK(r13)
+	stb	r12,PACAIRQHAPPENED(r13)
+
+	/* Calling convention has r9 = orig r0, r10 = regs */
+	mr	r9,r0
+	bl	system_call_exception
+
+.Lsyscall_exit:
+	addi	r4,r1,STACK_FRAME_OVERHEAD
+	li	r5,0 /* !scv */
+	bl	syscall_exit_prepare
+
+	ld	r2,_CCR(r1)
+	ld	r4,_NIP(r1)
+	ld	r5,_MSR(r1)
+	ld	r6,_LINK(r1)
+
+BEGIN_FTR_SECTION
+	stdcx.	r0,0,r1			/* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+	mtspr	SPRN_SRR0,r4
+	mtspr	SPRN_SRR1,r5
+	mtlr	r6
+
+	cmpdi	r3,0
+	bne	.Lsyscall_restore_regs
+	/* Zero volatile regs that may contain sensitive kernel data */
+	li	r0,0
+	li	r4,0
+	li	r5,0
+	li	r6,0
+	li	r7,0
+	li	r8,0
+	li	r9,0
+	li	r10,0
+	li	r11,0
+	li	r12,0
+	mtctr	r0
+	mtspr	SPRN_XER,r0
+.Lsyscall_restore_regs_cont:
+
+BEGIN_FTR_SECTION
+	HMT_MEDIUM_LOW
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
-	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
-	beq	ret_from_except_lite
+	/*
+	 * We don't need to restore AMR on the way back to userspace for KUAP.
+	 * The value of AMR only matters while we're in the kernel.
+	 */
+	mtcr	r2
+	ld	r2,GPR2(r1)
+	ld	r3,GPR3(r1)
+	ld	r13,GPR13(r1)
+	ld	r1,GPR1(r1)
+	RFI_TO_USER
+	b	.	/* prevent speculative execution */
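
One detail worth pinning down in the block above: the entry code's `rldimi r12,r11,28,(63-28)` clears CR0.SO, bit 28 of the CR image, which is the `sc` ABI's error flag; the deleted `.Lsyscall_error` path used to set the very same bit with `oris r5,r5,0x1000`. A two-line check that these really are the same bit:

```c
#include <assert.h>

int main(void)
{
	/* oris sets bits in the upper halfword: 0x1000 << 16 == 1 << 28,
	 * i.e. CR0.SO, the low bit of CR field 0 (bits 31..28 of CR). */
	assert((0x1000u << 16) == (1u << 28));
	return 0;
}
```
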
 
-	/* Re-enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	li	r10,MSR_RI
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
-	bl	save_nvgprs
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	do_syscall_trace_leave
-	b	ret_from_except
+.Lsyscall_restore_regs:
+	ld	r3,_CTR(r1)
+	ld	r4,_XER(r1)
+	REST_NVGPRS(r1)
+	mtctr	r3
+	mtspr	SPRN_XER,r4
+	ld	r0,GPR0(r1)
+	REST_8GPRS(4, r1)
+	ld	r12,GPR12(r1)
+	b	.Lsyscall_restore_regs_cont
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 .Ltabort_syscall:
...
 	RFI_TO_USER
 	b	.	/* prevent speculative execution */
 #endif
-_ASM_NOKPROBE_SYMBOL(system_call_common);
-_ASM_NOKPROBE_SYMBOL(system_call_exit);
 
+#ifdef CONFIG_PPC_BOOK3S
+_GLOBAL(ret_from_fork_scv)
+	bl	schedule_tail
+	REST_NVGPRS(r1)
+	li	r3,0	/* fork() return value */
+	b	.Lsyscall_vectored_common_exit
+#endif
+
+_GLOBAL(ret_from_fork)
+	bl	schedule_tail
+	REST_NVGPRS(r1)
+	li	r3,0	/* fork() return value */
+	b	.Lsyscall_exit
+
+_GLOBAL(ret_from_kernel_thread)
+	bl	schedule_tail
+	REST_NVGPRS(r1)
+	mtctr	r14
+	mr	r3,r15
+#ifdef PPC64_ELF_ABI_v2
+	mr	r12,r14
+#endif
+	bctrl
+	li	r3,0
+	b	.Lsyscall_exit
+
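A note on the register convention `ret_from_kernel_thread` consumes: the new thread's entry point sits in the saved r14 and its argument in r15 (non-volatiles, so they survive the call to `schedule_tail`), and under the ELFv2 ABI r12 must also hold the entry address so the callee can derive its TOC, hence the extra `mr r12,r14`. A toy C model of the hand-off (illustrative names only, not kernel API):

```c
#include <stdio.h>

/* Stand-ins for the child's saved non-volatile registers. */
struct saved_nvgprs {
	void (*fn)(void *);	/* parked in r14 by the fork path */
	void *arg;		/* parked in r15 */
};

static void ret_from_kernel_thread_model(struct saved_nvgprs *s)
{
	/* mtctr r14; mr r3,r15; bctrl */
	s->fn(s->arg);
}

static void worker(void *arg)
{
	printf("thread function got \"%s\"\n", (const char *)arg);
}

int main(void)
{
	struct saved_nvgprs s = { worker, "argument-from-r15" };
	ret_from_kernel_thread_model(&s);
	return 0;
}
```
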
+#ifdef CONFIG_PPC_BOOK3E
 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
 	ld	r11,_TRAP(r1)
...
 	std	r0,_TRAP(r1)
 	blr
 _ASM_NOKPROBE_SYMBOL(save_nvgprs);
-
-
-/*
- * The sigsuspend and rt_sigsuspend system calls can call do_signal
- * and thus put the process into the stopped state where we might
- * want to examine its user state with ptrace. Therefore we need
- * to save all the nonvolatile registers (r14 - r31) before calling
- * the C code. Similarly, fork, vfork and clone need the full
- * register state on the stack so that it can be copied to the child.
- */
-
-_GLOBAL(ppc_fork)
-	bl	save_nvgprs
-	bl	sys_fork
-	b	.Lsyscall_exit
-
-_GLOBAL(ppc_vfork)
-	bl	save_nvgprs
-	bl	sys_vfork
-	b	.Lsyscall_exit
-
-_GLOBAL(ppc_clone)
-	bl	save_nvgprs
-	bl	sys_clone
-	b	.Lsyscall_exit
-
-_GLOBAL(ppc32_swapcontext)
-	bl	save_nvgprs
-	bl	compat_sys_swapcontext
-	b	.Lsyscall_exit
-
-_GLOBAL(ppc64_swapcontext)
-	bl	save_nvgprs
-	bl	sys_swapcontext
-	b	.Lsyscall_exit
-
-_GLOBAL(ppc_switch_endian)
-	bl	save_nvgprs
-	bl	sys_switch_endian
-	b	.Lsyscall_exit
-
-_GLOBAL(ret_from_fork)
-	bl	schedule_tail
-	REST_NVGPRS(r1)
-	li	r3,0
-	b	.Lsyscall_exit
-
-_GLOBAL(ret_from_kernel_thread)
-	bl	schedule_tail
-	REST_NVGPRS(r1)
-	mtlr	r14
-	mr	r3,r15
-#ifdef PPC64_ELF_ABI_v2
-	mr	r12,r14
-#endif
 #endif
-	blrl
-	li	r3,0
-	b	.Lsyscall_exit
 
 #ifdef CONFIG_PPC_BOOK3S_64
 
 #define FLUSH_COUNT_CACHE	\
 1:	nop;	\
-	patch_site 1b, patch__call_flush_count_cache
-
-
-#define BCCTR_FLUSH	.long 0x4c400420
+	patch_site 1b, patch__call_flush_branch_caches1; \
+1:	nop; \
+	patch_site 1b, patch__call_flush_branch_caches2; \
+1:	nop; \
+	patch_site 1b, patch__call_flush_branch_caches3
 
 .macro nops number
 	.rept \number
...
 .endm
 
 	.balign 32
-.global flush_count_cache
-flush_count_cache:
+.global flush_branch_caches
+flush_branch_caches:
 	/* Save LR into r9 */
 	mflr	r9
 
...
 	li	r9,0x7fff
 	mtctr	r9
 
-	BCCTR_FLUSH
+	PPC_BCCTR_FLUSH
 
 2:	nop
 	patch_site 2b patch__flush_count_cache_return
...
 
 	.rept 278
 	.balign 32
-	BCCTR_FLUSH
+	PPC_BCCTR_FLUSH
 	nops	7
 	.endr
 
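The flush instruction emitted throughout the loop above is the special `bcctr 2,0,0` form. The deleted local macro spelled its encoding out as `.long 0x4c400420`; the replacement `PPC_BCCTR_FLUSH` is, as I read the rename, the same instruction taken from the shared ppc-opcode definitions, so the emitted bytes do not change. The fixed encoding is easy to verify from the XL-form fields:

```c
#include <stdio.h>

int main(void)
{
	/* XL-form bcctr: primary opcode 19, BO=2, BI=0, XO=528, LK=0 */
	unsigned int insn = (19u << 26) | (2u << 21) | (528u << 1);

	printf("0x%08x\n", insn);	/* 0x4c400420, as the old macro had */
	return 0;
}
```
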
...
  * state of one is saved on its kernel stack. Then the state
  * of the other is restored from its kernel stack. The memory
  * management hardware is updated to the second process's state.
- * Finally, we can return to the second process, via ret_from_except.
+ * Finally, we can return to the second process, via interrupt_return.
  * On entry, r3 points to the THREAD for the current task, r4
  * points to the THREAD for the new task.
  *
...
 	std	r0,16(r1)
 	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 	/* r3-r13 are caller saved -- Cort */
-	SAVE_8GPRS(14, r1)
-	SAVE_10GPRS(22, r1)
+	SAVE_NVGPRS(r1)
 	std	r0,_NIP(r1)	/* Return to switch caller */
 	mfcr	r23
 	std	r23,_CCR(r1)
 	std	r1,KSP(r3)	/* Set old stack pointer */
 
-	FLUSH_COUNT_CACHE
+	kuap_check_amr r9, r10
+
+	FLUSH_COUNT_CACHE	/* Clobbers r9, ctr */
 
 	/*
 	 * On SMP kernels, care must be taken because a task may be
...
 	 * kernel/sched/core.c).
 	 *
 	 * Uncacheable stores in the case of involuntary preemption must
-	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
+	 * be taken care of. The smp_mb__after_spinlock() in __schedule()
 	 * is implemented as hwsync on powerpc, which orders MMIO too. So
 	 * long as there is an hwsync in the context switch path, it will
 	 * be executed on the source CPU after the task has performed
...
 
 	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
 	std	r6,PACACURRENT(r13)	/* Set new 'current' */
+#if defined(CONFIG_STACKPROTECTOR)
+	ld	r6, TASK_CANARY(r6)
+	std	r6, PACA_CANARY(r13)
+#endif
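
Why `_switch` now copies the canary: with CONFIG_STACKPROTECTOR on ppc64 the compiler loads the stack guard from a fixed offset off r13 (the PACA), so the PACA copy must track the incoming task at every context switch. A conceptual C model of the two instructions just added (field names illustrative; the real offsets are the TASK_CANARY and PACA_CANARY asm-offsets):

```c
/* Conceptual stand-ins for task_struct and paca_struct. */
struct task_model { unsigned long stack_canary; };
struct paca_model { unsigned long canary; };

/* ld r6,TASK_CANARY(r6); std r6,PACA_CANARY(r13) */
static inline void switch_canary(struct paca_model *paca,
				 const struct task_model *next)
{
	paca->canary = next->stack_canary;
}
```
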
 
 	ld	r8,KSP(r4)	/* new stack pointer */
 #ifdef CONFIG_PPC_BOOK3S_64
...
 
 	isync
 	slbie	r6
+BEGIN_FTR_SECTION
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	slbmte	r7,r0
 	isync
 2:
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
-	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
+	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
 	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 	   because we don't need to leave the 288-byte ABI gap at the
 	   top of the kernel stack. */
...
 	mtcrf	0xFF,r6
 
 	/* r3-r13 are destroyed -- Cort */
-	REST_8GPRS(14, r1)
-	REST_10GPRS(22, r1)
+	REST_NVGPRS(r1)
 
 	/* convert old thread to its task_struct for return value */
 	addi	r3,r3,-THREAD
...
 	addi	r1,r1,SWITCH_FRAME_SIZE
 	blr
 
-	.align	7
-_GLOBAL(ret_from_except)
-	ld	r11,_TRAP(r1)
-	andi.	r0,r11,1
-	bne	ret_from_except_lite
-	REST_NVGPRS(r1)
-
-_GLOBAL(ret_from_except_lite)
+#ifdef CONFIG_PPC_BOOK3S
 	/*
-	 * Disable interrupts so that current_thread_info()->flags
-	 * can't change between when we test it and when we return
-	 * from the interrupt.
+	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
+	 * touched, no exit work created, then this can be used.
 	 */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	li	r10,MSR_RI
-	mtmsrd	r10,1		  /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-
-	CURRENT_THREAD_INFO(r9, r1)
-	ld	r3,_MSR(r1)
-#ifdef CONFIG_PPC_BOOK3E
-	ld	r10,PACACURRENT(r13)
-#endif /* CONFIG_PPC_BOOK3E */
-	ld	r4,TI_FLAGS(r9)
-	andi.	r3,r3,MSR_PR
-	beq	resume_kernel
-#ifdef CONFIG_PPC_BOOK3E
-	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
-#endif /* CONFIG_PPC_BOOK3E */
-
-	/* Check current_thread_info()->flags */
-	andi.	r0,r4,_TIF_USER_WORK_MASK
-	bne	1f
-#ifdef CONFIG_PPC_BOOK3E
-	/*
-	 * Check to see if the dbcr0 register is set up to debug.
-	 * Use the internal debug mode bit to do this.
-	 */
-	andis.	r0,r3,DBCR0_IDM@h
-	beq	restore
-	mfmsr	r0
-	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
-	mtmsr	r0
-	mtspr	SPRN_DBCR0,r3
-	li	r10, -1
-	mtspr	SPRN_DBSR,r10
-	b	restore
-#else
+	.balign IFETCH_ALIGN_BYTES
+	.globl fast_interrupt_return
+fast_interrupt_return:
+_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
+	kuap_check_amr r3, r4
+	ld	r5,_MSR(r1)
+	andi.	r0,r5,MSR_PR
+	bne	.Lfast_user_interrupt_return
+	kuap_restore_amr r3, r4
+	andi.	r0,r5,MSR_RI
+	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
+	bne+	.Lfast_kernel_interrupt_return
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	restore_math
-	b	restore
-#endif
-1:	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq	2f
-	bl	restore_interrupts
-	SCHEDULE_USER
-	b	ret_from_except_lite
-2:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
-	bne	3f		/* only restore TM if nothing else to do */
+	bl	unrecoverable_exception
+	b	.	/* should not get here */
+
+	.balign IFETCH_ALIGN_BYTES
+	.globl interrupt_return
+interrupt_return:
+_ASM_NOKPROBE_SYMBOL(interrupt_return)
+	ld	r4,_MSR(r1)
+	andi.	r0,r4,MSR_PR
+	beq	.Lkernel_interrupt_return
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	restore_tm_state
-	b	restore
-3:
-#endif
-	bl	save_nvgprs
-	/*
-	 * Use a non volatile GPR to save and restore our thread_info flags
-	 * across the call to restore_interrupts.
-	 */
-	mr	r30,r4
-	bl	restore_interrupts
-	mr	r4,r30
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	do_notify_resume
-	b	ret_from_except
+	bl	interrupt_exit_user_prepare
+	cmpdi	r3,0
+	bne-	.Lrestore_nvgprs
 
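As on the syscall side, the interrupt exit logic moves to C. The shapes below are assumptions inferred from the surrounding register use (r3 is loaded with the pt_regs pointer, and r4 still holds the `_MSR` value loaded at `interrupt_return`), so treat them as a sketch rather than definitive prototypes:

```c
struct pt_regs;

/* Nonzero return => the NVGPRs were modified and .Lrestore_nvgprs must
 * reload them before the fast user return. */
unsigned long interrupt_exit_user_prepare(struct pt_regs *regs,
					  unsigned long msr);

/* Nonzero return => a stack store must be emulated at label 1: before
 * the final RFI_TO_KERNEL. */
unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs,
					    unsigned long msr);
```
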
-resume_kernel:
-	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
-	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
-	beq+	1f
+.Lfast_user_interrupt_return:
+	ld	r11,_NIP(r1)
+	ld	r12,_MSR(r1)
+BEGIN_FTR_SECTION
+	ld	r10,_PPR(r1)
+	mtspr	SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+	mtspr	SPRN_SRR0,r11
+	mtspr	SPRN_SRR1,r12
 
-	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
-
-	ld	r3,GPR1(r1)
-	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
-	mr	r4,r1			/* src: current exception frame */
-	mr	r1,r3			/* Reroute the trampoline frame to r1 */
-
-	/* Copy from the original to the trampoline. */
-	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
-	li	r6,0			/* start offset: 0 */
-	mtctr	r5
-2:	ldx	r0,r6,r4
-	stdx	r0,r6,r3
-	addi	r6,r6,8
-	bdnz	2b
-
-	/* Do real store operation to complete stdu */
-	ld	r5,GPR1(r1)
-	std	r8,0(r5)
-
-	/* Clear _TIF_EMULATE_STACK_STORE flag */
-	lis	r11,_TIF_EMULATE_STACK_STORE@h
-	addi	r5,r9,TI_FLAGS
-0:	ldarx	r4,0,r5
-	andc	r4,r4,r11
-	stdcx.	r4,0,r5
-	bne-	0b
-1:
-
-#ifdef CONFIG_PREEMPT
-	/* Check if we need to preempt */
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	beq+	restore
-	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
-	cmpwi	cr0,r8,0
-	bne	restore
-	ld	r0,SOFTE(r1)
-	andi.	r0,r0,IRQS_DISABLED
-	bne	restore
-
-	/*
-	 * Here we are preempting the current task. We want to make
-	 * sure we are soft-disabled first and reconcile irq state.
-	 */
-	RECONCILE_IRQ_STATE(r3,r4)
-1:	bl	preempt_schedule_irq
-
-	/* Re-test flags and eventually loop */
-	CURRENT_THREAD_INFO(r9, r1)
-	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	bne	1b
-
-	/*
-	 * arch_local_irq_restore() from preempt_schedule_irq above may
-	 * enable hard interrupt but we really should disable interrupts
-	 * when we return from the interrupt, and so that we don't get
-	 * interrupted after loading SRR0/1.
-	 */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	li	r10,MSR_RI
-	mtmsrd	r10,1		  /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-#endif /* CONFIG_PREEMPT */
-
-	.globl	fast_exc_return_irq
-fast_exc_return_irq:
-restore:
-	/*
-	 * This is the main kernel exit path. First we check if we
-	 * are about to re-enable interrupts
-	 */
-	ld	r5,SOFTE(r1)
-	lbz	r6,PACAIRQSOFTMASK(r13)
-	andi.	r5,r5,IRQS_DISABLED
-	bne	.Lrestore_irq_off
-
-	/* We are enabling, were we already enabled ? Yes, just return */
-	andi.	r6,r6,IRQS_DISABLED
-	beq	cr0,.Ldo_restore
-
-	/*
-	 * We are about to soft-enable interrupts (we are hard disabled
-	 * at this point). We check if there's anything that needs to
-	 * be replayed first.
-	 */
-	lbz	r0,PACAIRQHAPPENED(r13)
-	cmpwi	cr0,r0,0
-	bne-	.Lrestore_check_irq_replay
-
-	/*
-	 * Get here when nothing happened while soft-disabled, just
-	 * soft-enable and move-on. We will hard-enable as a side
-	 * effect of rfi
-	 */
-.Lrestore_no_replay:
-	TRACE_ENABLE_INTS
-	li	r0,IRQS_ENABLED
-	stb	r0,PACAIRQSOFTMASK(r13);
-
-	/*
-	 * Final return path. BookE is handled in a different file
-	 */
-.Ldo_restore:
-#ifdef CONFIG_PPC_BOOK3E
-	b	exception_return_book3e
-#else
-	/*
-	 * Clear the reservation. If we know the CPU tracks the address of
-	 * the reservation then we can potentially save some cycles and use
-	 * a larx. On POWER6 and POWER7 this is significantly faster.
-	 */
 BEGIN_FTR_SECTION
 	stdcx.	r0,0,r1		/* to clear the reservation */
 FTR_SECTION_ELSE
-	ldarx	r4,0,r1
+	ldarx	r0,0,r1
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
-	/*
-	 * Some code path such as load_up_fpu or altivec return directly
-	 * here. They run entirely hard disabled and do not alter the
-	 * interrupt state. They also don't use lwarx/stwcx. and thus
-	 * are known not to leave dangling reservations.
-	 */
-	.globl	fast_exception_return
-fast_exception_return:
-	ld	r3,_MSR(r1)
-	ld	r4,_CTR(r1)
-	ld	r0,_LINK(r1)
-	mtctr	r4
-	mtlr	r0
-	ld	r4,_XER(r1)
-	mtspr	SPRN_XER,r4
+	ld	r3,_CCR(r1)
+	ld	r4,_LINK(r1)
+	ld	r5,_CTR(r1)
+	ld	r6,_XER(r1)
+	li	r0,0
 
-	REST_8GPRS(5, r1)
-
-	andi.	r0,r3,MSR_RI
-	beq-	.Lunrecov_restore
-
-	/* Load PPR from thread struct before we clear MSR:RI */
-BEGIN_FTR_SECTION
-	ld	r2,PACACURRENT(r13)
-	ld	r2,TASKTHREADPPR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
-	/*
-	 * Clear RI before restoring r13. If we are returning to
-	 * userspace and we take an exception after restoring r13,
-	 * we end up corrupting the userspace r13 value.
-	 */
-	li	r4,0
-	mtmsrd	r4,1
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	/* TM debug */
-	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
-#endif
-	/*
-	 * r13 is our per cpu area, only restore it if we are returning to
-	 * userspace the value stored in the stack frame may belong to
-	 * another CPU.
-	 */
-	andi.	r0,r3,MSR_PR
-	beq	1f
-BEGIN_FTR_SECTION
-	mtspr	SPRN_PPR,r2	/* Restore PPR */
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
+	REST_4GPRS(7, r1)
+	REST_2GPRS(11, r1)
 	REST_GPR(13, r1)
 
-	mtspr	SPRN_SRR1,r3
+	mtcr	r3
+	mtlr	r4
+	mtctr	r5
+	mtspr	SPRN_XER,r6
 
-	ld	r2,_CCR(r1)
-	mtcrf	0xFF,r2
-	ld	r2,_NIP(r1)
-	mtspr	SPRN_SRR0,r2
-
-	ld	r0,GPR0(r1)
-	ld	r2,GPR2(r1)
-	ld	r3,GPR3(r1)
-	ld	r4,GPR4(r1)
-	ld	r1,GPR1(r1)
+	REST_4GPRS(2, r1)
+	REST_GPR(6, r1)
+	REST_GPR(0, r1)
+	REST_GPR(1, r1)
 	RFI_TO_USER
 	b	.	/* prevent speculative execution */
 
-1:	mtspr	SPRN_SRR1,r3
+.Lrestore_nvgprs:
+	REST_NVGPRS(r1)
+	b	.Lfast_user_interrupt_return
 
-	ld	r2,_CCR(r1)
-	mtcrf	0xFF,r2
-	ld	r2,_NIP(r1)
-	mtspr	SPRN_SRR0,r2
+	.balign IFETCH_ALIGN_BYTES
+.Lkernel_interrupt_return:
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	interrupt_exit_kernel_prepare
+
+.Lfast_kernel_interrupt_return:
+	cmpdi	cr1,r3,0
+	ld	r11,_NIP(r1)
+	ld	r12,_MSR(r1)
+	mtspr	SPRN_SRR0,r11
+	mtspr	SPRN_SRR1,r12
+
+BEGIN_FTR_SECTION
+	stdcx.	r0,0,r1		/* to clear the reservation */
+FTR_SECTION_ELSE
+	ldarx	r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+	ld	r3,_LINK(r1)
+	ld	r4,_CTR(r1)
+	ld	r5,_XER(r1)
+	ld	r6,_CCR(r1)
+	li	r0,0
+
+	REST_4GPRS(7, r1)
+	REST_2GPRS(11, r1)
+
+	mtlr	r3
+	mtctr	r4
+	mtspr	SPRN_XER,r5
 
 	/*
 	 * Leaving a stale exception_marker on the stack can confuse
 	 * the reliable stack unwinder later on. Clear it.
 	 */
-	li	r2,0
-	std	r2,STACK_FRAME_OVERHEAD-16(r1)
+	std	r0,STACK_FRAME_OVERHEAD-16(r1)
 
-	ld	r0,GPR0(r1)
-	ld	r2,GPR2(r1)
-	ld	r3,GPR3(r1)
-	ld	r4,GPR4(r1)
-	ld	r1,GPR1(r1)
+	REST_4GPRS(2, r1)
+
+	bne-	cr1,1f /* emulate stack store */
+	mtcr	r6
+	REST_GPR(6, r1)
+	REST_GPR(0, r1)
+	REST_GPR(1, r1)
 	RFI_TO_KERNEL
 	b	.	/* prevent speculative execution */
 
-#endif /* CONFIG_PPC_BOOK3E */
-
-	/*
-	 * We are returning to a context with interrupts soft disabled.
-	 *
-	 * However, we may also about to hard enable, so we need to
-	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
-	 * or that bit can get out of sync and bad things will happen
+1:	/*
+	 * Emulate stack store with update. New r1 value was already calculated
+	 * and updated in our interrupt regs by emulate_loadstore, but we can't
+	 * store the previous value of r1 to the stack before re-loading our
+	 * registers from it, otherwise they could be clobbered.  Use
+	 * PACA_EXGEN as temporary storage to hold the store data, as
+	 * interrupts are disabled here so it won't be clobbered.
 	 */
-.Lrestore_irq_off:
-	ld	r3,_MSR(r1)
-	lbz	r7,PACAIRQHAPPENED(r13)
-	andi.	r0,r3,MSR_EE
-	beq	1f
-	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
-	stb	r7,PACAIRQHAPPENED(r13)
-1:
-#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
-	/* The interrupt should not have soft enabled. */
-	lbz	r7,PACAIRQSOFTMASK(r13)
-1:	tdeqi	r7,IRQS_ENABLED
-	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
-	b	.Ldo_restore
+	mtcr	r6
+	std	r9,PACA_EXGEN+0(r13)
+	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
+	REST_GPR(6, r1)
+	REST_GPR(0, r1)
+	REST_GPR(1, r1)
+	std	r9,0(r1) /* perform store component of stdu */
+	ld	r9,PACA_EXGEN+0(r13)
 
-	/*
-	 * Something did happen, check if a re-emit is needed
-	 * (this also clears paca->irq_happened)
-	 */
-.Lrestore_check_irq_replay:
-	/* XXX: We could implement a fast path here where we check
-	 * for irq_happened being just 0x01, in which case we can
-	 * clear it and return. That means that we would potentially
-	 * miss a decrementer having wrapped all the way around.
-	 *
-	 * Still, this might be useful for things like hash_page
-	 */
-	bl	__check_irq_replay
-	cmpwi	cr0,r3,0
-	beq	.Lrestore_no_replay
-
-	/*
-	 * We need to re-emit an interrupt. We do so by re-using our
-	 * existing exception frame. We first change the trap value,
-	 * but we need to ensure we preserve the low nibble of it
-	 */
-	ld	r4,_TRAP(r1)
-	clrldi	r4,r4,60
-	or	r4,r4,r3
-	std	r4,_TRAP(r1)
-
-	/*
-	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
-	 * to reconcile the IRQ state. Tracing is already accounted for.
-	 */
-	lbz	r4,PACAIRQHAPPENED(r13)
-	ori	r4,r4,PACA_IRQ_HARD_DIS
-	stb	r4,PACAIRQHAPPENED(r13)
-
-	/*
-	 * Then find the right handler and call it. Interrupts are
-	 * still soft-disabled and we keep them that way.
-	 */
-	cmpwi	cr0,r3,0x500
-	bne	1f
-	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	do_IRQ
-	b	ret_from_except
-1:	cmpwi	cr0,r3,0xf00
-	bne	1f
-	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	performance_monitor_exception
-	b	ret_from_except
-1:	cmpwi	cr0,r3,0xe60
-	bne	1f
-	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	handle_hmi_exception
-	b	ret_from_except
-1:	cmpwi	cr0,r3,0x900
-	bne	1f
-	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	timer_interrupt
-	b	ret_from_except
-#ifdef CONFIG_PPC_DOORBELL
-1:
-#ifdef CONFIG_PPC_BOOK3E
-	cmpwi	cr0,r3,0x280
-#else
-	cmpwi	cr0,r3,0xa00
-#endif /* CONFIG_PPC_BOOK3E */
-	bne	1f
-	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	doorbell_exception
-#endif /* CONFIG_PPC_DOORBELL */
-1:	b	ret_from_except /* What else to do here ? */
-
-.Lunrecov_restore:
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	unrecoverable_exception
-	b	.Lunrecov_restore
-
-_ASM_NOKPROBE_SYMBOL(ret_from_except);
-_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
-_ASM_NOKPROBE_SYMBOL(resume_kernel);
-_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
-_ASM_NOKPROBE_SYMBOL(restore);
-_ASM_NOKPROBE_SYMBOL(fast_exception_return);
-
+	RFI_TO_KERNEL
+	b	.	/* prevent speculative execution */
+#endif /* CONFIG_PPC_BOOK3S */
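
The label `1:` above is the subtle piece of the kernel-return path: when a kprobed `stdu r1,-N(r1)` was emulated, `regs->gpr[1]` already holds the updated stack pointer, but the store half of the instruction may only be performed after every GPR has been reloaded from the old frame, or the store could overwrite values still to be read; hence the PACA_EXGEN scratch slot for r9. A toy model of just the store component (illustrative, not kernel code):

```c
#include <assert.h>

/* The store half of "stdu r1,-N(r1)": write the old r1 (the back chain)
 * at the new r1, which the emulation already computed. */
static void emulated_stdu_store(unsigned long *new_r1, unsigned long old_r1)
{
	*new_r1 = old_r1;
}

int main(void)
{
	unsigned long stack[64];
	unsigned long *old_r1 = &stack[32];
	unsigned long *new_r1 = &stack[16];	/* old_r1 minus N/8 slots */

	emulated_stdu_store(new_r1, (unsigned long)old_r1);
	assert(*new_r1 == (unsigned long)old_r1);
	return 0;
}
```
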
 
 #ifdef CONFIG_PPC_RTAS
 /*
...
 _GLOBAL(enter_rtas)
 	mflr	r0
 	std	r0,16(r1)
-	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */
+	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space. */
 
 	/* Because RTAS is running in 32b mode, it clobbers the high order half
 	 * of all registers that it saves.  We therefore save those registers
...
 	 */
 	SAVE_GPR(2, r1)			/* Save the TOC */
 	SAVE_GPR(13, r1)		/* Save paca */
-	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
-	SAVE_10GPRS(22, r1)		/* ditto */
+	SAVE_NVGPRS(r1)			/* Save the non-volatiles */
 
 	mfcr	r4
 	std	r4,_CCR(r1)
...
 	/* relocation is on at this point */
 	REST_GPR(2, r1)			/* Restore the TOC */
 	REST_GPR(13, r1)		/* Restore paca */
-	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
-	REST_10GPRS(22, r1)		/* ditto */
+	REST_NVGPRS(r1)			/* Restore the non-volatiles */
 
 	GET_PACA(r13)
 
...
 	ld	r8,_DSISR(r1)
 	mtdsisr	r8
 
-	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
+	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
 	ld	r0,16(r1)	/* get return address */
 
 	mtlr	r0
...
 _GLOBAL(enter_prom)
 	mflr	r0
 	std	r0,16(r1)
-	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */
+	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space */
 
 	/* Because PROM is running in 32b mode, it clobbers the high order half
 	 * of all registers that it saves.  We therefore save those registers
...
 	 */
 	SAVE_GPR(2, r1)
 	SAVE_GPR(13, r1)
-	SAVE_8GPRS(14, r1)
-	SAVE_10GPRS(22, r1)
+	SAVE_NVGPRS(r1)
 	mfcr	r10
 	mfmsr	r11
 	std	r10,_CCR(r1)
...
 	/* Restore other registers */
 	REST_GPR(2, r1)
 	REST_GPR(13, r1)
-	REST_8GPRS(14, r1)
-	REST_10GPRS(22, r1)
+	REST_NVGPRS(r1)
 	ld	r4,_CCR(r1)
 	mtcr	r4
-
-	addi	r1,r1,PROM_FRAME_SIZE
+
+	addi	r1,r1,SWITCH_FRAME_SIZE
 	ld	r0,16(r1)
 	mtlr	r0
 	blr