...
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
...
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
 */

 #include <linux/errno.h>
...
 #include <asm/unistd.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
-#include <asm/asm-405.h>
 #include <asm/feature-fixups.h>
 #include <asm/barrier.h>
+#include <asm/kup.h>
+#include <asm/bug.h>
+
+#include "head_32.h"

 /*
- * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
+ * powerpc relies on return from interrupt/syscall being context synchronising
+ * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
+ * synchronisation instructions.
 */

 /*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
...
 mfspr r0,SPRN_DSRR1
 stw r0,_DSRR1(r11)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)

 .globl debug_transfer_to_handler
 debug_transfer_to_handler:
...
 mfspr r0,SPRN_CSRR1
 stw r0,_CSRR1(r11)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)

 .globl crit_transfer_to_handler
 crit_transfer_to_handler:
...
 mfspr r0,SPRN_SRR1
 stw r0,_SRR1(r11)

- /* set the stack limit to the current stack
- * and set the limit to protect the thread_info
- * struct
- */
+ /* set the stack limit to the current stack */
 mfspr r8,SPRN_SPRG_THREAD
 lwz r0,KSP_LIMIT(r8)
 stw r0,SAVED_KSP_LIMIT(r11)
- rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
 stw r0,KSP_LIMIT(r8)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif

 #ifdef CONFIG_40x
...
 mfspr r0,SPRN_SRR1
 stw r0,crit_srr1@l(0)

- /* set the stack limit to the current stack
- * and set the limit to protect the thread_info
- * struct
- */
+ /* set the stack limit to the current stack */
 mfspr r8,SPRN_SPRG_THREAD
 lwz r0,KSP_LIMIT(r8)
 stw r0,saved_ksp_limit@l(0)
- rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
 stw r0,KSP_LIMIT(r8)
 /* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif

 /*
...
 .globl transfer_to_handler_full
 transfer_to_handler_full:
 SAVE_NVGPRS(r11)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
 /* fall through */

 .globl transfer_to_handler
...
 stw r12,_CTR(r11)
 stw r2,_XER(r11)
 mfspr r12,SPRN_SPRG_THREAD
- addi r2,r12,-THREAD
- tovirt(r2,r2) /* set r2 to current */
+ tovirt_vmstack r12, r12
 beq 2f /* if from user, fix up THREAD.regs */
+ addi r2, r12, -THREAD
 addi r11,r1,STACK_FRAME_OVERHEAD
 stw r11,PT_REGS(r12)
 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
...
 internal debug mode bit to do this. */
 lwz r12,THREAD_DBCR0(r12)
 andis. r12,r12,DBCR0_IDM@h
+#endif
+ ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_lock r11, r12
+#endif
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
 beq+ 3f
 /* From user and task is ptraced - load up global dbcr0 */
 li r12,-1 /* clear all pending debug events */
...
 tophys(r11,r11)
 addi r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_CPU(r9)
+ lwz r9,TASK_CPU(r2)
 slwi r9,r9,3
 add r11,r11,r9
 #endif
...
 addi r12,r12,-1
 stw r12,4(r11)
 #endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- CURRENT_THREAD_INFO(r9, r1)
- tophys(r9, r9)
- ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
-#endif

 b 3f

 2: /* if from kernel, check interrupted DOZE/NAP mode and
 * check for stack overflow
 */
+ kuap_save_and_lock r11, r12, r9, r2, r6
+ addi r2, r12, -THREAD
+#ifndef CONFIG_VMAP_STACK
 lwz r9,KSP_LIMIT(r12)
 cmplw r1,r9 /* if r1 <= ksp_limit */
 ble- stack_ovf /* then the kernel stack overflowed */
+#endif
 5:
-#if defined(CONFIG_6xx) || defined(CONFIG_E500)
- CURRENT_THREAD_INFO(r9, r1)
- tophys(r9,r9) /* check local flags */
- lwz r12,TI_LOCAL_FLAGS(r9)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+ lwz r12,TI_LOCAL_FLAGS(r2)
 mtcrf 0x01,r12
 bt- 31-TLF_NAPPING,4f
 bt- 31-TLF_SLEEPING,7f
-#endif /* CONFIG_6xx || CONFIG_E500 */
+#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
 .globl transfer_to_handler_cont
 transfer_to_handler_cont:
 3:
 mflr r9
+ tovirt_novmstack r2, r2 /* set r2 to current */
+ tovirt_vmstack r9, r9
 lwz r11,0(r9) /* virtual address of handler */
 lwz r9,4(r9) /* where to go when done */
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 mtspr SPRN_NRI, r0
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * When tracing IRQ state (lockdep) we enable the MMU before we call
+ * the IRQ tracing functions as they might access vmalloc space or
+ * perform IOs for console output.
+ *
+ * To speed up the syscall path where interrupts stay on, let's check
+ * first if we are changing the MSR value at all.
+ */
+ tophys_novmstack r12, r1
+ lwz r12,_MSR(r12)
+ andi. r12,r12,MSR_EE
+ bne 1f
+
+ /* MSR isn't changing, just transition directly */
+#endif
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r10
+ mtlr r9
+ RFI /* jump to handler, enable MMU */
+
+#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+4: rlwinm r12,r12,0,~_TLF_NAPPING
+ stw r12,TI_LOCAL_FLAGS(r2)
+ b power_save_ppc32_restore
+
+7: rlwinm r12,r12,0,~_TLF_SLEEPING
+ stw r12,TI_LOCAL_FLAGS(r2)
+ lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
+ rlwinm r9,r9,0,~MSR_EE
+ lwz r12,_LINK(r11) /* and return to address in LR */
+ kuap_restore r11, r2, r3, r4, r5
+ lwz r2, GPR2(r11)
+ b fast_exception_return
+#endif
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
+ * keep interrupts disabled at this point otherwise we might risk
+ * taking an interrupt before we tell lockdep they are enabled.
+ */
 lis r12,reenable_mmu@h
 ori r12,r12,reenable_mmu@l
+ LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
 mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r10
- SYNC
+ mtspr SPRN_SRR1,r0
 RFI
-reenable_mmu: /* re-enable mmu so we can */
- mfmsr r10
- lwz r12,_MSR(r1)
- xor r10,r10,r12
- andi. r10,r10,MSR_EE /* Did EE change? */
- beq 1f

+reenable_mmu:
 /*
- * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
- * If from user mode there is only one stack frame on the stack, and
- * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
- * stack frame to make trace_hardirqs_off happy.
- *
- * This is handy because we also need to save a bunch of GPRs,
+ * We save a bunch of GPRs,
 * r3 can be different from GPR3(r1) at this point, r9 and r11
 * contains the old MSR and handler address respectively,
 * r4 & r5 can contain page fault arguments that need to be passed
- * along as well. r12, CCR, CTR, XER etc... are left clobbered as
- * they aren't useful past this point (aren't syscall arguments),
- * the rest is restored from the exception frame.
+ * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
+ * clobbered as they aren't useful past this point.
 */
+
 stwu r1,-32(r1)
 stw r9,8(r1)
 stw r11,12(r1)
 stw r3,16(r1)
 stw r4,20(r1)
 stw r5,24(r1)
- bl trace_hardirqs_off
+
+ /* If we are disabling interrupts (normal case), simply log it with
+ * lockdep
+ */
+1: bl trace_hardirqs_off
 lwz r5,24(r1)
 lwz r4,20(r1)
 lwz r3,16(r1)
 lwz r11,12(r1)
 lwz r9,8(r1)
 addi r1,r1,32
- lwz r0,GPR0(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
-1: mtctr r11
+ mtctr r11
 mtlr r9
 bctr /* jump to handler */
-#else /* CONFIG_TRACE_IRQFLAGS */
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r10
- mtlr r9
- SYNC
- RFI /* jump to handler, enable MMU */
 #endif /* CONFIG_TRACE_IRQFLAGS */

-#if defined (CONFIG_6xx) || defined(CONFIG_E500)
-4: rlwinm r12,r12,0,~_TLF_NAPPING
- stw r12,TI_LOCAL_FLAGS(r9)
- b power_save_ppc32_restore
-
-7: rlwinm r12,r12,0,~_TLF_SLEEPING
- stw r12,TI_LOCAL_FLAGS(r9)
- lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
- rlwinm r9,r9,0,~MSR_EE
- lwz r12,_LINK(r11) /* and return to address in LR */
- b fast_exception_return
-#endif
-
+#ifndef CONFIG_VMAP_STACK
 /*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
...
 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 lis r9,StackOverflow@ha
 addi r9,r9,StackOverflow@l
- LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 mtspr SPRN_NRI, r0
 #endif
 mtspr SPRN_SRR0,r9
 mtspr SPRN_SRR1,r10
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(stack_ovf)
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+trace_syscall_entry_irq_off:
+ /*
+ * Syscall shouldn't happen while interrupts are disabled,
+ * so let's do a warning here.
+ */
+0: trap
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+ bl trace_hardirqs_on
+
+ /* Now enable for real */
+ LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
+ mtmsr r10
+
+ REST_GPR(0, r1)
+ REST_4GPRS(3, r1)
+ REST_2GPRS(7, r1)
+ b DoSyscall
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+ .globl transfer_to_syscall
+transfer_to_syscall:
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_lock r11, r12
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ andi. r12,r9,MSR_EE
+ beq- trace_syscall_entry_irq_off
+#endif /* CONFIG_TRACE_IRQFLAGS */

 /*
 * Handle a system call.
...
 stw r3,ORIG_GPR3(r1)
 li r12,0
 stw r12,RESULT(r1)
- lwz r11,_CCR(r1) /* Clear SO bit in CR */
- rlwinm r11,r11,0,4,2
- stw r11,_CCR(r1)
 #ifdef CONFIG_TRACE_IRQFLAGS
- /* Return from syscalls can (and generally will) hard enable
- * interrupts. You aren't supposed to call a syscall with
- * interrupts disabled in the first place. However, to ensure
- * that we get it right vs. lockdep if it happens, we force
- * that hard enable here with appropriate tracing if we see
- * that we have been called with interrupts off
- */
+ /* Make sure interrupts are enabled */
 mfmsr r11
 andi. r12,r11,MSR_EE
- bne+ 1f
- /* We came in with interrupts disabled, we enable them now */
- bl trace_hardirqs_on
- mfmsr r11
- lwz r0,GPR0(r1)
- lwz r3,GPR3(r1)
- lwz r4,GPR4(r1)
- ori r11,r11,MSR_EE
- lwz r5,GPR5(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- mtmsr r11
-1:
+ /* We came in with interrupts disabled, we WARN and mark them enabled
+ * for lockdep now */
+0: tweqi r12, 0
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
 #endif /* CONFIG_TRACE_IRQFLAGS */
- CURRENT_THREAD_INFO(r10, r1)
- lwz r11,TI_FLAGS(r10)
+ lwz r11,TI_FLAGS(r2)
 andi. r11,r11,_TIF_SYSCALL_DOTRACE
 bne- syscall_dotrace
 syscall_dotrace_cont:
...
 lwz r3,GPR3(r1)
 #endif
 mr r6,r3
- CURRENT_THREAD_INFO(r12, r1)
 /* disable interrupts so current_thread_info()->flags can't change */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */
 /* Note: We don't bother telling lockdep about it */
- SYNC
- MTMSRD(r10)
- lwz r9,TI_FLAGS(r12)
+ mtmsr r10
+ lwz r9,TI_FLAGS(r2)
 li r8,-MAX_ERRNO
- lis r0,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@h
- ori r0,r0, (_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)@l
- and. r0,r9,r0
+ andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 bne- syscall_exit_work
 cmplw 0,r3,r8
 blt+ syscall_exit_cont
...
 lwz r8,_MSR(r1)
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* If we are going to return from the syscall with interrupts
- * off, we trace that here. It shouldn't happen though but we
- * want to catch the bugger if it does right ?
+ * off, we trace that here. It shouldn't normally happen.
 */
 andi. r10,r8,MSR_EE
 bne+ 1f
...
 lwarx r7,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 stwcx. r0,0,r1 /* to clear the reservation */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- andi. r4,r8,MSR_PR
- beq 3f
- CURRENT_THREAD_INFO(r4, r1)
- ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
-3:
+ ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_unlock r5, r7
 #endif
+ kuap_check r2, r4
 lwz r4,_LINK(r1)
 lwz r5,_CCR(r1)
 mtlr r4
...
 lwz r7,_NIP(r1)
 lwz r2,GPR2(r1)
 lwz r1,GPR1(r1)
+syscall_exit_finish:
 #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
 mtspr SPRN_NRI, r0
 #endif
 mtspr SPRN_SRR0,r7
 mtspr SPRN_SRR1,r8
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
 #ifdef CONFIG_44x
 2: li r7,0
 iccci r0,r0
...
 b syscall_dotrace_cont

 syscall_exit_work:
- andis. r0,r9,_TIF_RESTOREALL@h
+ andi. r0,r9,_TIF_RESTOREALL
 beq+ 0f
 REST_NVGPRS(r1)
 b 2f
 0: cmplw 0,r3,r8
 blt+ 1f
- andis. r0,r9,_TIF_NOERROR@h
+ andi. r0,r9,_TIF_NOERROR
 bne- 1f
 lwz r11,_CCR(r1) /* Load CR */
 neg r3,r3
...

 1: stw r6,RESULT(r1) /* Save result */
 stw r3,GPR3(r1) /* Update return value */
-2: andis. r0,r9,(_TIF_PERSYSCALL_MASK)@h
+2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
 beq 4f

 /* Clear per-syscall TIF flags if any are set. */

- lis r11,_TIF_PERSYSCALL_MASK@h
- addi r12,r12,TI_FLAGS
+ li r11,_TIF_PERSYSCALL_MASK
+ addi r12,r2,TI_FLAGS
 3: lwarx r8,0,r12
 andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r12
-#endif
 stwcx. r8,0,r12
 bne- 3b
- subi r12,r12,TI_FLAGS

 4: /* Anything which requires enabling interrupts? */
 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
...
 * lockdep as we are supposed to have IRQs on at this point
 */
 ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10)
+ mtmsr r10

 /* Save NVGPRS if they're not saved already */
 lwz r4,_TRAP(r1)
...
 addi r3,r1,STACK_FRAME_OVERHEAD
 bl do_syscall_trace_leave
 b ret_from_except_full
+
+ /*
+ * System call was called from kernel. We get here with SRR1 in r9.
+ * Mark the exception as recoverable once we have retrieved SRR0,
+ * trap a warning and return ENOSYS with CR[SO] set.
+ */
+ .globl ret_from_kernel_syscall
+ret_from_kernel_syscall:
+ mfspr r9, SPRN_SRR0
+ mfspr r10, SPRN_SRR1
+#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
+ mtmsr r11
+#endif
+
+0: trap
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+
+ li r3, ENOSYS
+ crset so
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
+ mtspr SPRN_NRI, r0
+#endif
+ mtspr SPRN_SRR0, r9
+ mtspr SPRN_SRR1, r10
+ RFI
+_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)

 /*
 * The fork/clone functions need to copy the full register set into
...
 stw r0,_TRAP(r1) /* register set saved */
 b sys_clone

+ .globl ppc_clone3
+ppc_clone3:
+ SAVE_NVGPRS(r1)
+ lwz r0,_TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,_TRAP(r1) /* register set saved */
+ b sys_clone3
+
 .globl ppc_swapcontext
 ppc_swapcontext:
 SAVE_NVGPRS(r1)
...
 */
 .globl handle_page_fault
 handle_page_fault:
- stw r4,_DAR(r1)
 addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
 andis. r0,r5,DSISR_DABRMATCH@h
 bne- handle_dabr_fault
 #endif
...
 bl bad_page_fault
 b ret_from_except_full

-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
 /* We have a data breakpoint exception - handle it */
 handle_dabr_fault:
 SAVE_NVGPRS(r1)
...
 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
 beq+ 1f
 andc r11,r11,r0
- MTMSRD(r11)
+ mtmsr r11
 isync
 1: stw r11,_MSR(r1)
 mfcr r10
 stw r10,_CCR(r1)
 stw r1,KSP(r3) /* Set old stack pointer */

+ kuap_check r2, r0
 #ifdef CONFIG_SMP
 /* We need a sync somewhere here to make sure that if the
 * previous task gets rescheduled on another CPU, it sees all
...
 REST_GPR(9, r11)
 REST_GPR(12, r11)
 lwz r11,GPR11(r11)
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(fast_exception_return)

 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 /* check if the exception happened in a restartable section */
...

 /* aargh, a nonrecoverable interrupt, panic */
 /* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
 3:
-BEGIN_FTR_SECTION
- b 2b
-END_FTR_SECTION_IFSET(CPU_FTR_601)
 li r10,-1
 stw r10,_TRAP(r11)
 addi r3,r1,STACK_FRAME_OVERHEAD
 lis r10,MSR_KERNEL@h
 ori r10,r10,MSR_KERNEL@l
 bl transfer_to_handler_full
- .long nonrecoverable_exception
+ .long unrecoverable_exception
 .long ret_from_except
 #endif

...
 * can't change between when we test it and when we return
 * from the interrupt. */
 /* Note: We don't bother telling lockdep about it */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL)
- SYNC /* Some chip revs have problems here... */
- MTMSRD(r10) /* disable interrupts */
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
+ mtmsr r10 /* disable interrupts */

 lwz r3,_MSR(r1) /* Returning to user mode? */
 andi. r0,r3,MSR_PR
...

 user_exc_return: /* r10 contains MSR_KERNEL here */
 /* Check current_thread_info()->flags */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
+ lwz r9,TI_FLAGS(r2)
 andi. r0,r9,_TIF_USER_WORK_MASK
 bne do_work

...
 andis. r10,r0,DBCR0_IDM@h
 bnel- load_dbcr0
 #endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- CURRENT_THREAD_INFO(r9, r1)
- ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
+ ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_unlock r10, r11
 #endif

 b restore
...
 /* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r8,TI_FLAGS(r9)
+ lwz r8,TI_FLAGS(r2)
 andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
 beq+ 1f

...

 /* Clear _TIF_EMULATE_STACK_STORE flag */
 lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r9,TI_FLAGS
+ addi r5,r2,TI_FLAGS
 0: lwarx r8,0,r5
 andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r5
-#endif
 stwcx. r8,0,r5
 bne- 0b
 1:

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 /* check current_thread_info->preempt_count */
- lwz r0,TI_PREEMPT(r9)
+ lwz r0,TI_PREEMPT(r2)
 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
+ bne restore_kuap
 andi. r8,r8,_TIF_NEED_RESCHED
- bne+ 1f
- lwz r0,TI_PREEMPT_LAZY(r9)
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
- lwz r0,TI_FLAGS(r9)
- andi. r0,r0,_TIF_NEED_RESCHED_LAZY
- beq+ restore
-1:
+ beq+ restore_kuap
 lwz r3,_MSR(r1)
 andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
+ beq restore_kuap /* don't schedule if so */
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* Lockdep thinks irqs are enabled, we need to call
 * preempt_schedule_irq with IRQs off, so we inform lockdep
...
 */
 bl trace_hardirqs_off
 #endif
-2: bl preempt_schedule_irq
- CURRENT_THREAD_INFO(r9, r1)
- lwz r3,TI_FLAGS(r9)
- andi. r0,r3,_TIF_NEED_RESCHED_MASK
- bne- 2b
+ bl preempt_schedule_irq
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* And now, to properly rebalance the above, we tell lockdep they
 * are being turned back on, which will happen when we return
 */
 bl trace_hardirqs_on
 #endif
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
+restore_kuap:
+ kuap_restore r1, r2, r9, r10, r0

 /* interrupts are hard-disabled at this point */
 restore:
...
 * off in this assembly code while peeking at TI_FLAGS() and such. However
 * we need to inform it if the exception turned interrupts off, and we
 * are about to turn them back on.
- *
- * The problem here sadly is that we don't know whether the exceptions was
- * one that turned interrupts off or not. So we always tell lockdep about
- * turning them on here when we go back to wherever we came from with EE
- * on, even if that may meen some redudant calls being tracked. Maybe later
- * we could encode what the exception did somewhere or test the exception
- * type in the pt_regs but that sounds overkill
 */
 andi. r10,r9,MSR_EE
 beq 1f
- /*
- * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
- * which is the stack frame here, we need to force a stack frame
- * in case we came from user space.
- */
 stwu r1,-32(r1)
 mflr r0
 stw r0,4(r1)
- stwu r1,-32(r1)
 bl trace_hardirqs_on
- lwz r1,0(r1)
- lwz r1,0(r1)
+ addi r1, r1, 32
 lwz r9,_MSR(r1)
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
...
 mtspr SPRN_XER,r10
 mtctr r11

- PPC405_ERR77(0,r1)
 BEGIN_FTR_SECTION
 lwarx r11,0,r1
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
...
 * can restart the exception exit path at the label
 * exc_exit_restart below. -- paulus
 */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
- SYNC
- MTMSRD(r10) /* clear the RI bit */
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
+ mtmsr r10 /* clear the RI bit */
 .globl exc_exit_restart
 exc_exit_restart:
 lwz r12,_NIP(r1)
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
 mtspr SPRN_SRR0,r12
 mtspr SPRN_SRR1,r9
 REST_4GPRS(9, r1)
 lwz r1,GPR1(r1)
 .globl exc_exit_restart_end
 exc_exit_restart_end:
- SYNC
 RFI
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)

 #else /* !(CONFIG_4xx || CONFIG_BOOKE) */
 /*
...
 exc_exit_restart:
 lwz r11,_NIP(r1)
 lwz r12,_MSR(r1)
-exc_exit_start:
 mtspr SPRN_SRR0,r11
 mtspr SPRN_SRR1,r12
 REST_2GPRS(11, r1)
 lwz r1,GPR1(r1)
 .globl exc_exit_restart_end
 exc_exit_restart_end:
- PPC405_ERR77_SYNC
 rfi
 b . /* prevent prefetch past rfi */
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart)

 /*
 * Returning from a critical interrupt in user mode doesn't need
...
 REST_NVGPRS(r1); \
 lwz r3,_MSR(r1); \
 andi. r3,r3,MSR_PR; \
- LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \
 bne user_exc_return; \
 lwz r0,GPR0(r1); \
 lwz r2,GPR2(r1); \
...
 lwz r11,_CTR(r1); \
 mtspr SPRN_XER,r10; \
 mtctr r11; \
- PPC405_ERR77(0,r1); \
 stwcx. r0,0,r1; /* to clear the reservation */ \
 lwz r11,_LINK(r1); \
 mtlr r11; \
...
 lwz r10,GPR10(r1); \
 lwz r11,GPR11(r1); \
 lwz r1,GPR1(r1); \
- PPC405_ERR77_SYNC; \
 exc_lvl_rfi; \
 b .; /* prevent prefetch past exc_lvl_rfi */

...
 mtspr SPRN_SRR0,r9;
 mtspr SPRN_SRR1,r10;
 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
 #endif /* CONFIG_40x */

 #ifdef CONFIG_BOOKE
...
 RESTORE_xSRR(SRR0,SRR1);
 RESTORE_MMU_REGS;
 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

 .globl ret_from_debug_exc
 ret_from_debug_exc:
 mfspr r9,SPRN_SPRG_THREAD
 lwz r10,SAVED_KSP_LIMIT(r1)
 stw r10,KSP_LIMIT(r9)
- lwz r9,THREAD_INFO-THREAD(r9)
- CURRENT_THREAD_INFO(r10, r1)
- lwz r10,TI_PREEMPT(r10)
- stw r10,TI_PREEMPT(r9)
 RESTORE_xSRR(SRR0,SRR1);
 RESTORE_xSRR(CSRR0,CSRR1);
 RESTORE_MMU_REGS;
 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
+_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

 .globl ret_from_mcheck_exc
 ret_from_mcheck_exc:
...
 RESTORE_xSRR(DSRR0,DSRR1);
 RESTORE_MMU_REGS;
 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
 #endif /* CONFIG_BOOKE */

 /*
...
 lis r11,global_dbcr0@ha
 addi r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_CPU(r9)
+ lwz r9,TASK_CPU(r2)
 slwi r9,r9,3
 add r11,r11,r9
 #endif
...

 .section .bss
 .align 4
+ .global global_dbcr0
 global_dbcr0:
 .space 8*NR_CPUS
 .previous
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

 do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ andi. r0,r9,_TIF_NEED_RESCHED
 beq do_user_signal

 do_resched: /* r10 contains MSR_KERNEL here */
- /* Note: We don't need to inform lockdep that we are enabling
- * interrupts here. As far as it knows, they are already enabled
- */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on
+ mfmsr r10
+#endif
 ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* hard-enable interrupts */
+ mtmsr r10 /* hard-enable interrupts */
 bl schedule
 recheck:
 /* Note: And we don't tell it we are disabling them again
 * neither. Those disable/enable cycles used to peek at
 * TI_FLAGS aren't advertised.
 */
- LOAD_MSR_KERNEL(r10,MSR_KERNEL)
- SYNC
- MTMSRD(r10) /* disable interrupts */
- CURRENT_THREAD_INFO(r9, r1)
- lwz r9,TI_FLAGS(r9)
- andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
+ mtmsr r10 /* disable interrupts */
+ lwz r9,TI_FLAGS(r2)
+ andi. r0,r9,_TIF_NEED_RESCHED
 bne- do_resched
 andi. r0,r9,_TIF_USER_WORK_MASK
 beq restore_user
 do_user_signal: /* r10 contains MSR_KERNEL here */
 ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* hard-enable interrupts */
+ mtmsr r10 /* hard-enable interrupts */
 /* save r13-r31 in the exception frame, if not already done */
 lwz r3,_TRAP(r1)
 andi. r0,r3,1
...
 mr r12,r11 /* restart at exc_exit_restart */
 blr
 3: /* OK, we can't recover, kill this process */
- /* but the 601 doesn't implement the RI bit, so assume it's OK */
-BEGIN_FTR_SECTION
- blr
-END_FTR_SECTION_IFSET(CPU_FTR_601)
 lwz r3,_TRAP(r1)
 andi. r0,r3,1
- beq 4f
+ beq 5f
 SAVE_NVGPRS(r1)
 rlwinm r3,r3,0,0,30
 stw r3,_TRAP(r1)
+5: mfspr r2,SPRN_SPRG_THREAD
+ addi r2,r2,-THREAD
+ tovirt(r2,r2) /* set back r2 to current */
 4: addi r3,r1,STACK_FRAME_OVERHEAD
- bl nonrecoverable_exception
+ bl unrecoverable_exception
 /* shouldn't return */
 b 4b
+_ASM_NOKPROBE_SYMBOL(nonrecoverable)

 .section .bss
 .align 2
...
 lis r6,1f@ha /* physical return address for rtas */
 addi r6,r6,1f@l
 tophys(r6,r6)
- tophys(r7,r1)
+ tophys_novmstack r7, r1
 lwz r8,RTASENTRY(r4)
 lwz r4,RTASBASE(r4)
 mfmsr r9
 stw r9,8(r1)
- LOAD_MSR_KERNEL(r0,MSR_KERNEL)
- SYNC /* disable interrupts so SRR0/1 */
- MTMSRD(r0) /* don't get trashed */
+ LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
+ mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */
 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
 mtlr r6
- mtspr SPRN_SPRG_RTAS,r7
+ stw r7, THREAD + RTAS_SP(r2)
 mtspr SPRN_SRR0,r8
 mtspr SPRN_SRR1,r9
 RFI
-1: tophys(r9,r1)
+1: tophys_novmstack r9, r1
+#ifdef CONFIG_VMAP_STACK
+ li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
+ mtmsr r0
+ isync
+#endif
 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
 lwz r9,8(r9) /* original msr value */
 addi r1,r1,INT_FRAME_SIZE
 li r0,0
- mtspr SPRN_SPRG_RTAS,r0
+ tophys_novmstack r7, r2
+ stw r0, THREAD + RTAS_SP(r7)
 mtspr SPRN_SRR0,r8
 mtspr SPRN_SRR1,r9
 RFI /* return to caller */
-
- .globl machine_check_in_rtas
-machine_check_in_rtas:
- twi 31,0,0
- /* XXX load up BATs and panic */
-
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
 #endif /* CONFIG_PPC_RTAS */
|---|