| .. | .. |
|---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
|---|
| 1 | 2 | /* |
|---|
| 2 | | - * This file contains idle entry/exit functions for POWER7, |
|---|
| 3 | | - * POWER8 and POWER9 CPUs. |
|---|
| 3 | + * Copyright 2018, IBM Corporation. |
|---|
| 4 | 4 | * |
|---|
| 5 | | - * This program is free software; you can redistribute it and/or |
|---|
| 6 | | - * modify it under the terms of the GNU General Public License |
|---|
| 7 | | - * as published by the Free Software Foundation; either version |
|---|
| 8 | | - * 2 of the License, or (at your option) any later version. |
|---|
| 5 | + * This file contains general idle entry/exit functions to save |
|---|
| 6 | + * and restore stack and NVGPRs which allows C code to call idle |
|---|
| 7 | + * states that lose GPRs, and it will return transparently with |
|---|
| 8 | + * SRR1 wakeup reason return value. |
|---|
| 9 | + * |
|---|
| 10 | + * The platform / CPU caller must ensure SPRs and any other non-GPR |
|---|
| 11 | + * state is saved and restored correctly, handle KVM, interrupts, etc. |
|---|
| 9 | 12 | */ |
|---|
| 10 | 13 | |
|---|
| 11 | | -#include <linux/threads.h> |
|---|
| 12 | | -#include <asm/processor.h> |
|---|
| 13 | | -#include <asm/page.h> |
|---|
| 14 | | -#include <asm/cputable.h> |
|---|
| 15 | | -#include <asm/thread_info.h> |
|---|
| 16 | 14 | #include <asm/ppc_asm.h> |
|---|
| 17 | 15 | #include <asm/asm-offsets.h> |
|---|
| 18 | 16 | #include <asm/ppc-opcode.h> |
|---|
| 19 | | -#include <asm/hw_irq.h> |
|---|
| 20 | | -#include <asm/kvm_book3s_asm.h> |
|---|
| 21 | | -#include <asm/opal.h> |
|---|
| 22 | 17 | #include <asm/cpuidle.h> |
|---|
| 23 | | -#include <asm/exception-64s.h> |
|---|
| 24 | | -#include <asm/book3s/64/mmu-hash.h> |
|---|
| 25 | | -#include <asm/mmu.h> |
|---|
| 26 | | -#include <asm/asm-compat.h> |
|---|
| 27 | | -#include <asm/feature-fixups.h> |
|---|
| 18 | +#include <asm/thread_info.h> /* TLF_NAPPING */ |
|---|
| 28 | 19 | |
|---|
| 29 | | -#undef DEBUG |
|---|
| 30 | | - |
|---|
| 20 | +#ifdef CONFIG_PPC_P7_NAP |
|---|
| 31 | 21 | /* |
|---|
| 32 | | - * Use unused space in the interrupt stack to save and restore |
|---|
| 33 | | - * registers for winkle support. |
|---|
| 34 | | - */ |
|---|
| 35 | | -#define _MMCR0 GPR0 |
|---|
| 36 | | -#define _SDR1 GPR3 |
|---|
| 37 | | -#define _PTCR GPR3 |
|---|
| 38 | | -#define _RPR GPR4 |
|---|
| 39 | | -#define _SPURR GPR5 |
|---|
| 40 | | -#define _PURR GPR6 |
|---|
| 41 | | -#define _TSCR GPR7 |
|---|
| 42 | | -#define _DSCR GPR8 |
|---|
| 43 | | -#define _AMOR GPR9 |
|---|
| 44 | | -#define _WORT GPR10 |
|---|
| 45 | | -#define _WORC GPR11 |
|---|
| 46 | | -#define _LPCR GPR12 |
|---|
| 47 | | - |
|---|
| 48 | | -#define PSSCR_EC_ESL_MASK_SHIFTED (PSSCR_EC | PSSCR_ESL) >> 16 |
|---|
| 49 | | - |
|---|
| 50 | | - .text |
|---|
| 51 | | - |
|---|
| 52 | | -/* |
|---|
| 53 | | - * Used by threads before entering deep idle states. Saves SPRs |
|---|
| 54 | | - * in interrupt stack frame |
|---|
| 55 | | - */ |
|---|
| 56 | | -save_sprs_to_stack: |
|---|
| 57 | | - /* |
|---|
| 58 | | - * Note all register i.e per-core, per-subcore or per-thread is saved |
|---|
| 59 | | - * here since any thread in the core might wake up first |
|---|
| 60 | | - */ |
|---|
| 61 | | -BEGIN_FTR_SECTION |
|---|
| 62 | | - /* |
|---|
| 63 | | - * Note - SDR1 is dropped in Power ISA v3. Hence not restoring |
|---|
| 64 | | - * SDR1 here |
|---|
| 65 | | - */ |
|---|
| 66 | | - mfspr r3,SPRN_PTCR |
|---|
| 67 | | - std r3,_PTCR(r1) |
|---|
| 68 | | - mfspr r3,SPRN_LPCR |
|---|
| 69 | | - std r3,_LPCR(r1) |
|---|
| 70 | | -FTR_SECTION_ELSE |
|---|
| 71 | | - mfspr r3,SPRN_SDR1 |
|---|
| 72 | | - std r3,_SDR1(r1) |
|---|
| 73 | | -ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) |
|---|
| 74 | | - mfspr r3,SPRN_RPR |
|---|
| 75 | | - std r3,_RPR(r1) |
|---|
| 76 | | - mfspr r3,SPRN_SPURR |
|---|
| 77 | | - std r3,_SPURR(r1) |
|---|
| 78 | | - mfspr r3,SPRN_PURR |
|---|
| 79 | | - std r3,_PURR(r1) |
|---|
| 80 | | - mfspr r3,SPRN_TSCR |
|---|
| 81 | | - std r3,_TSCR(r1) |
|---|
| 82 | | - mfspr r3,SPRN_DSCR |
|---|
| 83 | | - std r3,_DSCR(r1) |
|---|
| 84 | | - mfspr r3,SPRN_AMOR |
|---|
| 85 | | - std r3,_AMOR(r1) |
|---|
| 86 | | - mfspr r3,SPRN_WORT |
|---|
| 87 | | - std r3,_WORT(r1) |
|---|
| 88 | | - mfspr r3,SPRN_WORC |
|---|
| 89 | | - std r3,_WORC(r1) |
|---|
| 90 | | -/* |
|---|
| 91 | | - * On POWER9, there are idle states such as stop4, invoked via cpuidle, |
|---|
| 92 | | - * that lose hypervisor resources. In such cases, we need to save |
|---|
| 93 | | - * additional SPRs before entering those idle states so that they can |
|---|
| 94 | | - * be restored to their older values on wakeup from the idle state. |
|---|
| 22 | + * Desired PSSCR in r3 |
|---|
| 95 | 23 | * |
|---|
| 96 | | - * On POWER8, the only such deep idle state is winkle which is used |
|---|
| 97 | | - * only in the context of CPU-Hotplug, where these additional SPRs are |
|---|
| 98 | | - * reinitiazed to a sane value. Hence there is no need to save/restore |
|---|
| 99 | | - * these SPRs. |
|---|
| 100 | | - */ |
|---|
| 101 | | -BEGIN_FTR_SECTION |
|---|
| 102 | | - blr |
|---|
| 103 | | -END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
|---|
| 104 | | - |
|---|
| 105 | | -power9_save_additional_sprs: |
|---|
| 106 | | - mfspr r3, SPRN_PID |
|---|
| 107 | | - mfspr r4, SPRN_LDBAR |
|---|
| 108 | | - std r3, STOP_PID(r13) |
|---|
| 109 | | - std r4, STOP_LDBAR(r13) |
|---|
| 110 | | - |
|---|
| 111 | | - mfspr r3, SPRN_FSCR |
|---|
| 112 | | - mfspr r4, SPRN_HFSCR |
|---|
| 113 | | - std r3, STOP_FSCR(r13) |
|---|
| 114 | | - std r4, STOP_HFSCR(r13) |
|---|
| 115 | | - |
|---|
| 116 | | - mfspr r3, SPRN_MMCRA |
|---|
| 117 | | - mfspr r4, SPRN_MMCR0 |
|---|
| 118 | | - std r3, STOP_MMCRA(r13) |
|---|
| 119 | | - std r4, _MMCR0(r1) |
|---|
| 120 | | - |
|---|
| 121 | | - mfspr r3, SPRN_MMCR1 |
|---|
| 122 | | - mfspr r4, SPRN_MMCR2 |
|---|
| 123 | | - std r3, STOP_MMCR1(r13) |
|---|
| 124 | | - std r4, STOP_MMCR2(r13) |
|---|
| 125 | | - blr |
|---|
| 126 | | - |
|---|
| 127 | | -power9_restore_additional_sprs: |
|---|
| 128 | | - ld r3,_LPCR(r1) |
|---|
| 129 | | - ld r4, STOP_PID(r13) |
|---|
| 130 | | - mtspr SPRN_LPCR,r3 |
|---|
| 131 | | - mtspr SPRN_PID, r4 |
|---|
| 132 | | - |
|---|
| 133 | | - ld r3, STOP_LDBAR(r13) |
|---|
| 134 | | - ld r4, STOP_FSCR(r13) |
|---|
| 135 | | - mtspr SPRN_LDBAR, r3 |
|---|
| 136 | | - mtspr SPRN_FSCR, r4 |
|---|
| 137 | | - |
|---|
| 138 | | - ld r3, STOP_HFSCR(r13) |
|---|
| 139 | | - ld r4, STOP_MMCRA(r13) |
|---|
| 140 | | - mtspr SPRN_HFSCR, r3 |
|---|
| 141 | | - mtspr SPRN_MMCRA, r4 |
|---|
| 142 | | - |
|---|
| 143 | | - ld r3, _MMCR0(r1) |
|---|
| 144 | | - ld r4, STOP_MMCR1(r13) |
|---|
| 145 | | - mtspr SPRN_MMCR0, r3 |
|---|
| 146 | | - mtspr SPRN_MMCR1, r4 |
|---|
| 147 | | - |
|---|
| 148 | | - ld r3, STOP_MMCR2(r13) |
|---|
| 149 | | - ld r4, PACA_SPRG_VDSO(r13) |
|---|
| 150 | | - mtspr SPRN_MMCR2, r3 |
|---|
| 151 | | - mtspr SPRN_SPRG3, r4 |
|---|
| 152 | | - blr |
|---|
| 153 | | - |
|---|
| 154 | | -/* |
|---|
| 155 | | - * Used by threads when the lock bit of core_idle_state is set. |
|---|
| 156 | | - * Threads will spin in HMT_LOW until the lock bit is cleared. |
|---|
| 157 | | - * r14 - pointer to core_idle_state |
|---|
| 158 | | - * r15 - used to load contents of core_idle_state |
|---|
| 159 | | - * r9 - used as a temporary variable |
|---|
| 160 | | - */ |
|---|
| 161 | | - |
|---|
| 162 | | -core_idle_lock_held: |
|---|
| 163 | | - HMT_LOW |
|---|
| 164 | | -3: lwz r15,0(r14) |
|---|
| 165 | | - andis. r15,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 166 | | - bne 3b |
|---|
| 167 | | - HMT_MEDIUM |
|---|
| 168 | | - lwarx r15,0,r14 |
|---|
| 169 | | - andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 170 | | - bne- core_idle_lock_held |
|---|
| 171 | | - blr |
|---|
| 172 | | - |
|---|
| 173 | | -/* Reuse some unused pt_regs slots for AMR/IAMR/UAMOR/UAMOR */ |
|---|
| 174 | | -#define PNV_POWERSAVE_AMR _TRAP |
|---|
| 175 | | -#define PNV_POWERSAVE_IAMR _DAR |
|---|
| 176 | | -#define PNV_POWERSAVE_UAMOR _DSISR |
|---|
| 177 | | -#define PNV_POWERSAVE_AMOR RESULT |
|---|
| 178 | | - |
|---|
| 179 | | -/* |
|---|
| 180 | | - * Pass requested state in r3: |
|---|
| 181 | | - * r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8 |
|---|
| 182 | | - * - Requested PSSCR value in POWER9 |
|---|
| 24 | + * No state will be lost regardless of wakeup mechanism (interrupt or NIA). |
|---|
| 183 | 25 | * |
|---|
| 184 | | - * Address of idle handler to branch to in realmode in r4 |
|---|
| 26 | + * An EC=0 type wakeup will return with a value of 0. SRESET wakeup (which can |
|---|
| 27 | + * happen with xscom SRESET and possibly MCE) may clobber volatiles except LR, |
|---|
| 28 | + * and must blr, to return to caller with r3 set according to caller's expected |
|---|
| 29 | + * return code (for Book3S/64 that is SRR1). |
|---|
| 185 | 30 | */ |
|---|
| 186 | | -pnv_powersave_common: |
|---|
| 187 | | - /* Use r3 to pass state nap/sleep/winkle */ |
|---|
| 188 | | - /* NAP is a state loss, we create a regs frame on the |
|---|
| 189 | | - * stack, fill it up with the state we care about and |
|---|
| 190 | | - * stick a pointer to it in PACAR1. We really only |
|---|
| 191 | | - * need to save PC, some CR bits and the NV GPRs, |
|---|
| 192 | | - * but for now an interrupt frame will do. |
|---|
| 193 | | - */ |
|---|
| 194 | | - mtctr r4 |
|---|
| 31 | +_GLOBAL(isa300_idle_stop_noloss) |
|---|
| 32 | + mtspr SPRN_PSSCR,r3 |
|---|
| 33 | + PPC_STOP |
|---|
| 34 | + li r3,0 |
|---|
| 35 | + blr |
|---|
| 195 | 36 | |
|---|
| 196 | | - mflr r0 |
|---|
| 197 | | - std r0,16(r1) |
|---|
| 198 | | - stdu r1,-INT_FRAME_SIZE(r1) |
|---|
| 199 | | - std r0,_LINK(r1) |
|---|
| 200 | | - std r0,_NIP(r1) |
|---|
| 201 | | - |
|---|
| 202 | | - /* We haven't lost state ... yet */ |
|---|
| 203 | | - li r0,0 |
|---|
| 204 | | - stb r0,PACA_NAPSTATELOST(r13) |
|---|
| 205 | | - |
|---|
| 206 | | - /* Continue saving state */ |
|---|
| 207 | | - SAVE_GPR(2, r1) |
|---|
| 208 | | - SAVE_NVGPRS(r1) |
|---|
| 209 | | - |
|---|
| 210 | | -BEGIN_FTR_SECTION |
|---|
| 211 | | - mfspr r4, SPRN_AMR |
|---|
| 212 | | - mfspr r5, SPRN_IAMR |
|---|
| 213 | | - mfspr r6, SPRN_UAMOR |
|---|
| 214 | | - std r4, PNV_POWERSAVE_AMR(r1) |
|---|
| 215 | | - std r5, PNV_POWERSAVE_IAMR(r1) |
|---|
| 216 | | - std r6, PNV_POWERSAVE_UAMOR(r1) |
|---|
| 217 | | -BEGIN_FTR_SECTION_NESTED(42) |
|---|
| 218 | | - mfspr r7, SPRN_AMOR |
|---|
| 219 | | - std r7, PNV_POWERSAVE_AMOR(r1) |
|---|
| 220 | | -END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) |
|---|
| 221 | | -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
|---|
| 222 | | - |
|---|
| 223 | | - mfcr r5 |
|---|
| 224 | | - std r5,_CCR(r1) |
|---|
| 37 | +/* |
|---|
| 38 | + * Desired PSSCR in r3 |
|---|
| 39 | + * |
|---|
| 40 | + * GPRs may be lost, so they are saved here. Wakeup is by interrupt only. |
|---|
| 41 | + * The SRESET wakeup returns to this function's caller by calling |
|---|
| 42 | + * idle_return_gpr_loss with r3 set to desired return value. |
|---|
| 43 | + * |
|---|
| 44 | + * A wakeup without GPR loss may alternatively be handled as in |
|---|
| 45 | + * isa300_idle_stop_noloss and blr directly, as an optimisation. |
|---|
| 46 | + * |
|---|
| 47 | + * The caller is responsible for saving/restoring SPRs, MSR, timebase, |
|---|
| 48 | + * etc. |
|---|
| 49 | + */ |
|---|
| 50 | +_GLOBAL(isa300_idle_stop_mayloss) |
|---|
| 51 | + mtspr SPRN_PSSCR,r3 |
|---|
| 225 | 52 | std r1,PACAR1(r13) |
|---|
| 53 | + mflr r4 |
|---|
| 54 | + mfcr r5 |
|---|
| 55 | + /* |
|---|
| 56 | + * Use the stack red zone rather than a new frame for saving regs since |
|---|
| 57 | + * in the case of no GPR loss the wakeup code branches directly back to |
|---|
| 58 | + * the caller without deallocating the stack frame first. |
|---|
| 59 | + */ |
|---|
| 60 | + std r2,-8*1(r1) |
|---|
| 61 | + std r14,-8*2(r1) |
|---|
| 62 | + std r15,-8*3(r1) |
|---|
| 63 | + std r16,-8*4(r1) |
|---|
| 64 | + std r17,-8*5(r1) |
|---|
| 65 | + std r18,-8*6(r1) |
|---|
| 66 | + std r19,-8*7(r1) |
|---|
| 67 | + std r20,-8*8(r1) |
|---|
| 68 | + std r21,-8*9(r1) |
|---|
| 69 | + std r22,-8*10(r1) |
|---|
| 70 | + std r23,-8*11(r1) |
|---|
| 71 | + std r24,-8*12(r1) |
|---|
| 72 | + std r25,-8*13(r1) |
|---|
| 73 | + std r26,-8*14(r1) |
|---|
| 74 | + std r27,-8*15(r1) |
|---|
| 75 | + std r28,-8*16(r1) |
|---|
| 76 | + std r29,-8*17(r1) |
|---|
| 77 | + std r30,-8*18(r1) |
|---|
| 78 | + std r31,-8*19(r1) |
|---|
| 79 | + std r4,-8*20(r1) |
|---|
| 80 | + std r5,-8*21(r1) |
|---|
| 81 | + /* 168 bytes */ |
|---|
| 82 | + PPC_STOP |
|---|
| 83 | + b . /* catch bugs */ |
|---|
| 226 | 84 | |
|---|
| 227 | | -BEGIN_FTR_SECTION |
|---|
| 85 | +/* |
|---|
| 86 | + * Desired return value in r3 |
|---|
| 87 | + * |
|---|
| 88 | + * The idle wakeup SRESET interrupt can call this after handling the |
|---|
| 89 | + * wakeup, to return to the idle sleep function caller with r3 as the return code. |
|---|
| 90 | + * |
|---|
| 91 | + * This must not be used if idle was entered via a _noloss function (use |
|---|
| 92 | + * a simple blr instead). |
|---|
| 93 | + */ |
|---|
| 94 | +_GLOBAL(idle_return_gpr_loss) |
|---|
| 95 | + ld r1,PACAR1(r13) |
|---|
| 96 | + ld r4,-8*20(r1) |
|---|
| 97 | + ld r5,-8*21(r1) |
|---|
| 98 | + mtlr r4 |
|---|
| 99 | + mtcr r5 |
|---|
| 228 | 100 | /* |
|---|
| 229 | | - * POWER9 does not require real mode to stop, and presently does not |
|---|
| 230 | | - * set hwthread_state for KVM (threads don't share MMU context), so |
|---|
| 231 | | - * we can remain in virtual mode for this. |
|---|
| 101 | + * KVM nap requires r2 to be saved, rather than just restoring it |
|---|
| 102 | + * from PACATOC. This could be avoided for that less common case |
|---|
| 103 | + * if KVM saved its r2. |
|---|
| 232 | 104 | */ |
|---|
| 233 | | - bctr |
|---|
| 234 | | -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
|---|
| 235 | | - /* |
|---|
| 236 | | - * POWER8 |
|---|
| 237 | | - * Go to real mode to do the nap, as required by the architecture. |
|---|
| 238 | | - * Also, we need to be in real mode before setting hwthread_state, |
|---|
| 239 | | - * because as soon as we do that, another thread can switch |
|---|
| 240 | | - * the MMU context to the guest. |
|---|
| 241 | | - */ |
|---|
| 242 | | - LOAD_REG_IMMEDIATE(r7, MSR_IDLE) |
|---|
| 243 | | - mtmsrd r7,0 |
|---|
| 244 | | - bctr |
|---|
| 105 | + ld r2,-8*1(r1) |
|---|
| 106 | + ld r14,-8*2(r1) |
|---|
| 107 | + ld r15,-8*3(r1) |
|---|
| 108 | + ld r16,-8*4(r1) |
|---|
| 109 | + ld r17,-8*5(r1) |
|---|
| 110 | + ld r18,-8*6(r1) |
|---|
| 111 | + ld r19,-8*7(r1) |
|---|
| 112 | + ld r20,-8*8(r1) |
|---|
| 113 | + ld r21,-8*9(r1) |
|---|
| 114 | + ld r22,-8*10(r1) |
|---|
| 115 | + ld r23,-8*11(r1) |
|---|
| 116 | + ld r24,-8*12(r1) |
|---|
| 117 | + ld r25,-8*13(r1) |
|---|
| 118 | + ld r26,-8*14(r1) |
|---|
| 119 | + ld r27,-8*15(r1) |
|---|
| 120 | + ld r28,-8*16(r1) |
|---|
| 121 | + ld r29,-8*17(r1) |
|---|
| 122 | + ld r30,-8*18(r1) |
|---|
| 123 | + ld r31,-8*19(r1) |
|---|
| 124 | + blr |
|---|
| 245 | 125 | |
|---|
| 246 | 126 | /* |
|---|
| 247 | 127 | * This is the sequence required to execute idle instructions, as |
|---|
| 248 | 128 | * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0. |
|---|
| 129 | + * We have to store a GPR somewhere, ptesync, then reload it, and create |
|---|
| 130 | + * a false dependency on the result of the load. It doesn't matter which |
|---|
| 131 | + * GPR we store, or where we store it. We have already stored r2 to the |
|---|
| 132 | + * stack at -8(r1) in isa206_idle_insn_mayloss, so use that. |
|---|
| 249 | 133 | */ |
|---|
| 250 | 134 | #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ |
|---|
| 251 | 135 | /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \ |
|---|
| 252 | | - std r0,0(r1); \ |
|---|
| 136 | + std r2,-8(r1); \ |
|---|
| 253 | 137 | ptesync; \ |
|---|
| 254 | | - ld r0,0(r1); \ |
|---|
| 255 | | -236: cmpd cr0,r0,r0; \ |
|---|
| 138 | + ld r2,-8(r1); \ |
|---|
| 139 | +236: cmpd cr0,r2,r2; \ |
|---|
| 256 | 140 | bne 236b; \ |
|---|
| 257 | | - IDLE_INST; |
|---|
| 258 | | - |
|---|
| 259 | | - |
|---|
| 260 | | - .globl pnv_enter_arch207_idle_mode |
|---|
| 261 | | -pnv_enter_arch207_idle_mode: |
|---|
| 262 | | -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
|---|
| 263 | | - /* Tell KVM we're entering idle */ |
|---|
| 264 | | - li r4,KVM_HWTHREAD_IN_IDLE |
|---|
| 265 | | - /******************************************************/ |
|---|
| 266 | | - /* N O T E W E L L ! ! ! N O T E W E L L */ |
|---|
| 267 | | - /* The following store to HSTATE_HWTHREAD_STATE(r13) */ |
|---|
| 268 | | - /* MUST occur in real mode, i.e. with the MMU off, */ |
|---|
| 269 | | - /* and the MMU must stay off until we clear this flag */ |
|---|
| 270 | | - /* and test HSTATE_HWTHREAD_REQ(r13) in */ |
|---|
| 271 | | - /* pnv_powersave_wakeup in this file. */ |
|---|
| 272 | | - /* The reason is that another thread can switch the */ |
|---|
| 273 | | - /* MMU to a guest context whenever this flag is set */ |
|---|
| 274 | | - /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */ |
|---|
| 275 | | - /* that would potentially cause this thread to start */ |
|---|
| 276 | | - /* executing instructions from guest memory in */ |
|---|
| 277 | | - /* hypervisor mode, leading to a host crash or data */ |
|---|
| 278 | | - /* corruption, or worse. */ |
|---|
| 279 | | - /******************************************************/ |
|---|
| 280 | | - stb r4,HSTATE_HWTHREAD_STATE(r13) |
|---|
| 281 | | -#endif |
|---|
| 282 | | - stb r3,PACA_THREAD_IDLE_STATE(r13) |
|---|
| 283 | | - cmpwi cr3,r3,PNV_THREAD_SLEEP |
|---|
| 284 | | - bge cr3,2f |
|---|
| 285 | | - IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) |
|---|
| 286 | | - /* No return */ |
|---|
| 287 | | -2: |
|---|
| 288 | | - /* Sleep or winkle */ |
|---|
| 289 | | - lbz r7,PACA_THREAD_MASK(r13) |
|---|
| 290 | | - ld r14,PACA_CORE_IDLE_STATE_PTR(r13) |
|---|
| 291 | | - li r5,0 |
|---|
| 292 | | - beq cr3,3f |
|---|
| 293 | | - lis r5,PNV_CORE_IDLE_WINKLE_COUNT@h |
|---|
| 294 | | -3: |
|---|
| 295 | | -lwarx_loop1: |
|---|
| 296 | | - lwarx r15,0,r14 |
|---|
| 297 | | - |
|---|
| 298 | | - andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 299 | | - bnel- core_idle_lock_held |
|---|
| 300 | | - |
|---|
| 301 | | - add r15,r15,r5 /* Add if winkle */ |
|---|
| 302 | | - andc r15,r15,r7 /* Clear thread bit */ |
|---|
| 303 | | - |
|---|
| 304 | | - andi. r9,r15,PNV_CORE_IDLE_THREAD_BITS |
|---|
| 141 | + IDLE_INST; \ |
|---|
| 142 | + b . /* catch bugs */ |
|---|
| 305 | 143 | |
|---|
| 306 | 144 | /* |
|---|
| 307 | | - * If cr0 = 0, then current thread is the last thread of the core entering |
|---|
| 308 | | - * sleep. Last thread needs to execute the hardware bug workaround code if |
|---|
| 309 | | - * required by the platform. |
|---|
| 310 | | - * Make the workaround call unconditionally here. The below branch call is |
|---|
| 311 | | - * patched out when the idle states are discovered if the platform does not |
|---|
| 312 | | - * require it. |
|---|
| 313 | | - */ |
|---|
| 314 | | -.global pnv_fastsleep_workaround_at_entry |
|---|
| 315 | | -pnv_fastsleep_workaround_at_entry: |
|---|
| 316 | | - beq fastsleep_workaround_at_entry |
|---|
| 317 | | - |
|---|
| 318 | | - stwcx. r15,0,r14 |
|---|
| 319 | | - bne- lwarx_loop1 |
|---|
| 320 | | - isync |
|---|
| 321 | | - |
|---|
| 322 | | -common_enter: /* common code for all the threads entering sleep or winkle */ |
|---|
| 323 | | - bgt cr3,enter_winkle |
|---|
| 324 | | - IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP) |
|---|
| 325 | | - |
|---|
| 326 | | -fastsleep_workaround_at_entry: |
|---|
| 327 | | - oris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 328 | | - stwcx. r15,0,r14 |
|---|
| 329 | | - bne- lwarx_loop1 |
|---|
| 330 | | - isync |
|---|
| 331 | | - |
|---|
| 332 | | - /* Fast sleep workaround */ |
|---|
| 333 | | - li r3,1 |
|---|
| 334 | | - li r4,1 |
|---|
| 335 | | - bl opal_config_cpu_idle_state |
|---|
| 336 | | - |
|---|
| 337 | | - /* Unlock */ |
|---|
| 338 | | - xoris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 339 | | - lwsync |
|---|
| 340 | | - stw r15,0(r14) |
|---|
| 341 | | - b common_enter |
|---|
| 342 | | - |
|---|
| 343 | | -enter_winkle: |
|---|
| 344 | | - bl save_sprs_to_stack |
|---|
| 345 | | - |
|---|
| 346 | | - IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE) |
|---|
| 347 | | - |
|---|
| 348 | | -/* |
|---|
| 349 | | - * r3 - PSSCR value corresponding to the requested stop state. |
|---|
| 350 | | - */ |
|---|
| 351 | | -power_enter_stop: |
|---|
| 352 | | -/* |
|---|
| 353 | | - * Check if we are executing the lite variant with ESL=EC=0 |
|---|
| 354 | | - */ |
|---|
| 355 | | - andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED |
|---|
| 356 | | - clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ |
|---|
| 357 | | - bne .Lhandle_esl_ec_set |
|---|
| 358 | | - PPC_STOP |
|---|
| 359 | | - li r3,0 /* Since we didn't lose state, return 0 */ |
|---|
| 360 | | - std r3, PACA_REQ_PSSCR(r13) |
|---|
| 361 | | - |
|---|
| 362 | | - /* |
|---|
| 363 | | - * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so |
|---|
| 364 | | - * it can determine if the wakeup reason is an HMI in |
|---|
| 365 | | - * CHECK_HMI_INTERRUPT. |
|---|
| 366 | | - * |
|---|
| 367 | | - * However, when we wakeup with ESL=0, SRR1 will not contain the wakeup |
|---|
| 368 | | - * reason, so there is no point setting r12 to SRR1. |
|---|
| 369 | | - * |
|---|
| 370 | | - * Further, we clear r12 here, so that we don't accidentally enter the |
|---|
| 371 | | - * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI. |
|---|
| 372 | | - */ |
|---|
| 373 | | - li r12, 0 |
|---|
| 374 | | - b pnv_wakeup_noloss |
|---|
| 375 | | - |
|---|
| 376 | | -.Lhandle_esl_ec_set: |
|---|
| 377 | | -BEGIN_FTR_SECTION |
|---|
| 378 | | - /* |
|---|
| 379 | | - * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up after |
|---|
| 380 | | - * a state-loss idle. Saving and restoring MMCR0 over idle is a |
|---|
| 381 | | - * workaround. |
|---|
| 382 | | - */ |
|---|
| 383 | | - mfspr r4,SPRN_MMCR0 |
|---|
| 384 | | - std r4,_MMCR0(r1) |
|---|
| 385 | | -END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1) |
|---|
| 386 | | - |
|---|
| 387 | | -/* |
|---|
| 388 | | - * Check if the requested state is a deep idle state. |
|---|
| 389 | | - */ |
|---|
| 390 | | - LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) |
|---|
| 391 | | - ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) |
|---|
| 392 | | - cmpd r3,r4 |
|---|
| 393 | | - bge .Lhandle_deep_stop |
|---|
| 394 | | - PPC_STOP /* Does not return (system reset interrupt) */ |
|---|
| 395 | | - |
|---|
| 396 | | -.Lhandle_deep_stop: |
|---|
| 397 | | -/* |
|---|
| 398 | | - * Entering deep idle state. |
|---|
| 399 | | - * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to |
|---|
| 400 | | - * stack and enter stop |
|---|
| 401 | | - */ |
|---|
| 402 | | - lbz r7,PACA_THREAD_MASK(r13) |
|---|
| 403 | | - ld r14,PACA_CORE_IDLE_STATE_PTR(r13) |
|---|
| 404 | | - |
|---|
| 405 | | -lwarx_loop_stop: |
|---|
| 406 | | - lwarx r15,0,r14 |
|---|
| 407 | | - andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 408 | | - bnel- core_idle_lock_held |
|---|
| 409 | | - andc r15,r15,r7 /* Clear thread bit */ |
|---|
| 410 | | - |
|---|
| 411 | | - stwcx. r15,0,r14 |
|---|
| 412 | | - bne- lwarx_loop_stop |
|---|
| 413 | | - isync |
|---|
| 414 | | - |
|---|
| 415 | | - bl save_sprs_to_stack |
|---|
| 416 | | - |
|---|
| 417 | | - PPC_STOP /* Does not return (system reset interrupt) */ |
|---|
| 418 | | - |
|---|
| 419 | | -/* |
|---|
| 420 | | - * Entered with MSR[EE]=0 and no soft-masked interrupts pending. |
|---|
| 421 | | - * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE). |
|---|
| 422 | | - */ |
|---|
| 423 | | -_GLOBAL(power7_idle_insn) |
|---|
| 424 | | - /* Now check if user or arch enabled NAP mode */ |
|---|
| 425 | | - LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode) |
|---|
| 426 | | - b pnv_powersave_common |
|---|
| 427 | | - |
|---|
| 428 | | -#define CHECK_HMI_INTERRUPT \ |
|---|
| 429 | | -BEGIN_FTR_SECTION_NESTED(66); \ |
|---|
| 430 | | - rlwinm r0,r12,45-31,0xf; /* extract wake reason field (P8) */ \ |
|---|
| 431 | | -FTR_SECTION_ELSE_NESTED(66); \ |
|---|
| 432 | | - rlwinm r0,r12,45-31,0xe; /* P7 wake reason field is 3 bits */ \ |
|---|
| 433 | | -ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \ |
|---|
| 434 | | - cmpwi r0,0xa; /* Hypervisor maintenance ? */ \ |
|---|
| 435 | | - bne+ 20f; \ |
|---|
| 436 | | - /* Invoke opal call to handle hmi */ \ |
|---|
| 437 | | - ld r2,PACATOC(r13); \ |
|---|
| 438 | | - ld r1,PACAR1(r13); \ |
|---|
| 439 | | - std r3,ORIG_GPR3(r1); /* Save original r3 */ \ |
|---|
| 440 | | - li r3,0; /* NULL argument */ \ |
|---|
| 441 | | - bl hmi_exception_realmode; \ |
|---|
| 442 | | - nop; \ |
|---|
| 443 | | - ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \ |
|---|
| 444 | | -20: nop; |
|---|
| 445 | | - |
|---|
| 446 | | -/* |
|---|
| 447 | | - * Entered with MSR[EE]=0 and no soft-masked interrupts pending. |
|---|
| 448 | | - * r3 contains desired PSSCR register value. |
|---|
| 145 | + * Desired instruction type in r3 |
|---|
| 449 | 146 | * |
|---|
| 450 | | - * Offline (CPU unplug) case also must notify KVM that the CPU is |
|---|
| 451 | | - * idle. |
|---|
| 147 | + * GPRs may be lost, so they are saved here. Wakeup is by interrupt only. |
|---|
| 148 | + * The SRESET wakeup returns to this function's caller by calling |
|---|
| 149 | + * idle_return_gpr_loss with r3 set to desired return value. |
|---|
| 150 | + * |
|---|
| 151 | + * A wakeup without GPR loss may alternatively be handled as in |
|---|
| 152 | + * isa300_idle_stop_noloss and blr directly, as an optimisation. |
|---|
| 153 | + * |
|---|
| 154 | + * The caller is responsible for saving/restoring SPRs, MSR, timebase, |
|---|
| 155 | + * etc. |
|---|
| 156 | + * |
|---|
| 157 | + * This must be called in real-mode (MSR_IDLE). |
|---|
| 452 | 158 | */ |
|---|
| 453 | | -_GLOBAL(power9_offline_stop) |
|---|
| 454 | | -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
|---|
| 159 | +_GLOBAL(isa206_idle_insn_mayloss) |
|---|
| 160 | + std r1,PACAR1(r13) |
|---|
| 161 | + mflr r4 |
|---|
| 162 | + mfcr r5 |
|---|
| 455 | 163 | /* |
|---|
| 456 | | - * Tell KVM we're entering idle. |
|---|
| 457 | | - * This does not have to be done in real mode because the P9 MMU |
|---|
| 458 | | - * is independent per-thread. Some steppings share radix/hash mode |
|---|
| 459 | | - * between threads, but in that case KVM has a barrier sync in real |
|---|
| 460 | | - * mode before and after switching between radix and hash. |
|---|
| 164 | + * Use the stack red zone rather than a new frame for saving regs since |
|---|
| 165 | + * in the case of no GPR loss the wakeup code branches directly back to |
|---|
| 166 | + * the caller without deallocating the stack frame first. |
|---|
| 461 | 167 | */ |
|---|
| 462 | | - li r4,KVM_HWTHREAD_IN_IDLE |
|---|
| 463 | | - stb r4,HSTATE_HWTHREAD_STATE(r13) |
|---|
| 464 | | -#endif |
|---|
| 465 | | - /* fall through */ |
|---|
| 466 | | - |
|---|
| 467 | | -_GLOBAL(power9_idle_stop) |
|---|
| 468 | | - std r3, PACA_REQ_PSSCR(r13) |
|---|
| 469 | | -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
|---|
| 470 | | -BEGIN_FTR_SECTION |
|---|
| 471 | | - sync |
|---|
| 472 | | - lwz r5, PACA_DONT_STOP(r13) |
|---|
| 473 | | - cmpwi r5, 0 |
|---|
| 168 | + std r2,-8*1(r1) |
|---|
| 169 | + std r14,-8*2(r1) |
|---|
| 170 | + std r15,-8*3(r1) |
|---|
| 171 | + std r16,-8*4(r1) |
|---|
| 172 | + std r17,-8*5(r1) |
|---|
| 173 | + std r18,-8*6(r1) |
|---|
| 174 | + std r19,-8*7(r1) |
|---|
| 175 | + std r20,-8*8(r1) |
|---|
| 176 | + std r21,-8*9(r1) |
|---|
| 177 | + std r22,-8*10(r1) |
|---|
| 178 | + std r23,-8*11(r1) |
|---|
| 179 | + std r24,-8*12(r1) |
|---|
| 180 | + std r25,-8*13(r1) |
|---|
| 181 | + std r26,-8*14(r1) |
|---|
| 182 | + std r27,-8*15(r1) |
|---|
| 183 | + std r28,-8*16(r1) |
|---|
| 184 | + std r29,-8*17(r1) |
|---|
| 185 | + std r30,-8*18(r1) |
|---|
| 186 | + std r31,-8*19(r1) |
|---|
| 187 | + std r4,-8*20(r1) |
|---|
| 188 | + std r5,-8*21(r1) |
|---|
| 189 | + cmpwi r3,PNV_THREAD_NAP |
|---|
| 474 | 190 | bne 1f |
|---|
| 475 | | -END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) |
|---|
| 476 | | -#endif |
|---|
| 477 | | - mtspr SPRN_PSSCR,r3 |
|---|
| 478 | | - LOAD_REG_ADDR(r4,power_enter_stop) |
|---|
| 479 | | - b pnv_powersave_common |
|---|
| 480 | | - /* No return */ |
|---|
| 481 | | -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
|---|
| 482 | | -1: |
|---|
| 483 | | - /* |
|---|
| 484 | | - * We get here when TM / thread reconfiguration bug workaround |
|---|
| 485 | | - * code wants to get the CPU into SMT4 mode, and therefore |
|---|
| 486 | | - * we are being asked not to stop. |
|---|
| 487 | | - */ |
|---|
| 488 | | - li r3, 0 |
|---|
| 489 | | - std r3, PACA_REQ_PSSCR(r13) |
|---|
| 490 | | - blr /* return 0 for wakeup cause / SRR1 value */ |
|---|
| 491 | | -#endif |
|---|
| 492 | | - |
|---|
| 493 | | -/* |
|---|
| 494 | | - * Called from machine check handler for powersave wakeups. |
|---|
| 495 | | - * Low level machine check processing has already been done. Now just |
|---|
| 496 | | - * go through the wake up path to get everything in order. |
|---|
| 497 | | - * |
|---|
| 498 | | - * r3 - The original SRR1 value. |
|---|
| 499 | | - * Original SRR[01] have been clobbered. |
|---|
| 500 | | - * MSR_RI is clear. |
|---|
| 501 | | - */ |
|---|
| 502 | | -.global pnv_powersave_wakeup_mce |
|---|
| 503 | | -pnv_powersave_wakeup_mce: |
|---|
| 504 | | - /* Set cr3 for pnv_powersave_wakeup */ |
|---|
| 505 | | - rlwinm r11,r3,47-31,30,31 |
|---|
| 506 | | - cmpwi cr3,r11,2 |
|---|
| 507 | | - |
|---|
| 508 | | - /* |
|---|
| 509 | | - * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake |
|---|
| 510 | | - * reason into r12, which allows reuse of the system reset wakeup |
|---|
| 511 | | - * code without being mistaken for another type of wakeup. |
|---|
| 512 | | - */ |
|---|
| 513 | | - oris r12,r3,SRR1_WAKEMCE_RESVD@h |
|---|
| 514 | | - |
|---|
| 515 | | - b pnv_powersave_wakeup |
|---|
| 516 | | - |
|---|
| 517 | | -/* |
|---|
| 518 | | - * Called from reset vector for powersave wakeups. |
|---|
| 519 | | - * cr3 - set to gt if waking up with partial/complete hypervisor state loss |
|---|
| 520 | | - * r12 - SRR1 |
|---|
| 521 | | - */ |
|---|
| 522 | | -.global pnv_powersave_wakeup |
|---|
| 523 | | -pnv_powersave_wakeup: |
|---|
| 524 | | - ld r2, PACATOC(r13) |
|---|
| 525 | | - |
|---|
| 526 | | -BEGIN_FTR_SECTION |
|---|
| 527 | | - bl pnv_restore_hyp_resource_arch300 |
|---|
| 528 | | -FTR_SECTION_ELSE |
|---|
| 529 | | - bl pnv_restore_hyp_resource_arch207 |
|---|
| 530 | | -ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) |
|---|
| 531 | | - |
|---|
| 532 | | - li r0,PNV_THREAD_RUNNING |
|---|
| 533 | | - stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */ |
|---|
| 534 | | - |
|---|
| 535 | | - mr r3,r12 |
|---|
| 536 | | - |
|---|
| 537 | | -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
|---|
| 538 | | - lbz r0,HSTATE_HWTHREAD_STATE(r13) |
|---|
| 539 | | - cmpwi r0,KVM_HWTHREAD_IN_KERNEL |
|---|
| 540 | | - beq 0f |
|---|
| 541 | | - li r0,KVM_HWTHREAD_IN_KERNEL |
|---|
| 542 | | - stb r0,HSTATE_HWTHREAD_STATE(r13) |
|---|
| 543 | | - /* Order setting hwthread_state vs. testing hwthread_req */ |
|---|
| 544 | | - sync |
|---|
| 545 | | -0: lbz r0,HSTATE_HWTHREAD_REQ(r13) |
|---|
| 546 | | - cmpwi r0,0 |
|---|
| 547 | | - beq 1f |
|---|
| 548 | | - b kvm_start_guest |
|---|
| 549 | | -1: |
|---|
| 550 | | -#endif |
|---|
| 551 | | - |
|---|
| 552 | | - /* Return SRR1 from power7_nap() */ |
|---|
| 553 | | - blt cr3,pnv_wakeup_noloss |
|---|
| 554 | | - b pnv_wakeup_loss |
|---|
| 555 | | - |
|---|
| 556 | | -/* |
|---|
| 557 | | - * Check whether we have woken up with hypervisor state loss. |
|---|
| 558 | | - * If yes, restore hypervisor state and return back to link. |
|---|
| 559 | | - * |
|---|
| 560 | | - * cr3 - set to gt if waking up with partial/complete hypervisor state loss |
|---|
| 561 | | - */ |
|---|
| 562 | | -pnv_restore_hyp_resource_arch300: |
|---|
| 563 | | - /* |
|---|
| 564 | | - * Workaround for POWER9, if we lost resources, the ERAT |
|---|
| 565 | | - * might have been mixed up and needs flushing. We also need |
|---|
| 566 | | - * to reload MMCR0 (see comment above). We also need to set |
|---|
| 567 | | - * then clear bit 60 in MMCRA to ensure the PMU starts running. |
|---|
| 568 | | - */ |
|---|
| 569 | | - blt cr3,1f |
|---|
| 570 | | -BEGIN_FTR_SECTION |
|---|
| 571 | | - PPC_INVALIDATE_ERAT |
|---|
| 572 | | - ld r1,PACAR1(r13) |
|---|
| 573 | | - ld r4,_MMCR0(r1) |
|---|
| 574 | | - mtspr SPRN_MMCR0,r4 |
|---|
| 575 | | -END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1) |
|---|
| 576 | | - mfspr r4,SPRN_MMCRA |
|---|
| 577 | | - ori r4,r4,(1 << (63-60)) |
|---|
| 578 | | - mtspr SPRN_MMCRA,r4 |
|---|
| 579 | | - xori r4,r4,(1 << (63-60)) |
|---|
| 580 | | - mtspr SPRN_MMCRA,r4 |
|---|
| 581 | | -1: |
|---|
| 582 | | - /* |
|---|
| 583 | | - * POWER ISA 3. Use PSSCR to determine if we |
|---|
| 584 | | - * are waking up from deep idle state |
|---|
| 585 | | - */ |
|---|
| 586 | | - LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) |
|---|
| 587 | | - ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) |
|---|
| 588 | | - |
|---|
| 589 | | - /* |
|---|
| 590 | | - * 0-3 bits correspond to Power-Saving Level Status |
|---|
| 591 | | - * which indicates the idle state we are waking up from |
|---|
| 592 | | - */ |
|---|
| 593 | | - mfspr r5, SPRN_PSSCR |
|---|
| 594 | | - rldicl r5,r5,4,60 |
|---|
| 595 | | - li r0, 0 /* clear requested_psscr to say we're awake */ |
|---|
| 596 | | - std r0, PACA_REQ_PSSCR(r13) |
|---|
| 597 | | - cmpd cr4,r5,r4 |
|---|
| 598 | | - bge cr4,pnv_wakeup_tb_loss /* returns to caller */ |
|---|
| 599 | | - |
|---|
| 600 | | - blr /* Waking up without hypervisor state loss. */ |
|---|
| 601 | | - |
|---|
| 602 | | -/* Same calling convention as arch300 */ |
|---|
| 603 | | -pnv_restore_hyp_resource_arch207: |
|---|
| 604 | | - /* |
|---|
| 605 | | - * POWER ISA 2.07 or less. |
|---|
| 606 | | - * Check if we slept with sleep or winkle. |
|---|
| 607 | | - */ |
|---|
| 608 | | - lbz r4,PACA_THREAD_IDLE_STATE(r13) |
|---|
| 609 | | - cmpwi cr2,r4,PNV_THREAD_NAP |
|---|
| 610 | | - bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */ |
|---|
| 611 | | - |
|---|
| 612 | | - /* |
|---|
| 613 | | - * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking |
|---|
| 614 | | - * up from nap. At this stage CR3 shouldn't contains 'gt' since that |
|---|
| 615 | | - * indicates we are waking with hypervisor state loss from nap. |
|---|
| 616 | | - */ |
|---|
| 617 | | - bgt cr3,. |
|---|
| 618 | | - |
|---|
| 619 | | - blr /* Waking up without hypervisor state loss */ |
|---|
| 620 | | - |
|---|
| 621 | | -/* |
|---|
| 622 | | - * Called if waking up from idle state which can cause either partial or |
|---|
| 623 | | - * complete hyp state loss. |
|---|
| 624 | | - * In POWER8, called if waking up from fastsleep or winkle |
|---|
| 625 | | - * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state |
|---|
| 626 | | - * |
|---|
| 627 | | - * r13 - PACA |
|---|
| 628 | | - * cr3 - gt if waking up with partial/complete hypervisor state loss |
|---|
| 629 | | - * |
|---|
| 630 | | - * If ISA300: |
|---|
| 631 | | - * cr4 - gt or eq if waking up from complete hypervisor state loss. |
|---|
| 632 | | - * |
|---|
| 633 | | - * If ISA207: |
|---|
| 634 | | - * r4 - PACA_THREAD_IDLE_STATE |
|---|
| 635 | | - */ |
|---|
| 636 | | -pnv_wakeup_tb_loss: |
|---|
| 637 | | - ld r1,PACAR1(r13) |
|---|
| 638 | | - /* |
|---|
| 639 | | - * Before entering any idle state, the NVGPRs are saved in the stack. |
|---|
| 640 | | - * If there was a state loss, or PACA_NAPSTATELOST was set, then the |
|---|
| 641 | | - * NVGPRs are restored. If we are here, it is likely that state is lost, |
|---|
| 642 | | - * but not guaranteed -- neither ISA207 nor ISA300 tests to reach |
|---|
| 643 | | - * here are the same as the test to restore NVGPRS: |
|---|
| 644 | | - * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300, |
|---|
| 645 | | - * and SRR1 test for restoring NVGPRs. |
|---|
| 646 | | - * |
|---|
| 647 | | - * We are about to clobber NVGPRs now, so set NAPSTATELOST to |
|---|
| 648 | | - * guarantee they will always be restored. This might be tightened |
|---|
| 649 | | - * with careful reading of specs (particularly for ISA300) but this |
|---|
| 650 | | - * is already a slow wakeup path and it's simpler to be safe. |
|---|
| 651 | | - */ |
|---|
| 652 | | - li r0,1 |
|---|
| 653 | | - stb r0,PACA_NAPSTATELOST(r13) |
|---|
| 654 | | - |
|---|
| 655 | | - /* |
|---|
| 656 | | - * |
|---|
| 657 | | - * Save SRR1 and LR in NVGPRs as they might be clobbered in |
|---|
| 658 | | - * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required |
|---|
| 659 | | - * to determine the wakeup reason if we branch to kvm_start_guest. LR |
|---|
| 660 | | - * is required to return back to reset vector after hypervisor state |
|---|
| 661 | | - * restore is complete. |
|---|
| 662 | | - */ |
|---|
| 663 | | - mr r19,r12 |
|---|
| 664 | | - mr r18,r4 |
|---|
| 665 | | - mflr r17 |
|---|
| 666 | | -BEGIN_FTR_SECTION |
|---|
| 667 | | - CHECK_HMI_INTERRUPT |
|---|
| 668 | | -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) |
|---|
| 669 | | - |
|---|
| 670 | | - ld r14,PACA_CORE_IDLE_STATE_PTR(r13) |
|---|
| 671 | | - lbz r7,PACA_THREAD_MASK(r13) |
|---|
| 672 | | - |
|---|
| 673 | | - /* |
|---|
| 674 | | - * Take the core lock to synchronize against other threads. |
|---|
| 675 | | - * |
|---|
| 676 | | - * Lock bit is set in one of the 2 cases- |
|---|
| 677 | | - * a. In the sleep/winkle enter path, the last thread is executing |
|---|
| 678 | | - * fastsleep workaround code. |
|---|
| 679 | | - * b. In the wake up path, another thread is executing fastsleep |
|---|
| 680 | | - * workaround undo code or resyncing timebase or restoring context |
|---|
| 681 | | - * In either case loop until the lock bit is cleared. |
|---|
| 682 | | - */ |
|---|
| 683 | | -1: |
|---|
| 684 | | - lwarx r15,0,r14 |
|---|
| 685 | | - andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 686 | | - bnel- core_idle_lock_held |
|---|
| 687 | | - oris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 688 | | - stwcx. r15,0,r14 |
|---|
| 689 | | - bne- 1b |
|---|
| 690 | | - isync |
|---|
| 691 | | - |
|---|
| 692 | | - andi. r9,r15,PNV_CORE_IDLE_THREAD_BITS |
|---|
| 693 | | - cmpwi cr2,r9,0 |
|---|
| 694 | | - |
|---|
| 695 | | - /* |
|---|
| 696 | | - * At this stage |
|---|
| 697 | | - * cr2 - eq if first thread to wakeup in core |
|---|
| 698 | | - * cr3- gt if waking up with partial/complete hypervisor state loss |
|---|
| 699 | | - * ISA300: |
|---|
| 700 | | - * cr4 - gt or eq if waking up from complete hypervisor state loss. |
|---|
| 701 | | - */ |
|---|
| 702 | | - |
|---|
| 703 | | -BEGIN_FTR_SECTION |
|---|
| 704 | | - /* |
|---|
| 705 | | - * Were we in winkle? |
|---|
| 706 | | - * If yes, check if all threads were in winkle, decrement our |
|---|
| 707 | | - * winkle count, set all thread winkle bits if all were in winkle. |
|---|
| 708 | | - * Check if our thread has a winkle bit set, and set cr4 accordingly |
|---|
| 709 | | - * (to match ISA300, above). Pseudo-code for core idle state |
|---|
| 710 | | - * transitions for ISA207 is as follows (everything happens atomically |
|---|
| 711 | | - * due to store conditional and/or lock bit): |
|---|
| 712 | | - * |
|---|
| 713 | | - * nap_idle() { } |
|---|
| 714 | | - * nap_wake() { } |
|---|
| 715 | | - * |
|---|
| 716 | | - * sleep_idle() |
|---|
| 717 | | - * { |
|---|
| 718 | | - * core_idle_state &= ~thread_in_core |
|---|
| 719 | | - * } |
|---|
| 720 | | - * |
|---|
| 721 | | - * sleep_wake() |
|---|
| 722 | | - * { |
|---|
| 723 | | - * bool first_in_core, first_in_subcore; |
|---|
| 724 | | - * |
|---|
| 725 | | - * first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0; |
|---|
| 726 | | - * first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0; |
|---|
| 727 | | - * |
|---|
| 728 | | - * core_idle_state |= thread_in_core; |
|---|
| 729 | | - * } |
|---|
| 730 | | - * |
|---|
| 731 | | - * winkle_idle() |
|---|
| 732 | | - * { |
|---|
| 733 | | - * core_idle_state &= ~thread_in_core; |
|---|
| 734 | | - * core_idle_state += 1 << WINKLE_COUNT_SHIFT; |
|---|
| 735 | | - * } |
|---|
| 736 | | - * |
|---|
| 737 | | - * winkle_wake() |
|---|
| 738 | | - * { |
|---|
| 739 | | - * bool first_in_core, first_in_subcore, winkle_state_lost; |
|---|
| 740 | | - * |
|---|
| 741 | | - * first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0; |
|---|
| 742 | | - * first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0; |
|---|
| 743 | | - * |
|---|
| 744 | | - * core_idle_state |= thread_in_core; |
|---|
| 745 | | - * |
|---|
| 746 | | - * if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SIHFT)) |
|---|
| 747 | | - * core_idle_state |= THREAD_WINKLE_BITS; |
|---|
| 748 | | - * core_idle_state -= 1 << WINKLE_COUNT_SHIFT; |
|---|
| 749 | | - * |
|---|
| 750 | | - * winkle_state_lost = core_idle_state & |
|---|
| 751 | | - * (thread_in_core << WINKLE_THREAD_SHIFT); |
|---|
| 752 | | - * core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT); |
|---|
| 753 | | - * } |
|---|
| 754 | | - * |
|---|
| 755 | | - */ |
|---|
| 756 | | - cmpwi r18,PNV_THREAD_WINKLE |
|---|
| 191 | + IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP) |
|---|
| 192 | +1: cmpwi r3,PNV_THREAD_SLEEP |
|---|
| 757 | 193 | bne 2f |
|---|
| 758 | | - andis. r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h |
|---|
| 759 | | - subis r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h |
|---|
| 760 | | - beq 2f |
|---|
| 761 | | - ori r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */ |
|---|
| 762 | | -2: |
|---|
| 763 | | - /* Shift thread bit to winkle mask, then test if this thread is set, |
|---|
| 764 | | - * and remove it from the winkle bits */ |
|---|
| 765 | | - slwi r8,r7,8 |
|---|
| 766 | | - and r8,r8,r15 |
|---|
| 767 | | - andc r15,r15,r8 |
|---|
| 768 | | - cmpwi cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */ |
|---|
| 769 | | - |
|---|
| 770 | | - lbz r4,PACA_SUBCORE_SIBLING_MASK(r13) |
|---|
| 771 | | - and r4,r4,r15 |
|---|
| 772 | | - cmpwi r4,0 /* Check if first in subcore */ |
|---|
| 773 | | - |
|---|
| 774 | | - or r15,r15,r7 /* Set thread bit */ |
|---|
| 775 | | - beq first_thread_in_subcore |
|---|
| 776 | | -END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
|---|
| 777 | | - |
|---|
| 778 | | - or r15,r15,r7 /* Set thread bit */ |
|---|
| 779 | | - beq cr2,first_thread_in_core |
|---|
| 780 | | - |
|---|
| 781 | | - /* Not first thread in core or subcore to wake up */ |
|---|
| 782 | | - b clear_lock |
|---|
| 783 | | - |
|---|
| 784 | | -first_thread_in_subcore: |
|---|
| 785 | | - /* |
|---|
| 786 | | - * If waking up from sleep, subcore state is not lost. Hence |
|---|
| 787 | | - * skip subcore state restore |
|---|
| 788 | | - */ |
|---|
| 789 | | - blt cr4,subcore_state_restored |
|---|
| 790 | | - |
|---|
| 791 | | - /* Restore per-subcore state */ |
|---|
| 792 | | - ld r4,_SDR1(r1) |
|---|
| 793 | | - mtspr SPRN_SDR1,r4 |
|---|
| 794 | | - |
|---|
| 795 | | - ld r4,_RPR(r1) |
|---|
| 796 | | - mtspr SPRN_RPR,r4 |
|---|
| 797 | | - ld r4,_AMOR(r1) |
|---|
| 798 | | - mtspr SPRN_AMOR,r4 |
|---|
| 799 | | - |
|---|
| 800 | | -subcore_state_restored: |
|---|
| 801 | | - /* |
|---|
| 802 | | - * Check if the thread is also the first thread in the core. If not, |
|---|
| 803 | | - * skip to clear_lock. |
|---|
| 804 | | - */ |
|---|
| 805 | | - bne cr2,clear_lock |
|---|
| 806 | | - |
|---|
| 807 | | -first_thread_in_core: |
|---|
| 808 | | - |
|---|
| 809 | | - /* |
|---|
| 810 | | - * First thread in the core waking up from any state which can cause |
|---|
| 811 | | - * partial or complete hypervisor state loss. It needs to |
|---|
| 812 | | - * call the fastsleep workaround code if the platform requires it. |
|---|
| 813 | | - * Call it unconditionally here. The below branch instruction will |
|---|
| 814 | | - * be patched out if the platform does not have fastsleep or does not |
|---|
| 815 | | - * require the workaround. Patching will be performed during the |
|---|
| 816 | | - * discovery of idle-states. |
|---|
| 817 | | - */ |
|---|
| 818 | | -.global pnv_fastsleep_workaround_at_exit |
|---|
| 819 | | -pnv_fastsleep_workaround_at_exit: |
|---|
| 820 | | - b fastsleep_workaround_at_exit |
|---|
| 821 | | - |
|---|
| 822 | | -timebase_resync: |
|---|
| 823 | | - /* |
|---|
| 824 | | - * Use cr3 which indicates that we are waking up with atleast partial |
|---|
| 825 | | - * hypervisor state loss to determine if TIMEBASE RESYNC is needed. |
|---|
| 826 | | - */ |
|---|
| 827 | | - ble cr3,.Ltb_resynced |
|---|
| 828 | | - /* Time base re-sync */ |
|---|
| 829 | | - bl opal_resync_timebase; |
|---|
| 830 | | - /* |
|---|
| 831 | | - * If waking up from sleep (POWER8), per core state |
|---|
| 832 | | - * is not lost, skip to clear_lock. |
|---|
| 833 | | - */ |
|---|
| 834 | | -.Ltb_resynced: |
|---|
| 835 | | - blt cr4,clear_lock |
|---|
| 836 | | - |
|---|
| 837 | | - /* |
|---|
| 838 | | - * First thread in the core to wake up and its waking up with |
|---|
| 839 | | - * complete hypervisor state loss. Restore per core hypervisor |
|---|
| 840 | | - * state. |
|---|
| 841 | | - */ |
|---|
| 842 | | -BEGIN_FTR_SECTION |
|---|
| 843 | | - ld r4,_PTCR(r1) |
|---|
| 844 | | - mtspr SPRN_PTCR,r4 |
|---|
| 845 | | - ld r4,_RPR(r1) |
|---|
| 846 | | - mtspr SPRN_RPR,r4 |
|---|
| 847 | | - ld r4,_AMOR(r1) |
|---|
| 848 | | - mtspr SPRN_AMOR,r4 |
|---|
| 849 | | -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
|---|
| 850 | | - |
|---|
| 851 | | - ld r4,_TSCR(r1) |
|---|
| 852 | | - mtspr SPRN_TSCR,r4 |
|---|
| 853 | | - ld r4,_WORC(r1) |
|---|
| 854 | | - mtspr SPRN_WORC,r4 |
|---|
| 855 | | - |
|---|
| 856 | | -clear_lock: |
|---|
| 857 | | - xoris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h |
|---|
| 858 | | - lwsync |
|---|
| 859 | | - stw r15,0(r14) |
|---|
| 860 | | - |
|---|
| 861 | | -common_exit: |
|---|
| 862 | | - /* |
|---|
| 863 | | - * Common to all threads. |
|---|
| 864 | | - * |
|---|
| 865 | | - * If waking up from sleep, hypervisor state is not lost. Hence |
|---|
| 866 | | - * skip hypervisor state restore. |
|---|
| 867 | | - */ |
|---|
| 868 | | - blt cr4,hypervisor_state_restored |
|---|
| 869 | | - |
|---|
| 870 | | - /* Waking up from winkle */ |
|---|
| 871 | | - |
|---|
| 872 | | -BEGIN_MMU_FTR_SECTION |
|---|
| 873 | | - b no_segments |
|---|
| 874 | | -END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) |
|---|
| 875 | | - /* Restore SLB from PACA */ |
|---|
| 876 | | - ld r8,PACA_SLBSHADOWPTR(r13) |
|---|
| 877 | | - |
|---|
| 878 | | - .rept SLB_NUM_BOLTED |
|---|
| 879 | | - li r3, SLBSHADOW_SAVEAREA |
|---|
| 880 | | - LDX_BE r5, r8, r3 |
|---|
| 881 | | - addi r3, r3, 8 |
|---|
| 882 | | - LDX_BE r6, r8, r3 |
|---|
| 883 | | - andis. r7,r5,SLB_ESID_V@h |
|---|
| 884 | | - beq 1f |
|---|
| 885 | | - slbmte r6,r5 |
|---|
| 886 | | -1: addi r8,r8,16 |
|---|
| 887 | | - .endr |
|---|
| 888 | | -no_segments: |
|---|
| 889 | | - |
|---|
| 890 | | - /* Restore per thread state */ |
|---|
| 891 | | - |
|---|
| 892 | | - ld r4,_SPURR(r1) |
|---|
| 893 | | - mtspr SPRN_SPURR,r4 |
|---|
| 894 | | - ld r4,_PURR(r1) |
|---|
| 895 | | - mtspr SPRN_PURR,r4 |
|---|
| 896 | | - ld r4,_DSCR(r1) |
|---|
| 897 | | - mtspr SPRN_DSCR,r4 |
|---|
| 898 | | - ld r4,_WORT(r1) |
|---|
| 899 | | - mtspr SPRN_WORT,r4 |
|---|
| 900 | | - |
|---|
| 901 | | - /* Call cur_cpu_spec->cpu_restore() */ |
|---|
| 902 | | - LOAD_REG_ADDR(r4, cur_cpu_spec) |
|---|
| 903 | | - ld r4,0(r4) |
|---|
| 904 | | - ld r12,CPU_SPEC_RESTORE(r4) |
|---|
| 905 | | -#ifdef PPC64_ELF_ABI_v1 |
|---|
| 906 | | - ld r12,0(r12) |
|---|
| 194 | + IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP) |
|---|
| 195 | +2: IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE) |
|---|
| 907 | 196 | #endif |
|---|
| 908 | | - mtctr r12 |
|---|
| 909 | | - bctrl |
|---|
| 910 | 197 | |
|---|
| 911 | | -/* |
|---|
| 912 | | - * On POWER9, we can come here on wakeup from a cpuidle stop state. |
|---|
| 913 | | - * Hence restore the additional SPRs to the saved value. |
|---|
| 914 | | - * |
|---|
| 915 | | - * On POWER8, we come here only on winkle. Since winkle is used |
|---|
| 916 | | - * only in the case of CPU-Hotplug, we don't need to restore |
|---|
| 917 | | - * the additional SPRs. |
|---|
| 918 | | - */ |
|---|
| 919 | | -BEGIN_FTR_SECTION |
|---|
| 920 | | - bl power9_restore_additional_sprs |
|---|
| 921 | | -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
|---|
| 922 | | -hypervisor_state_restored: |
|---|
| 923 | | - |
|---|
| 924 | | - mr r12,r19 |
|---|
| 925 | | - mtlr r17 |
|---|
| 926 | | - blr /* return to pnv_powersave_wakeup */ |
|---|
| 927 | | - |
|---|
| 928 | | -fastsleep_workaround_at_exit: |
|---|
| 929 | | - li r3,1 |
|---|
| 930 | | - li r4,0 |
|---|
| 931 | | - bl opal_config_cpu_idle_state |
|---|
| 932 | | - b timebase_resync |
|---|
| 933 | | - |
|---|
| 934 | | -/* |
|---|
| 935 | | - * R3 here contains the value that will be returned to the caller |
|---|
| 936 | | - * of power7_nap. |
|---|
| 937 | | - * R12 contains SRR1 for CHECK_HMI_INTERRUPT. |
|---|
| 938 | | - */ |
|---|
| 939 | | -.global pnv_wakeup_loss |
|---|
| 940 | | -pnv_wakeup_loss: |
|---|
| 941 | | - ld r1,PACAR1(r13) |
|---|
| 942 | | -BEGIN_FTR_SECTION |
|---|
| 943 | | - CHECK_HMI_INTERRUPT |
|---|
| 944 | | -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) |
|---|
| 945 | | - REST_NVGPRS(r1) |
|---|
| 946 | | - REST_GPR(2, r1) |
|---|
| 947 | | - |
|---|
| 948 | | -BEGIN_FTR_SECTION |
|---|
| 949 | | - /* These regs were saved in pnv_powersave_common() */ |
|---|
| 950 | | - ld r4, PNV_POWERSAVE_AMR(r1) |
|---|
| 951 | | - ld r5, PNV_POWERSAVE_IAMR(r1) |
|---|
| 952 | | - ld r6, PNV_POWERSAVE_UAMOR(r1) |
|---|
| 953 | | - mtspr SPRN_AMR, r4 |
|---|
| 954 | | - mtspr SPRN_IAMR, r5 |
|---|
| 955 | | - mtspr SPRN_UAMOR, r6 |
|---|
| 956 | | -BEGIN_FTR_SECTION_NESTED(42) |
|---|
| 957 | | - ld r7, PNV_POWERSAVE_AMOR(r1) |
|---|
| 958 | | - mtspr SPRN_AMOR, r7 |
|---|
| 959 | | -END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) |
|---|
| 198 | +#ifdef CONFIG_PPC_970_NAP |
|---|
| 199 | +_GLOBAL(power4_idle_nap) |
|---|
| 200 | + LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW) |
|---|
| 201 | + ld r9,PACA_THREAD_INFO(r13) |
|---|
| 202 | + ld r8,TI_LOCAL_FLAGS(r9) |
|---|
| 203 | + ori r8,r8,_TLF_NAPPING |
|---|
| 204 | + std r8,TI_LOCAL_FLAGS(r9) |
|---|
| 960 | 205 | /* |
|---|
| 961 | | - * We don't need an isync here after restoring IAMR because the upcoming |
|---|
| 962 | | - * mtmsrd is execution synchronizing. |
|---|
| 206 | + * NAPPING bit is set, from this point onward power4_fixup_nap |
|---|
| 207 | + * will cause exceptions to return to power4_idle_nap_return. |
|---|
| 963 | 208 | */ |
|---|
| 964 | | -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
|---|
| 965 | | - |
|---|
| 966 | | - ld r4,PACAKMSR(r13) |
|---|
| 967 | | - ld r5,_LINK(r1) |
|---|
| 968 | | - ld r6,_CCR(r1) |
|---|
| 969 | | - addi r1,r1,INT_FRAME_SIZE |
|---|
| 970 | | - mtlr r5 |
|---|
| 971 | | - mtcr r6 |
|---|
| 972 | | - mtmsrd r4 |
|---|
| 973 | | - blr |
|---|
| 974 | | - |
|---|
| 975 | | -/* |
|---|
| 976 | | - * R3 here contains the value that will be returned to the caller |
|---|
| 977 | | - * of power7_nap. |
|---|
| 978 | | - * R12 contains SRR1 for CHECK_HMI_INTERRUPT. |
|---|
| 979 | | - */ |
|---|
| 980 | | -pnv_wakeup_noloss: |
|---|
| 981 | | - lbz r0,PACA_NAPSTATELOST(r13) |
|---|
| 982 | | - cmpwi r0,0 |
|---|
| 983 | | - bne pnv_wakeup_loss |
|---|
| 984 | | - ld r1,PACAR1(r13) |
|---|
| 985 | | -BEGIN_FTR_SECTION |
|---|
| 986 | | - CHECK_HMI_INTERRUPT |
|---|
| 987 | | -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) |
|---|
| 988 | | - ld r4,PACAKMSR(r13) |
|---|
| 989 | | - ld r5,_NIP(r1) |
|---|
| 990 | | - ld r6,_CCR(r1) |
|---|
| 991 | | - addi r1,r1,INT_FRAME_SIZE |
|---|
| 992 | | - mtlr r5 |
|---|
| 993 | | - mtcr r6 |
|---|
| 994 | | - mtmsrd r4 |
|---|
| 995 | | - blr |
|---|
| 209 | +1: sync |
|---|
| 210 | + isync |
|---|
| 211 | + mtmsrd r7 |
|---|
| 212 | + isync |
|---|
| 213 | + b 1b |
|---|
| 214 | +#endif |
|---|