| .. | .. |
|---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
|---|
| 1 | 2 | /* |
|---|
| 2 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 3 | | - * it under the terms of the GNU General Public License, version 2, as |
|---|
| 4 | | - * published by the Free Software Foundation. |
|---|
| 5 | | - * |
|---|
| 6 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 7 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 8 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 9 | | - * GNU General Public License for more details. |
|---|
| 10 | | - * |
|---|
| 11 | | - * You should have received a copy of the GNU General Public License |
|---|
| 12 | | - * along with this program; if not, write to the Free Software |
|---|
| 13 | | - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
|---|
| 14 | 3 | * |
|---|
| 15 | 4 | * Copyright SUSE Linux Products GmbH 2009 |
|---|
| 16 | 5 | * |
|---|
| .. | .. |
|---|
| 37 | 26 | #define FUNC(name) name |
|---|
| 38 | 27 | #define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2) |
|---|
| 39 | 28 | |
|---|
| 40 | | -#endif /* CONFIG_PPC_BOOK3S_XX */ |
|---|
| 29 | +#endif /* CONFIG_PPC_BOOK3S_64 */ |
|---|
| 41 | 30 | |
|---|
| 42 | 31 | #define VCPU_LOAD_NVGPRS(vcpu) \ |
|---|
| 43 | 32 | PPC_LL r14, VCPU_GPR(R14)(vcpu); \ |
|---|
| .. | .. |
|---|
| 66 | 55 | ****************************************************************************/ |
|---|
| 67 | 56 | |
|---|
| 68 | 57 | /* Registers: |
|---|
| 69 | | - * r3: kvm_run pointer |
|---|
| 70 | | - * r4: vcpu pointer |
|---|
| 58 | + * r3: vcpu pointer |
|---|
| 71 | 59 | */ |
|---|
| 72 | 60 | _GLOBAL(__kvmppc_vcpu_run) |
|---|
| 73 | 61 | |
|---|
| .. | .. |
|---|
| 79 | 67 | /* Save host state to the stack */ |
|---|
| 80 | 68 | PPC_STLU r1, -SWITCH_FRAME_SIZE(r1) |
|---|
| 81 | 69 | |
|---|
| 82 | | - /* Save r3 (kvm_run) and r4 (vcpu) */ |
|---|
| 83 | | - SAVE_2GPRS(3, r1) |
|---|
| 70 | + /* Save r3 (vcpu) */ |
|---|
| 71 | + SAVE_GPR(3, r1) |
|---|
| 84 | 72 | |
|---|
| 85 | 73 | /* Save non-volatile registers (r14 - r31) */ |
|---|
| 86 | 74 | SAVE_NVGPRS(r1) |
|---|
| .. | .. |
|---|
| 93 | 81 | PPC_STL r0, _LINK(r1) |
|---|
| 94 | 82 | |
|---|
| 95 | 83 | /* Load non-volatile guest state from the vcpu */ |
|---|
| 96 | | - VCPU_LOAD_NVGPRS(r4) |
|---|
| 84 | + VCPU_LOAD_NVGPRS(r3) |
|---|
| 97 | 85 | |
|---|
| 98 | 86 | kvm_start_lightweight: |
|---|
| 99 | 87 | /* Copy registers into shadow vcpu so we can access them in real mode */ |
|---|
| 100 | | - mr r3, r4 |
|---|
| 101 | 88 | bl FUNC(kvmppc_copy_to_svcpu) |
|---|
| 102 | 89 | nop |
|---|
| 103 | | - REST_GPR(4, r1) |
|---|
| 90 | + REST_GPR(3, r1) |
|---|
| 104 | 91 | |
|---|
| 105 | 92 | #ifdef CONFIG_PPC_BOOK3S_64 |
|---|
| 106 | 93 | /* Get the dcbz32 flag */ |
|---|
| 107 | | - PPC_LL r3, VCPU_HFLAGS(r4) |
|---|
| 108 | | - rldicl r3, r3, 0, 63 /* r3 &= 1 */ |
|---|
| 109 | | - stb r3, HSTATE_RESTORE_HID5(r13) |
|---|
| 94 | + PPC_LL r0, VCPU_HFLAGS(r3) |
|---|
| 95 | + rldicl r0, r0, 0, 63 /* r0 &= 1 */ |
|---|
| 96 | + stb r0, HSTATE_RESTORE_HID5(r13) |
|---|
| 110 | 97 | |
|---|
| 111 | 98 | /* Load up guest SPRG3 value, since it's user readable */ |
|---|
| 112 | | - lwz r3, VCPU_SHAREDBE(r4) |
|---|
| 113 | | - cmpwi r3, 0 |
|---|
| 114 | | - ld r5, VCPU_SHARED(r4) |
|---|
| 99 | + lbz r4, VCPU_SHAREDBE(r3) |
|---|
| 100 | + cmpwi r4, 0 |
|---|
| 101 | + ld r5, VCPU_SHARED(r3) |
|---|
| 115 | 102 | beq sprg3_little_endian |
|---|
| 116 | 103 | sprg3_big_endian: |
|---|
| 117 | 104 | #ifdef __BIG_ENDIAN__ |
|---|
| 118 | | - ld r3, VCPU_SHARED_SPRG3(r5) |
|---|
| 105 | + ld r4, VCPU_SHARED_SPRG3(r5) |
|---|
| 119 | 106 | #else |
|---|
| 120 | 107 | addi r5, r5, VCPU_SHARED_SPRG3 |
|---|
| 121 | | - ldbrx r3, 0, r5 |
|---|
| 108 | + ldbrx r4, 0, r5 |
|---|
| 122 | 109 | #endif |
|---|
| 123 | 110 | b after_sprg3_load |
|---|
| 124 | 111 | sprg3_little_endian: |
|---|
| 125 | 112 | #ifdef __LITTLE_ENDIAN__ |
|---|
| 126 | | - ld r3, VCPU_SHARED_SPRG3(r5) |
|---|
| 113 | + ld r4, VCPU_SHARED_SPRG3(r5) |
|---|
| 127 | 114 | #else |
|---|
| 128 | 115 | addi r5, r5, VCPU_SHARED_SPRG3 |
|---|
| 129 | | - ldbrx r3, 0, r5 |
|---|
| 116 | + ldbrx r4, 0, r5 |
|---|
| 130 | 117 | #endif |
|---|
| 131 | 118 | |
|---|
| 132 | 119 | after_sprg3_load: |
|---|
| 133 | | - mtspr SPRN_SPRG3, r3 |
|---|
| 120 | + mtspr SPRN_SPRG3, r4 |
|---|
| 134 | 121 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
|---|
| 135 | 122 | |
|---|
| 136 | | - PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */ |
|---|
| 123 | + PPC_LL r4, VCPU_SHADOW_MSR(r3) /* get shadow_msr */ |
|---|
| 137 | 124 | |
|---|
| 138 | 125 | /* Jump to segment patching handler and into our guest */ |
|---|
| 139 | 126 | bl FUNC(kvmppc_entry_trampoline) |
|---|
| .. | .. |
|---|
| 157 | 144 | * |
|---|
| 158 | 145 | */ |
|---|
| 159 | 146 | |
|---|
| 160 | | - PPC_LL r3, GPR4(r1) /* vcpu pointer */ |
|---|
| 147 | + PPC_LL r3, GPR3(r1) /* vcpu pointer */ |
|---|
| 161 | 148 | |
|---|
| 162 | 149 | /* |
|---|
| 163 | 150 | * kvmppc_copy_from_svcpu can clobber volatile registers, save |
|---|
| .. | .. |
|---|
| 180 | 167 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
|---|
| 181 | 168 | |
|---|
| 182 | 169 | /* R7 = vcpu */ |
|---|
| 183 | | - PPC_LL r7, GPR4(r1) |
|---|
| 170 | + PPC_LL r7, GPR3(r1) |
|---|
| 184 | 171 | |
|---|
| 185 | 172 | PPC_STL r14, VCPU_GPR(R14)(r7) |
|---|
| 186 | 173 | PPC_STL r15, VCPU_GPR(R15)(r7) |
|---|
| .. | .. |
|---|
| 201 | 188 | PPC_STL r30, VCPU_GPR(R30)(r7) |
|---|
| 202 | 189 | PPC_STL r31, VCPU_GPR(R31)(r7) |
|---|
| 203 | 190 | |
|---|
| 204 | | - /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ |
|---|
| 205 | | - lwz r5, VCPU_TRAP(r7) |
|---|
| 191 | + /* Pass the exit number as 2nd argument to kvmppc_handle_exit */ |
|---|
| 192 | + lwz r4, VCPU_TRAP(r7) |
|---|
| 206 | 193 | |
|---|
| 207 | | - /* Restore r3 (kvm_run) and r4 (vcpu) */ |
|---|
| 208 | | - REST_2GPRS(3, r1) |
|---|
| 194 | + /* Restore r3 (vcpu) */ |
|---|
| 195 | + REST_GPR(3, r1) |
|---|
| 209 | 196 | bl FUNC(kvmppc_handle_exit_pr) |
|---|
| 210 | 197 | |
|---|
| 211 | 198 | /* If RESUME_GUEST, get back in the loop */ |
|---|
| .. | .. |
|---|
| 234 | 221 | PPC_LL r4, _LINK(r1) |
|---|
| 235 | 222 | PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1) |
|---|
| 236 | 223 | |
|---|
| 237 | | - /* Load vcpu and cpu_run */ |
|---|
| 238 | | - REST_2GPRS(3, r1) |
|---|
| 224 | + /* Load vcpu */ |
|---|
| 225 | + REST_GPR(3, r1) |
|---|
| 239 | 226 | |
|---|
| 240 | 227 | /* Load non-volatile guest state from the vcpu */ |
|---|
| 241 | | - VCPU_LOAD_NVGPRS(r4) |
|---|
| 228 | + VCPU_LOAD_NVGPRS(r3) |
|---|
| 242 | 229 | |
|---|
| 243 | 230 | /* Jump back into the beginning of this function */ |
|---|
| 244 | 231 | b kvm_start_lightweight |
|---|
| .. | .. |
|---|
| 246 | 233 | kvm_loop_lightweight: |
|---|
| 247 | 234 | |
|---|
| 248 | 235 | /* We'll need the vcpu pointer */ |
|---|
| 249 | | - REST_GPR(4, r1) |
|---|
| 236 | + REST_GPR(3, r1) |
|---|
| 250 | 237 | |
|---|
| 251 | 238 | /* Jump back into the beginning of this function */ |
|---|
| 252 | 239 | b kvm_start_lightweight |
|---|