@@ -67,7 +67,6 @@
 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
@@ -81,14 +80,14 @@
 			 * Unusable/no FPU in guest:
 			 * deliver guest COP1 Unusable Exception
 			 */
-			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+			er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
 		} else {
 			/* Restore FPU state */
 			kvm_own_fpu(vcpu);
 			er = EMULATE_DONE;
 		}
 	} else {
-		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_inst(cause, opc, vcpu);
 	}
 
 	switch (er) {
@@ -97,12 +96,12 @@
 		break;
 
 	case EMULATE_FAIL:
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		break;
 
 	case EMULATE_WAIT:
-		run->exit_reason = KVM_EXIT_INTR;
+		vcpu->run->exit_reason = KVM_EXIT_INTR;
 		ret = RESUME_HOST;
 		break;
 
@@ -116,8 +115,7 @@
 	return ret;
 }
 
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
-			     struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	union mips_instruction inst;
@@ -125,7 +123,7 @@
 
 	/* A code fetch fault doesn't count as an MMIO */
 	if (kvm_is_ifetch_fault(&vcpu->arch)) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
@@ -134,23 +132,22 @@
 		opc += 1;
 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
 	/* Emulate the load */
-	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_load(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Emulate load from MMIO space failed\n");
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	} else {
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	}
 	return RESUME_HOST;
 }
 
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
-			      struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	union mips_instruction inst;
@@ -161,34 +158,33 @@
 		opc += 1;
 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
 	/* Emulate the store */
-	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_store(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Emulate store to MMIO space failed\n");
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	} else {
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	}
 	return RESUME_HOST;
 }
 
-static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
+static int kvm_mips_bad_access(u32 cause, u32 *opc,
 			       struct kvm_vcpu *vcpu, bool store)
 {
 	if (store)
-		return kvm_mips_bad_store(cause, opc, run, vcpu);
+		return kvm_mips_bad_store(cause, opc, vcpu);
 	else
-		return kvm_mips_bad_load(cause, opc, run, vcpu);
+		return kvm_mips_bad_load(cause, opc, vcpu);
 }
 
 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
@@ -212,12 +208,12 @@
 		 * They would indicate stale host TLB entries.
 		 */
 		if (unlikely(index < 0)) {
-			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			return RESUME_HOST;
 		}
 		tlb = vcpu->arch.guest_tlb + index;
 		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
-			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			return RESUME_HOST;
 		}
 
@@ -226,23 +222,23 @@
 		 * exception. Relay that on to the guest so it can handle it.
 		 */
 		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
-			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+			kvm_mips_emulate_tlbmod(cause, opc, vcpu);
 			return RESUME_GUEST;
 		}
 
 		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
 			/* Not writable, needs handling as MMIO */
-			return kvm_mips_bad_store(cause, opc, run, vcpu);
+			return kvm_mips_bad_store(cause, opc, vcpu);
 		return RESUME_GUEST;
 	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
 			/* Not writable, needs handling as MMIO */
-			return kvm_mips_bad_store(cause, opc, run, vcpu);
+			return kvm_mips_bad_store(cause, opc, vcpu);
 		return RESUME_GUEST;
 	} else {
 		/* host kernel addresses are all handled as MMIO */
-		return kvm_mips_bad_store(cause, opc, run, vcpu);
+		return kvm_mips_bad_store(cause, opc, vcpu);
 	}
 }
 
@@ -276,7 +272,7 @@
 		 * into the shadow host TLB
 		 */
 
-		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
+		er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
 		else {
@@ -289,14 +285,14 @@
 		 * not expect to ever get them
 		 */
 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
-			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+			ret = kvm_mips_bad_access(cause, opc, vcpu, store);
 	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
 		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
 		/*
 		 * With EVA we may get a TLB exception instead of an address
 		 * error when the guest performs MMIO to KSeg1 addresses.
 		 */
-		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+		ret = kvm_mips_bad_access(cause, opc, vcpu, store);
 	} else {
 		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
 			store ? "ST" : "LD", cause, opc, badvaddr);
@@ -320,7 +316,6 @@
 
 static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
@@ -328,11 +323,11 @@
 
 	if (KVM_GUEST_KERNEL_MODE(vcpu)
 	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
+		ret = kvm_mips_bad_store(cause, opc, vcpu);
 	} else {
 		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
 			cause, opc, badvaddr);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -340,18 +335,17 @@
 
 static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	int ret = RESUME_GUEST;
 
 	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
+		ret = kvm_mips_bad_load(cause, opc, vcpu);
 	} else {
 		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
 			cause, opc, badvaddr);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -359,17 +353,16 @@
 
 static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_syscall(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -377,17 +370,16 @@
 
 static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+	er = kvm_mips_handle_ri(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -395,17 +387,16 @@
 
 static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -413,17 +404,16 @@
 
 static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -431,17 +421,16 @@
 
 static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -449,17 +438,16 @@
 
 static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -474,7 +462,6 @@
 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
@@ -486,10 +473,10 @@
 		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
 		 * guest reserved instruction exception
 		 */
-		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
 	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
 		/* MSA disabled by guest, guest MSA disabled exception */
-		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
 	} else {
 		/* Restore MSA/FPU state */
 		kvm_own_msa(vcpu);
@@ -502,7 +489,7 @@
 		break;
 
 	case EMULATE_FAIL:
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		break;
 
@@ -527,6 +514,9 @@
 
 	switch (ext) {
 	case KVM_CAP_MIPS_TE:
+		r = 1;
+		break;
+	case KVM_CAP_IOEVENTFD:
 		r = 1;
 		break;
 	default:
@@ -564,6 +554,7 @@
 	/* Don't free host kernel page tables copied from init_mm.pgd */
 	const unsigned long end = 0x80000000;
 	unsigned long pgd_va, pud_va, pmd_va;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -576,7 +567,8 @@
 		pgd_va = (unsigned long)i << PGDIR_SHIFT;
 		if (pgd_va >= end)
 			break;
-		pud = pud_offset(pgd + i, 0);
+		p4d = p4d_offset(pgd, 0);
+		pud = pud_offset(p4d + i, 0);
 		for (j = 0; j < PTRS_PER_PUD; j++) {
 			if (pud_none(pud[j]))
 				continue;
@@ -592,7 +584,7 @@
 				pmd_va = pud_va | (k << PMD_SHIFT);
 				if (pmd_va >= end)
 					break;
-				pte = pte_offset(pmd + k, 0);
+				pte = pte_offset_kernel(pmd + k, 0);
 				pte_free_kernel(NULL, pte);
 			}
 			pmd_free(NULL, pmd);
@@ -1056,11 +1048,7 @@
 	 */
 	if (current->flags & PF_VCPU) {
 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-		    asid_version_mask(cpu))
-			get_new_mmu_context(mm, cpu);
-		write_c0_entryhi(cpu_asid(cpu, mm));
-		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+		check_switch_mmu_context(mm);
 		kvm_mips_suspend_mm(cpu);
 		ehb();
 	}
@@ -1074,11 +1062,7 @@
 
 	if (current->flags & PF_VCPU) {
 		/* Restore normal Linux process memory map */
-		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-		     asid_version_mask(cpu)))
-			get_new_mmu_context(current->mm, cpu);
-		write_c0_entryhi(cpu_asid(cpu, current->mm));
-		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+		check_switch_mmu_context(current->mm);
 		kvm_mips_resume_mm(cpu);
 		ehb();
 	}
@@ -1106,14 +1090,14 @@
 	kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
 	kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
 	for_each_possible_cpu(i) {
-		cpu_context(i, kern_mm) = 0;
-		cpu_context(i, user_mm) = 0;
+		set_cpu_context(i, kern_mm, 0);
+		set_cpu_context(i, user_mm, 0);
 	}
 
 	/* Generate new ASID for current mode */
 	if (reload_asid) {
 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-		get_new_mmu_context(mm, cpu);
+		get_new_mmu_context(mm);
 		htw_stop();
 		write_c0_entryhi(cpu_asid(cpu, mm));
 		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
@@ -1187,8 +1171,7 @@
 	local_irq_enable();
 }
 
-static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
-				       struct kvm_vcpu *vcpu)
+static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
@@ -1219,7 +1202,7 @@
 		if (gasid != vcpu->arch.last_user_gasid) {
 			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
 			for_each_possible_cpu(i)
-				cpu_context(i, user_mm) = 0;
+				set_cpu_context(i, user_mm, 0);
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}
@@ -1228,12 +1211,10 @@
 	 * Check if ASID is stale. This may happen due to a TLB flush request or
 	 * a lazy user MM invalidation.
 	 */
-	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-	    asid_version_mask(cpu))
-		get_new_mmu_context(mm, cpu);
+	check_mmu_context(mm);
 }
 
-static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
 	int r;
@@ -1242,7 +1223,7 @@
 	kvm_mips_deliver_interrupts(vcpu,
 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
 
-	kvm_trap_emul_vcpu_reenter(run, vcpu);
+	kvm_trap_emul_vcpu_reenter(vcpu);
 
 	/*
 	 * We use user accessors to access guest memory, but we don't want to
@@ -1260,17 +1241,13 @@
 	 */
 	kvm_mips_suspend_mm(cpu);
 
-	r = vcpu->arch.vcpu_run(run, vcpu);
+	r = vcpu->arch.vcpu_run(vcpu);
 
 	/* We may have migrated while handling guest exits */
 	cpu = smp_processor_id();
 
 	/* Restore normal Linux process memory map */
-	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     asid_version_mask(cpu)))
-		get_new_mmu_context(current->mm, cpu);
-	write_c0_entryhi(cpu_asid(cpu, current->mm));
-	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+	check_switch_mmu_context(current->mm);
 	kvm_mips_resume_mm(cpu);
 
 	htw_start();