@@ -56,6 +56,7 @@
 #define C0_BADVADDR	8, 0
 #define C0_BADINSTR	8, 1
 #define C0_BADINSTRP	8, 2
+#define C0_PGD		9, 7
 #define C0_ENTRYHI	10, 0
 #define C0_GUESTCTL1	10, 4
 #define C0_STATUS	12, 0
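The added line follows the convention the surrounding table already uses: each C0_* macro expands to a "register, select" token pair that the uasm wrappers accept as two separate CP0 operands. A minimal sketch of how the new define gets consumed (the expansion is inferred from that convention, not shown in this patch):

/* Sketch: the two-token define fills both CP0 operand slots at once. */
#define C0_PGD          9, 7    /* CP0 Register 9, Select 7: Loongson PGD */

/* A use such as UASM_i_MFC0(&p, K1, C0_PGD) expands to
 * UASM_i_MFC0(&p, K1, 9, 7), i.e. "mfc0/dmfc0 k1, $9, 7". */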
@@ -204,7 +205,7 @@
  * Assemble the start of the vcpu_run function to run a guest VCPU. The function
  * conforms to the following prototype:
  *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
  *
  * The exit from the guest and return to the caller is handled by the code
  * generated by kvm_mips_build_ret_to_host().
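Dropping the first parameter is safe because struct kvm_vcpu already owns its kvm_run area, so any consumer can rederive the pointer. A hedged sketch of a caller under the new prototype (vcpu_run is the function-pointer field in vcpu->arch on MIPS; the wrapper itself is illustrative, not the real kvm_arch_vcpu_ioctl_run()):

static int enter_guest(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;        /* rederived, not passed */
        int r = vcpu->arch.vcpu_run(vcpu);      /* was ->vcpu_run(run, vcpu) */

        kvm_debug("exit_reason: %u\n", run->exit_reason);
        return r;
}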
@@ -217,8 +218,7 @@
 	unsigned int i;
 
 	/*
-	 * A0: run
-	 * A1: vcpu
+	 * A0: vcpu
 	 */
 
 	/* k0/k1 not being used in host kernel context */
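With one argument, the MIPS calling convention (o32 and n64 alike) hands vcpu over in $a0, so the register-map comment shrinks with the prototype. For orientation, the aliases this file uses (assumed to match the defines near the top of entry.c, which this hunk does not show):

#define A0      4       /* on entry: struct kvm_vcpu *vcpu */
#define A1      5       /* previously carried vcpu; now unused on entry */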
@@ -237,10 +237,10 @@
 	kvm_mips_build_save_scratch(&p, V1, K1);
 
 	/* VCPU scratch register has pointer to vcpu */
-	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Offset into vcpu->arch */
-	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+	UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
 
 	/*
 	 * Save the host stack to VCPU, used for exception processing
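Both A1 users flip to A0 together: the vcpu pointer is parked in the chosen CP0 KScratch register for the exception path to recover later, and K1 is biased to &vcpu->arch so subsequent stores fit in short immediate offsets. The same two steps restated in C (write_vcpu_scratch() is a hypothetical stand-in for the MTC0):

write_vcpu_scratch(vcpu);                       /* UASM_i_MTC0(A0 -> scratch_vcpu) */
struct kvm_vcpu_arch *arch = &vcpu->arch;       /* UASM_i_ADDIU(K1, A0, offsetof) */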
@@ -307,7 +307,10 @@
 
 #ifdef CONFIG_KVM_MIPS_VZ
 	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
-	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+	if (cpu_has_ldpte)
+		UASM_i_MFC0(&p, K0, C0_PWBASE);
+	else
+		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
 
 	/*
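cpu_has_ldpte marks cores with the hardware page-table walker (Loongson-3 in this series); on those the kernel keeps its PGD root in CP0 PWBase rather than in a KScratch register, so host_pgd must be captured from the matching source. The same decision in plain C (read_c0_pwbase() exists in mipsregs.h; read_kscratch() is a hypothetical stand-in):

unsigned long host_pgd;

if (cpu_has_ldpte)
        host_pgd = read_c0_pwbase();            /* HW walker's PGD root */
else
        host_pgd = read_kscratch(pgd_reg);      /* hypothetical KScratch read */
vcpu->arch.host_pgd = host_pgd;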
@@ -469,8 +472,10 @@
 	u32 *p = addr;
 	struct uasm_label labels[2];
 	struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+#endif
 
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
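Guarding the cursor pointers rather than the arrays is deliberate: on CONFIG_CPU_LOONGSON64 builds the branchless refill added below never consumes l or r, and initialized-but-unused locals would warn. Reduced to its essentials, the pattern is:

struct uasm_label labels[2];            /* storage kept unconditionally */
#ifndef CONFIG_CPU_LOONGSON64
struct uasm_label *l = labels;          /* cursor only the generic path uses */
#endif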
@@ -490,6 +495,16 @@
 	 */
 	preempt_disable();
 
+#ifdef CONFIG_CPU_LOONGSON64
+	UASM_i_MFC0(&p, K1, C0_PGD);
+	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
+#endif
+	uasm_i_ldpte(&p, K1, 0);      /* even */
+	uasm_i_ldpte(&p, K1, 1);      /* odd */
+	uasm_i_tlbwr(&p);
+#else
 	/*
 	 * Now for the actual refill bit. A lot of this can be common with the
 	 * Linux TLB refill handler, however we don't need to handle so many
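On Loongson the refill bypasses the generic build_get_ptep()/build_update_entries()/build_tlb_write_entry() sequence kept in the #else arm below: each lddir descends one directory level using the matching BadVAddr bits, the ldpte pair pulls in the even and odd PTEs of the doubled TLB entry, and tlbwr installs it at a random index. A rough software restatement (dir_entry(), load_entrylo() and tlb_write_random() are illustrative stand-ins for what the hardware derives from its PWBase/PWField/PWSize setup):

void *dir = dir_entry(c0_pgd(), badvaddr, 3);   /* lddir k0, k1, 3 */
#ifndef __PAGETABLE_PMD_FOLDED
dir = dir_entry(dir, badvaddr, 1);              /* lddir k1, k0, 1 */
#endif
load_entrylo(dir, badvaddr, 0);                 /* ldpte: even PTE */
load_entrylo(dir, badvaddr, 1);                 /* ldpte: odd PTE */
tlb_write_random();                             /* tlbwr */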
@@ -512,6 +527,7 @@
 	build_get_ptep(&p, K0, K1);
 	build_update_entries(&p, K0, K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
 
 	preempt_enable();
 
@@ -628,10 +644,7 @@
 	/* Now that context has been saved, we can use other registers */
 
 	/* Restore vcpu */
-	UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-	/* Restore run (vcpu->run) */
-	UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
+	UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/*
 	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
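The exit path mirrors the entry side: the scratch register now yields the vcpu pointer straight into callee-saved S0, and the separate load of vcpu->run disappears because the relationship it materialized is trivially rederivable:

/* Everything the dropped LW provided (sketch): */
struct kvm_run *run = vcpu->run;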
@@ -793,7 +806,6 @@
 	 * with this in the kernel
 	 */
 	uasm_i_move(&p, A0, S0);
-	uasm_i_move(&p, A1, S1);
 	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
 	uasm_i_jalr(&p, RA, T9);
 	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
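kvm_mips_handle_exit() was evidently converted to the single-argument form alongside this change, so only A0 is staged before the jalr; the ADDIU that follows still sits in the branch delay slot, carving out the call frame exactly as before. Assumed prototype after the conversion:

int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);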
@@ -835,7 +847,7 @@
 	 * guest, reload k1
 	 */
 
-	uasm_i_move(&p, K1, S1);
+	uasm_i_move(&p, K1, S0);
 	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
 
 	/*
@@ -869,8 +881,8 @@
 {
 	u32 *p = addr;
 
-	/* Put the saved pointer to vcpu (s1) back into the scratch register */
-	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+	/* Put the saved pointer to vcpu (s0) back into the scratch register */
+	UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Load up the Guest EBASE to minimize the window where BEV is set */
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
|---|