From 01573e231f18eb2d99162747186f59511f56b64d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 08 Dec 2023 10:40:48 +0000
Subject: [PATCH] Remove rt

---
 kernel/arch/mips/kvm/entry.c | 40 ++++++++++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 14 deletions(-)

diff --git a/kernel/arch/mips/kvm/entry.c b/kernel/arch/mips/kvm/entry.c
index 16e1c93..832475b 100644
--- a/kernel/arch/mips/kvm/entry.c
+++ b/kernel/arch/mips/kvm/entry.c
@@ -56,6 +56,7 @@
 #define C0_BADVADDR	8, 0
 #define C0_BADINSTR	8, 1
 #define C0_BADINSTRP	8, 2
+#define C0_PGD		9, 7
 #define C0_ENTRYHI	10, 0
 #define C0_GUESTCTL1	10, 4
 #define C0_STATUS	12, 0
@@ -204,7 +205,7 @@
  * Assemble the start of the vcpu_run function to run a guest VCPU. The function
  * conforms to the following prototype:
  *
- * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ * int vcpu_run(struct kvm_vcpu *vcpu);
  *
  * The exit from the guest and return to the caller is handled by the code
  * generated by kvm_mips_build_ret_to_host().
@@ -217,8 +218,7 @@
 	unsigned int i;
 
 	/*
-	 * A0: run
-	 * A1: vcpu
+	 * A0: vcpu
 	 */
 
 	/* k0/k1 not being used in host kernel context */
@@ -237,10 +237,10 @@
 	kvm_mips_build_save_scratch(&p, V1, K1);
 
 	/* VCPU scratch register has pointer to vcpu */
-	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+	UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Offset into vcpu->arch */
-	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+	UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
 
 	/*
 	 * Save the host stack to VCPU, used for exception processing
@@ -307,7 +307,10 @@
 
 #ifdef CONFIG_KVM_MIPS_VZ
 	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
-	UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+	if (cpu_has_ldpte)
+		UASM_i_MFC0(&p, K0, C0_PWBASE);
+	else
+		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
 	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
 
 	/*
@@ -469,8 +472,10 @@
 	u32 *p = addr;
 	struct uasm_label labels[2];
 	struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+#endif
 
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
@@ -490,6 +495,16 @@
 	 */
 	preempt_disable();
 
+#ifdef CONFIG_CPU_LOONGSON64
+	UASM_i_MFC0(&p, K1, C0_PGD);
+	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
+#endif
+	uasm_i_ldpte(&p, K1, 0);      /* even */
+	uasm_i_ldpte(&p, K1, 1);      /* odd */
+	uasm_i_tlbwr(&p);
+#else
 	/*
 	 * Now for the actual refill bit. A lot of this can be common with the
 	 * Linux TLB refill handler, however we don't need to handle so many
@@ -512,6 +527,7 @@
 	build_get_ptep(&p, K0, K1);
 	build_update_entries(&p, K0, K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
 
 	preempt_enable();
 
@@ -628,10 +644,7 @@
 	/* Now that context has been saved, we can use other registers */
 
 	/* Restore vcpu */
-	UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
-
-	/* Restore run (vcpu->run) */
-	UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
+	UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/*
 	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -793,7 +806,6 @@
 	 * with this in the kernel
 	 */
 	uasm_i_move(&p, A0, S0);
-	uasm_i_move(&p, A1, S1);
 	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
 	uasm_i_jalr(&p, RA, T9);
 	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
@@ -835,7 +847,7 @@
 	 * guest, reload k1
 	 */
 
-	uasm_i_move(&p, K1, S1);
+	uasm_i_move(&p, K1, S0);
 	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
 
 	/*
@@ -869,8 +881,8 @@
 {
 	u32 *p = addr;
 
-	/* Put the saved pointer to vcpu (s1) back into the scratch register */
-	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+	/* Put the saved pointer to vcpu (s0) back into the scratch register */
+	UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
 
 	/* Load up the Guest EBASE to minimize the window where BEV is set */
 	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
--
Gitblit v1.6.2
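
Note (not part of the patch): the hunks above change the generated vcpu_run
entry code from taking (run, vcpu) in A0/A1 to taking only vcpu in A0, with
kvm_run reached through vcpu->run, so the exit path keeps a single saved
register (S0). Below is a minimal caller-side sketch in C under the assumption
that the entry code is reached through the vcpu->arch.vcpu_run function
pointer used by MIPS KVM; the helper name run_guest_sketch is made up for
illustration only.

#include <linux/kvm_host.h>

static int run_guest_sketch(struct kvm_vcpu *vcpu)
{
	/*
	 * Old convention: int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
	 * passed the run struct in A0 and the vcpu in A1, and the exit path had
	 * to preserve both pointers (S0 and S1).
	 *
	 * New convention: only the vcpu pointer travels in A0; the exit handler
	 * reads vcpu->run itself when it needs the kvm_run area.
	 */
	return vcpu->arch.vcpu_run(vcpu);	/* assumed new-style entry point */
}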