forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/arch/mips/kvm/trap_emul.c
@@ -67,7 +67,6 @@
 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
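
Note: the pattern in this first hunk repeats throughout the patch: the struct kvm_run *run parameter is dropped from the trap-and-emulate helpers because the run block is already reachable through vcpu->run, and exit reasons are written via vcpu->run->exit_reason directly. A minimal user-space model of the refactor (toy types and names, not kernel code; 17 is the value of KVM_EXIT_INTERNAL_ERROR in linux/kvm.h):

    #include <stdio.h>

    /* toy stand-ins for the kernel structures */
    struct kvm_run { int exit_reason; };
    struct kvm_vcpu { struct kvm_run *run; };

    /* old style: the run block is threaded through as an extra parameter */
    static void fail_old(struct kvm_vcpu *vcpu, struct kvm_run *run)
    {
    	run->exit_reason = 17;		/* KVM_EXIT_INTERNAL_ERROR */
    }

    /* new style: derive it from the vCPU, which already owns it */
    static void fail_new(struct kvm_vcpu *vcpu)
    {
    	vcpu->run->exit_reason = 17;
    }

    int main(void)
    {
    	struct kvm_run run = { 0 };
    	struct kvm_vcpu vcpu = { .run = &run };

    	fail_old(&vcpu, &run);
    	fail_new(&vcpu);		/* same effect, one fewer argument */
    	printf("exit_reason = %d\n", run.exit_reason);
    	return 0;
    }
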
@@ -81,14 +80,14 @@
 			 * Unusable/no FPU in guest:
 			 * deliver guest COP1 Unusable Exception
 			 */
-			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+			er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
 		} else {
 			/* Restore FPU state */
 			kvm_own_fpu(vcpu);
 			er = EMULATE_DONE;
 		}
 	} else {
-		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_inst(cause, opc, vcpu);
 	}
 
 	switch (er) {
@@ -97,12 +96,12 @@
 		break;
 
 	case EMULATE_FAIL:
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		break;
 
 	case EMULATE_WAIT:
-		run->exit_reason = KVM_EXIT_INTR;
+		vcpu->run->exit_reason = KVM_EXIT_INTR;
 		ret = RESUME_HOST;
 		break;
 
@@ -116,8 +115,7 @@
 	return ret;
 }
 
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
-			     struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	union mips_instruction inst;
@@ -125,7 +123,7 @@
 
 	/* A code fetch fault doesn't count as an MMIO */
 	if (kvm_is_ifetch_fault(&vcpu->arch)) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
@@ -134,23 +132,22 @@
 		opc += 1;
 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
 	/* Emulate the load */
-	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_load(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Emulate load from MMIO space failed\n");
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	} else {
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	}
 	return RESUME_HOST;
 }
 
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
-			      struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er;
 	union mips_instruction inst;
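
Note: kvm_mips_bad_load() and kvm_mips_bad_store() classify a faulting access as MMIO, set exit_reason = KVM_EXIT_MMIO, and return RESUME_HOST so that userspace completes the device access. A hedged sketch of the matching userspace side (handle_mmio() and the run loop are assumptions supplied by the VMM; KVM_RUN, struct kvm_run, and the run->mmio fields are the standard KVM API):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* hypothetical device-emulation hook provided by the VMM */
    void handle_mmio(__u64 pa, __u8 *data, __u32 len, __u8 is_write);

    /* vcpu_fd is an open KVM vCPU fd, run its mmap()ed struct kvm_run */
    void run_loop(int vcpu_fd, struct kvm_run *run)
    {
    	for (;;) {
    		ioctl(vcpu_fd, KVM_RUN, 0);
    		switch (run->exit_reason) {
    		case KVM_EXIT_MMIO:
    			/* run->mmio describes the faulting access */
    			handle_mmio(run->mmio.phys_addr, run->mmio.data,
    				    run->mmio.len, run->mmio.is_write);
    			break;
    		case KVM_EXIT_INTERNAL_ERROR:
    			return;	/* unrecoverable, as set by the hunks above */
    		}
    	}
    }
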
@@ -161,34 +158,33 @@
 		opc += 1;
 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
 	if (err) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
 	/* Emulate the store */
-	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_store(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Emulate store to MMIO space failed\n");
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	} else {
-		run->exit_reason = KVM_EXIT_MMIO;
+		vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	}
 	return RESUME_HOST;
 }
 
-static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
+static int kvm_mips_bad_access(u32 cause, u32 *opc,
 			       struct kvm_vcpu *vcpu, bool store)
 {
 	if (store)
-		return kvm_mips_bad_store(cause, opc, run, vcpu);
+		return kvm_mips_bad_store(cause, opc, vcpu);
 	else
-		return kvm_mips_bad_load(cause, opc, run, vcpu);
+		return kvm_mips_bad_load(cause, opc, vcpu);
 }
 
 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
@@ -212,12 +208,12 @@
 	 * They would indicate stale host TLB entries.
 	 */
 	if (unlikely(index < 0)) {
-			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			return RESUME_HOST;
 		}
 		tlb = vcpu->arch.guest_tlb + index;
 		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
-			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			return RESUME_HOST;
 		}
 
@@ -226,23 +222,23 @@
 		 * exception. Relay that on to the guest so it can handle it.
 		 */
 		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
-			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+			kvm_mips_emulate_tlbmod(cause, opc, vcpu);
 			return RESUME_GUEST;
 		}
 
 		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
 							 true))
 			/* Not writable, needs handling as MMIO */
-			return kvm_mips_bad_store(cause, opc, run, vcpu);
+			return kvm_mips_bad_store(cause, opc, vcpu);
 		return RESUME_GUEST;
 	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
 			/* Not writable, needs handling as MMIO */
-			return kvm_mips_bad_store(cause, opc, run, vcpu);
+			return kvm_mips_bad_store(cause, opc, vcpu);
 		return RESUME_GUEST;
 	} else {
 		/* host kernel addresses are all handled as MMIO */
-		return kvm_mips_bad_store(cause, opc, run, vcpu);
+		return kvm_mips_bad_store(cause, opc, vcpu);
 	}
 }
 
@@ -276,7 +272,7 @@
 		 * into the shadow host TLB
 		 */
 
-		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
+		er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
 		else {
@@ -289,14 +285,14 @@
 		 * not expect to ever get them
 		 */
 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
-			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+			ret = kvm_mips_bad_access(cause, opc, vcpu, store);
 	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
 		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
 		/*
 		 * With EVA we may get a TLB exception instead of an address
 		 * error when the guest performs MMIO to KSeg1 addresses.
 		 */
-		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+		ret = kvm_mips_bad_access(cause, opc, vcpu, store);
 	} else {
 		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
 			store ? "ST" : "LD", cause, opc, badvaddr);
@@ -320,7 +316,6 @@
 
 static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
@@ -328,11 +323,11 @@
 
 	if (KVM_GUEST_KERNEL_MODE(vcpu)
 	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
+		ret = kvm_mips_bad_store(cause, opc, vcpu);
 	} else {
 		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
 			cause, opc, badvaddr);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -340,18 +335,17 @@
 
 static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	int ret = RESUME_GUEST;
 
 	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
+		ret = kvm_mips_bad_load(cause, opc, vcpu);
 	} else {
 		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
 			cause, opc, badvaddr);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -359,17 +353,16 @@
 
 static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_syscall(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -377,17 +370,16 @@
 
 static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+	er = kvm_mips_handle_ri(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -395,17 +387,16 @@
 
 static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE)
 		ret = RESUME_GUEST;
 	else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -413,17 +404,16 @@
 
 static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -431,17 +421,16 @@
 
 static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -449,17 +438,16 @@
 
 static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+	er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
 	if (er == EMULATE_DONE) {
 		ret = RESUME_GUEST;
 	} else {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 	}
 	return ret;
@@ -474,7 +462,6 @@
 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	struct kvm_run *run = vcpu->run;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
@@ -486,10 +473,10 @@
 		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
 		 * guest reserved instruction exception
 		 */
-		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
 	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
 		/* MSA disabled by guest, guest MSA disabled exception */
-		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+		er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
 	} else {
 		/* Restore MSA/FPU state */
 		kvm_own_msa(vcpu);
502489 break;
503490
504491 case EMULATE_FAIL:
505
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
492
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
506493 ret = RESUME_HOST;
507494 break;
508495
@@ -527,6 +514,9 @@
 
 	switch (ext) {
 	case KVM_CAP_MIPS_TE:
+		r = 1;
+		break;
+	case KVM_CAP_IOEVENTFD:
 		r = 1;
 		break;
 	default:
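
Note: beyond the kvm_run cleanup, this hunk starts advertising KVM_CAP_IOEVENTFD from the trap-and-emulate backend, so VMMs can register eventfd-backed I/O regions instead of taking a full MMIO exit each time. Userspace probes the capability with the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm; a small self-contained check (output format is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
    	int kvm = open("/dev/kvm", O_RDWR);

    	if (kvm < 0) {
    		perror("open /dev/kvm");
    		return 1;
    	}
    	/* > 0 means the capability is present on this kernel */
    	int r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IOEVENTFD);
    	printf("KVM_CAP_IOEVENTFD: %s\n", r > 0 ? "supported" : "absent");
    	return 0;
    }
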
@@ -564,6 +554,7 @@
 	/* Don't free host kernel page tables copied from init_mm.pgd */
 	const unsigned long end = 0x80000000;
 	unsigned long pgd_va, pud_va, pmd_va;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -576,7 +567,8 @@
 		pgd_va = (unsigned long)i << PGDIR_SHIFT;
 		if (pgd_va >= end)
 			break;
-		pud = pud_offset(pgd + i, 0);
+		p4d = p4d_offset(pgd, 0);
+		pud = pud_offset(p4d + i, 0);
 		for (j = 0; j < PTRS_PER_PUD; j++) {
 			if (pud_none(pud[j]))
 				continue;
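
Note: these two hunks adapt the GVA page-table teardown to the kernel-wide five-level page-table API: a p4d level now sits between pgd and pud, and walks must go through p4d_offset() even on MIPS, where the level is folded and the call is effectively a pass-through. For orientation, the canonical descent order (a fragment, assuming kernel context with mm and addr in scope):

    	pgd_t *pgd = pgd_offset(mm, addr);		/* top level */
    	p4d_t *p4d = p4d_offset(pgd, addr);		/* folded on MIPS */
    	pud_t *pud = pud_offset(p4d, addr);
    	pmd_t *pmd = pmd_offset(pud, addr);
    	pte_t *pte = pte_offset_kernel(pmd, addr);	/* leaf entries */
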
@@ -592,7 +584,7 @@
 				pmd_va = pud_va | (k << PMD_SHIFT);
 				if (pmd_va >= end)
 					break;
-				pte = pte_offset(pmd + k, 0);
+				pte = pte_offset_kernel(pmd + k, 0);
 				pte_free_kernel(NULL, pte);
 			}
 			pmd_free(NULL, pmd);
@@ -1056,11 +1048,7 @@
 	 */
 	if (current->flags & PF_VCPU) {
 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-		    asid_version_mask(cpu))
-			get_new_mmu_context(mm, cpu);
-		write_c0_entryhi(cpu_asid(cpu, mm));
-		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+		check_switch_mmu_context(mm);
 		kvm_mips_suspend_mm(cpu);
 		ehb();
 	}
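
Note: the five open-coded lines removed here (ASID version check, context regeneration, EntryHi write, TLB-refill PGD setup) are folded into the new check_switch_mmu_context() helper, and the same substitution recurs below. Reconstructed from the removed lines, the helper does roughly the following (a sketch, not the exact implementation in asm/mmu_context.h):

    static inline void check_switch_mmu_context(struct mm_struct *mm)
    {
    	unsigned int cpu = smp_processor_id();

    	/* regenerate the ASID if its version expired on this CPU */
    	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
    		get_new_mmu_context(mm);

    	/* make it live: EntryHi carries the ASID, and the TLB refill
    	 * handler needs the matching page-table root */
    	write_c0_entryhi(cpu_asid(cpu, mm));
    	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
    }
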
@@ -1074,11 +1062,7 @@
 
 	if (current->flags & PF_VCPU) {
 		/* Restore normal Linux process memory map */
-		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-		     asid_version_mask(cpu)))
-			get_new_mmu_context(current->mm, cpu);
-		write_c0_entryhi(cpu_asid(cpu, current->mm));
-		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+		check_switch_mmu_context(current->mm);
 		kvm_mips_resume_mm(cpu);
 		ehb();
 	}
@@ -1106,14 +1090,14 @@
 	kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
 	kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
 	for_each_possible_cpu(i) {
-		cpu_context(i, kern_mm) = 0;
-		cpu_context(i, user_mm) = 0;
+		set_cpu_context(i, kern_mm, 0);
+		set_cpu_context(i, user_mm, 0);
 	}
 
 	/* Generate new ASID for current mode */
 	if (reload_asid) {
 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-		get_new_mmu_context(mm, cpu);
+		get_new_mmu_context(mm);
 		htw_stop();
 		write_c0_entryhi(cpu_asid(cpu, mm));
 		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
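
Note: direct writes of the form cpu_context(i, mm) = 0 become set_cpu_context(i, mm, 0), and get_new_mmu_context() loses its cpu argument. Routing the store through a setter gives alternative ASID schemes (the MemoryMapID support these helpers were introduced alongside) a single interception point. Approximately, from memory of asm/mmu_context.h rather than this patch:

    /* approximate shape of the accessor; the cpu_has_mmid branch is
     * the motivation for hiding the raw per-CPU assignment */
    static inline void set_cpu_context(unsigned int cpu,
    				   struct mm_struct *mm, u64 ctx)
    {
    	if (cpu_has_mmid)
    		atomic64_set(&mm->context.mmid, ctx);
    	else
    		mm->context.asid[cpu] = ctx;
    }
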
@@ -1187,8 +1171,7 @@
 	local_irq_enable();
 }
 
-static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
-				       struct kvm_vcpu *vcpu)
+static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
@@ -1219,7 +1202,7 @@
 		if (gasid != vcpu->arch.last_user_gasid) {
 			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
 			for_each_possible_cpu(i)
-				cpu_context(i, user_mm) = 0;
+				set_cpu_context(i, user_mm, 0);
 			vcpu->arch.last_user_gasid = gasid;
 		}
 	}
@@ -1228,12 +1211,10 @@
 	 * Check if ASID is stale. This may happen due to a TLB flush request or
 	 * a lazy user MM invalidation.
 	 */
-	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-	    asid_version_mask(cpu))
-		get_new_mmu_context(mm, cpu);
+	check_mmu_context(mm);
 }
 
-static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
 	int r;
@@ -1242,7 +1223,7 @@
 	kvm_mips_deliver_interrupts(vcpu,
 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
 
-	kvm_trap_emul_vcpu_reenter(run, vcpu);
+	kvm_trap_emul_vcpu_reenter(vcpu);
 
 	/*
 	 * We use user accessors to access guest memory, but we don't want to
@@ -1260,17 +1241,13 @@
 	 */
 	kvm_mips_suspend_mm(cpu);
 
-	r = vcpu->arch.vcpu_run(run, vcpu);
+	r = vcpu->arch.vcpu_run(vcpu);
 
 	/* We may have migrated while handling guest exits */
 	cpu = smp_processor_id();
 
 	/* Restore normal Linux process memory map */
-	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     asid_version_mask(cpu)))
-		get_new_mmu_context(current->mm, cpu);
-	write_c0_entryhi(cpu_asid(cpu, current->mm));
-	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+	check_switch_mmu_context(current->mm);
 	kvm_mips_resume_mm(cpu);
 
 	htw_start();