2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/mips/kvm/mips.c
@@ -18,14 +18,14 @@
 #include <linux/vmalloc.h>
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/pgtable.h>

 #include <asm/fpu.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>

 #include <linux/kvm_host.h>

@@ -39,40 +39,44 @@
 #define VECTORSPACING 0x100 /* for EI/VI mode */
 #endif

-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
-	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
-	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
-	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
-	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
-	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
-	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
-	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
-	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
-	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
-	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
-	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
-	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
-	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
-	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
-	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
-	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
-	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+	VCPU_STAT("wait", wait_exits),
+	VCPU_STAT("cache", cache_exits),
+	VCPU_STAT("signal", signal_exits),
+	VCPU_STAT("interrupt", int_exits),
+	VCPU_STAT("cop_unusable", cop_unusable_exits),
+	VCPU_STAT("tlbmod", tlbmod_exits),
+	VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
+	VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
+	VCPU_STAT("addrerr_st", addrerr_st_exits),
+	VCPU_STAT("addrerr_ld", addrerr_ld_exits),
+	VCPU_STAT("syscall", syscall_exits),
+	VCPU_STAT("resvd_inst", resvd_inst_exits),
+	VCPU_STAT("break_inst", break_inst_exits),
+	VCPU_STAT("trap_inst", trap_inst_exits),
+	VCPU_STAT("msa_fpe", msa_fpe_exits),
+	VCPU_STAT("fpe", fpe_exits),
+	VCPU_STAT("msa_disabled", msa_disabled_exits),
+	VCPU_STAT("flush_dcache", flush_dcache_exits),
 #ifdef CONFIG_KVM_MIPS_VZ
-	{ "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
-	{ "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
-	{ "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
-	{ "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
-	{ "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
-	{ "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
-	{ "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
-	{ "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
+	VCPU_STAT("vz_gpsi", vz_gpsi_exits),
+	VCPU_STAT("vz_gsfc", vz_gsfc_exits),
+	VCPU_STAT("vz_hc", vz_hc_exits),
+	VCPU_STAT("vz_grr", vz_grr_exits),
+	VCPU_STAT("vz_gva", vz_gva_exits),
+	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
+	VCPU_STAT("vz_gpa", vz_gpa_exits),
+	VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
+	VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
 #endif
-	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
-	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
-	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
-	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
+#endif
+	VCPU_STAT("halt_successful_poll", halt_successful_poll),
+	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+	VCPU_STAT("halt_wakeup", halt_wakeup),
+	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
 	{NULL}
 };

@@ -80,13 +84,13 @@

 int kvm_guest_mode_change_trace_reg(void)
 {
-	kvm_trace_guest_mode_change = 1;
+	kvm_trace_guest_mode_change = true;
 	return 0;
 }

 void kvm_guest_mode_change_trace_unreg(void)
 {
-	kvm_trace_guest_mode_change = 0;
+	kvm_trace_guest_mode_change = false;
 }

 /*
@@ -118,15 +122,17 @@
 	kvm_mips_callbacks->hardware_disable();
 }

-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
 {
 	return 0;
 }

-void kvm_arch_check_processor_compat(void *rtn)
+int kvm_arch_check_processor_compat(void *opaque)
 {
-	*(int *)rtn = 0;
+	return 0;
 }
+
+extern void kvm_init_loongson_ipi(struct kvm *kvm);

 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
@@ -149,16 +155,10 @@
 	if (!kvm->arch.gpa_mm.pgd)
 		return -ENOMEM;

-	return 0;
-}
+#ifdef CONFIG_CPU_LOONGSON64
+	kvm_init_loongson_ipi(kvm);
+#endif

-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
 	return 0;
 }

@@ -168,7 +168,7 @@
 	struct kvm_vcpu *vcpu;

 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		kvm_arch_vcpu_free(vcpu);
+		kvm_vcpu_destroy(vcpu);
 	}

 	mutex_lock(&kvm->lock);
@@ -198,12 +198,6 @@
 			    unsigned long arg)
 {
 	return -ENOIOCTLCMD;
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-			    unsigned long npages)
-{
-	return 0;
 }

 void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -242,7 +236,7 @@

 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   const struct kvm_userspace_memory_region *mem,
-				   const struct kvm_memory_slot *old,
+				   struct kvm_memory_slot *old,
 				   const struct kvm_memory_slot *new,
 				   enum kvm_mr_change change)
 {
@@ -292,25 +286,42 @@
 	pr_debug("\tEND(%s)\n", symbol);
 }

-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+
+	kvm_mips_callbacks->queue_timer_int(vcpu);
+
+	vcpu->arch.wait = 0;
+	rcuwait_wake_up(&vcpu->wait);
+
+	return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+	return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int err, size;
 	void *gebase, *p, *handler, *refill_start, *refill_end;
 	int i;

-	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+	kvm_debug("kvm @ %p: create cpu %d at %p\n",
+		  vcpu->kvm, vcpu->vcpu_id, vcpu);

-	if (!vcpu) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	err = kvm_vcpu_init(vcpu, kvm, id);
-
+	err = kvm_mips_callbacks->vcpu_init(vcpu);
 	if (err)
-		goto out_free_cpu;
+		return err;

-	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+		     HRTIMER_MODE_REL);
+	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

 	/*
 	 * Allocate space for host mode exception handlers that handle
@@ -325,7 +336,7 @@

 	if (!gebase) {
 		err = -ENOMEM;
-		goto out_uninit_cpu;
+		goto out_uninit_vcpu;
 	}
 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 		  ALIGN(size, PAGE_SIZE), gebase);
@@ -404,38 +415,33 @@
 	vcpu->arch.last_sched_cpu = -1;
 	vcpu->arch.last_exec_cpu = -1;

-	return vcpu;
+	/* Initial guest state */
+	err = kvm_mips_callbacks->vcpu_setup(vcpu);
+	if (err)
+		goto out_free_commpage;

+	return 0;
+
+out_free_commpage:
+	kfree(vcpu->arch.kseg0_commpage);
 out_free_gebase:
 	kfree(gebase);
-
-out_uninit_cpu:
-	kvm_vcpu_uninit(vcpu);
-
-out_free_cpu:
-	kfree(vcpu);
-
-out:
-	return ERR_PTR(err);
+out_uninit_vcpu:
+	kvm_mips_callbacks->vcpu_uninit(vcpu);
+	return err;
 }

-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
-
-	kvm_vcpu_uninit(vcpu);

 	kvm_mips_dump_stats(vcpu);

 	kvm_mmu_free_memory_caches(vcpu);
 	kfree(vcpu->arch.guest_ebase);
 	kfree(vcpu->arch.kseg0_commpage);
-	kfree(vcpu);
-}

-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_arch_vcpu_free(vcpu);
+	kvm_mips_callbacks->vcpu_uninit(vcpu);
 }

 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -444,7 +450,7 @@
 	return -ENOIOCTLCMD;
 }

-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	int r = -EINTR;

450456
....@@ -454,11 +460,11 @@
454460
455461 if (vcpu->mmio_needed) {
456462 if (!vcpu->mmio_is_write)
457
- kvm_mips_complete_mmio_load(vcpu, run);
463
+ kvm_mips_complete_mmio_load(vcpu);
458464 vcpu->mmio_needed = 0;
459465 }
460466
461
- if (run->immediate_exit)
467
+ if (vcpu->run->immediate_exit)
462468 goto out;
463469
464470 lose_fpu(1);
....@@ -475,7 +481,7 @@
475481 */
476482 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
477483
478
- r = kvm_mips_callbacks->vcpu_run(run, vcpu);
484
+ r = kvm_mips_callbacks->vcpu_run(vcpu);
479485
480486 trace_kvm_out(vcpu);
481487 guest_exit_irqoff();
....@@ -494,7 +500,10 @@
494500 int intr = (int)irq->irq;
495501 struct kvm_vcpu *dvcpu = NULL;
496502
497
- if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
503
+ if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
504
+ intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
505
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
506
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
498507 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
499508 (int)intr);
500509
....@@ -503,10 +512,10 @@
503512 else
504513 dvcpu = vcpu->kvm->vcpus[irq->cpu];
505514
506
- if (intr == 2 || intr == 3 || intr == 4) {
515
+ if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
507516 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
508517
509
- } else if (intr == -2 || intr == -3 || intr == -4) {
518
+ } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
510519 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
511520 } else {
512521 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
@@ -516,8 +525,7 @@

 	dvcpu->arch.wait = 0;

-	if (swq_has_sleeper(&dvcpu->wq))
-		swake_up_one(&dvcpu->wq);
+	rcuwait_wake_up(&dvcpu->wait);

 	return 0;
 }
@@ -983,46 +991,16 @@
 	return r;
 }

-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
-	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
-	bool is_dirty = false;
-	int r;

-	mutex_lock(&kvm->slots_lock);
+}

-	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
-
-	if (is_dirty) {
-		slots = kvm_memslots(kvm);
-		memslot = id_to_memslot(slots, log->slot);
-
-		/* Let implementation handle TLB/GVA invalidation */
-		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
-	}
-
-	mutex_unlock(&kvm->slots_lock);
-	return r;
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
+{
+	/* Let implementation handle TLB/GVA invalidation */
+	kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
 }

 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
@@ -1201,56 +1179,10 @@
 	return 0;
 }

-static void kvm_mips_comparecount_func(unsigned long data)
-{
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
-	kvm_mips_callbacks->queue_timer_int(vcpu);
-
-	vcpu->arch.wait = 0;
-	if (swq_has_sleeper(&vcpu->wq))
-		swake_up_one(&vcpu->wq);
-}
-
-/* low level hrtimer wake routine */
-static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
-{
-	struct kvm_vcpu *vcpu;
-
-	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
-	kvm_mips_comparecount_func((unsigned long) vcpu);
-	return kvm_mips_count_timeout(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	int err;
-
-	err = kvm_mips_callbacks->vcpu_init(vcpu);
-	if (err)
-		return err;
-
-	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
-		     HRTIMER_MODE_REL);
-	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
-	return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-	kvm_mips_callbacks->vcpu_uninit(vcpu);
-}
-
 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 				  struct kvm_translation *tr)
 {
 	return 0;
-}
-
-/* Initial guest state */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return kvm_mips_callbacks->vcpu_setup(vcpu);
 }

 static void kvm_mips_set_c0_status(void)
@@ -1267,8 +1199,9 @@
 /*
  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
  */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
@@ -1305,7 +1238,7 @@
 	 * end up causing an exception to be delivered to the Guest
 	 * Kernel
 	 */
-	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+	er = kvm_mips_check_privilege(cause, opc, vcpu);
 	if (er == EMULATE_PRIV_FAIL) {
 		goto skip_emul;
 	} else if (er == EMULATE_FAIL) {
@@ -1454,7 +1387,7 @@
 	 */
 	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

-	kvm_mips_callbacks->vcpu_reenter(run, vcpu);
+	kvm_mips_callbacks->vcpu_reenter(vcpu);

 	/*
 	 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
@@ -1701,9 +1634,42 @@
 	.notifier_call = kvm_mips_csr_die_notify,
 };

+static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
+	[MIPS_EXC_INT_TIMER] = C_IRQ5,
+	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
+	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
+	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
+	[MIPS_EXC_INT_TIMER] = C_IRQ5,
+	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
+	[MIPS_EXC_INT_IO_2]  = C_IRQ1,
+	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
+};
+
+u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
+
+u32 kvm_irq_to_priority(u32 irq)
+{
+	int i;
+
+	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
+		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
+			return i;
+	}
+
+	return MIPS_EXC_MAX;
+}
+
 static int __init kvm_mips_init(void)
 {
 	int ret;
+
+	if (cpu_has_mmid) {
+		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
+		return -EOPNOTSUPP;
+	}

 	ret = kvm_mips_entry_setup();
 	if (ret)
@@ -1714,6 +1680,9 @@
 	if (ret)
 		return ret;

+	if (boot_cpu_type() == CPU_LOONGSON64)
+		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
+
 	register_die_notifier(&kvm_mips_csr_die_notifier);

 	return 0;