forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/mips/include/asm/kvm_host.h
--- a/kernel/arch/mips/include/asm/kvm_host.h
+++ b/kernel/arch/mips/include/asm/kvm_host.h
@@ -23,6 +23,8 @@
 #include <asm/inst.h>
 #include <asm/mipsregs.h>
 
+#include <kvm/iodev.h>
+
 /* MIPS KVM register ids */
 #define MIPS_CP0_32(_R, _S) \
         (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
@@ -66,9 +68,11 @@
 #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
 #define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
 #define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
+#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
 #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
 #define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
 #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_DIAG MIPS_CP0_32(22, 0)
 #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
 #define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
 #define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
@@ -78,8 +82,8 @@
 #define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
 
 
-#define KVM_MAX_VCPUS 8
-#define KVM_USER_MEM_SLOTS 8
+#define KVM_MAX_VCPUS 16
+#define KVM_USER_MEM_SLOTS 16
 /* memory slots that does not exposed to userspace */
 #define KVM_PRIVATE_MEM_SLOTS 0
 
@@ -171,9 +175,14 @@
         u64 vz_ghfc_exits;
         u64 vz_gpa_exits;
         u64 vz_resvd_exits;
+#ifdef CONFIG_CPU_LOONGSON64
+        u64 vz_cpucfg_exits;
+#endif
 #endif
         u64 halt_successful_poll;
         u64 halt_attempted_poll;
+        u64 halt_poll_success_ns;
+        u64 halt_poll_fail_ns;
         u64 halt_poll_invalid;
         u64 halt_wakeup;
 };
@@ -181,11 +190,39 @@
 struct kvm_arch_memory_slot {
 };
 
+#ifdef CONFIG_CPU_LOONGSON64
+struct ipi_state {
+        uint32_t status;
+        uint32_t en;
+        uint32_t set;
+        uint32_t clear;
+        uint64_t buf[4];
+};
+
+struct loongson_kvm_ipi;
+
+struct ipi_io_device {
+        int node_id;
+        struct loongson_kvm_ipi *ipi;
+        struct kvm_io_device device;
+};
+
+struct loongson_kvm_ipi {
+        spinlock_t lock;
+        struct kvm *kvm;
+        struct ipi_state ipistate[16];
+        struct ipi_io_device dev_ipi[4];
+};
+#endif
+
 struct kvm_arch {
         /* Guest physical mm */
         struct mm_struct gpa_mm;
         /* Mask of CPUs needing GPA ASID flush */
         cpumask_t asid_flush_mask;
+#ifdef CONFIG_CPU_LOONGSON64
+        struct loongson_kvm_ipi ipi;
+#endif
 };
 
 #define N_MIPS_COPROC_REGS 32
@@ -223,6 +260,7 @@
 #define MIPS_CP0_WATCH_LO 18
 #define MIPS_CP0_WATCH_HI 19
 #define MIPS_CP0_TLB_XCONTEXT 20
+#define MIPS_CP0_DIAG 22
 #define MIPS_CP0_ECC 26
 #define MIPS_CP0_CACHE_ERR 27
 #define MIPS_CP0_TAG_LO 28
@@ -297,24 +335,13 @@
         long tlb_lo[2];
 };
 
-#define KVM_NR_MEM_OBJS 4
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-        int nobjs;
-        void *objects[KVM_NR_MEM_OBJS];
-};
-
 #define KVM_MIPS_AUX_FPU 0x1
 #define KVM_MIPS_AUX_MSA 0x2
 
 #define KVM_MIPS_GUEST_TLB_SIZE 64
 struct kvm_vcpu_arch {
         void *guest_ebase;
-        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+        int (*vcpu_run)(struct kvm_vcpu *vcpu);
 
         /* Host registers preserved across guest mode execution */
         unsigned long host_stack;
@@ -415,11 +442,12 @@
         unsigned long temp;
         do {
                 __asm__ __volatile__(
+                " .set push \n"
                 " .set "MIPS_ISA_ARCH_LEVEL" \n"
                 " " __LL "%0, %1 \n"
                 " or %0, %2 \n"
                 " " __SC "%0, %1 \n"
-                " .set mips0 \n"
+                " .set pop \n"
                 : "=&r" (temp), "+m" (*reg)
                 : "r" (val));
         } while (unlikely(!temp));
@@ -431,11 +459,12 @@
         unsigned long temp;
         do {
                 __asm__ __volatile__(
+                " .set push \n"
                 " .set "MIPS_ISA_ARCH_LEVEL" \n"
                 " " __LL "%0, %1 \n"
                 " and %0, %2 \n"
                 " " __SC "%0, %1 \n"
-                " .set mips0 \n"
+                " .set pop \n"
                 : "=&r" (temp), "+m" (*reg)
                 : "r" (~val));
         } while (unlikely(!temp));
@@ -448,12 +477,13 @@
         unsigned long temp;
         do {
                 __asm__ __volatile__(
+                " .set push \n"
                 " .set "MIPS_ISA_ARCH_LEVEL" \n"
                 " " __LL "%0, %1 \n"
                 " and %0, %2 \n"
                 " or %0, %3 \n"
                 " " __SC "%0, %1 \n"
-                " .set mips0 \n"
+                " .set pop \n"
                 : "=&r" (temp), "+m" (*reg)
                 : "r" (~change), "r" (val & change));
         } while (unlikely(!temp));
....@@ -813,8 +843,8 @@
813843 const struct kvm_one_reg *reg, s64 v);
814844 int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
815845 int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
816
- int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
817
- void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
846
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
847
+ void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
818848 };
819849 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
820850 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -822,7 +852,7 @@
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
-extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
 
 /* Building of entry/exception code */
 int kvm_mips_entry_setup(void);
@@ -869,7 +899,6 @@
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu,
                 bool write_fault);
 
@@ -891,6 +920,10 @@
                 unsigned int count);
 void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
                 unsigned int count);
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void);
+void kvm_loongson_clear_guest_ftlb(void);
+#endif
 #endif
 
 void kvm_mips_suspend_mm(int cpu);
@@ -936,8 +969,8 @@
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-                unsigned long start, unsigned long end, bool blockable);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+                unsigned long start, unsigned long end, unsigned flags);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 
@@ -976,83 +1009,67 @@
 
 extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_handle_ri(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
-                struct kvm_run *run);
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);
 
 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
@@ -1081,26 +1098,21 @@
 
 enum emulation_result kvm_mips_check_privilege(u32 cause,
                 u32 *opc,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                 u32 *opc,
                 u32 cause,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                 u32 *opc,
                 u32 cause,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                 u32 cause,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                 u32 cause,
-                struct kvm_run *run,
                 struct kvm_vcpu *vcpu);
 
 /* COP0 */
@@ -1130,11 +1142,13 @@
 /* Misc */
 extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+                struct kvm_mips_interrupt *irq);
 
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_free_memslot(struct kvm *kvm,
-                struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
+                struct kvm_memory_slot *slot) {}
 static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
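
Illustration (not part of the patch): the recurring edit above removes the explicit struct kvm_run * argument from the MIPS run/exit/emulation entry points, since the run area is always reachable as vcpu->run. A minimal sketch of a caller under the new one-argument convention follows; example_vcpu_run() is a hypothetical wrapper written for this note, while vcpu->run, KVM_EXIT_UNKNOWN and kvm_mips_callbacks come from the surrounding KVM headers.

#include <linux/kvm_host.h>	/* pulls in asm/kvm_host.h and struct kvm_run */

/* Hypothetical wrapper, for illustration only. */
static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;		/* formerly passed as a separate argument */

	run->exit_reason = KVM_EXIT_UNKNOWN;		/* clear any stale exit reason */
	return kvm_mips_callbacks->vcpu_run(vcpu);	/* new single-argument callback */
}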