forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/arch/mips/kvm/vz.c
--- a/kernel/arch/mips/kvm/vz.c
+++ b/kernel/arch/mips/kvm/vz.c
@@ -29,6 +29,9 @@
 #include <linux/kvm_host.h>
 
 #include "interrupt.h"
+#ifdef CONFIG_CPU_LOONGSON64
+#include "loongson_regs.h"
+#endif
 
 #include "trace.h"
 
@@ -126,6 +129,11 @@
 	return mask;
 }
 
+static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+	return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
+}
+
 /*
  * VZ optionally allows these additional Config bits to be written by root:
  * Config: M, [MT]
@@ -180,6 +188,12 @@
 	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
 }
 
+static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
+{
+	return kvm_vz_config6_guest_wrmask(vcpu) |
+		LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
+}
+
 static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
 {
 	/* VZ guest has already converted gva to gpa */
@@ -225,23 +239,7 @@
 	 * interrupts are asynchronous to vcpu execution therefore defer guest
 	 * cp0 accesses
 	 */
-	switch (intr) {
-	case 2:
-		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
-		break;
-
-	case 3:
-		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
-		break;
-
-	case 4:
-		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
-		break;
-
-	default:
-		break;
-	}
-
+	kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
 }
 
 static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -253,44 +251,22 @@
 	 * interrupts are asynchronous to vcpu execution therefore defer guest
 	 * cp0 accesses
 	 */
-	switch (intr) {
-	case -2:
-		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
-		break;
-
-	case -3:
-		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
-		break;
-
-	case -4:
-		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
-		break;
-
-	default:
-		break;
-	}
-
+	kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
 }
-
-static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
-	[MIPS_EXC_INT_TIMER] = C_IRQ5,
-	[MIPS_EXC_INT_IO] = C_IRQ0,
-	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
-	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
-};
 
 static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
 				 u32 cause)
 {
 	u32 irq = (priority < MIPS_EXC_MAX) ?
-		kvm_vz_priority_to_irq[priority] : 0;
+		kvm_priority_to_irq[priority] : 0;
 
 	switch (priority) {
 	case MIPS_EXC_INT_TIMER:
 		set_gc0_cause(C_TI);
 		break;
 
-	case MIPS_EXC_INT_IO:
+	case MIPS_EXC_INT_IO_1:
+	case MIPS_EXC_INT_IO_2:
 	case MIPS_EXC_INT_IPI_1:
 	case MIPS_EXC_INT_IPI_2:
 		if (cpu_has_guestctl2)
@@ -311,7 +287,7 @@
 				u32 cause)
 {
 	u32 irq = (priority < MIPS_EXC_MAX) ?
-		kvm_vz_priority_to_irq[priority] : 0;
+		kvm_priority_to_irq[priority] : 0;
 
 	switch (priority) {
 	case MIPS_EXC_INT_TIMER:
@@ -329,7 +305,8 @@
 		}
 		break;
 
-	case MIPS_EXC_INT_IO:
+	case MIPS_EXC_INT_IO_1:
+	case MIPS_EXC_INT_IO_2:
 	case MIPS_EXC_INT_IPI_1:
 	case MIPS_EXC_INT_IPI_2:
 		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
@@ -899,7 +876,6 @@
 
 static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
 					      u32 *opc, u32 cause,
-					      struct kvm_run *run,
 					      struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -966,7 +942,8 @@
 				    (sel == 2 || /* SRSCtl */
 				     sel == 3)) || /* SRSMap */
 				   (rd == MIPS_CP0_CONFIG &&
-				    (sel == 7)) || /* Config7 */
+				    (sel == 6 || /* Config6 */
+				     sel == 7)) || /* Config7 */
 				   (rd == MIPS_CP0_LLADDR &&
 				    (sel == 2) && /* MAARI */
 				    cpu_guest_has_maar &&
@@ -974,6 +951,11 @@
 				   (rd == MIPS_CP0_ERRCTL &&
 				    (sel == 0))) { /* ErrCtl */
 				val = cop0->reg[rd][sel];
+#ifdef CONFIG_CPU_LOONGSON64
+			} else if (rd == MIPS_CP0_DIAG &&
+				   (sel == 0)) { /* Diag */
+				val = cop0->reg[rd][sel];
+#endif
 			} else {
 				val = 0;
 				er = EMULATE_FAIL;
@@ -1036,9 +1018,40 @@
 				   cpu_guest_has_maar &&
 				   !cpu_guest_has_dyn_maar) {
 				kvm_write_maari(vcpu, val);
+			} else if (rd == MIPS_CP0_CONFIG &&
+				   (sel == 6)) {
+				cop0->reg[rd][sel] = (int)val;
 			} else if (rd == MIPS_CP0_ERRCTL &&
 				   (sel == 0)) { /* ErrCtl */
 				/* ignore the written value */
+#ifdef CONFIG_CPU_LOONGSON64
+			} else if (rd == MIPS_CP0_DIAG &&
+				   (sel == 0)) { /* Diag */
+				unsigned long flags;
+
+				local_irq_save(flags);
+				if (val & LOONGSON_DIAG_BTB) {
+					/* Flush BTB */
+					set_c0_diag(LOONGSON_DIAG_BTB);
+				}
+				if (val & LOONGSON_DIAG_ITLB) {
+					/* Flush ITLB */
+					set_c0_diag(LOONGSON_DIAG_ITLB);
+				}
+				if (val & LOONGSON_DIAG_DTLB) {
+					/* Flush DTLB */
+					set_c0_diag(LOONGSON_DIAG_DTLB);
+				}
+				if (val & LOONGSON_DIAG_VTLB) {
+					/* Flush VTLB */
+					kvm_loongson_clear_guest_vtlb();
+				}
+				if (val & LOONGSON_DIAG_FTLB) {
+					/* Flush FTLB */
+					kvm_loongson_clear_guest_ftlb();
+				}
+				local_irq_restore(flags);
+#endif
 			} else {
 				er = EMULATE_FAIL;
 			}
@@ -1062,7 +1075,6 @@
 
 static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
 					       u32 *opc, u32 cause,
-					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
@@ -1118,7 +1130,7 @@
 		break;
 	default:
 		break;
-	};
+	}
 
 	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
 		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
@@ -1129,12 +1141,81 @@
 	return EMULATE_FAIL;
 }
 
+#ifdef CONFIG_CPU_LOONGSON64
+static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
+					      u32 *opc, u32 cause,
+					      struct kvm_vcpu *vcpu)
+{
+	unsigned int rs, rd;
+	unsigned int hostcfg;
+	unsigned long curr_pc;
+	enum emulation_result er = EMULATE_DONE;
+
+	/*
+	 * Update PC and hold onto current PC in case there is
+	 * an error and we want to rollback the PC
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+
+	rs = inst.loongson3_lscsr_format.rs;
+	rd = inst.loongson3_lscsr_format.rd;
+	switch (inst.loongson3_lscsr_format.fr) {
+	case 0x8: /* Read CPUCFG */
+		++vcpu->stat.vz_cpucfg_exits;
+		hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
+
+		switch (vcpu->arch.gprs[rs]) {
+		case LOONGSON_CFG0:
+			vcpu->arch.gprs[rd] = 0x14c000;
+			break;
+		case LOONGSON_CFG1:
+			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
+				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
+				    LOONGSON_CFG1_SFBP);
+			vcpu->arch.gprs[rd] = hostcfg;
+			break;
+		case LOONGSON_CFG2:
+			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
+				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
+			vcpu->arch.gprs[rd] = hostcfg;
+			break;
+		case LOONGSON_CFG3:
+			vcpu->arch.gprs[rd] = hostcfg;
+			break;
+		default:
+			/* Don't export any other advanced features to guest */
+			vcpu->arch.gprs[rd] = 0;
+			break;
+		}
+		break;
+
+	default:
+		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
+			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
+		er = EMULATE_FAIL;
+		break;
+	}
+
+	/* Rollback PC only if emulation was unsuccessful */
+	if (er == EMULATE_FAIL) {
+		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
+			curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
+
+		vcpu->arch.pc = curr_pc;
+	}
+
+	return er;
+}
+#endif
+
 static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
 						     struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DONE;
 	struct kvm_vcpu_arch *arch = &vcpu->arch;
-	struct kvm_run *run = vcpu->run;
 	union mips_instruction inst;
 	int rd, rt, sel;
 	int err;
@@ -1150,12 +1231,17 @@
 
 	switch (inst.r_format.opcode) {
 	case cop0_op:
-		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
+		er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
 		break;
 #ifndef CONFIG_CPU_MIPSR6
 	case cache_op:
 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+		er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
+		break;
+#endif
+#ifdef CONFIG_CPU_LOONGSON64
+	case lwc2_op:
+		er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
 		break;
 #endif
 	case spec3_op:
@@ -1163,7 +1249,7 @@
 #ifdef CONFIG_CPU_MIPSR6
 		case cache6_op:
 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+			er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
 			break;
 #endif
 		case rdhwr_op:
@@ -1183,7 +1269,7 @@
 				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
 					      KVM_TRACE_HWR(rd, sel), 0);
 				goto unknown;
-			};
+			}
 
 			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
 				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
@@ -1192,7 +1278,7 @@
 			break;
 		default:
 			goto unknown;
-		};
+		}
 		break;
 unknown:
 
@@ -1465,7 +1551,6 @@
  */
 static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	u32 cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_FAIL;
 	int ret = RESUME_GUEST;
@@ -1493,7 +1578,7 @@
 		break;
 
 	case EMULATE_FAIL:
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = RESUME_HOST;
 		break;
 
@@ -1512,8 +1597,6 @@
  */
 static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
-
 	/*
 	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
 	 * should have been treated as a reserved instruction!
@@ -1524,7 +1607,7 @@
 	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
 	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
 	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return RESUME_HOST;
 	}
 
@@ -1560,7 +1643,7 @@
 	}
 
 	/* Treat as MMIO */
-	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_load(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
 			opc, badvaddr);
@@ -1607,7 +1690,7 @@
 	}
 
 	/* Treat as MMIO */
-	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+	er = kvm_mips_emulate_store(inst, cause, vcpu);
 	if (er == EMULATE_FAIL) {
 		kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
 			opc, badvaddr);
@@ -1652,6 +1735,7 @@
 	KVM_REG_MIPS_CP0_CONFIG3,
 	KVM_REG_MIPS_CP0_CONFIG4,
 	KVM_REG_MIPS_CP0_CONFIG5,
+	KVM_REG_MIPS_CP0_CONFIG6,
 #ifdef CONFIG_64BIT
 	KVM_REG_MIPS_CP0_XCONTEXT,
 #endif
@@ -1706,7 +1790,7 @@
 		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
 	if (cpu_guest_has_segments)
 		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
-	if (cpu_guest_has_htw)
+	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
 		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
 	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
 		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
@@ -1755,7 +1839,7 @@
 			return -EFAULT;
 		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
 	}
-	if (cpu_guest_has_htw) {
+	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
 		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
 				 sizeof(kvm_vz_get_one_regs_htw)))
 			return -EFAULT;
@@ -1878,17 +1962,17 @@
 		*v = read_gc0_segctl2();
 		break;
 	case KVM_REG_MIPS_CP0_PWBASE:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		*v = read_gc0_pwbase();
 		break;
 	case KVM_REG_MIPS_CP0_PWFIELD:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		*v = read_gc0_pwfield();
 		break;
 	case KVM_REG_MIPS_CP0_PWSIZE:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		*v = read_gc0_pwsize();
 		break;
@@ -1896,7 +1980,7 @@
 		*v = (long)read_gc0_wired();
 		break;
 	case KVM_REG_MIPS_CP0_PWCTL:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		*v = read_gc0_pwctl();
 		break;
@@ -1946,7 +2030,7 @@
 		default:
 			*v = (long)kvm_read_c0_guest_prid(cop0);
 			break;
-		};
+		}
 		break;
 	case KVM_REG_MIPS_CP0_EBASE:
 		*v = kvm_vz_read_gc0_ebase();
@@ -1978,6 +2062,9 @@
 		if (!cpu_guest_has_conf5)
 			return -EINVAL;
 		*v = read_gc0_config5();
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG6:
+		*v = kvm_read_sw_gc0_config6(cop0);
 		break;
 	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
 		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
@@ -2101,17 +2188,17 @@
 		write_gc0_segctl2(v);
 		break;
 	case KVM_REG_MIPS_CP0_PWBASE:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		write_gc0_pwbase(v);
 		break;
 	case KVM_REG_MIPS_CP0_PWFIELD:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		write_gc0_pwfield(v);
 		break;
 	case KVM_REG_MIPS_CP0_PWSIZE:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		write_gc0_pwsize(v);
 		break;
@@ -2119,7 +2206,7 @@
 		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
 		break;
 	case KVM_REG_MIPS_CP0_PWCTL:
-		if (!cpu_guest_has_htw)
+		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
 			return -EINVAL;
 		write_gc0_pwctl(v);
 		break;
@@ -2185,7 +2272,7 @@
 		default:
 			kvm_write_c0_guest_prid(cop0, v);
 			break;
-		};
+		}
 		break;
 	case KVM_REG_MIPS_CP0_EBASE:
 		kvm_vz_write_gc0_ebase(v);
@@ -2246,6 +2333,14 @@
 		if (change) {
 			v = cur ^ change;
 			write_gc0_config5(v);
+		}
+		break;
+	case KVM_REG_MIPS_CP0_CONFIG6:
+		cur = kvm_read_sw_gc0_config6(cop0);
+		change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
+		if (change) {
+			v = cur ^ change;
+			kvm_write_sw_gc0_config6(cop0, (int)v);
 		}
 		break;
 	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
@@ -2454,10 +2549,10 @@
 		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
 		 */
-		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
-		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
-						asid_version_mask(cpu))
-			get_new_mmu_context(gpa_mm, cpu);
+		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
+			get_new_mmu_context(gpa_mm);
+		else
+			check_mmu_context(gpa_mm);
 	}
 }
 
@@ -2580,7 +2675,7 @@
 	}
 
 	/* restore HTW registers */
-	if (cpu_guest_has_htw) {
+	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
 		kvm_restore_gc0_pwbase(cop0);
 		kvm_restore_gc0_pwfield(cop0);
 		kvm_restore_gc0_pwsize(cop0);
@@ -2597,7 +2692,7 @@
 	 * prevents a SC on the next VCPU from succeeding by matching a LL on
 	 * the previous VCPU.
 	 */
-	if (cpu_guest_has_rw_llb)
+	if (vcpu->kvm->created_vcpus > 1)
 		write_gc0_lladdr(0);
 
 	return 0;
@@ -2685,8 +2780,8 @@
 	}
 
 	/* save HTW registers if enabled in guest */
-	if (cpu_guest_has_htw &&
-	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
+	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
+	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
 		kvm_save_gc0_pwbase(cop0);
 		kvm_save_gc0_pwfield(cop0);
 		kvm_save_gc0_pwsize(cop0);
@@ -2853,8 +2948,12 @@
 	write_c0_guestctl0(MIPS_GCTL0_CP0 |
 			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
 			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
-	if (cpu_has_guestctl0ext)
-		set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+	if (cpu_has_guestctl0ext) {
+		if (current_cpu_type() != CPU_LOONGSON64)
+			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+		else
+			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+	}
 
 	if (cpu_has_guestid) {
 		write_c0_guestctl1(0);
@@ -2870,6 +2969,12 @@
 	/* clear any pending injected virtual guest interrupts */
 	if (cpu_has_guestctl2)
 		clear_c0_guestctl2(0x3f << 10);
+
+#ifdef CONFIG_CPU_LOONGSON64
+	/* Control guest CCA attribute */
+	if (cpu_has_csr())
+		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
+#endif
 
 	return 0;
 }
@@ -2927,6 +3032,9 @@
 		r = 2;
 		break;
 #endif
+	case KVM_CAP_IOEVENTFD:
+		r = 1;
+		break;
 	default:
 		r = 0;
 		break;
@@ -2980,7 +3088,7 @@
 	 */
 
 	/* PageGrain */
-	if (cpu_has_mips_r6)
+	if (cpu_has_mips_r5 || cpu_has_mips_r6)
 		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
 	/* Wired */
 	if (cpu_has_mips_r6)
@@ -2988,7 +3096,7 @@
 					 read_gc0_wired() & MIPSR6_WIRED_LIMIT);
 	/* Status */
 	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
-	if (cpu_has_mips_r6)
+	if (cpu_has_mips_r5 || cpu_has_mips_r6)
 		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
 	/* IntCtl */
 	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
@@ -3086,7 +3194,7 @@
 	}
 
 	/* reset HTW registers */
-	if (cpu_guest_has_htw && cpu_has_mips_r6) {
+	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
 		/* PWField */
 		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
 		/* PWSize */
@@ -3129,7 +3237,7 @@
 	kvm_vz_flush_shadow_all(kvm);
 }
 
-static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
 	int preserve_guest_tlb;
@@ -3145,7 +3253,7 @@
 		kvm_vz_vcpu_load_wired(vcpu);
 }
 
-static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int cpu = smp_processor_id();
 	int r;
@@ -3158,7 +3266,7 @@
 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
 	kvm_vz_vcpu_load_wired(vcpu);
 
-	r = vcpu->arch.vcpu_run(run, vcpu);
+	r = vcpu->arch.vcpu_run(vcpu);
 
 	kvm_vz_vcpu_save_wired(vcpu);
 