| .. | .. |
|---|
| 29 | 29 | #include <linux/kvm_host.h> |
|---|
| 30 | 30 | |
|---|
| 31 | 31 | #include "interrupt.h" |
|---|
| 32 | +#ifdef CONFIG_CPU_LOONGSON64 |
|---|
| 33 | +#include "loongson_regs.h" |
|---|
| 34 | +#endif |
|---|
| 32 | 35 | |
|---|
| 33 | 36 | #include "trace.h" |
|---|
| 34 | 37 | |
|---|
| .. | .. |
|---|
| 126 | 129 | return mask; |
|---|
| 127 | 130 | } |
|---|
| 128 | 131 | |
|---|
| 132 | +static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu) |
|---|
| 133 | +{ |
|---|
| 134 | + return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER; |
|---|
| 135 | +} |
|---|
| 136 | + |
|---|
| 129 | 137 | /* |
|---|
| 130 | 138 | * VZ optionally allows these additional Config bits to be written by root: |
|---|
| 131 | 139 | * Config: M, [MT] |
|---|
| .. | .. |
|---|
| 180 | 188 | return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP; |
|---|
| 181 | 189 | } |
|---|
| 182 | 190 | |
|---|
| 191 | +static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu) |
|---|
| 192 | +{ |
|---|
| 193 | + return kvm_vz_config6_guest_wrmask(vcpu) | |
|---|
| 194 | + LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS; |
|---|
| 195 | +} |
|---|
| 196 | + |
|---|
| 183 | 197 | static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva) |
|---|
| 184 | 198 | { |
|---|
| 185 | 199 | /* VZ guest has already converted gva to gpa */ |
|---|
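The two new `kvm_vz_config6_*_wrmask()` helpers follow the pattern of the existing Config1–Config5 masks: the guest mask names the Loongson Config6 bits a guest may change itself (the internal/external timer enables), while the user mask additionally lets VMM userspace toggle the store-fill-buffer and FTLB-disable bits through the ONE_REG interface. A minimal sketch of the masked-update idiom these masks feed into — the same `cur ^ change` pattern the CONFIG6 ONE_REG handler uses later in this diff; the wrapper function name here is made up for illustration:

```c
/* Sketch only: apply the userspace writability mask to a Config6 update. */
static void set_guest_config6(struct kvm_vcpu *vcpu, unsigned int v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int cur, change;

	cur = kvm_read_sw_gc0_config6(cop0);
	/* Only bits present in the mask are allowed to change. */
	change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
	if (change)
		kvm_write_sw_gc0_config6(cop0, (int)(cur ^ change));
}
```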
| .. | .. |
|---|
| 225 | 239 | * interrupts are asynchronous to vcpu execution therefore defer guest |
|---|
| 226 | 240 | * cp0 accesses |
|---|
| 227 | 241 | */ |
|---|
| 228 | | - switch (intr) { |
|---|
| 229 | | - case 2: |
|---|
| 230 | | - kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO); |
|---|
| 231 | | - break; |
|---|
| 232 | | - |
|---|
| 233 | | - case 3: |
|---|
| 234 | | - kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1); |
|---|
| 235 | | - break; |
|---|
| 236 | | - |
|---|
| 237 | | - case 4: |
|---|
| 238 | | - kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2); |
|---|
| 239 | | - break; |
|---|
| 240 | | - |
|---|
| 241 | | - default: |
|---|
| 242 | | - break; |
|---|
| 243 | | - } |
|---|
| 244 | | - |
|---|
| 242 | + kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr)); |
|---|
| 245 | 243 | } |
|---|
| 246 | 244 | |
|---|
| 247 | 245 | static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu, |
|---|
| .. | .. |
|---|
| 253 | 251 | * interrupts are asynchronous to vcpu execution therefore defer guest |
|---|
| 254 | 252 | * cp0 accesses |
|---|
| 255 | 253 | */ |
|---|
| 256 | | - switch (intr) { |
|---|
| 257 | | - case -2: |
|---|
| 258 | | - kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO); |
|---|
| 259 | | - break; |
|---|
| 260 | | - |
|---|
| 261 | | - case -3: |
|---|
| 262 | | - kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1); |
|---|
| 263 | | - break; |
|---|
| 264 | | - |
|---|
| 265 | | - case -4: |
|---|
| 266 | | - kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2); |
|---|
| 267 | | - break; |
|---|
| 268 | | - |
|---|
| 269 | | - default: |
|---|
| 270 | | - break; |
|---|
| 271 | | - } |
|---|
| 272 | | - |
|---|
| 254 | + kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); |
|---|
| 273 | 255 | } |
|---|
| 274 | | - |
|---|
| 275 | | -static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = { |
|---|
| 276 | | - [MIPS_EXC_INT_TIMER] = C_IRQ5, |
|---|
| 277 | | - [MIPS_EXC_INT_IO] = C_IRQ0, |
|---|
| 278 | | - [MIPS_EXC_INT_IPI_1] = C_IRQ1, |
|---|
| 279 | | - [MIPS_EXC_INT_IPI_2] = C_IRQ2, |
|---|
| 280 | | -}; |
|---|
| 281 | 256 | |
|---|
| 282 | 257 | static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, |
|---|
| 283 | 258 | u32 cause) |
|---|
| 284 | 259 | { |
|---|
| 285 | 260 | u32 irq = (priority < MIPS_EXC_MAX) ? |
|---|
| 286 | | - kvm_vz_priority_to_irq[priority] : 0; |
|---|
| 261 | + kvm_priority_to_irq[priority] : 0; |
|---|
| 287 | 262 | |
|---|
| 288 | 263 | switch (priority) { |
|---|
| 289 | 264 | case MIPS_EXC_INT_TIMER: |
|---|
| 290 | 265 | set_gc0_cause(C_TI); |
|---|
| 291 | 266 | break; |
|---|
| 292 | 267 | |
|---|
| 293 | | - case MIPS_EXC_INT_IO: |
|---|
| 268 | + case MIPS_EXC_INT_IO_1: |
|---|
| 269 | + case MIPS_EXC_INT_IO_2: |
|---|
| 294 | 270 | case MIPS_EXC_INT_IPI_1: |
|---|
| 295 | 271 | case MIPS_EXC_INT_IPI_2: |
|---|
| 296 | 272 | if (cpu_has_guestctl2) |
|---|
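The interrupt plumbing above drops the VZ-private `kvm_vz_priority_to_irq[]` table and the hard-coded `switch (intr)` blocks in favour of a shared mapping: `kvm_irq_to_priority()` converts the raw IRQ number into a `MIPS_EXC_INT_*` priority, and `kvm_priority_to_irq[]` (defined in the common MIPS KVM code added elsewhere in this series) converts a priority back to a Cause.IP bit. That indirection is what lets Loongson-3 split I/O interrupts across two lines (`MIPS_EXC_INT_IO_1`/`IO_2`) and use different IPI lines without touching these VZ callbacks. A best-guess sketch of the shared half — the table mirrors the one removed here and the helper is simply its inverse; exact names and values should be checked against the rest of the series:

```c
#include <linux/kvm_host.h>

/* Best-guess sketch of the shared definitions this hunk relies on. */
static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;	/* platform may override */

u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	/* Invert the table: find the priority whose Cause.IP bit matches. */
	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}
```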
| .. | .. |
|---|
| 311 | 287 | u32 cause) |
|---|
| 312 | 288 | { |
|---|
| 313 | 289 | u32 irq = (priority < MIPS_EXC_MAX) ? |
|---|
| 314 | | - kvm_vz_priority_to_irq[priority] : 0; |
|---|
| 290 | + kvm_priority_to_irq[priority] : 0; |
|---|
| 315 | 291 | |
|---|
| 316 | 292 | switch (priority) { |
|---|
| 317 | 293 | case MIPS_EXC_INT_TIMER: |
|---|
| .. | .. |
|---|
| 329 | 305 | } |
|---|
| 330 | 306 | break; |
|---|
| 331 | 307 | |
|---|
| 332 | | - case MIPS_EXC_INT_IO: |
|---|
| 308 | + case MIPS_EXC_INT_IO_1: |
|---|
| 309 | + case MIPS_EXC_INT_IO_2: |
|---|
| 333 | 310 | case MIPS_EXC_INT_IPI_1: |
|---|
| 334 | 311 | case MIPS_EXC_INT_IPI_2: |
|---|
| 335 | 312 | /* Clear GuestCtl2.VIP irq if not using Hardware Clear */ |
|---|
| .. | .. |
|---|
| 899 | 876 | |
|---|
| 900 | 877 | static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst, |
|---|
| 901 | 878 | u32 *opc, u32 cause, |
|---|
| 902 | | - struct kvm_run *run, |
|---|
| 903 | 879 | struct kvm_vcpu *vcpu) |
|---|
| 904 | 880 | { |
|---|
| 905 | 881 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
|---|
| .. | .. |
|---|
| 966 | 942 | (sel == 2 || /* SRSCtl */ |
|---|
| 967 | 943 | sel == 3)) || /* SRSMap */ |
|---|
| 968 | 944 | (rd == MIPS_CP0_CONFIG && |
|---|
| 969 | | - (sel == 7)) || /* Config7 */ |
|---|
| 945 | + (sel == 6 || /* Config6 */ |
|---|
| 946 | + sel == 7)) || /* Config7 */ |
|---|
| 970 | 947 | (rd == MIPS_CP0_LLADDR && |
|---|
| 971 | 948 | (sel == 2) && /* MAARI */ |
|---|
| 972 | 949 | cpu_guest_has_maar && |
|---|
| .. | .. |
|---|
| 974 | 951 | (rd == MIPS_CP0_ERRCTL && |
|---|
| 975 | 952 | (sel == 0))) { /* ErrCtl */ |
|---|
| 976 | 953 | val = cop0->reg[rd][sel]; |
|---|
| 954 | +#ifdef CONFIG_CPU_LOONGSON64 |
|---|
| 955 | + } else if (rd == MIPS_CP0_DIAG && |
|---|
| 956 | + (sel == 0)) { /* Diag */ |
|---|
| 957 | + val = cop0->reg[rd][sel]; |
|---|
| 958 | +#endif |
|---|
| 977 | 959 | } else { |
|---|
| 978 | 960 | val = 0; |
|---|
| 979 | 961 | er = EMULATE_FAIL; |
|---|
| .. | .. |
|---|
| 1036 | 1018 | cpu_guest_has_maar && |
|---|
| 1037 | 1019 | !cpu_guest_has_dyn_maar) { |
|---|
| 1038 | 1020 | kvm_write_maari(vcpu, val); |
|---|
| 1021 | + } else if (rd == MIPS_CP0_CONFIG && |
|---|
| 1022 | + (sel == 6)) { |
|---|
| 1023 | + cop0->reg[rd][sel] = (int)val; |
|---|
| 1039 | 1024 | } else if (rd == MIPS_CP0_ERRCTL && |
|---|
| 1040 | 1025 | (sel == 0)) { /* ErrCtl */ |
|---|
| 1041 | 1026 | /* ignore the written value */ |
|---|
| 1027 | +#ifdef CONFIG_CPU_LOONGSON64 |
|---|
| 1028 | + } else if (rd == MIPS_CP0_DIAG && |
|---|
| 1029 | + (sel == 0)) { /* Diag */ |
|---|
| 1030 | + unsigned long flags; |
|---|
| 1031 | + |
|---|
| 1032 | + local_irq_save(flags); |
|---|
| 1033 | + if (val & LOONGSON_DIAG_BTB) { |
|---|
| 1034 | + /* Flush BTB */ |
|---|
| 1035 | + set_c0_diag(LOONGSON_DIAG_BTB); |
|---|
| 1036 | + } |
|---|
| 1037 | + if (val & LOONGSON_DIAG_ITLB) { |
|---|
| 1038 | + /* Flush ITLB */ |
|---|
| 1039 | + set_c0_diag(LOONGSON_DIAG_ITLB); |
|---|
| 1040 | + } |
|---|
| 1041 | + if (val & LOONGSON_DIAG_DTLB) { |
|---|
| 1042 | + /* Flush DTLB */ |
|---|
| 1043 | + set_c0_diag(LOONGSON_DIAG_DTLB); |
|---|
| 1044 | + } |
|---|
| 1045 | + if (val & LOONGSON_DIAG_VTLB) { |
|---|
| 1046 | + /* Flush VTLB */ |
|---|
| 1047 | + kvm_loongson_clear_guest_vtlb(); |
|---|
| 1048 | + } |
|---|
| 1049 | + if (val & LOONGSON_DIAG_FTLB) { |
|---|
| 1050 | + /* Flush FTLB */ |
|---|
| 1051 | + kvm_loongson_clear_guest_ftlb(); |
|---|
| 1052 | + } |
|---|
| 1053 | + local_irq_restore(flags); |
|---|
| 1054 | +#endif |
|---|
| 1042 | 1055 | } else { |
|---|
| 1043 | 1056 | er = EMULATE_FAIL; |
|---|
| 1044 | 1057 | } |
|---|
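Together, the MFC0/MTC0 hunks above give the guest a view of Config6 and of the Loongson Diag register (CP0 $22). On the write side the Diag bits are treated as flush requests: BTB/ITLB/DTLB requests are forwarded straight to the host Diag register with `set_c0_diag()`, while VTLB/FTLB requests are handled by the new `kvm_loongson_clear_guest_vtlb()`/`kvm_loongson_clear_guest_ftlb()` helpers, presumably so the flush can be confined to guest TLB entries, and interrupts are disabled around the whole sequence so it completes atomically on the current CPU. For context, this is roughly what reaches that path from inside a Loongson guest kernel; it uses the same CP0 accessor the host uses, and under VZ the write to $22 traps as a GPSI exception (a sketch, assuming the usual `set_c0_diag()`/`LOONGSON_DIAG_*` definitions are visible to the guest kernel):

```c
/* Illustrative guest-side trigger for the Diag emulation above. */
#include <asm/mipsregs.h>		/* set_c0_diag(), LOONGSON_DIAG_* (assumed) */

static inline void guest_local_flush_itlb(void)
{
	/* On bare metal this flushes the ITLB directly; under VZ the write
	 * to CP0 $22 is intercepted and emulated by kvm_vz_gpsi_cop0(). */
	set_c0_diag(LOONGSON_DIAG_ITLB);
}
```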
| .. | .. |
|---|
| 1062 | 1075 | |
|---|
| 1063 | 1076 | static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst, |
|---|
| 1064 | 1077 | u32 *opc, u32 cause, |
|---|
| 1065 | | - struct kvm_run *run, |
|---|
| 1066 | 1078 | struct kvm_vcpu *vcpu) |
|---|
| 1067 | 1079 | { |
|---|
| 1068 | 1080 | enum emulation_result er = EMULATE_DONE; |
|---|
| .. | .. |
|---|
| 1118 | 1130 | break; |
|---|
| 1119 | 1131 | default: |
|---|
| 1120 | 1132 | break; |
|---|
| 1121 | | - }; |
|---|
| 1133 | + } |
|---|
| 1122 | 1134 | |
|---|
| 1123 | 1135 | kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
|---|
| 1124 | 1136 | curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], |
|---|
| .. | .. |
|---|
| 1129 | 1141 | return EMULATE_FAIL; |
|---|
| 1130 | 1142 | } |
|---|
| 1131 | 1143 | |
|---|
| 1144 | +#ifdef CONFIG_CPU_LOONGSON64 |
|---|
| 1145 | +static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst, |
|---|
| 1146 | + u32 *opc, u32 cause, |
|---|
| 1147 | + struct kvm_vcpu *vcpu) |
|---|
| 1148 | +{ |
|---|
| 1149 | + unsigned int rs, rd; |
|---|
| 1150 | + unsigned int hostcfg; |
|---|
| 1151 | + unsigned long curr_pc; |
|---|
| 1152 | + enum emulation_result er = EMULATE_DONE; |
|---|
| 1153 | + |
|---|
| 1154 | + /* |
|---|
| 1155 | + * Update PC and hold onto current PC in case there is |
|---|
| 1156 | + * an error and we want to rollback the PC |
|---|
| 1157 | + */ |
|---|
| 1158 | + curr_pc = vcpu->arch.pc; |
|---|
| 1159 | + er = update_pc(vcpu, cause); |
|---|
| 1160 | + if (er == EMULATE_FAIL) |
|---|
| 1161 | + return er; |
|---|
| 1162 | + |
|---|
| 1163 | + rs = inst.loongson3_lscsr_format.rs; |
|---|
| 1164 | + rd = inst.loongson3_lscsr_format.rd; |
|---|
| 1165 | + switch (inst.loongson3_lscsr_format.fr) { |
|---|
| 1166 | + case 0x8: /* Read CPUCFG */ |
|---|
| 1167 | + ++vcpu->stat.vz_cpucfg_exits; |
|---|
| 1168 | + hostcfg = read_cpucfg(vcpu->arch.gprs[rs]); |
|---|
| 1169 | + |
|---|
| 1170 | + switch (vcpu->arch.gprs[rs]) { |
|---|
| 1171 | + case LOONGSON_CFG0: |
|---|
| 1172 | + vcpu->arch.gprs[rd] = 0x14c000; |
|---|
| 1173 | + break; |
|---|
| 1174 | + case LOONGSON_CFG1: |
|---|
| 1175 | + hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI | |
|---|
| 1176 | + LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 | |
|---|
| 1177 | + LOONGSON_CFG1_SFBP); |
|---|
| 1178 | + vcpu->arch.gprs[rd] = hostcfg; |
|---|
| 1179 | + break; |
|---|
| 1180 | + case LOONGSON_CFG2: |
|---|
| 1181 | + hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 | |
|---|
| 1182 | + LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW); |
|---|
| 1183 | + vcpu->arch.gprs[rd] = hostcfg; |
|---|
| 1184 | + break; |
|---|
| 1185 | + case LOONGSON_CFG3: |
|---|
| 1186 | + vcpu->arch.gprs[rd] = hostcfg; |
|---|
| 1187 | + break; |
|---|
| 1188 | + default: |
|---|
| 1189 | + /* Don't export any other advanced features to guest */ |
|---|
| 1190 | + vcpu->arch.gprs[rd] = 0; |
|---|
| 1191 | + break; |
|---|
| 1192 | + } |
|---|
| 1193 | + break; |
|---|
| 1194 | + |
|---|
| 1195 | + default: |
|---|
| 1196 | + kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n", |
|---|
| 1197 | + inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc); |
|---|
| 1198 | + er = EMULATE_FAIL; |
|---|
| 1199 | + break; |
|---|
| 1200 | + } |
|---|
| 1201 | + |
|---|
| 1202 | + /* Rollback PC only if emulation was unsuccessful */ |
|---|
| 1203 | + if (er == EMULATE_FAIL) { |
|---|
| 1204 | + kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n", |
|---|
| 1205 | + curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr); |
|---|
| 1206 | + |
|---|
| 1207 | + vcpu->arch.pc = curr_pc; |
|---|
| 1208 | + } |
|---|
| 1209 | + |
|---|
| 1210 | + return er; |
|---|
| 1211 | +} |
|---|
| 1212 | +#endif |
|---|
| 1213 | + |
|---|
| 1132 | 1214 | static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc, |
|---|
| 1133 | 1215 | struct kvm_vcpu *vcpu) |
|---|
| 1134 | 1216 | { |
|---|
| 1135 | 1217 | enum emulation_result er = EMULATE_DONE; |
|---|
| 1136 | 1218 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
|---|
| 1137 | | - struct kvm_run *run = vcpu->run; |
|---|
| 1138 | 1219 | union mips_instruction inst; |
|---|
| 1139 | 1220 | int rd, rt, sel; |
|---|
| 1140 | 1221 | int err; |
|---|
| .. | .. |
|---|
| 1150 | 1231 | |
|---|
| 1151 | 1232 | switch (inst.r_format.opcode) { |
|---|
| 1152 | 1233 | case cop0_op: |
|---|
| 1153 | | - er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu); |
|---|
| 1234 | + er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu); |
|---|
| 1154 | 1235 | break; |
|---|
| 1155 | 1236 | #ifndef CONFIG_CPU_MIPSR6 |
|---|
| 1156 | 1237 | case cache_op: |
|---|
| 1157 | 1238 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); |
|---|
| 1158 | | - er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu); |
|---|
| 1239 | + er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu); |
|---|
| 1240 | + break; |
|---|
| 1241 | +#endif |
|---|
| 1242 | +#ifdef CONFIG_CPU_LOONGSON64 |
|---|
| 1243 | + case lwc2_op: |
|---|
| 1244 | + er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu); |
|---|
| 1159 | 1245 | break; |
|---|
| 1160 | 1246 | #endif |
|---|
| 1161 | 1247 | case spec3_op: |
|---|
| .. | .. |
|---|
| 1163 | 1249 | #ifdef CONFIG_CPU_MIPSR6 |
|---|
| 1164 | 1250 | case cache6_op: |
|---|
| 1165 | 1251 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); |
|---|
| 1166 | | - er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu); |
|---|
| 1252 | + er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu); |
|---|
| 1167 | 1253 | break; |
|---|
| 1168 | 1254 | #endif |
|---|
| 1169 | 1255 | case rdhwr_op: |
|---|
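`kvm_vz_gpsi_lwc2()` and the new `lwc2_op` dispatch case above emulate the Loongson CPUCFG instruction, which is encoded in the `lwc2` major opcode and traps to root as a GPSI exception when a guest executes it. The handler whitelists what the guest may see: CFG0 is replaced with a fixed value rather than the host's, CFG1/CFG2 are masked down to the FP/MMI/MSA/SFB and LEXT/LSPW feature bits, CFG3 is passed through, every other CPUCFG word reads as zero, and each emulated read is counted in the new `vz_cpucfg_exits` stat. From the guest's point of view nothing changes; it probes features the same way it does on bare metal, e.g. (sketch; assumes the guest kernel has the usual `loongson_regs.h` helpers):

```c
/* Illustrative guest-side probe that lands in kvm_vz_gpsi_lwc2(). */
#include <linux/types.h>
#include "loongson_regs.h"	/* read_cpucfg(), LOONGSON_CFG1, LOONGSON_CFG1_MSA1 */

static bool guest_cpu_has_msa(void)
{
	/* The CPUCFG read traps to KVM, which returns CFG1 filtered down to
	 * the whitelisted feature bits shown in the hunk above. */
	return read_cpucfg(LOONGSON_CFG1) & LOONGSON_CFG1_MSA1;
}
```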
| .. | .. |
|---|
| 1183 | 1269 | trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, |
|---|
| 1184 | 1270 | KVM_TRACE_HWR(rd, sel), 0); |
|---|
| 1185 | 1271 | goto unknown; |
|---|
| 1186 | | - }; |
|---|
| 1272 | + } |
|---|
| 1187 | 1273 | |
|---|
| 1188 | 1274 | trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, |
|---|
| 1189 | 1275 | KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); |
|---|
| .. | .. |
|---|
| 1192 | 1278 | break; |
|---|
| 1193 | 1279 | default: |
|---|
| 1194 | 1280 | goto unknown; |
|---|
| 1195 | | - }; |
|---|
| 1281 | + } |
|---|
| 1196 | 1282 | break; |
|---|
| 1197 | 1283 | unknown: |
|---|
| 1198 | 1284 | |
|---|
| .. | .. |
|---|
| 1465 | 1551 | */ |
|---|
| 1466 | 1552 | static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu) |
|---|
| 1467 | 1553 | { |
|---|
| 1468 | | - struct kvm_run *run = vcpu->run; |
|---|
| 1469 | 1554 | u32 cause = vcpu->arch.host_cp0_cause; |
|---|
| 1470 | 1555 | enum emulation_result er = EMULATE_FAIL; |
|---|
| 1471 | 1556 | int ret = RESUME_GUEST; |
|---|
| .. | .. |
|---|
| 1493 | 1578 | break; |
|---|
| 1494 | 1579 | |
|---|
| 1495 | 1580 | case EMULATE_FAIL: |
|---|
| 1496 | | - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
|---|
| 1581 | + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
|---|
| 1497 | 1582 | ret = RESUME_HOST; |
|---|
| 1498 | 1583 | break; |
|---|
| 1499 | 1584 | |
|---|
| .. | .. |
|---|
| 1512 | 1597 | */ |
|---|
| 1513 | 1598 | static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu) |
|---|
| 1514 | 1599 | { |
|---|
| 1515 | | - struct kvm_run *run = vcpu->run; |
|---|
| 1516 | | - |
|---|
| 1517 | 1600 | /* |
|---|
| 1518 | 1601 | * If MSA not present or not exposed to guest or FR=0, the MSA operation |
|---|
| 1519 | 1602 | * should have been treated as a reserved instruction! |
|---|
| .. | .. |
|---|
| 1524 | 1607 | (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 || |
|---|
| 1525 | 1608 | !(read_gc0_config5() & MIPS_CONF5_MSAEN) || |
|---|
| 1526 | 1609 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { |
|---|
| 1527 | | - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
|---|
| 1610 | + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
|---|
| 1528 | 1611 | return RESUME_HOST; |
|---|
| 1529 | 1612 | } |
|---|
| 1530 | 1613 | |
|---|
| .. | .. |
|---|
| 1560 | 1643 | } |
|---|
| 1561 | 1644 | |
|---|
| 1562 | 1645 | /* Treat as MMIO */ |
|---|
| 1563 | | - er = kvm_mips_emulate_load(inst, cause, run, vcpu); |
|---|
| 1646 | + er = kvm_mips_emulate_load(inst, cause, vcpu); |
|---|
| 1564 | 1647 | if (er == EMULATE_FAIL) { |
|---|
| 1565 | 1648 | kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n", |
|---|
| 1566 | 1649 | opc, badvaddr); |
|---|
| .. | .. |
|---|
| 1607 | 1690 | } |
|---|
| 1608 | 1691 | |
|---|
| 1609 | 1692 | /* Treat as MMIO */ |
|---|
| 1610 | | - er = kvm_mips_emulate_store(inst, cause, run, vcpu); |
|---|
| 1693 | + er = kvm_mips_emulate_store(inst, cause, vcpu); |
|---|
| 1611 | 1694 | if (er == EMULATE_FAIL) { |
|---|
| 1612 | 1695 | kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n", |
|---|
| 1613 | 1696 | opc, badvaddr); |
|---|
| .. | .. |
|---|
| 1652 | 1735 | KVM_REG_MIPS_CP0_CONFIG3, |
|---|
| 1653 | 1736 | KVM_REG_MIPS_CP0_CONFIG4, |
|---|
| 1654 | 1737 | KVM_REG_MIPS_CP0_CONFIG5, |
|---|
| 1738 | + KVM_REG_MIPS_CP0_CONFIG6, |
|---|
| 1655 | 1739 | #ifdef CONFIG_64BIT |
|---|
| 1656 | 1740 | KVM_REG_MIPS_CP0_XCONTEXT, |
|---|
| 1657 | 1741 | #endif |
|---|
| .. | .. |
|---|
| 1706 | 1790 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); |
|---|
| 1707 | 1791 | if (cpu_guest_has_segments) |
|---|
| 1708 | 1792 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments); |
|---|
| 1709 | | - if (cpu_guest_has_htw) |
|---|
| 1793 | + if (cpu_guest_has_htw || cpu_guest_has_ldpte) |
|---|
| 1710 | 1794 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw); |
|---|
| 1711 | 1795 | if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) |
|---|
| 1712 | 1796 | ret += 1 + ARRAY_SIZE(vcpu->arch.maar); |
|---|
| .. | .. |
|---|
| 1755 | 1839 | return -EFAULT; |
|---|
| 1756 | 1840 | indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments); |
|---|
| 1757 | 1841 | } |
|---|
| 1758 | | - if (cpu_guest_has_htw) { |
|---|
| 1842 | + if (cpu_guest_has_htw || cpu_guest_has_ldpte) { |
|---|
| 1759 | 1843 | if (copy_to_user(indices, kvm_vz_get_one_regs_htw, |
|---|
| 1760 | 1844 | sizeof(kvm_vz_get_one_regs_htw))) |
|---|
| 1761 | 1845 | return -EFAULT; |
|---|
| .. | .. |
|---|
| 1878 | 1962 | *v = read_gc0_segctl2(); |
|---|
| 1879 | 1963 | break; |
|---|
| 1880 | 1964 | case KVM_REG_MIPS_CP0_PWBASE: |
|---|
| 1881 | | - if (!cpu_guest_has_htw) |
|---|
| 1965 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 1882 | 1966 | return -EINVAL; |
|---|
| 1883 | 1967 | *v = read_gc0_pwbase(); |
|---|
| 1884 | 1968 | break; |
|---|
| 1885 | 1969 | case KVM_REG_MIPS_CP0_PWFIELD: |
|---|
| 1886 | | - if (!cpu_guest_has_htw) |
|---|
| 1970 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 1887 | 1971 | return -EINVAL; |
|---|
| 1888 | 1972 | *v = read_gc0_pwfield(); |
|---|
| 1889 | 1973 | break; |
|---|
| 1890 | 1974 | case KVM_REG_MIPS_CP0_PWSIZE: |
|---|
| 1891 | | - if (!cpu_guest_has_htw) |
|---|
| 1975 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 1892 | 1976 | return -EINVAL; |
|---|
| 1893 | 1977 | *v = read_gc0_pwsize(); |
|---|
| 1894 | 1978 | break; |
|---|
| .. | .. |
|---|
| 1896 | 1980 | *v = (long)read_gc0_wired(); |
|---|
| 1897 | 1981 | break; |
|---|
| 1898 | 1982 | case KVM_REG_MIPS_CP0_PWCTL: |
|---|
| 1899 | | - if (!cpu_guest_has_htw) |
|---|
| 1983 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 1900 | 1984 | return -EINVAL; |
|---|
| 1901 | 1985 | *v = read_gc0_pwctl(); |
|---|
| 1902 | 1986 | break; |
|---|
| .. | .. |
|---|
| 1946 | 2030 | default: |
|---|
| 1947 | 2031 | *v = (long)kvm_read_c0_guest_prid(cop0); |
|---|
| 1948 | 2032 | break; |
|---|
| 1949 | | - }; |
|---|
| 2033 | + } |
|---|
| 1950 | 2034 | break; |
|---|
| 1951 | 2035 | case KVM_REG_MIPS_CP0_EBASE: |
|---|
| 1952 | 2036 | *v = kvm_vz_read_gc0_ebase(); |
|---|
| .. | .. |
|---|
| 1978 | 2062 | if (!cpu_guest_has_conf5) |
|---|
| 1979 | 2063 | return -EINVAL; |
|---|
| 1980 | 2064 | *v = read_gc0_config5(); |
|---|
| 2065 | + break; |
|---|
| 2066 | + case KVM_REG_MIPS_CP0_CONFIG6: |
|---|
| 2067 | + *v = kvm_read_sw_gc0_config6(cop0); |
|---|
| 1981 | 2068 | break; |
|---|
| 1982 | 2069 | case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): |
|---|
| 1983 | 2070 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) |
|---|
| .. | .. |
|---|
| 2101 | 2188 | write_gc0_segctl2(v); |
|---|
| 2102 | 2189 | break; |
|---|
| 2103 | 2190 | case KVM_REG_MIPS_CP0_PWBASE: |
|---|
| 2104 | | - if (!cpu_guest_has_htw) |
|---|
| 2191 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 2105 | 2192 | return -EINVAL; |
|---|
| 2106 | 2193 | write_gc0_pwbase(v); |
|---|
| 2107 | 2194 | break; |
|---|
| 2108 | 2195 | case KVM_REG_MIPS_CP0_PWFIELD: |
|---|
| 2109 | | - if (!cpu_guest_has_htw) |
|---|
| 2196 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 2110 | 2197 | return -EINVAL; |
|---|
| 2111 | 2198 | write_gc0_pwfield(v); |
|---|
| 2112 | 2199 | break; |
|---|
| 2113 | 2200 | case KVM_REG_MIPS_CP0_PWSIZE: |
|---|
| 2114 | | - if (!cpu_guest_has_htw) |
|---|
| 2201 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 2115 | 2202 | return -EINVAL; |
|---|
| 2116 | 2203 | write_gc0_pwsize(v); |
|---|
| 2117 | 2204 | break; |
|---|
| .. | .. |
|---|
| 2119 | 2206 | change_gc0_wired(MIPSR6_WIRED_WIRED, v); |
|---|
| 2120 | 2207 | break; |
|---|
| 2121 | 2208 | case KVM_REG_MIPS_CP0_PWCTL: |
|---|
| 2122 | | - if (!cpu_guest_has_htw) |
|---|
| 2209 | + if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) |
|---|
| 2123 | 2210 | return -EINVAL; |
|---|
| 2124 | 2211 | write_gc0_pwctl(v); |
|---|
| 2125 | 2212 | break; |
|---|
| .. | .. |
|---|
| 2185 | 2272 | default: |
|---|
| 2186 | 2273 | kvm_write_c0_guest_prid(cop0, v); |
|---|
| 2187 | 2274 | break; |
|---|
| 2188 | | - }; |
|---|
| 2275 | + } |
|---|
| 2189 | 2276 | break; |
|---|
| 2190 | 2277 | case KVM_REG_MIPS_CP0_EBASE: |
|---|
| 2191 | 2278 | kvm_vz_write_gc0_ebase(v); |
|---|
| .. | .. |
|---|
| 2246 | 2333 | if (change) { |
|---|
| 2247 | 2334 | v = cur ^ change; |
|---|
| 2248 | 2335 | write_gc0_config5(v); |
|---|
| 2336 | + } |
|---|
| 2337 | + break; |
|---|
| 2338 | + case KVM_REG_MIPS_CP0_CONFIG6: |
|---|
| 2339 | + cur = kvm_read_sw_gc0_config6(cop0); |
|---|
| 2340 | + change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu); |
|---|
| 2341 | + if (change) { |
|---|
| 2342 | + v = cur ^ change; |
|---|
| 2343 | + kvm_write_sw_gc0_config6(cop0, (int)v); |
|---|
| 2249 | 2344 | } |
|---|
| 2250 | 2345 | break; |
|---|
| 2251 | 2346 | case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): |
|---|
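With `KVM_REG_MIPS_CP0_CONFIG6` added to the register list and to the get/set handlers, the VMM can read the soft Config6 copy and flip the bits allowed by `kvm_vz_config6_user_wrmask()` (timer enables plus SFB/FTLB control); anything outside the mask is silently preserved by the `cur ^ change` logic above. A hypothetical userspace sketch of driving this through the ONE_REG API — the register id is constructed by hand because the kernel constant is not exported, and the encoding (CP0 register 16, select 6, 32-bit like the other Config registers) is an assumption to check against Documentation/virt/kvm/api.rst:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumed ONE_REG id for CP0 Config6: 0x10000 | (reg << 3) | sel. */
#define ONE_REG_MIPS_CP0_CONFIG6 \
	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000ULL | (16 << 3) | 6)

static int vcpu_set_config6_bits(int vcpu_fd, uint32_t set_mask)
{
	uint32_t config6;
	struct kvm_one_reg reg = {
		.id   = ONE_REG_MIPS_CP0_CONFIG6,
		.addr = (uintptr_t)&config6,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	config6 |= set_mask;	/* only bits in the user wrmask will stick */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0 ? -1 : 0;
}
```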
| .. | .. |
|---|
| 2454 | 2549 | * Root ASID dealiases guest GPA mappings in the root TLB. |
|---|
| 2455 | 2550 | * Allocate new root ASID if needed. |
|---|
| 2456 | 2551 | */ |
|---|
| 2457 | | - if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask) |
|---|
| 2458 | | - || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) & |
|---|
| 2459 | | - asid_version_mask(cpu)) |
|---|
| 2460 | | - get_new_mmu_context(gpa_mm, cpu); |
|---|
| 2552 | + if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) |
|---|
| 2553 | + get_new_mmu_context(gpa_mm); |
|---|
| 2554 | + else |
|---|
| 2555 | + check_mmu_context(gpa_mm); |
|---|
| 2461 | 2556 | } |
|---|
| 2462 | 2557 | } |
|---|
| 2463 | 2558 | |
|---|
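The ASID handling for the GPA context is rebased onto the generic MIPS mmu_context helpers: an explicit flush request still forces a fresh root ASID via `get_new_mmu_context()`, and otherwise `check_mmu_context()` performs the ASID-generation test that used to be open-coded here. Roughly, the helper boils down to the removed condition (an approximation of what lives in `asm/mmu_context.h`, ignoring the MemoryMapID path, not the exact implementation):

```c
/* Approximation of check_mmu_context() for reference. */
static inline void check_mmu_context_sketch(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/* ASID belongs to an old generation on this CPU? Allocate a new one. */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
		get_new_mmu_context(mm);
}
```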
| .. | .. |
|---|
| 2580 | 2675 | } |
|---|
| 2581 | 2676 | |
|---|
| 2582 | 2677 | /* restore HTW registers */ |
|---|
| 2583 | | - if (cpu_guest_has_htw) { |
|---|
| 2678 | + if (cpu_guest_has_htw || cpu_guest_has_ldpte) { |
|---|
| 2584 | 2679 | kvm_restore_gc0_pwbase(cop0); |
|---|
| 2585 | 2680 | kvm_restore_gc0_pwfield(cop0); |
|---|
| 2586 | 2681 | kvm_restore_gc0_pwsize(cop0); |
|---|
| .. | .. |
|---|
| 2597 | 2692 | * prevents a SC on the next VCPU from succeeding by matching a LL on |
|---|
| 2598 | 2693 | * the previous VCPU. |
|---|
| 2599 | 2694 | */ |
|---|
| 2600 | | - if (cpu_guest_has_rw_llb) |
|---|
| 2695 | + if (vcpu->kvm->created_vcpus > 1) |
|---|
| 2601 | 2696 | write_gc0_lladdr(0); |
|---|
| 2602 | 2697 | |
|---|
| 2603 | 2698 | return 0; |
|---|
| .. | .. |
|---|
| 2685 | 2780 | } |
|---|
| 2686 | 2781 | |
|---|
| 2687 | 2782 | /* save HTW registers if enabled in guest */ |
|---|
| 2688 | | - if (cpu_guest_has_htw && |
|---|
| 2689 | | - kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) { |
|---|
| 2783 | + if (cpu_guest_has_ldpte || (cpu_guest_has_htw && |
|---|
| 2784 | + kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) { |
|---|
| 2690 | 2785 | kvm_save_gc0_pwbase(cop0); |
|---|
| 2691 | 2786 | kvm_save_gc0_pwfield(cop0); |
|---|
| 2692 | 2787 | kvm_save_gc0_pwsize(cop0); |
|---|
| .. | .. |
|---|
| 2853 | 2948 | write_c0_guestctl0(MIPS_GCTL0_CP0 | |
|---|
| 2854 | 2949 | (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) | |
|---|
| 2855 | 2950 | MIPS_GCTL0_CG | MIPS_GCTL0_CF); |
|---|
| 2856 | | - if (cpu_has_guestctl0ext) |
|---|
| 2857 | | - set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); |
|---|
| 2951 | + if (cpu_has_guestctl0ext) { |
|---|
| 2952 | + if (current_cpu_type() != CPU_LOONGSON64) |
|---|
| 2953 | + set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); |
|---|
| 2954 | + else |
|---|
| 2955 | + clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); |
|---|
| 2956 | + } |
|---|
| 2858 | 2957 | |
|---|
| 2859 | 2958 | if (cpu_has_guestid) { |
|---|
| 2860 | 2959 | write_c0_guestctl1(0); |
|---|
| .. | .. |
|---|
| 2870 | 2969 | /* clear any pending injected virtual guest interrupts */ |
|---|
| 2871 | 2970 | if (cpu_has_guestctl2) |
|---|
| 2872 | 2971 | clear_c0_guestctl2(0x3f << 10); |
|---|
| 2972 | + |
|---|
| 2973 | +#ifdef CONFIG_CPU_LOONGSON64 |
|---|
| 2974 | + /* Control guest CCA attribute */ |
|---|
| 2975 | + if (cpu_has_csr()) |
|---|
| 2976 | + csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec); |
|---|
| 2977 | +#endif |
|---|
| 2873 | 2978 | |
|---|
| 2874 | 2979 | return 0; |
|---|
| 2875 | 2980 | } |
|---|
| .. | .. |
|---|
| 2927 | 3032 | r = 2; |
|---|
| 2928 | 3033 | break; |
|---|
| 2929 | 3034 | #endif |
|---|
| 3035 | + case KVM_CAP_IOEVENTFD: |
|---|
| 3036 | + r = 1; |
|---|
| 3037 | + break; |
|---|
| 2930 | 3038 | default: |
|---|
| 2931 | 3039 | r = 0; |
|---|
| 2932 | 3040 | break; |
|---|
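Reporting `KVM_CAP_IOEVENTFD` as supported lets userspace bind an eventfd to a guest-physical MMIO range, so frequent doorbell writes (virtio queue notifications, for instance) complete in the kernel instead of bouncing out through a full MMIO exit. A hypothetical userspace sketch, with a caller-supplied guest address:

```c
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <linux/kvm.h>

/* Register an eventfd that fires on any 4-byte write to the given GPA. */
static int register_mmio_doorbell(int vm_fd, uint64_t gpa)
{
	struct kvm_ioeventfd ioev = {
		.addr  = gpa,		/* e.g. a virtio-mmio QueueNotify register */
		.len   = 4,
		.fd    = eventfd(0, EFD_NONBLOCK),
		.flags = 0,		/* no datamatch: any written value triggers */
	};

	if (ioev.fd < 0 || ioctl(vm_fd, KVM_IOEVENTFD, &ioev) < 0)
		return -1;
	return ioev.fd;		/* poll this instead of handling MMIO exits */
}
```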
| .. | .. |
|---|
| 2980 | 3088 | */ |
|---|
| 2981 | 3089 | |
|---|
| 2982 | 3090 | /* PageGrain */ |
|---|
| 2983 | | - if (cpu_has_mips_r6) |
|---|
| 3091 | + if (cpu_has_mips_r5 || cpu_has_mips_r6) |
|---|
| 2984 | 3092 | kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC); |
|---|
| 2985 | 3093 | /* Wired */ |
|---|
| 2986 | 3094 | if (cpu_has_mips_r6) |
|---|
| .. | .. |
|---|
| 2988 | 3096 | read_gc0_wired() & MIPSR6_WIRED_LIMIT); |
|---|
| 2989 | 3097 | /* Status */ |
|---|
| 2990 | 3098 | kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL); |
|---|
| 2991 | | - if (cpu_has_mips_r6) |
|---|
| 3099 | + if (cpu_has_mips_r5 || cpu_has_mips_r6) |
|---|
| 2992 | 3100 | kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status()); |
|---|
| 2993 | 3101 | /* IntCtl */ |
|---|
| 2994 | 3102 | kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() & |
|---|
| .. | .. |
|---|
| 3086 | 3194 | } |
|---|
| 3087 | 3195 | |
|---|
| 3088 | 3196 | /* reset HTW registers */ |
|---|
| 3089 | | - if (cpu_guest_has_htw && cpu_has_mips_r6) { |
|---|
| 3197 | + if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) { |
|---|
| 3090 | 3198 | /* PWField */ |
|---|
| 3091 | 3199 | kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302); |
|---|
| 3092 | 3200 | /* PWSize */ |
|---|
| .. | .. |
|---|
| 3129 | 3237 | kvm_vz_flush_shadow_all(kvm); |
|---|
| 3130 | 3238 | } |
|---|
| 3131 | 3239 | |
|---|
| 3132 | | -static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu) |
|---|
| 3240 | +static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu) |
|---|
| 3133 | 3241 | { |
|---|
| 3134 | 3242 | int cpu = smp_processor_id(); |
|---|
| 3135 | 3243 | int preserve_guest_tlb; |
|---|
| .. | .. |
|---|
| 3145 | 3253 | kvm_vz_vcpu_load_wired(vcpu); |
|---|
| 3146 | 3254 | } |
|---|
| 3147 | 3255 | |
|---|
| 3148 | | -static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) |
|---|
| 3256 | +static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu) |
|---|
| 3149 | 3257 | { |
|---|
| 3150 | 3258 | int cpu = smp_processor_id(); |
|---|
| 3151 | 3259 | int r; |
|---|
| .. | .. |
|---|
| 3158 | 3266 | kvm_vz_vcpu_load_tlb(vcpu, cpu); |
|---|
| 3159 | 3267 | kvm_vz_vcpu_load_wired(vcpu); |
|---|
| 3160 | 3268 | |
|---|
| 3161 | | - r = vcpu->arch.vcpu_run(run, vcpu); |
|---|
| 3269 | + r = vcpu->arch.vcpu_run(vcpu); |
|---|
| 3162 | 3270 | |
|---|
| 3163 | 3271 | kvm_vz_vcpu_save_wired(vcpu); |
|---|
| 3164 | 3272 | |
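The remaining churn in this file — dropping the `struct kvm_run *run` parameter from `kvm_vz_gpsi_cop0()`, `kvm_vz_gpsi_cache()`, the MMIO load/store emulation calls, `kvm_vz_vcpu_reenter()` and `kvm_vz_vcpu_run()` — follows the tree-wide cleanup that passes only the VCPU: the run structure is always reachable as `vcpu->run`, so handlers that still need it fetch it locally, e.g. (sketch; the function name is illustrative):

```c
/* Post-cleanup pattern for handlers that still touch the run structure. */
static int example_exit_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;	/* no longer passed as a parameter */

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}
```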
|---|