@@ -2,7 +2,7 @@
 /*
  * in-kernel handling for sie intercepts
  *
- * Copyright IBM Corp. 2008, 2014
+ * Copyright IBM Corp. 2008, 2020
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
@@ -12,10 +12,10 @@
 #include <linux/errno.h>
 #include <linux/pagemap.h>
 
-#include <asm/kvm_host.h>
 #include <asm/asm-offsets.h>
 #include <asm/irq.h>
 #include <asm/sysinfo.h>
+#include <asm/uv.h>
 
 #include "kvm-s390.h"
 #include "gaccess.h"
@@ -79,6 +79,10 @@
                         return rc;
         }
 
+        /*
+         * No need to check the return value of kvm_s390_vcpu_stop() as it can
+         * only fail for protvirt, but protvirt implies user cpu state.
+         */
         if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                 kvm_s390_vcpu_stop(vcpu);
         return -EOPNOTSUPP;
@@ -231,6 +235,13 @@
 
         vcpu->stat.exit_program_interruption++;
 
+        /*
+         * Intercept 8 indicates a loop of specification exceptions
+         * for protected guests.
+         */
+        if (kvm_s390_pv_cpu_is_protected(vcpu))
+                return -EOPNOTSUPP;
+
         if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
                 rc = kvm_s390_handle_per_event(vcpu);
                 if (rc)
@@ -259,10 +270,18 @@
 /**
  * handle_external_interrupt - used for external interruption interceptions
  *
- * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
- * the new PSW does not have external interrupts disabled. In the first case,
- * we've got to deliver the interrupt manually, and in the second case, we
- * drop to userspace to handle the situation there.
+ * This interception occurs if:
+ * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
+ *   occurred. In this case, the interrupt needs to be injected manually to
+ *   preserve interrupt priority.
+ * - the external new PSW has external interrupts enabled, which will cause an
+ *   interruption loop. We drop to userspace in this case.
+ *
+ * The latter case can be detected by inspecting the external mask bit in the
+ * external new PSW.
+ *
+ * Under PV, only the latter case can occur, since interrupt priorities are
+ * handled in the ultravisor.
  */
 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
@@ -273,10 +292,18 @@
 
         vcpu->stat.exit_external_interrupt++;
 
-        rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
-        if (rc)
-                return rc;
-        /* We can not handle clock comparator or timer interrupt with bad PSW */
+        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+                newpsw = vcpu->arch.sie_block->gpsw;
+        } else {
+                rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
+                if (rc)
+                        return rc;
+        }
+
+        /*
+         * A clock comparator or timer interrupt with external interrupts
+         * enabled will cause an interrupt loop. Drop to userspace.
+         */
         if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
             (newpsw.mask & PSW_MASK_EXT))
                 return -EOPNOTSUPP;
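
As an aside, the loop detection described in the function comment above boils down to a single predicate over the external interrupt code and the external-new PSW. A minimal sketch, assuming the symbols the hunk already uses (EXT_IRQ_CLK_COMP, EXT_IRQ_CPU_TIMER, PSW_MASK_EXT from the arch headers); the helper name is invented here for illustration and is not part of the patch:

/*
 * Illustration only: a clock comparator or CPU timer condition stays
 * pending after delivery, so if the external-new PSW keeps external
 * interrupts enabled, the same interrupt would fire again immediately.
 */
static bool ext_irq_would_loop(u16 eic, const psw_t *newpsw)
{
        return (eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
               (newpsw->mask & PSW_MASK_EXT);
}

This is exactly the condition the hunk checks before returning -EOPNOTSUPP to drop to userspace.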
@@ -360,8 +387,8 @@
  */
 int handle_sthyi(struct kvm_vcpu *vcpu)
 {
-        int reg1, reg2, r = 0;
-        u64 code, addr, cc = 0, rc = 0;
+        int reg1, reg2, cc = 0, r = 0;
+        u64 code, addr, rc = 0;
         struct sthyi_sctns *sctns = NULL;
 
         if (!test_kvm_facility(vcpu->kvm, 74))
@@ -384,7 +411,7 @@
                 goto out;
         }
 
-        if (addr & ~PAGE_MASK)
+        if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
         sctns = (void *)get_zeroed_page(GFP_KERNEL);
@@ -392,13 +419,21 @@
                 return -ENOMEM;
 
         cc = sthyi_fill(sctns, &rc);
-
+        if (cc < 0) {
+                free_page((unsigned long)sctns);
+                return cc;
+        }
 out:
         if (!cc) {
-                r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
-                if (r) {
-                        free_page((unsigned long)sctns);
-                        return kvm_s390_inject_prog_cond(vcpu, r);
+                if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+                        memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
+                               sctns, PAGE_SIZE);
+                } else {
+                        r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
+                        if (r) {
+                                free_page((unsigned long)sctns);
+                                return kvm_s390_inject_prog_cond(vcpu, r);
+                        }
                 }
         }
 
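
The reworked out: path captures the protvirt difference for STHYI: protected guest memory cannot be written with write_guest(), so the 4 KiB response page is copied into the SIE block's satellite data area (SIDA) instead, and the address alignment check above is skipped for protected guests because addr is not used there. A minimal sketch of that split, assuming the helpers the hunk itself uses (kvm_s390_pv_cpu_is_protected(), sida_origin(), write_guest()); the wrapper name is invented:

/*
 * Illustration of the response-copy split used above: protected guests
 * receive the page through the SIDA, all others through write_guest();
 * for the latter the caller turns an error into a program interrupt,
 * as the hunk does with kvm_s390_inject_prog_cond().
 */
static int sthyi_copy_response(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
                               void *sctns)
{
        if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                memcpy((void *)sida_origin(vcpu->arch.sie_block),
                       sctns, PAGE_SIZE);
                return 0;
        }
        return write_guest(vcpu, addr, ar, sctns, PAGE_SIZE);
}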
@@ -444,6 +479,92 @@
         return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 }
 
+static int handle_pv_spx(struct kvm_vcpu *vcpu)
+{
+        u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
+
+        kvm_s390_set_prefix(vcpu, pref);
+        trace_kvm_s390_handle_prefix(vcpu, 1, pref);
+        return 0;
+}
+
+static int handle_pv_sclp(struct kvm_vcpu *vcpu)
+{
+        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+
+        spin_lock(&fi->lock);
+        /*
+         * Two cases:
+         * a: an SCCB answering interrupt was already pending or in flight.
+         *    As the SCCB value is not known we can simply set some value to
+         *    trigger delivery of a saved SCCB. UV will then use its saved
+         *    copy of the SCCB value.
+         * b: an error SCCB interrupt needs to be injected, so we also inject
+         *    a fake SCCB address. Firmware will use the proper one.
+         * This makes sure that both errors and real SCCB returns are only
+         * delivered after a notification intercept (i.e. the instruction has
+         * finished), but not after other intercepts.
+         */
+        fi->srv_signal.ext_params |= 0x43000;
+        set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
+        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
+        spin_unlock(&fi->lock);
+        return 0;
+}
+
+static int handle_pv_uvc(struct kvm_vcpu *vcpu)
+{
+        struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
+        struct uv_cb_cts uvcb = {
+                .header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
+                .header.len = sizeof(uvcb),
+                .guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
+                .gaddr = guest_uvcb->paddr,
+        };
+        int rc;
+
+        if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS) {
+                WARN_ONCE(1, "Unexpected notification intercept for UVC 0x%x\n",
+                          guest_uvcb->header.cmd);
+                return 0;
+        }
+        rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
+        /*
+         * If the unpin did not succeed, the guest will exit again for the UVC
+         * and we will retry the unpin.
+         */
+        if (rc == -EINVAL)
+                return 0;
+        return rc;
+}
+
+static int handle_pv_notification(struct kvm_vcpu *vcpu)
+{
+        int ret;
+
+        if (vcpu->arch.sie_block->ipa == 0xb210)
+                return handle_pv_spx(vcpu);
+        if (vcpu->arch.sie_block->ipa == 0xb220)
+                return handle_pv_sclp(vcpu);
+        if (vcpu->arch.sie_block->ipa == 0xb9a4)
+                return handle_pv_uvc(vcpu);
+        if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
+                /*
+                 * Besides external call, other SIGP orders also cause a
+                 * 108 (pv notify) intercept. In contrast to external call,
+                 * these orders need to be emulated and hence the appropriate
+                 * place to handle them is in handle_instruction().
+                 * So first try kvm_s390_handle_sigp_pei() and if that isn't
+                 * successful, go on with handle_instruction().
+                 */
+                ret = kvm_s390_handle_sigp_pei(vcpu);
+                if (!ret)
+                        return ret;
+        }
+
+        return handle_instruction(vcpu);
+}
+
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 {
         int rc, per_rc = 0;
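
For readers not fluent in s390 opcodes: the IPA values tested in handle_pv_notification() identify the intercepted instruction, with 0xb210 being SET PREFIX, 0xb220 SERVICE CALL (the SCLP interface), 0xb9a4 the ultravisor call that handle_pv_uvc() deals with, and an 0xae high byte marking a SIGP order. A hypothetical rewrite with named constants (the macro names below are invented; the patch uses the literals directly) performs the same dispatch:

/* Invented names for the opcode literals used in the hunk above. */
#define IPA_SPX         0xb210  /* SET PREFIX */
#define IPA_SERVC       0xb220  /* SERVICE CALL (SCLP) */
#define IPA_UVC         0xb9a4  /* ultravisor call */
#define IPA_SIGP        0xae    /* SIGP, high byte of the IPA */

static int handle_pv_notification_alt(struct kvm_vcpu *vcpu)
{
        u16 ipa = vcpu->arch.sie_block->ipa;
        int ret;

        switch (ipa) {
        case IPA_SPX:
                return handle_pv_spx(vcpu);
        case IPA_SERVC:
                return handle_pv_sclp(vcpu);
        case IPA_UVC:
                return handle_pv_uvc(vcpu);
        }
        if (ipa >> 8 == IPA_SIGP) {
                /* external call is handled here, other orders are emulated */
                ret = kvm_s390_handle_sigp_pei(vcpu);
                if (!ret)
                        return ret;
        }
        return handle_instruction(vcpu);
}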
@@ -480,6 +601,28 @@
         case ICPT_KSS:
                 rc = kvm_s390_skey_check_enable(vcpu);
                 break;
+        case ICPT_MCHKREQ:
+        case ICPT_INT_ENABLE:
+                /*
+                 * PSW bit 13 or a CR (0, 6, 14) changed and we might
+                 * now be able to deliver interrupts. The pre-run code
+                 * will take care of this.
+                 */
+                rc = 0;
+                break;
+        case ICPT_PV_INSTR:
+                rc = handle_instruction(vcpu);
+                break;
+        case ICPT_PV_NOTIFY:
+                rc = handle_pv_notification(vcpu);
+                break;
+        case ICPT_PV_PREF:
+                rc = 0;
+                gmap_convert_to_secure(vcpu->arch.gmap,
+                                       kvm_s390_get_prefix(vcpu));
+                gmap_convert_to_secure(vcpu->arch.gmap,
+                                       kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
+                break;
         default:
                 return -EOPNOTSUPP;
         }
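
One note on the new ICPT_PV_PREF case: the prefix (lowcore) area of an s390 CPU spans 8 KiB, i.e. two 4 KiB pages, which is why both pages are converted back to secure before the protected guest is re-entered. A hypothetical wrapper spelling that out, using the same gmap_convert_to_secure() and kvm_s390_get_prefix() calls as the hunk (the helper name is invented):

/* Illustration of the ICPT_PV_PREF handling shown above. */
static void pv_make_prefix_secure(struct kvm_vcpu *vcpu)
{
        unsigned long prefix = kvm_s390_get_prefix(vcpu);

        /* The prefix area covers two pages; both must be secure again. */
        gmap_convert_to_secure(vcpu->arch.gmap, prefix);
        gmap_convert_to_secure(vcpu->arch.gmap, prefix + PAGE_SIZE);
}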