.. | .. |
---|
2 | 2 | /* |
---|
3 | 3 | * handling kvm guest interrupts |
---|
4 | 4 | * |
---|
5 | | - * Copyright IBM Corp. 2008, 2015 |
---|
| 5 | + * Copyright IBM Corp. 2008, 2020 |
---|
6 | 6 | * |
---|
7 | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
---|
8 | 8 | */ |
---|
| 9 | + |
---|
| 10 | +#define KMSG_COMPONENT "kvm-s390" |
---|
| 11 | +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
---|
9 | 12 | |
---|
10 | 13 | #include <linux/interrupt.h> |
---|
11 | 14 | #include <linux/kvm_host.h> |
---|
12 | 15 | #include <linux/hrtimer.h> |
---|
13 | 16 | #include <linux/mmu_context.h> |
---|
| 17 | +#include <linux/nospec.h> |
---|
14 | 18 | #include <linux/signal.h> |
---|
15 | 19 | #include <linux/slab.h> |
---|
16 | 20 | #include <linux/bitmap.h> |
---|
.. | .. |
---|
23 | 27 | #include <asm/gmap.h> |
---|
24 | 28 | #include <asm/switch_to.h> |
---|
25 | 29 | #include <asm/nmi.h> |
---|
| 30 | +#include <asm/airq.h> |
---|
26 | 31 | #include "kvm-s390.h" |
---|
27 | 32 | #include "gaccess.h" |
---|
28 | 33 | #include "trace-s390.h" |
---|
.. | .. |
---|
30 | 35 | #define PFAULT_INIT 0x0600 |
---|
31 | 36 | #define PFAULT_DONE 0x0680 |
---|
32 | 37 | #define VIRTIO_PARAM 0x0d00 |
---|
| 38 | + |
---|
| 39 | +static struct kvm_s390_gib *gib; |
---|
33 | 40 | |
---|
34 | 41 | /* handle external calls via sigp interpretation facility */ |
---|
35 | 42 | static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id) |
---|
.. | .. |
---|
217 | 224 | */ |
---|
218 | 225 | #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE) |
---|
219 | 226 | |
---|
220 | | -static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
| 227 | +/** |
---|
| 228 | + * gisa_set_iam - change the GISA interruption alert mask |
---|
| 229 | + * |
---|
| 230 | + * @gisa: gisa to operate on |
---|
| 231 | + * @iam: new IAM value to use |
---|
| 232 | + * |
---|
| 233 | + * Change the IAM atomically with the next alert address and the IPM |
---|
| 234 | + * of the GISA if the GISA is not part of the GIB alert list. All three |
---|
| 235 | + * fields are located in the first long word of the GISA. |
---|
| 236 | + * |
---|
| 237 | + * Returns: 0 on success |
---|
| 238 | + * -EBUSY in case the gisa is part of the alert list |
---|
| 239 | + */ |
---|
| 240 | +static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam) |
---|
| 241 | +{ |
---|
| 242 | + u64 word, _word; |
---|
| 243 | + |
---|
| 244 | + do { |
---|
| 245 | + word = READ_ONCE(gisa->u64.word[0]); |
---|
| 246 | + if ((u64)gisa != word >> 32) |
---|
| 247 | + return -EBUSY; |
---|
| 248 | + _word = (word & ~0xffUL) | iam; |
---|
| 249 | + } while (cmpxchg(&gisa->u64.word[0], word, _word) != word); |
---|
| 250 | + |
---|
| 251 | + return 0; |
---|
| 252 | +} |
---|
| 253 | + |
---|
| 254 | +/** |
---|
| 255 | + * gisa_clear_ipm - clear the GISA interruption pending mask |
---|
| 256 | + * |
---|
| 257 | + * @gisa: gisa to operate on |
---|
| 258 | + * |
---|
| 259 | + * Clear the IPM atomically with the next alert address and the IAM |
---|
| 260 | + * of the GISA unconditionally. All three fields are located in the |
---|
| 261 | + * first long word of the GISA. |
---|
| 262 | + */ |
---|
| 263 | +static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa) |
---|
| 264 | +{ |
---|
| 265 | + u64 word, _word; |
---|
| 266 | + |
---|
| 267 | + do { |
---|
| 268 | + word = READ_ONCE(gisa->u64.word[0]); |
---|
| 269 | + _word = word & ~(0xffUL << 24); |
---|
| 270 | + } while (cmpxchg(&gisa->u64.word[0], word, _word) != word); |
---|
| 271 | +} |
---|
| 272 | + |
---|
| 273 | +/** |
---|
| 274 | + * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM |
---|
| 275 | + * |
---|
| 276 | + * @gi: gisa interrupt struct to work on |
---|
| 277 | + * |
---|
| 278 | + * Atomically restores the interruption alert mask if none of the |
---|
| 279 | + * relevant ISCs are pending and returns the IPM. |
---|
| 280 | + * |
---|
| 281 | + * Returns: the relevant pending ISCs |
---|
| 282 | + */ |
---|
| 283 | +static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi) |
---|
| 284 | +{ |
---|
| 285 | + u8 pending_mask, alert_mask; |
---|
| 286 | + u64 word, _word; |
---|
| 287 | + |
---|
| 288 | + do { |
---|
| 289 | + word = READ_ONCE(gi->origin->u64.word[0]); |
---|
| 290 | + alert_mask = READ_ONCE(gi->alert.mask); |
---|
| 291 | + pending_mask = (u8)(word >> 24) & alert_mask; |
---|
| 292 | + if (pending_mask) |
---|
| 293 | + return pending_mask; |
---|
| 294 | + _word = (word & ~0xffUL) | alert_mask; |
---|
| 295 | + } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word); |
---|
| 296 | + |
---|
| 297 | + return 0; |
---|
| 298 | +} |
---|
| 299 | + |
---|
| 300 | +static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa) |
---|
| 301 | +{ |
---|
| 302 | + return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa; |
---|
| 303 | +} |
---|
| 304 | + |
---|
| 305 | +static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
221 | 306 | { |
---|
222 | 307 | set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
---|
223 | 308 | } |
---|
224 | 309 | |
---|
225 | | -static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa) |
---|
| 310 | +static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa) |
---|
226 | 311 | { |
---|
227 | 312 | return READ_ONCE(gisa->ipm); |
---|
228 | 313 | } |
---|
229 | 314 | |
---|
230 | | -static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
| 315 | +static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
231 | 316 | { |
---|
232 | 317 | clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
---|
233 | 318 | } |
---|
234 | 319 | |
---|
235 | | -static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
| 320 | +static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
236 | 321 | { |
---|
237 | 322 | return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
---|
238 | 323 | } |
---|
239 | 324 | |
---|
240 | 325 | static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu) |
---|
241 | 326 | { |
---|
242 | | - return vcpu->kvm->arch.float_int.pending_irqs | |
---|
243 | | - vcpu->arch.local_int.pending_irqs; |
---|
| 327 | + unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs | |
---|
| 328 | + vcpu->arch.local_int.pending_irqs; |
---|
| 329 | + |
---|
| 330 | + pending &= ~vcpu->kvm->arch.float_int.masked_irqs; |
---|
| 331 | + return pending; |
---|
244 | 332 | } |
---|
245 | 333 | |
---|
246 | 334 | static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) |
---|
247 | 335 | { |
---|
248 | | - return pending_irqs_no_gisa(vcpu) | |
---|
249 | | - kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7; |
---|
| 336 | + struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int; |
---|
| 337 | + unsigned long pending_mask; |
---|
| 338 | + |
---|
| 339 | + pending_mask = pending_irqs_no_gisa(vcpu); |
---|
| 340 | + if (gi->origin) |
---|
| 341 | + pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7; |
---|
| 342 | + return pending_mask; |
---|
250 | 343 | } |
---|
251 | 344 | |
---|
252 | 345 | static inline int isc_to_irq_type(unsigned long isc) |
---|
.. | .. |
---|
293 | 386 | __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); |
---|
294 | 387 | if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK)) |
---|
295 | 388 | __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); |
---|
296 | | - if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) |
---|
| 389 | + if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) { |
---|
297 | 390 | __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); |
---|
| 391 | + __clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask); |
---|
| 392 | + } |
---|
298 | 393 | if (psw_mchk_disabled(vcpu)) |
---|
299 | 394 | active_mask &= ~IRQ_PEND_MCHK_MASK; |
---|
| 395 | + /* PV guest cpus can have a single interruption injected at a time. */ |
---|
| 396 | + if (kvm_s390_pv_cpu_get_handle(vcpu) && |
---|
| 397 | + vcpu->arch.sie_block->iictl != IICTL_CODE_NONE) |
---|
| 398 | + active_mask &= ~(IRQ_PEND_EXT_II_MASK | |
---|
| 399 | + IRQ_PEND_IO_MASK | |
---|
| 400 | + IRQ_PEND_MCHK_MASK); |
---|
300 | 401 | /* |
---|
301 | 402 | * Check both floating and local interrupt's cr14 because |
---|
302 | 403 | * bit IRQ_PEND_MCHK_REP could be set in both cases. |
---|
.. | .. |
---|
318 | 419 | static void __set_cpu_idle(struct kvm_vcpu *vcpu) |
---|
319 | 420 | { |
---|
320 | 421 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT); |
---|
321 | | - set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask); |
---|
| 422 | + set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); |
---|
322 | 423 | } |
---|
323 | 424 | |
---|
324 | 425 | static void __unset_cpu_idle(struct kvm_vcpu *vcpu) |
---|
325 | 426 | { |
---|
326 | 427 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT); |
---|
327 | | - clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask); |
---|
| 428 | + clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); |
---|
328 | 429 | } |
---|
329 | 430 | |
---|
330 | 431 | static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) |
---|
.. | .. |
---|
345 | 446 | { |
---|
346 | 447 | if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK)) |
---|
347 | 448 | return; |
---|
348 | | - else if (psw_ioint_disabled(vcpu)) |
---|
| 449 | + if (psw_ioint_disabled(vcpu)) |
---|
349 | 450 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT); |
---|
350 | 451 | else |
---|
351 | 452 | vcpu->arch.sie_block->lctl |= LCTL_CR6; |
---|
.. | .. |
---|
353 | 454 | |
---|
354 | 455 | static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) |
---|
355 | 456 | { |
---|
356 | | - if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK)) |
---|
| 457 | + if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK)) |
---|
357 | 458 | return; |
---|
358 | 459 | if (psw_extint_disabled(vcpu)) |
---|
359 | 460 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); |
---|
.. | .. |
---|
363 | 464 | |
---|
364 | 465 | static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) |
---|
365 | 466 | { |
---|
366 | | - if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) |
---|
| 467 | + if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK)) |
---|
367 | 468 | return; |
---|
368 | 469 | if (psw_mchk_disabled(vcpu)) |
---|
369 | 470 | vcpu->arch.sie_block->ictl |= ICTL_LPSW; |
---|
.. | .. |
---|
389 | 490 | static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) |
---|
390 | 491 | { |
---|
391 | 492 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
392 | | - int rc; |
---|
| 493 | + int rc = 0; |
---|
393 | 494 | |
---|
394 | 495 | vcpu->stat.deliver_cputm++; |
---|
395 | 496 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, |
---|
396 | 497 | 0, 0); |
---|
397 | | - |
---|
398 | | - rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, |
---|
399 | | - (u16 *)__LC_EXT_INT_CODE); |
---|
400 | | - rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
401 | | - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
402 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
403 | | - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
404 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 498 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 499 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 500 | + vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER; |
---|
| 501 | + } else { |
---|
| 502 | + rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, |
---|
| 503 | + (u16 *)__LC_EXT_INT_CODE); |
---|
| 504 | + rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
| 505 | + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
| 506 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 507 | + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
| 508 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 509 | + } |
---|
405 | 510 | clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); |
---|
406 | 511 | return rc ? -EFAULT : 0; |
---|
407 | 512 | } |
---|
.. | .. |
---|
409 | 514 | static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) |
---|
410 | 515 | { |
---|
411 | 516 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
412 | | - int rc; |
---|
| 517 | + int rc = 0; |
---|
413 | 518 | |
---|
414 | 519 | vcpu->stat.deliver_ckc++; |
---|
415 | 520 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, |
---|
416 | 521 | 0, 0); |
---|
417 | | - |
---|
418 | | - rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, |
---|
419 | | - (u16 __user *)__LC_EXT_INT_CODE); |
---|
420 | | - rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
421 | | - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
422 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
423 | | - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
424 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 522 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 523 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 524 | + vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP; |
---|
| 525 | + } else { |
---|
| 526 | + rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, |
---|
| 527 | + (u16 __user *)__LC_EXT_INT_CODE); |
---|
| 528 | + rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
| 529 | + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
| 530 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 531 | + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
| 532 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 533 | + } |
---|
425 | 534 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
---|
426 | 535 | return rc ? -EFAULT : 0; |
---|
427 | 536 | } |
---|
.. | .. |
---|
462 | 571 | freg_t fprs[NUM_FPRS]; |
---|
463 | 572 | union mci mci; |
---|
464 | 573 | int rc; |
---|
| 574 | + |
---|
| 575 | + /* |
---|
| 576 | + * All other possible payload for a machine check (e.g. the register |
---|
| 577 | + * contents in the save area) will be handled by the ultravisor, as |
---|
| 578 | + * the hypervisor does not have the needed information for |
---|
| 579 | + * protected guests. |
---|
| 580 | + */ |
---|
| 581 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 582 | + vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK; |
---|
| 583 | + vcpu->arch.sie_block->mcic = mchk->mcic; |
---|
| 584 | + vcpu->arch.sie_block->faddr = mchk->failing_storage_address; |
---|
| 585 | + vcpu->arch.sie_block->edc = mchk->ext_damage_code; |
---|
| 586 | + return 0; |
---|
| 587 | + } |
---|
465 | 588 | |
---|
466 | 589 | mci.val = mchk->mcic; |
---|
467 | 590 | /* take care of lazy register loading */ |
---|
.. | .. |
---|
606 | 729 | static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) |
---|
607 | 730 | { |
---|
608 | 731 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
609 | | - int rc; |
---|
| 732 | + int rc = 0; |
---|
610 | 733 | |
---|
611 | 734 | VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart"); |
---|
612 | 735 | vcpu->stat.deliver_restart_signal++; |
---|
613 | 736 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); |
---|
614 | 737 | |
---|
615 | | - rc = write_guest_lc(vcpu, |
---|
616 | | - offsetof(struct lowcore, restart_old_psw), |
---|
617 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
618 | | - rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw), |
---|
619 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 738 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 739 | + vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART; |
---|
| 740 | + } else { |
---|
| 741 | + rc = write_guest_lc(vcpu, |
---|
| 742 | + offsetof(struct lowcore, restart_old_psw), |
---|
| 743 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 744 | + rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw), |
---|
| 745 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 746 | + } |
---|
620 | 747 | clear_bit(IRQ_PEND_RESTART, &li->pending_irqs); |
---|
621 | 748 | return rc ? -EFAULT : 0; |
---|
622 | 749 | } |
---|
.. | .. |
---|
658 | 785 | vcpu->stat.deliver_emergency_signal++; |
---|
659 | 786 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, |
---|
660 | 787 | cpu_addr, 0); |
---|
| 788 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 789 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 790 | + vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG; |
---|
| 791 | + vcpu->arch.sie_block->extcpuaddr = cpu_addr; |
---|
| 792 | + return 0; |
---|
| 793 | + } |
---|
661 | 794 | |
---|
662 | 795 | rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, |
---|
663 | 796 | (u16 *)__LC_EXT_INT_CODE); |
---|
.. | .. |
---|
686 | 819 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
---|
687 | 820 | KVM_S390_INT_EXTERNAL_CALL, |
---|
688 | 821 | extcall.code, 0); |
---|
| 822 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 823 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 824 | + vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL; |
---|
| 825 | + vcpu->arch.sie_block->extcpuaddr = extcall.code; |
---|
| 826 | + return 0; |
---|
| 827 | + } |
---|
689 | 828 | |
---|
690 | 829 | rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, |
---|
691 | 830 | (u16 *)__LC_EXT_INT_CODE); |
---|
.. | .. |
---|
695 | 834 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, |
---|
696 | 835 | sizeof(psw_t)); |
---|
697 | 836 | return rc ? -EFAULT : 0; |
---|
| 837 | +} |
---|
| 838 | + |
---|
| 839 | +static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code) |
---|
| 840 | +{ |
---|
| 841 | + switch (code) { |
---|
| 842 | + case PGM_SPECIFICATION: |
---|
| 843 | + vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION; |
---|
| 844 | + break; |
---|
| 845 | + case PGM_OPERAND: |
---|
| 846 | + vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND; |
---|
| 847 | + break; |
---|
| 848 | + default: |
---|
| 849 | + return -EINVAL; |
---|
| 850 | + } |
---|
| 851 | + return 0; |
---|
698 | 852 | } |
---|
699 | 853 | |
---|
700 | 854 | static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) |
---|
.. | .. |
---|
717 | 871 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, |
---|
718 | 872 | pgm_info.code, 0); |
---|
719 | 873 | |
---|
| 874 | + /* PER is handled by the ultravisor */ |
---|
| 875 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 876 | + return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER); |
---|
| 877 | + |
---|
720 | 878 | switch (pgm_info.code & ~PGM_PER) { |
---|
721 | 879 | case PGM_AFX_TRANSLATION: |
---|
722 | 880 | case PGM_ASX_TRANSLATION: |
---|
.. | .. |
---|
728 | 886 | case PGM_PRIMARY_AUTHORITY: |
---|
729 | 887 | case PGM_SECONDARY_AUTHORITY: |
---|
730 | 888 | nullifying = true; |
---|
731 | | - /* fall through */ |
---|
| 889 | + fallthrough; |
---|
732 | 890 | case PGM_SPACE_SWITCH: |
---|
733 | 891 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, |
---|
734 | 892 | (u64 *)__LC_TRANS_EXC_CODE); |
---|
.. | .. |
---|
812 | 970 | return rc ? -EFAULT : 0; |
---|
813 | 971 | } |
---|
814 | 972 | |
---|
| 973 | +#define SCCB_MASK 0xFFFFFFF8 |
---|
| 974 | +#define SCCB_EVENT_PENDING 0x3 |
---|
| 975 | + |
---|
| 976 | +static int write_sclp(struct kvm_vcpu *vcpu, u32 parm) |
---|
| 977 | +{ |
---|
| 978 | + int rc; |
---|
| 979 | + |
---|
| 980 | + if (kvm_s390_pv_cpu_get_handle(vcpu)) { |
---|
| 981 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 982 | + vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG; |
---|
| 983 | + vcpu->arch.sie_block->eiparams = parm; |
---|
| 984 | + return 0; |
---|
| 985 | + } |
---|
| 986 | + |
---|
| 987 | + rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); |
---|
| 988 | + rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
| 989 | + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
| 990 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 991 | + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
| 992 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 993 | + rc |= put_guest_lc(vcpu, parm, |
---|
| 994 | + (u32 *)__LC_EXT_PARAMS); |
---|
| 995 | + |
---|
| 996 | + return rc ? -EFAULT : 0; |
---|
| 997 | +} |
---|
| 998 | + |
---|
815 | 999 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu) |
---|
816 | 1000 | { |
---|
817 | 1001 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
---|
818 | 1002 | struct kvm_s390_ext_info ext; |
---|
819 | | - int rc = 0; |
---|
820 | 1003 | |
---|
821 | 1004 | spin_lock(&fi->lock); |
---|
822 | | - if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { |
---|
| 1005 | + if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) || |
---|
| 1006 | + !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { |
---|
823 | 1007 | spin_unlock(&fi->lock); |
---|
824 | 1008 | return 0; |
---|
825 | 1009 | } |
---|
826 | 1010 | ext = fi->srv_signal; |
---|
827 | 1011 | memset(&fi->srv_signal, 0, sizeof(ext)); |
---|
828 | 1012 | clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); |
---|
| 1013 | + clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs); |
---|
| 1014 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 1015 | + set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs); |
---|
829 | 1016 | spin_unlock(&fi->lock); |
---|
830 | 1017 | |
---|
831 | 1018 | VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x", |
---|
.. | .. |
---|
834 | 1021 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, |
---|
835 | 1022 | ext.ext_params, 0); |
---|
836 | 1023 | |
---|
837 | | - rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); |
---|
838 | | - rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
839 | | - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
840 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
841 | | - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
842 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
843 | | - rc |= put_guest_lc(vcpu, ext.ext_params, |
---|
844 | | - (u32 *)__LC_EXT_PARAMS); |
---|
| 1024 | + return write_sclp(vcpu, ext.ext_params); |
---|
| 1025 | +} |
---|
845 | 1026 | |
---|
846 | | - return rc ? -EFAULT : 0; |
---|
| 1027 | +static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu) |
---|
| 1028 | +{ |
---|
| 1029 | + struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
---|
| 1030 | + struct kvm_s390_ext_info ext; |
---|
| 1031 | + |
---|
| 1032 | + spin_lock(&fi->lock); |
---|
| 1033 | + if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) { |
---|
| 1034 | + spin_unlock(&fi->lock); |
---|
| 1035 | + return 0; |
---|
| 1036 | + } |
---|
| 1037 | + ext = fi->srv_signal; |
---|
| 1038 | + /* only clear the event bit */ |
---|
| 1039 | + fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING; |
---|
| 1040 | + clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs); |
---|
| 1041 | + spin_unlock(&fi->lock); |
---|
| 1042 | + |
---|
| 1043 | + VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event"); |
---|
| 1044 | + vcpu->stat.deliver_service_signal++; |
---|
| 1045 | + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, |
---|
| 1046 | + ext.ext_params, 0); |
---|
| 1047 | + |
---|
| 1048 | + return write_sclp(vcpu, SCCB_EVENT_PENDING); |
---|
847 | 1049 | } |
---|
848 | 1050 | |
---|
849 | 1051 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) |
---|
.. | .. |
---|
938 | 1140 | { |
---|
939 | 1141 | int rc; |
---|
940 | 1142 | |
---|
| 1143 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 1144 | + vcpu->arch.sie_block->iictl = IICTL_CODE_IO; |
---|
| 1145 | + vcpu->arch.sie_block->subchannel_id = io->subchannel_id; |
---|
| 1146 | + vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr; |
---|
| 1147 | + vcpu->arch.sie_block->io_int_parm = io->io_int_parm; |
---|
| 1148 | + vcpu->arch.sie_block->io_int_word = io->io_int_word; |
---|
| 1149 | + return 0; |
---|
| 1150 | + } |
---|
| 1151 | + |
---|
941 | 1152 | rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID); |
---|
942 | 1153 | rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR); |
---|
943 | 1154 | rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM); |
---|
.. | .. |
---|
956 | 1167 | { |
---|
957 | 1168 | struct list_head *isc_list; |
---|
958 | 1169 | struct kvm_s390_float_interrupt *fi; |
---|
| 1170 | + struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int; |
---|
959 | 1171 | struct kvm_s390_interrupt_info *inti = NULL; |
---|
960 | 1172 | struct kvm_s390_io_info io; |
---|
961 | 1173 | u32 isc; |
---|
.. | .. |
---|
998 | 1210 | goto out; |
---|
999 | 1211 | } |
---|
1000 | 1212 | |
---|
1001 | | - if (vcpu->kvm->arch.gisa && |
---|
1002 | | - kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) { |
---|
| 1213 | + if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) { |
---|
1003 | 1214 | /* |
---|
1004 | 1215 | * in case an adapter interrupt was not delivered |
---|
1005 | 1216 | * in SIE context KVM will handle the delivery |
---|
.. | .. |
---|
1089 | 1300 | |
---|
1090 | 1301 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
---|
1091 | 1302 | { |
---|
| 1303 | + struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int; |
---|
1092 | 1304 | u64 sltime; |
---|
1093 | 1305 | |
---|
1094 | 1306 | vcpu->stat.exit_wait_state++; |
---|
.. | .. |
---|
1101 | 1313 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); |
---|
1102 | 1314 | return -EOPNOTSUPP; /* disabled wait */ |
---|
1103 | 1315 | } |
---|
| 1316 | + |
---|
| 1317 | + if (gi->origin && |
---|
| 1318 | + (gisa_get_ipm_or_restore_iam(gi) & |
---|
| 1319 | + vcpu->arch.sie_block->gcr[6] >> 24)) |
---|
| 1320 | + return 0; |
---|
1104 | 1321 | |
---|
1105 | 1322 | if (!ckc_interrupts_enabled(vcpu) && |
---|
1106 | 1323 | !cpu_timer_interrupts_enabled(vcpu)) { |
---|
.. | .. |
---|
1128 | 1345 | |
---|
1129 | 1346 | void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) |
---|
1130 | 1347 | { |
---|
1131 | | - /* |
---|
1132 | | - * We cannot move this into the if, as the CPU might be already |
---|
1133 | | - * in kvm_vcpu_block without having the waitqueue set (polling) |
---|
1134 | | - */ |
---|
1135 | 1348 | vcpu->valid_wakeup = true; |
---|
| 1349 | + kvm_vcpu_wake_up(vcpu); |
---|
| 1350 | + |
---|
1136 | 1351 | /* |
---|
1137 | | - * This is mostly to document, that the read in swait_active could |
---|
1138 | | - * be moved before other stores, leading to subtle races. |
---|
1139 | | - * All current users do not store or use an atomic like update |
---|
1140 | | - */ |
---|
1141 | | - smp_mb__after_atomic(); |
---|
1142 | | - if (swait_active(&vcpu->wq)) { |
---|
1143 | | - /* |
---|
1144 | | - * The vcpu gave up the cpu voluntarily, mark it as a good |
---|
1145 | | - * yield-candidate. |
---|
1146 | | - */ |
---|
1147 | | - vcpu->preempted = true; |
---|
1148 | | - swake_up_one(&vcpu->wq); |
---|
1149 | | - vcpu->stat.halt_wakeup++; |
---|
1150 | | - } |
---|
1151 | | - /* |
---|
1152 | | - * The VCPU might not be sleeping but is executing the VSIE. Let's |
---|
| 1352 | + * The VCPU might not be sleeping but rather executing VSIE. Let's |
---|
1153 | 1353 | * kick it, so it leaves the SIE to process the request. |
---|
1154 | 1354 | */ |
---|
1155 | 1355 | kvm_s390_vsie_kick(vcpu); |
---|
.. | .. |
---|
1250 | 1450 | case IRQ_PEND_EXT_SERVICE: |
---|
1251 | 1451 | rc = __deliver_service(vcpu); |
---|
1252 | 1452 | break; |
---|
| 1453 | + case IRQ_PEND_EXT_SERVICE_EV: |
---|
| 1454 | + rc = __deliver_service_ev(vcpu); |
---|
| 1455 | + break; |
---|
1253 | 1456 | case IRQ_PEND_PFAULT_DONE: |
---|
1254 | 1457 | rc = __deliver_pfault_done(vcpu); |
---|
1255 | 1458 | break; |
---|
.. | .. |
---|
1342 | 1545 | if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) |
---|
1343 | 1546 | return -EINVAL; |
---|
1344 | 1547 | |
---|
1345 | | - if (sclp.has_sigpif) |
---|
| 1548 | + if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu)) |
---|
1346 | 1549 | return sca_inject_ext_call(vcpu, src_id); |
---|
1347 | 1550 | |
---|
1348 | 1551 | if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) |
---|
.. | .. |
---|
1398 | 1601 | return 0; |
---|
1399 | 1602 | } |
---|
1400 | 1603 | |
---|
1401 | | -static int __inject_sigp_restart(struct kvm_vcpu *vcpu, |
---|
1402 | | - struct kvm_s390_irq *irq) |
---|
| 1604 | +static int __inject_sigp_restart(struct kvm_vcpu *vcpu) |
---|
1403 | 1605 | { |
---|
1404 | 1606 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
1405 | 1607 | |
---|
.. | .. |
---|
1533 | 1735 | |
---|
1534 | 1736 | static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid) |
---|
1535 | 1737 | { |
---|
| 1738 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
1536 | 1739 | unsigned long active_mask; |
---|
1537 | 1740 | int isc; |
---|
1538 | 1741 | |
---|
1539 | 1742 | if (schid) |
---|
1540 | 1743 | goto out; |
---|
1541 | | - if (!kvm->arch.gisa) |
---|
| 1744 | + if (!gi->origin) |
---|
1542 | 1745 | goto out; |
---|
1543 | 1746 | |
---|
1544 | | - active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32; |
---|
| 1747 | + active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32; |
---|
1545 | 1748 | while (active_mask) { |
---|
1546 | 1749 | isc = __fls(active_mask) ^ (BITS_PER_LONG - 1); |
---|
1547 | | - if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc)) |
---|
| 1750 | + if (gisa_tac_ipm_gisc(gi->origin, isc)) |
---|
1548 | 1751 | return isc; |
---|
1549 | 1752 | clear_bit_inv(isc, &active_mask); |
---|
1550 | 1753 | } |
---|
.. | .. |
---|
1567 | 1770 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
---|
1568 | 1771 | u64 isc_mask, u32 schid) |
---|
1569 | 1772 | { |
---|
| 1773 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
1570 | 1774 | struct kvm_s390_interrupt_info *inti, *tmp_inti; |
---|
1571 | 1775 | int isc; |
---|
1572 | 1776 | |
---|
.. | .. |
---|
1584 | 1788 | /* both types of interrupts present */ |
---|
1585 | 1789 | if (int_word_to_isc(inti->io.io_int_word) <= isc) { |
---|
1586 | 1790 | /* classical IO int with higher priority */ |
---|
1587 | | - kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); |
---|
| 1791 | + gisa_set_ipm_gisc(gi->origin, isc); |
---|
1588 | 1792 | goto out; |
---|
1589 | 1793 | } |
---|
1590 | 1794 | gisa_out: |
---|
.. | .. |
---|
1596 | 1800 | kvm_s390_reinject_io_int(kvm, inti); |
---|
1597 | 1801 | inti = tmp_inti; |
---|
1598 | 1802 | } else |
---|
1599 | | - kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); |
---|
| 1803 | + gisa_set_ipm_gisc(gi->origin, isc); |
---|
1600 | 1804 | out: |
---|
1601 | 1805 | return inti; |
---|
1602 | 1806 | } |
---|
1603 | | - |
---|
1604 | | -#define SCCB_MASK 0xFFFFFFF8 |
---|
1605 | | -#define SCCB_EVENT_PENDING 0x3 |
---|
1606 | 1807 | |
---|
1607 | 1808 | static int __inject_service(struct kvm *kvm, |
---|
1608 | 1809 | struct kvm_s390_interrupt_info *inti) |
---|
.. | .. |
---|
1612 | 1813 | kvm->stat.inject_service_signal++; |
---|
1613 | 1814 | spin_lock(&fi->lock); |
---|
1614 | 1815 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; |
---|
| 1816 | + |
---|
| 1817 | + /* We always allow events, track them separately from the sccb ints */ |
---|
| 1818 | + if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING) |
---|
| 1819 | + set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs); |
---|
| 1820 | + |
---|
1615 | 1821 | /* |
---|
1616 | 1822 | * Early versions of the QEMU s390 bios will inject several |
---|
1617 | 1823 | * service interrupts after another without handling a |
---|
.. | .. |
---|
1685 | 1891 | |
---|
1686 | 1892 | static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) |
---|
1687 | 1893 | { |
---|
| 1894 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
1688 | 1895 | struct kvm_s390_float_interrupt *fi; |
---|
1689 | 1896 | struct list_head *list; |
---|
1690 | 1897 | int isc; |
---|
.. | .. |
---|
1692 | 1899 | kvm->stat.inject_io++; |
---|
1693 | 1900 | isc = int_word_to_isc(inti->io.io_int_word); |
---|
1694 | 1901 | |
---|
1695 | | - if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) { |
---|
| 1902 | + /* |
---|
| 1903 | + * Do not make use of gisa in protected mode. We do not use the lock |
---|
| 1904 | + * checking variant as this is just a performance optimization and we |
---|
| 1905 | + * do not hold the lock here. This is ok as the code will pick |
---|
| 1906 | + * interrupts from both "lists" for delivery. |
---|
| 1907 | + */ |
---|
| 1908 | + if (!kvm_s390_pv_get_handle(kvm) && |
---|
| 1909 | + gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) { |
---|
1696 | 1910 | VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc); |
---|
1697 | | - kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); |
---|
| 1911 | + gisa_set_ipm_gisc(gi->origin, isc); |
---|
1698 | 1912 | kfree(inti); |
---|
1699 | 1913 | return 0; |
---|
1700 | 1914 | } |
---|
.. | .. |
---|
1726 | 1940 | */ |
---|
1727 | 1941 | static void __floating_irq_kick(struct kvm *kvm, u64 type) |
---|
1728 | 1942 | { |
---|
1729 | | - struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
---|
1730 | 1943 | struct kvm_vcpu *dst_vcpu; |
---|
1731 | 1944 | int sigcpu, online_vcpus, nr_tries = 0; |
---|
1732 | 1945 | |
---|
.. | .. |
---|
1735 | 1948 | return; |
---|
1736 | 1949 | |
---|
1737 | 1950 | /* find idle VCPUs first, then round robin */ |
---|
1738 | | - sigcpu = find_first_bit(fi->idle_mask, online_vcpus); |
---|
| 1951 | + sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus); |
---|
1739 | 1952 | if (sigcpu == online_vcpus) { |
---|
1740 | 1953 | do { |
---|
1741 | | - sigcpu = fi->next_rr_cpu; |
---|
1742 | | - fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus; |
---|
| 1954 | + sigcpu = kvm->arch.float_int.next_rr_cpu++; |
---|
| 1955 | + kvm->arch.float_int.next_rr_cpu %= online_vcpus; |
---|
1743 | 1956 | /* avoid endless loops if all vcpus are stopped */ |
---|
1744 | 1957 | if (nr_tries++ >= online_vcpus) |
---|
1745 | 1958 | return; |
---|
.. | .. |
---|
1753 | 1966 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT); |
---|
1754 | 1967 | break; |
---|
1755 | 1968 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
---|
1756 | | - if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa)) |
---|
| 1969 | + if (!(type & KVM_S390_INT_IO_AI_MASK && |
---|
| 1970 | + kvm->arch.gisa_int.origin) || |
---|
| 1971 | + kvm_s390_pv_cpu_get_handle(dst_vcpu)) |
---|
1757 | 1972 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); |
---|
1758 | 1973 | break; |
---|
1759 | 1974 | default: |
---|
.. | .. |
---|
1932 | 2147 | rc = __inject_sigp_stop(vcpu, irq); |
---|
1933 | 2148 | break; |
---|
1934 | 2149 | case KVM_S390_RESTART: |
---|
1935 | | - rc = __inject_sigp_restart(vcpu, irq); |
---|
| 2150 | + rc = __inject_sigp_restart(vcpu); |
---|
1936 | 2151 | break; |
---|
1937 | 2152 | case KVM_S390_INT_CLOCK_COMP: |
---|
1938 | 2153 | rc = __inject_ckc(vcpu); |
---|
.. | .. |
---|
2006 | 2221 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
---|
2007 | 2222 | int i; |
---|
2008 | 2223 | |
---|
| 2224 | + mutex_lock(&kvm->lock); |
---|
| 2225 | + if (!kvm_s390_pv_is_protected(kvm)) |
---|
| 2226 | + fi->masked_irqs = 0; |
---|
| 2227 | + mutex_unlock(&kvm->lock); |
---|
2009 | 2228 | spin_lock(&fi->lock); |
---|
2010 | 2229 | fi->pending_irqs = 0; |
---|
2011 | 2230 | memset(&fi->srv_signal, 0, sizeof(fi->srv_signal)); |
---|
.. | .. |
---|
2020 | 2239 | |
---|
2021 | 2240 | static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) |
---|
2022 | 2241 | { |
---|
| 2242 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
2023 | 2243 | struct kvm_s390_interrupt_info *inti; |
---|
2024 | 2244 | struct kvm_s390_float_interrupt *fi; |
---|
2025 | 2245 | struct kvm_s390_irq *buf; |
---|
.. | .. |
---|
2043 | 2263 | |
---|
2044 | 2264 | max_irqs = len / sizeof(struct kvm_s390_irq); |
---|
2045 | 2265 | |
---|
2046 | | - if (kvm->arch.gisa && |
---|
2047 | | - kvm_s390_gisa_get_ipm(kvm->arch.gisa)) { |
---|
| 2266 | + if (gi->origin && gisa_get_ipm(gi->origin)) { |
---|
2048 | 2267 | for (i = 0; i <= MAX_ISC; i++) { |
---|
2049 | 2268 | if (n == max_irqs) { |
---|
2050 | 2269 | /* signal userspace to try again */ |
---|
2051 | 2270 | ret = -ENOMEM; |
---|
2052 | 2271 | goto out_nolock; |
---|
2053 | 2272 | } |
---|
2054 | | - if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) { |
---|
| 2273 | + if (gisa_tac_ipm_gisc(gi->origin, i)) { |
---|
2055 | 2274 | irq = (struct kvm_s390_irq *) &buf[n]; |
---|
2056 | 2275 | irq->type = KVM_S390_INT_IO(1, 0, 0, 0); |
---|
2057 | 2276 | irq->u.io.io_int_word = isc_to_int_word(i); |
---|
.. | .. |
---|
2072 | 2291 | n++; |
---|
2073 | 2292 | } |
---|
2074 | 2293 | } |
---|
2075 | | - if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { |
---|
| 2294 | + if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) || |
---|
| 2295 | + test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) { |
---|
2076 | 2296 | if (n == max_irqs) { |
---|
2077 | 2297 | /* signal userspace to try again */ |
---|
2078 | 2298 | ret = -ENOMEM; |
---|
.. | .. |
---|
2226 | 2446 | { |
---|
2227 | 2447 | if (id >= MAX_S390_IO_ADAPTERS) |
---|
2228 | 2448 | return NULL; |
---|
| 2449 | + id = array_index_nospec(id, MAX_S390_IO_ADAPTERS); |
---|
2229 | 2450 | return kvm->arch.adapters[id]; |
---|
2230 | 2451 | } |
---|
2231 | 2452 | |
---|
.. | .. |
---|
2239 | 2460 | (void __user *)attr->addr, sizeof(adapter_info))) |
---|
2240 | 2461 | return -EFAULT; |
---|
2241 | 2462 | |
---|
2242 | | - if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || |
---|
2243 | | - (dev->kvm->arch.adapters[adapter_info.id] != NULL)) |
---|
| 2463 | + if (adapter_info.id >= MAX_S390_IO_ADAPTERS) |
---|
| 2464 | + return -EINVAL; |
---|
| 2465 | + |
---|
| 2466 | + adapter_info.id = array_index_nospec(adapter_info.id, |
---|
| 2467 | + MAX_S390_IO_ADAPTERS); |
---|
| 2468 | + |
---|
| 2469 | + if (dev->kvm->arch.adapters[adapter_info.id] != NULL) |
---|
2244 | 2470 | return -EINVAL; |
---|
2245 | 2471 | |
---|
2246 | 2472 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
---|
2247 | 2473 | if (!adapter) |
---|
2248 | 2474 | return -ENOMEM; |
---|
2249 | 2475 | |
---|
2250 | | - INIT_LIST_HEAD(&adapter->maps); |
---|
2251 | | - init_rwsem(&adapter->maps_lock); |
---|
2252 | | - atomic_set(&adapter->nr_maps, 0); |
---|
2253 | 2476 | adapter->id = adapter_info.id; |
---|
2254 | 2477 | adapter->isc = adapter_info.isc; |
---|
2255 | 2478 | adapter->maskable = adapter_info.maskable; |
---|
.. | .. |
---|
2274 | 2497 | return ret; |
---|
2275 | 2498 | } |
---|
2276 | 2499 | |
---|
2277 | | -static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr) |
---|
2278 | | -{ |
---|
2279 | | - struct s390_io_adapter *adapter = get_io_adapter(kvm, id); |
---|
2280 | | - struct s390_map_info *map; |
---|
2281 | | - int ret; |
---|
2282 | | - |
---|
2283 | | - if (!adapter || !addr) |
---|
2284 | | - return -EINVAL; |
---|
2285 | | - |
---|
2286 | | - map = kzalloc(sizeof(*map), GFP_KERNEL); |
---|
2287 | | - if (!map) { |
---|
2288 | | - ret = -ENOMEM; |
---|
2289 | | - goto out; |
---|
2290 | | - } |
---|
2291 | | - INIT_LIST_HEAD(&map->list); |
---|
2292 | | - map->guest_addr = addr; |
---|
2293 | | - map->addr = gmap_translate(kvm->arch.gmap, addr); |
---|
2294 | | - if (map->addr == -EFAULT) { |
---|
2295 | | - ret = -EFAULT; |
---|
2296 | | - goto out; |
---|
2297 | | - } |
---|
2298 | | - ret = get_user_pages_fast(map->addr, 1, 1, &map->page); |
---|
2299 | | - if (ret < 0) |
---|
2300 | | - goto out; |
---|
2301 | | - BUG_ON(ret != 1); |
---|
2302 | | - down_write(&adapter->maps_lock); |
---|
2303 | | - if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) { |
---|
2304 | | - list_add_tail(&map->list, &adapter->maps); |
---|
2305 | | - ret = 0; |
---|
2306 | | - } else { |
---|
2307 | | - put_page(map->page); |
---|
2308 | | - ret = -EINVAL; |
---|
2309 | | - } |
---|
2310 | | - up_write(&adapter->maps_lock); |
---|
2311 | | -out: |
---|
2312 | | - if (ret) |
---|
2313 | | - kfree(map); |
---|
2314 | | - return ret; |
---|
2315 | | -} |
---|
2316 | | - |
---|
2317 | | -static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr) |
---|
2318 | | -{ |
---|
2319 | | - struct s390_io_adapter *adapter = get_io_adapter(kvm, id); |
---|
2320 | | - struct s390_map_info *map, *tmp; |
---|
2321 | | - int found = 0; |
---|
2322 | | - |
---|
2323 | | - if (!adapter || !addr) |
---|
2324 | | - return -EINVAL; |
---|
2325 | | - |
---|
2326 | | - down_write(&adapter->maps_lock); |
---|
2327 | | - list_for_each_entry_safe(map, tmp, &adapter->maps, list) { |
---|
2328 | | - if (map->guest_addr == addr) { |
---|
2329 | | - found = 1; |
---|
2330 | | - atomic_dec(&adapter->nr_maps); |
---|
2331 | | - list_del(&map->list); |
---|
2332 | | - put_page(map->page); |
---|
2333 | | - kfree(map); |
---|
2334 | | - break; |
---|
2335 | | - } |
---|
2336 | | - } |
---|
2337 | | - up_write(&adapter->maps_lock); |
---|
2338 | | - |
---|
2339 | | - return found ? 0 : -EINVAL; |
---|
2340 | | -} |
---|
2341 | | - |
---|
2342 | 2500 | void kvm_s390_destroy_adapters(struct kvm *kvm) |
---|
2343 | 2501 | { |
---|
2344 | 2502 | int i; |
---|
2345 | | - struct s390_map_info *map, *tmp; |
---|
2346 | 2503 | |
---|
2347 | | - for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) { |
---|
2348 | | - if (!kvm->arch.adapters[i]) |
---|
2349 | | - continue; |
---|
2350 | | - list_for_each_entry_safe(map, tmp, |
---|
2351 | | - &kvm->arch.adapters[i]->maps, list) { |
---|
2352 | | - list_del(&map->list); |
---|
2353 | | - put_page(map->page); |
---|
2354 | | - kfree(map); |
---|
2355 | | - } |
---|
| 2504 | + for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) |
---|
2356 | 2505 | kfree(kvm->arch.adapters[i]); |
---|
2357 | | - } |
---|
2358 | 2506 | } |
---|
2359 | 2507 | |
---|
2360 | 2508 | static int modify_io_adapter(struct kvm_device *dev, |
---|
.. | .. |
---|
2376 | 2524 | if (ret > 0) |
---|
2377 | 2525 | ret = 0; |
---|
2378 | 2526 | break; |
---|
| 2527 | + /* |
---|
| 2528 | + * The following operations are no longer needed and therefore no-ops. |
---|
| 2529 | + * The gpa to hva translation is done when an IRQ route is set up. The |
---|
| 2530 | + * set_irq code uses get_user_pages_remote() to do the actual write. |
---|
| 2531 | + */ |
---|
2379 | 2532 | case KVM_S390_IO_ADAPTER_MAP: |
---|
2380 | | - ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr); |
---|
2381 | | - break; |
---|
2382 | 2533 | case KVM_S390_IO_ADAPTER_UNMAP: |
---|
2383 | | - ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr); |
---|
| 2534 | + ret = 0; |
---|
2384 | 2535 | break; |
---|
2385 | 2536 | default: |
---|
2386 | 2537 | ret = -EINVAL; |
---|
.. | .. |
---|
2619 | 2770 | return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit; |
---|
2620 | 2771 | } |
---|
2621 | 2772 | |
---|
2622 | | -static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter, |
---|
2623 | | - u64 addr) |
---|
| 2773 | +static struct page *get_map_page(struct kvm *kvm, u64 uaddr) |
---|
2624 | 2774 | { |
---|
2625 | | - struct s390_map_info *map; |
---|
| 2775 | + struct page *page = NULL; |
---|
2626 | 2776 | |
---|
2627 | | - if (!adapter) |
---|
2628 | | - return NULL; |
---|
2629 | | - |
---|
2630 | | - list_for_each_entry(map, &adapter->maps, list) { |
---|
2631 | | - if (map->guest_addr == addr) |
---|
2632 | | - return map; |
---|
2633 | | - } |
---|
2634 | | - return NULL; |
---|
| 2777 | + mmap_read_lock(kvm->mm); |
---|
| 2778 | + get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, |
---|
| 2779 | + &page, NULL, NULL); |
---|
| 2780 | + mmap_read_unlock(kvm->mm); |
---|
| 2781 | + return page; |
---|
2635 | 2782 | } |
---|
2636 | 2783 | |
---|
2637 | 2784 | static int adapter_indicators_set(struct kvm *kvm, |
---|
.. | .. |
---|
2640 | 2787 | { |
---|
2641 | 2788 | unsigned long bit; |
---|
2642 | 2789 | int summary_set, idx; |
---|
2643 | | - struct s390_map_info *info; |
---|
| 2790 | + struct page *ind_page, *summary_page; |
---|
2644 | 2791 | void *map; |
---|
2645 | 2792 | |
---|
2646 | | - info = get_map_info(adapter, adapter_int->ind_addr); |
---|
2647 | | - if (!info) |
---|
| 2793 | + ind_page = get_map_page(kvm, adapter_int->ind_addr); |
---|
| 2794 | + if (!ind_page) |
---|
2648 | 2795 | return -1; |
---|
2649 | | - map = page_address(info->page); |
---|
2650 | | - bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap); |
---|
2651 | | - set_bit(bit, map); |
---|
2652 | | - idx = srcu_read_lock(&kvm->srcu); |
---|
2653 | | - mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); |
---|
2654 | | - set_page_dirty_lock(info->page); |
---|
2655 | | - info = get_map_info(adapter, adapter_int->summary_addr); |
---|
2656 | | - if (!info) { |
---|
2657 | | - srcu_read_unlock(&kvm->srcu, idx); |
---|
| 2796 | + summary_page = get_map_page(kvm, adapter_int->summary_addr); |
---|
| 2797 | + if (!summary_page) { |
---|
| 2798 | + put_page(ind_page); |
---|
2658 | 2799 | return -1; |
---|
2659 | 2800 | } |
---|
2660 | | - map = page_address(info->page); |
---|
2661 | | - bit = get_ind_bit(info->addr, adapter_int->summary_offset, |
---|
2662 | | - adapter->swap); |
---|
| 2801 | + |
---|
| 2802 | + idx = srcu_read_lock(&kvm->srcu); |
---|
| 2803 | + map = page_address(ind_page); |
---|
| 2804 | + bit = get_ind_bit(adapter_int->ind_addr, |
---|
| 2805 | + adapter_int->ind_offset, adapter->swap); |
---|
| 2806 | + set_bit(bit, map); |
---|
| 2807 | + mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT); |
---|
| 2808 | + set_page_dirty_lock(ind_page); |
---|
| 2809 | + map = page_address(summary_page); |
---|
| 2810 | + bit = get_ind_bit(adapter_int->summary_addr, |
---|
| 2811 | + adapter_int->summary_offset, adapter->swap); |
---|
2663 | 2812 | summary_set = test_and_set_bit(bit, map); |
---|
2664 | | - mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); |
---|
2665 | | - set_page_dirty_lock(info->page); |
---|
| 2813 | + mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT); |
---|
| 2814 | + set_page_dirty_lock(summary_page); |
---|
2666 | 2815 | srcu_read_unlock(&kvm->srcu, idx); |
---|
| 2816 | + |
---|
| 2817 | + put_page(ind_page); |
---|
| 2818 | + put_page(summary_page); |
---|
2667 | 2819 | return summary_set ? 0 : 1; |
---|
2668 | 2820 | } |
---|
2669 | 2821 | |
---|
.. | .. |
---|
2685 | 2837 | adapter = get_io_adapter(kvm, e->adapter.adapter_id); |
---|
2686 | 2838 | if (!adapter) |
---|
2687 | 2839 | return -1; |
---|
2688 | | - down_read(&adapter->maps_lock); |
---|
2689 | 2840 | ret = adapter_indicators_set(kvm, adapter, &e->adapter); |
---|
2690 | | - up_read(&adapter->maps_lock); |
---|
2691 | 2841 | if ((ret > 0) && !adapter->masked) { |
---|
2692 | 2842 | ret = kvm_s390_inject_airq(kvm, adapter); |
---|
2693 | 2843 | if (ret == 0) |
---|
.. | .. |
---|
2738 | 2888 | struct kvm_kernel_irq_routing_entry *e, |
---|
2739 | 2889 | const struct kvm_irq_routing_entry *ue) |
---|
2740 | 2890 | { |
---|
2741 | | - int ret; |
---|
| 2891 | + u64 uaddr; |
---|
2742 | 2892 | |
---|
2743 | 2893 | switch (ue->type) { |
---|
| 2894 | + /* we store the userspace addresses instead of the guest addresses */ |
---|
2744 | 2895 | case KVM_IRQ_ROUTING_S390_ADAPTER: |
---|
2745 | 2896 | e->set = set_adapter_int; |
---|
2746 | | - e->adapter.summary_addr = ue->u.adapter.summary_addr; |
---|
2747 | | - e->adapter.ind_addr = ue->u.adapter.ind_addr; |
---|
| 2897 | + uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr); |
---|
| 2898 | + if (uaddr == -EFAULT) |
---|
| 2899 | + return -EFAULT; |
---|
| 2900 | + e->adapter.summary_addr = uaddr; |
---|
| 2901 | + uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr); |
---|
| 2902 | + if (uaddr == -EFAULT) |
---|
| 2903 | + return -EFAULT; |
---|
| 2904 | + e->adapter.ind_addr = uaddr; |
---|
2748 | 2905 | e->adapter.summary_offset = ue->u.adapter.summary_offset; |
---|
2749 | 2906 | e->adapter.ind_offset = ue->u.adapter.ind_offset; |
---|
2750 | 2907 | e->adapter.adapter_id = ue->u.adapter.adapter_id; |
---|
2751 | | - ret = 0; |
---|
2752 | | - break; |
---|
| 2908 | + return 0; |
---|
2753 | 2909 | default: |
---|
2754 | | - ret = -EINVAL; |
---|
| 2910 | + return -EINVAL; |
---|
2755 | 2911 | } |
---|
2756 | | - |
---|
2757 | | - return ret; |
---|
2758 | 2912 | } |
---|
2759 | 2913 | |
---|
2760 | 2914 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, |
---|
.. | .. |
---|
2848 | 3002 | int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len) |
---|
2849 | 3003 | { |
---|
2850 | 3004 | int scn; |
---|
2851 | | - unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)]; |
---|
| 3005 | + DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS); |
---|
2852 | 3006 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
2853 | 3007 | unsigned long pending_irqs; |
---|
2854 | 3008 | struct kvm_s390_irq irq; |
---|
.. | .. |
---|
2901 | 3055 | return n; |
---|
2902 | 3056 | } |
---|
2903 | 3057 | |
---|
| 3058 | +static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask) |
---|
| 3059 | +{ |
---|
| 3060 | + int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus); |
---|
| 3061 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3062 | + struct kvm_vcpu *vcpu; |
---|
| 3063 | + u8 vcpu_isc_mask; |
---|
| 3064 | + |
---|
| 3065 | + for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) { |
---|
| 3066 | + vcpu = kvm_get_vcpu(kvm, vcpu_idx); |
---|
| 3067 | + if (psw_ioint_disabled(vcpu)) |
---|
| 3068 | + continue; |
---|
| 3069 | + vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24); |
---|
| 3070 | + if (deliverable_mask & vcpu_isc_mask) { |
---|
| 3071 | + /* lately kicked but not yet running */ |
---|
| 3072 | + if (test_and_set_bit(vcpu_idx, gi->kicked_mask)) |
---|
| 3073 | + return; |
---|
| 3074 | + kvm_s390_vcpu_wakeup(vcpu); |
---|
| 3075 | + return; |
---|
| 3076 | + } |
---|
| 3077 | + } |
---|
| 3078 | +} |
---|
| 3079 | + |
---|
| 3080 | +static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer) |
---|
| 3081 | +{ |
---|
| 3082 | + struct kvm_s390_gisa_interrupt *gi = |
---|
| 3083 | + container_of(timer, struct kvm_s390_gisa_interrupt, timer); |
---|
| 3084 | + struct kvm *kvm = |
---|
| 3085 | + container_of(gi->origin, struct sie_page2, gisa)->kvm; |
---|
| 3086 | + u8 pending_mask; |
---|
| 3087 | + |
---|
| 3088 | + pending_mask = gisa_get_ipm_or_restore_iam(gi); |
---|
| 3089 | + if (pending_mask) { |
---|
| 3090 | + __airqs_kick_single_vcpu(kvm, pending_mask); |
---|
| 3091 | + hrtimer_forward_now(timer, ns_to_ktime(gi->expires)); |
---|
| 3092 | + return HRTIMER_RESTART; |
---|
| 3093 | + } |
---|
| 3094 | + |
---|
| 3095 | + return HRTIMER_NORESTART; |
---|
| 3096 | +} |
---|
| 3097 | + |
---|
| 3098 | +#define NULL_GISA_ADDR 0x00000000UL |
---|
| 3099 | +#define NONE_GISA_ADDR 0x00000001UL |
---|
| 3100 | +#define GISA_ADDR_MASK 0xfffff000UL |
---|
| 3101 | + |
---|
| 3102 | +static void process_gib_alert_list(void) |
---|
| 3103 | +{ |
---|
| 3104 | + struct kvm_s390_gisa_interrupt *gi; |
---|
| 3105 | + struct kvm_s390_gisa *gisa; |
---|
| 3106 | + struct kvm *kvm; |
---|
| 3107 | + u32 final, origin = 0UL; |
---|
| 3108 | + |
---|
| 3109 | + do { |
---|
| 3110 | + /* |
---|
| 3111 | + * If the NONE_GISA_ADDR is still stored in the alert list |
---|
| 3112 | + * origin, we will leave the outer loop. No further GISA has |
---|
| 3113 | + * been added to the alert list by millicode while processing |
---|
| 3114 | + * the current alert list. |
---|
| 3115 | + */ |
---|
| 3116 | + final = (origin & NONE_GISA_ADDR); |
---|
| 3117 | + /* |
---|
| 3118 | + * Cut off the alert list and store the NONE_GISA_ADDR in the |
---|
| 3119 | + * alert list origin to avoid further GAL interruptions. |
---|
| 3120 | + * A new alert list can be build up by millicode in parallel |
---|
| 3121 | + * for guests not in the yet cut-off alert list. When in the |
---|
| 3122 | + * final loop, store the NULL_GISA_ADDR instead. This will re- |
---|
| 3123 | + * enable GAL interruptions on the host again. |
---|
| 3124 | + */ |
---|
| 3125 | + origin = xchg(&gib->alert_list_origin, |
---|
| 3126 | + (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR); |
---|
| 3127 | + /* |
---|
| 3128 | + * Loop through the just cut-off alert list and start the |
---|
| 3129 | + * gisa timers to kick idle vcpus to consume the pending |
---|
| 3130 | + * interruptions asap. |
---|
| 3131 | + */ |
---|
| 3132 | + while (origin & GISA_ADDR_MASK) { |
---|
| 3133 | + gisa = (struct kvm_s390_gisa *)(u64)origin; |
---|
| 3134 | + origin = gisa->next_alert; |
---|
| 3135 | + gisa->next_alert = (u32)(u64)gisa; |
---|
| 3136 | + kvm = container_of(gisa, struct sie_page2, gisa)->kvm; |
---|
| 3137 | + gi = &kvm->arch.gisa_int; |
---|
| 3138 | + if (hrtimer_active(&gi->timer)) |
---|
| 3139 | + hrtimer_cancel(&gi->timer); |
---|
| 3140 | + hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL); |
---|
| 3141 | + } |
---|
| 3142 | + } while (!final); |
---|
| 3143 | + |
---|
| 3144 | +} |
---|
| 3145 | + |
---|
2904 | 3146 | void kvm_s390_gisa_clear(struct kvm *kvm) |
---|
2905 | 3147 | { |
---|
2906 | | - if (kvm->arch.gisa) { |
---|
2907 | | - memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa)); |
---|
2908 | | - kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa; |
---|
2909 | | - VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa); |
---|
2910 | | - } |
---|
| 3148 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3149 | + |
---|
| 3150 | + if (!gi->origin) |
---|
| 3151 | + return; |
---|
| 3152 | + gisa_clear_ipm(gi->origin); |
---|
| 3153 | + VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin); |
---|
2911 | 3154 | } |
---|
2912 | 3155 | |
---|
2913 | 3156 | void kvm_s390_gisa_init(struct kvm *kvm) |
---|
2914 | 3157 | { |
---|
2915 | | - if (css_general_characteristics.aiv) { |
---|
2916 | | - kvm->arch.gisa = &kvm->arch.sie_page2->gisa; |
---|
2917 | | - VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa); |
---|
2918 | | - kvm_s390_gisa_clear(kvm); |
---|
2919 | | - } |
---|
| 3158 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3159 | + |
---|
| 3160 | + if (!css_general_characteristics.aiv) |
---|
| 3161 | + return; |
---|
| 3162 | + gi->origin = &kvm->arch.sie_page2->gisa; |
---|
| 3163 | + gi->alert.mask = 0; |
---|
| 3164 | + spin_lock_init(&gi->alert.ref_lock); |
---|
| 3165 | + gi->expires = 50 * 1000; /* 50 usec */ |
---|
| 3166 | + hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
---|
| 3167 | + gi->timer.function = gisa_vcpu_kicker; |
---|
| 3168 | + memset(gi->origin, 0, sizeof(struct kvm_s390_gisa)); |
---|
| 3169 | + gi->origin->next_alert = (u32)(u64)gi->origin; |
---|
| 3170 | + VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin); |
---|
2920 | 3171 | } |
---|
2921 | 3172 | |
---|
2922 | 3173 | void kvm_s390_gisa_destroy(struct kvm *kvm) |
---|
2923 | 3174 | { |
---|
2924 | | - if (!kvm->arch.gisa) |
---|
| 3175 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3176 | + |
---|
| 3177 | + if (!gi->origin) |
---|
2925 | 3178 | return; |
---|
2926 | | - kvm->arch.gisa = NULL; |
---|
| 3179 | + if (gi->alert.mask) |
---|
| 3180 | + KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x", |
---|
| 3181 | + kvm, gi->alert.mask); |
---|
| 3182 | + while (gisa_in_alert_list(gi->origin)) |
---|
| 3183 | + cpu_relax(); |
---|
| 3184 | + hrtimer_cancel(&gi->timer); |
---|
| 3185 | + gi->origin = NULL; |
---|
| 3186 | +} |
---|
| 3187 | + |
---|
| 3188 | +/** |
---|
| 3189 | + * kvm_s390_gisc_register - register a guest ISC |
---|
| 3190 | + * |
---|
| 3191 | + * @kvm: the kernel vm to work with |
---|
| 3192 | + * @gisc: the guest interruption sub class to register |
---|
| 3193 | + * |
---|
| 3194 | + * The function extends the vm specific alert mask to use. |
---|
| 3195 | + * The effective IAM mask in the GISA is updated as well |
---|
| 3196 | + * in case the GISA is not part of the GIB alert list. |
---|
| 3197 | + * It will be updated latest when the IAM gets restored |
---|
| 3198 | + * by gisa_get_ipm_or_restore_iam(). |
---|
| 3199 | + * |
---|
| 3200 | + * Returns: the nonspecific ISC (NISC) the gib alert mechanism |
---|
| 3201 | + * has registered with the channel subsystem. |
---|
| 3202 | + * -ENODEV in case the vm uses no GISA |
---|
| 3203 | + * -ERANGE in case the guest ISC is invalid |
---|
| 3204 | + */ |
---|
| 3205 | +int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc) |
---|
| 3206 | +{ |
---|
| 3207 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3208 | + |
---|
| 3209 | + if (!gi->origin) |
---|
| 3210 | + return -ENODEV; |
---|
| 3211 | + if (gisc > MAX_ISC) |
---|
| 3212 | + return -ERANGE; |
---|
| 3213 | + |
---|
| 3214 | + spin_lock(&gi->alert.ref_lock); |
---|
| 3215 | + gi->alert.ref_count[gisc]++; |
---|
| 3216 | + if (gi->alert.ref_count[gisc] == 1) { |
---|
| 3217 | + gi->alert.mask |= 0x80 >> gisc; |
---|
| 3218 | + gisa_set_iam(gi->origin, gi->alert.mask); |
---|
| 3219 | + } |
---|
| 3220 | + spin_unlock(&gi->alert.ref_lock); |
---|
| 3221 | + |
---|
| 3222 | + return gib->nisc; |
---|
| 3223 | +} |
---|
| 3224 | +EXPORT_SYMBOL_GPL(kvm_s390_gisc_register); |
---|
| 3225 | + |
---|
| 3226 | +/** |
---|
| 3227 | + * kvm_s390_gisc_unregister - unregister a guest ISC |
---|
| 3228 | + * |
---|
| 3229 | + * @kvm: the kernel vm to work with |
---|
| 3230 | + * @gisc: the guest interruption sub class to register |
---|
| 3231 | + * |
---|
| 3232 | + * The function reduces the vm specific alert mask to use. |
---|
| 3233 | + * The effective IAM mask in the GISA is updated as well |
---|
| 3234 | + * in case the GISA is not part of the GIB alert list. |
---|
| 3235 | + * It will be updated latest when the IAM gets restored |
---|
| 3236 | + * by gisa_get_ipm_or_restore_iam(). |
---|
| 3237 | + * |
---|
| 3238 | + * Returns: the nonspecific ISC (NISC) the gib alert mechanism |
---|
| 3239 | + * has registered with the channel subsystem. |
---|
| 3240 | + * -ENODEV in case the vm uses no GISA |
---|
| 3241 | + * -ERANGE in case the guest ISC is invalid |
---|
| 3242 | + * -EINVAL in case the guest ISC is not registered |
---|
| 3243 | + */ |
---|
| 3244 | +int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc) |
---|
| 3245 | +{ |
---|
| 3246 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3247 | + int rc = 0; |
---|
| 3248 | + |
---|
| 3249 | + if (!gi->origin) |
---|
| 3250 | + return -ENODEV; |
---|
| 3251 | + if (gisc > MAX_ISC) |
---|
| 3252 | + return -ERANGE; |
---|
| 3253 | + |
---|
| 3254 | + spin_lock(&gi->alert.ref_lock); |
---|
| 3255 | + if (gi->alert.ref_count[gisc] == 0) { |
---|
| 3256 | + rc = -EINVAL; |
---|
| 3257 | + goto out; |
---|
| 3258 | + } |
---|
| 3259 | + gi->alert.ref_count[gisc]--; |
---|
| 3260 | + if (gi->alert.ref_count[gisc] == 0) { |
---|
| 3261 | + gi->alert.mask &= ~(0x80 >> gisc); |
---|
| 3262 | + gisa_set_iam(gi->origin, gi->alert.mask); |
---|
| 3263 | + } |
---|
| 3264 | +out: |
---|
| 3265 | + spin_unlock(&gi->alert.ref_lock); |
---|
| 3266 | + |
---|
| 3267 | + return rc; |
---|
| 3268 | +} |
---|
| 3269 | +EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister); |
---|
| 3270 | + |
---|
| 3271 | +static void gib_alert_irq_handler(struct airq_struct *airq, bool floating) |
---|
| 3272 | +{ |
---|
| 3273 | + inc_irq_stat(IRQIO_GAL); |
---|
| 3274 | + process_gib_alert_list(); |
---|
| 3275 | +} |
---|
| 3276 | + |
---|
| 3277 | +static struct airq_struct gib_alert_irq = { |
---|
| 3278 | + .handler = gib_alert_irq_handler, |
---|
| 3279 | + .lsi_ptr = &gib_alert_irq.lsi_mask, |
---|
| 3280 | +}; |
---|
| 3281 | + |
---|
| 3282 | +void kvm_s390_gib_destroy(void) |
---|
| 3283 | +{ |
---|
| 3284 | + if (!gib) |
---|
| 3285 | + return; |
---|
| 3286 | + chsc_sgib(0); |
---|
| 3287 | + unregister_adapter_interrupt(&gib_alert_irq); |
---|
| 3288 | + free_page((unsigned long)gib); |
---|
| 3289 | + gib = NULL; |
---|
| 3290 | +} |
---|
| 3291 | + |
---|
| 3292 | +int kvm_s390_gib_init(u8 nisc) |
---|
| 3293 | +{ |
---|
| 3294 | + int rc = 0; |
---|
| 3295 | + |
---|
| 3296 | + if (!css_general_characteristics.aiv) { |
---|
| 3297 | + KVM_EVENT(3, "%s", "gib not initialized, no AIV facility"); |
---|
| 3298 | + goto out; |
---|
| 3299 | + } |
---|
| 3300 | + |
---|
| 3301 | + gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
---|
| 3302 | + if (!gib) { |
---|
| 3303 | + rc = -ENOMEM; |
---|
| 3304 | + goto out; |
---|
| 3305 | + } |
---|
| 3306 | + |
---|
| 3307 | + gib_alert_irq.isc = nisc; |
---|
| 3308 | + if (register_adapter_interrupt(&gib_alert_irq)) { |
---|
| 3309 | + pr_err("Registering the GIB alert interruption handler failed\n"); |
---|
| 3310 | + rc = -EIO; |
---|
| 3311 | + goto out_free_gib; |
---|
| 3312 | + } |
---|
| 3313 | + |
---|
| 3314 | + gib->nisc = nisc; |
---|
| 3315 | + if (chsc_sgib((u32)(u64)gib)) { |
---|
| 3316 | + pr_err("Associating the GIB with the AIV facility failed\n"); |
---|
| 3317 | + free_page((unsigned long)gib); |
---|
| 3318 | + gib = NULL; |
---|
| 3319 | + rc = -EIO; |
---|
| 3320 | + goto out_unreg_gal; |
---|
| 3321 | + } |
---|
| 3322 | + |
---|
| 3323 | + KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc); |
---|
| 3324 | + goto out; |
---|
| 3325 | + |
---|
| 3326 | +out_unreg_gal: |
---|
| 3327 | + unregister_adapter_interrupt(&gib_alert_irq); |
---|
| 3328 | +out_free_gib: |
---|
| 3329 | + free_page((unsigned long)gib); |
---|
| 3330 | + gib = NULL; |
---|
| 3331 | +out: |
---|
| 3332 | + return rc; |
---|
2927 | 3333 | } |
---|