.. | .. |
---|
2 | 2 | /* |
---|
3 | 3 | * handling kvm guest interrupts |
---|
4 | 4 | * |
---|
5 | | - * Copyright IBM Corp. 2008, 2015 |
---|
| 5 | + * Copyright IBM Corp. 2008, 2020 |
---|
6 | 6 | * |
---|
7 | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
---|
8 | 8 | */ |
---|
| 9 | + |
---|
| 10 | +#define KMSG_COMPONENT "kvm-s390" |
---|
| 11 | +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
---|
9 | 12 | |
---|
10 | 13 | #include <linux/interrupt.h> |
---|
11 | 14 | #include <linux/kvm_host.h> |
---|
12 | 15 | #include <linux/hrtimer.h> |
---|
13 | 16 | #include <linux/mmu_context.h> |
---|
| 17 | +#include <linux/nospec.h> |
---|
14 | 18 | #include <linux/signal.h> |
---|
15 | 19 | #include <linux/slab.h> |
---|
16 | 20 | #include <linux/bitmap.h> |
---|
.. | .. |
---|
23 | 27 | #include <asm/gmap.h> |
---|
24 | 28 | #include <asm/switch_to.h> |
---|
25 | 29 | #include <asm/nmi.h> |
---|
| 30 | +#include <asm/airq.h> |
---|
26 | 31 | #include "kvm-s390.h" |
---|
27 | 32 | #include "gaccess.h" |
---|
28 | 33 | #include "trace-s390.h" |
---|
.. | .. |
---|
30 | 35 | #define PFAULT_INIT 0x0600 |
---|
31 | 36 | #define PFAULT_DONE 0x0680 |
---|
32 | 37 | #define VIRTIO_PARAM 0x0d00 |
---|
| 38 | + |
---|
| 39 | +static struct kvm_s390_gib *gib; |
---|
33 | 40 | |
---|
34 | 41 | /* handle external calls via sigp interpretation facility */ |
---|
35 | 42 | static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id) |
---|
.. | .. |
---|
74 | 81 | struct esca_block *sca = vcpu->kvm->arch.sca; |
---|
75 | 82 | union esca_sigp_ctrl *sigp_ctrl = |
---|
76 | 83 | &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); |
---|
77 | | - union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; |
---|
| 84 | + union esca_sigp_ctrl new_val = {0}, old_val; |
---|
78 | 85 | |
---|
| 86 | + old_val = READ_ONCE(*sigp_ctrl); |
---|
79 | 87 | new_val.scn = src_id; |
---|
80 | 88 | new_val.c = 1; |
---|
81 | 89 | old_val.c = 0; |
---|
.. | .. |
---|
86 | 94 | struct bsca_block *sca = vcpu->kvm->arch.sca; |
---|
87 | 95 | union bsca_sigp_ctrl *sigp_ctrl = |
---|
88 | 96 | &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); |
---|
89 | | - union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl; |
---|
| 97 | + union bsca_sigp_ctrl new_val = {0}, old_val; |
---|
90 | 98 | |
---|
| 99 | + old_val = READ_ONCE(*sigp_ctrl); |
---|
91 | 100 | new_val.scn = src_id; |
---|
92 | 101 | new_val.c = 1; |
---|
93 | 102 | old_val.c = 0; |
---|
.. | .. |
---|
117 | 126 | struct esca_block *sca = vcpu->kvm->arch.sca; |
---|
118 | 127 | union esca_sigp_ctrl *sigp_ctrl = |
---|
119 | 128 | &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); |
---|
120 | | - union esca_sigp_ctrl old = *sigp_ctrl; |
---|
| 129 | + union esca_sigp_ctrl old; |
---|
121 | 130 | |
---|
| 131 | + old = READ_ONCE(*sigp_ctrl); |
---|
122 | 132 | expect = old.value; |
---|
123 | 133 | rc = cmpxchg(&sigp_ctrl->value, old.value, 0); |
---|
124 | 134 | } else { |
---|
125 | 135 | struct bsca_block *sca = vcpu->kvm->arch.sca; |
---|
126 | 136 | union bsca_sigp_ctrl *sigp_ctrl = |
---|
127 | 137 | &(sca->cpu[vcpu->vcpu_id].sigp_ctrl); |
---|
128 | | - union bsca_sigp_ctrl old = *sigp_ctrl; |
---|
| 138 | + union bsca_sigp_ctrl old; |
---|
129 | 139 | |
---|
| 140 | + old = READ_ONCE(*sigp_ctrl); |
---|
130 | 141 | expect = old.value; |
---|
131 | 142 | rc = cmpxchg(&sigp_ctrl->value, old.value, 0); |
---|
132 | 143 | } |
---|
.. | .. |
---|
217 | 228 | */ |
---|
218 | 229 | #define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE) |
---|
219 | 230 | |
---|
220 | | -static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
| 231 | +/** |
---|
| 232 | + * gisa_set_iam - change the GISA interruption alert mask |
---|
| 233 | + * |
---|
| 234 | + * @gisa: gisa to operate on |
---|
| 235 | + * @iam: new IAM value to use |
---|
| 236 | + * |
---|
| 237 | + * Change the IAM atomically with the next alert address and the IPM |
---|
| 238 | + * of the GISA if the GISA is not part of the GIB alert list. All three |
---|
| 239 | + * fields are located in the first long word of the GISA. |
---|
| 240 | + * |
---|
| 241 | + * Returns: 0 on success |
---|
| 242 | + * -EBUSY in case the gisa is part of the alert list |
---|
| 243 | + */ |
---|
| 244 | +static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam) |
---|
| 245 | +{ |
---|
| 246 | + u64 word, _word; |
---|
| 247 | + |
---|
| 248 | + do { |
---|
| 249 | + word = READ_ONCE(gisa->u64.word[0]); |
---|
| 250 | + if ((u64)gisa != word >> 32) |
---|
| 251 | + return -EBUSY; |
---|
| 252 | + _word = (word & ~0xffUL) | iam; |
---|
| 253 | + } while (cmpxchg(&gisa->u64.word[0], word, _word) != word); |
---|
| 254 | + |
---|
| 255 | + return 0; |
---|
| 256 | +} |
---|
| 257 | + |
---|
| 258 | +/** |
---|
| 259 | + * gisa_clear_ipm - clear the GISA interruption pending mask |
---|
| 260 | + * |
---|
| 261 | + * @gisa: gisa to operate on |
---|
| 262 | + * |
---|
| 263 | + * Clear the IPM atomically with the next alert address and the IAM |
---|
| 264 | + * of the GISA unconditionally. All three fields are located in the |
---|
| 265 | + * first long word of the GISA. |
---|
| 266 | + */ |
---|
| 267 | +static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa) |
---|
| 268 | +{ |
---|
| 269 | + u64 word, _word; |
---|
| 270 | + |
---|
| 271 | + do { |
---|
| 272 | + word = READ_ONCE(gisa->u64.word[0]); |
---|
| 273 | + _word = word & ~(0xffUL << 24); |
---|
| 274 | + } while (cmpxchg(&gisa->u64.word[0], word, _word) != word); |
---|
| 275 | +} |
---|
| 276 | + |
---|
| 277 | +/** |
---|
| 278 | + * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM |
---|
| 279 | + * |
---|
| 280 | + * @gi: gisa interrupt struct to work on |
---|
| 281 | + * |
---|
| 282 | + * Atomically restores the interruption alert mask if none of the |
---|
| 283 | + * relevant ISCs are pending and returns the IPM. |
---|
| 284 | + * |
---|
| 285 | + * Returns: the relevant pending ISCs |
---|
| 286 | + */ |
---|
| 287 | +static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi) |
---|
| 288 | +{ |
---|
| 289 | + u8 pending_mask, alert_mask; |
---|
| 290 | + u64 word, _word; |
---|
| 291 | + |
---|
| 292 | + do { |
---|
| 293 | + word = READ_ONCE(gi->origin->u64.word[0]); |
---|
| 294 | + alert_mask = READ_ONCE(gi->alert.mask); |
---|
| 295 | + pending_mask = (u8)(word >> 24) & alert_mask; |
---|
| 296 | + if (pending_mask) |
---|
| 297 | + return pending_mask; |
---|
| 298 | + _word = (word & ~0xffUL) | alert_mask; |
---|
| 299 | + } while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word); |
---|
| 300 | + |
---|
| 301 | + return 0; |
---|
| 302 | +} |
---|
| 303 | + |
---|
| 304 | +static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa) |
---|
| 305 | +{ |
---|
| 306 | + return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa; |
---|
| 307 | +} |
---|
| 308 | + |
---|
| 309 | +static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
221 | 310 | { |
---|
222 | 311 | set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
---|
223 | 312 | } |
---|
224 | 313 | |
---|
225 | | -static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa) |
---|
| 314 | +static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa) |
---|
226 | 315 | { |
---|
227 | 316 | return READ_ONCE(gisa->ipm); |
---|
228 | 317 | } |
---|
229 | 318 | |
---|
230 | | -static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
| 319 | +static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
231 | 320 | { |
---|
232 | 321 | clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
---|
233 | 322 | } |
---|
234 | 323 | |
---|
235 | | -static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
| 324 | +static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) |
---|
236 | 325 | { |
---|
237 | 326 | return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa); |
---|
238 | 327 | } |
---|
239 | 328 | |
---|
240 | 329 | static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu) |
---|
241 | 330 | { |
---|
242 | | - return vcpu->kvm->arch.float_int.pending_irqs | |
---|
243 | | - vcpu->arch.local_int.pending_irqs; |
---|
| 331 | + unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs | |
---|
| 332 | + vcpu->arch.local_int.pending_irqs; |
---|
| 333 | + |
---|
| 334 | + pending &= ~vcpu->kvm->arch.float_int.masked_irqs; |
---|
| 335 | + return pending; |
---|
244 | 336 | } |
---|
245 | 337 | |
---|
246 | 338 | static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) |
---|
247 | 339 | { |
---|
248 | | - return pending_irqs_no_gisa(vcpu) | |
---|
249 | | - kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7; |
---|
| 340 | + struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int; |
---|
| 341 | + unsigned long pending_mask; |
---|
| 342 | + |
---|
| 343 | + pending_mask = pending_irqs_no_gisa(vcpu); |
---|
| 344 | + if (gi->origin) |
---|
| 345 | + pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7; |
---|
| 346 | + return pending_mask; |
---|
250 | 347 | } |
---|
251 | 348 | |
---|
252 | 349 | static inline int isc_to_irq_type(unsigned long isc) |
---|
.. | .. |
---|
293 | 390 | __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask); |
---|
294 | 391 | if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK)) |
---|
295 | 392 | __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask); |
---|
296 | | - if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) |
---|
| 393 | + if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) { |
---|
297 | 394 | __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask); |
---|
| 395 | + __clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask); |
---|
| 396 | + } |
---|
298 | 397 | if (psw_mchk_disabled(vcpu)) |
---|
299 | 398 | active_mask &= ~IRQ_PEND_MCHK_MASK; |
---|
| 399 | + /* PV guest cpus can have a single interruption injected at a time. */ |
---|
| 400 | + if (kvm_s390_pv_cpu_get_handle(vcpu) && |
---|
| 401 | + vcpu->arch.sie_block->iictl != IICTL_CODE_NONE) |
---|
| 402 | + active_mask &= ~(IRQ_PEND_EXT_II_MASK | |
---|
| 403 | + IRQ_PEND_IO_MASK | |
---|
| 404 | + IRQ_PEND_MCHK_MASK); |
---|
300 | 405 | /* |
---|
301 | 406 | * Check both floating and local interrupt's cr14 because |
---|
302 | 407 | * bit IRQ_PEND_MCHK_REP could be set in both cases. |
---|
.. | .. |
---|
318 | 423 | static void __set_cpu_idle(struct kvm_vcpu *vcpu) |
---|
319 | 424 | { |
---|
320 | 425 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT); |
---|
321 | | - set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask); |
---|
| 426 | + set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); |
---|
322 | 427 | } |
---|
323 | 428 | |
---|
324 | 429 | static void __unset_cpu_idle(struct kvm_vcpu *vcpu) |
---|
325 | 430 | { |
---|
326 | 431 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT); |
---|
327 | | - clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.float_int.idle_mask); |
---|
| 432 | + clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); |
---|
328 | 433 | } |
---|
329 | 434 | |
---|
330 | 435 | static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) |
---|
.. | .. |
---|
345 | 450 | { |
---|
346 | 451 | if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK)) |
---|
347 | 452 | return; |
---|
348 | | - else if (psw_ioint_disabled(vcpu)) |
---|
| 453 | + if (psw_ioint_disabled(vcpu)) |
---|
349 | 454 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT); |
---|
350 | 455 | else |
---|
351 | 456 | vcpu->arch.sie_block->lctl |= LCTL_CR6; |
---|
.. | .. |
---|
353 | 458 | |
---|
354 | 459 | static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) |
---|
355 | 460 | { |
---|
356 | | - if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK)) |
---|
| 461 | + if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK)) |
---|
357 | 462 | return; |
---|
358 | 463 | if (psw_extint_disabled(vcpu)) |
---|
359 | 464 | kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT); |
---|
.. | .. |
---|
363 | 468 | |
---|
364 | 469 | static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) |
---|
365 | 470 | { |
---|
366 | | - if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) |
---|
| 471 | + if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK)) |
---|
367 | 472 | return; |
---|
368 | 473 | if (psw_mchk_disabled(vcpu)) |
---|
369 | 474 | vcpu->arch.sie_block->ictl |= ICTL_LPSW; |
---|
.. | .. |
---|
389 | 494 | static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu) |
---|
390 | 495 | { |
---|
391 | 496 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
392 | | - int rc; |
---|
| 497 | + int rc = 0; |
---|
393 | 498 | |
---|
394 | 499 | vcpu->stat.deliver_cputm++; |
---|
395 | 500 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER, |
---|
396 | 501 | 0, 0); |
---|
397 | | - |
---|
398 | | - rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, |
---|
399 | | - (u16 *)__LC_EXT_INT_CODE); |
---|
400 | | - rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
401 | | - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
402 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
403 | | - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
404 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 502 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 503 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 504 | + vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER; |
---|
| 505 | + } else { |
---|
| 506 | + rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, |
---|
| 507 | + (u16 *)__LC_EXT_INT_CODE); |
---|
| 508 | + rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
| 509 | + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
| 510 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 511 | + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
| 512 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 513 | + } |
---|
405 | 514 | clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); |
---|
406 | 515 | return rc ? -EFAULT : 0; |
---|
407 | 516 | } |
---|
.. | .. |
---|
409 | 518 | static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu) |
---|
410 | 519 | { |
---|
411 | 520 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
412 | | - int rc; |
---|
| 521 | + int rc = 0; |
---|
413 | 522 | |
---|
414 | 523 | vcpu->stat.deliver_ckc++; |
---|
415 | 524 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP, |
---|
416 | 525 | 0, 0); |
---|
417 | | - |
---|
418 | | - rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, |
---|
419 | | - (u16 __user *)__LC_EXT_INT_CODE); |
---|
420 | | - rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
421 | | - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
422 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
423 | | - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
424 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 526 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 527 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 528 | + vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP; |
---|
| 529 | + } else { |
---|
| 530 | + rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP, |
---|
| 531 | + (u16 __user *)__LC_EXT_INT_CODE); |
---|
| 532 | + rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
| 533 | + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
| 534 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 535 | + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
| 536 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 537 | + } |
---|
425 | 538 | clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); |
---|
426 | 539 | return rc ? -EFAULT : 0; |
---|
427 | 540 | } |
---|
.. | .. |
---|
462 | 575 | freg_t fprs[NUM_FPRS]; |
---|
463 | 576 | union mci mci; |
---|
464 | 577 | int rc; |
---|
| 578 | + |
---|
| 579 | + /* |
---|
| 580 | + * All other possible payload for a machine check (e.g. the register |
---|
| 581 | + * contents in the save area) will be handled by the ultravisor, as |
---|
| 582 | + * the hypervisor does not have the needed information for |
---|
| 583 | + * protected guests. |
---|
| 584 | + */ |
---|
| 585 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 586 | + vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK; |
---|
| 587 | + vcpu->arch.sie_block->mcic = mchk->mcic; |
---|
| 588 | + vcpu->arch.sie_block->faddr = mchk->failing_storage_address; |
---|
| 589 | + vcpu->arch.sie_block->edc = mchk->ext_damage_code; |
---|
| 590 | + return 0; |
---|
| 591 | + } |
---|
465 | 592 | |
---|
466 | 593 | mci.val = mchk->mcic; |
---|
467 | 594 | /* take care of lazy register loading */ |
---|
.. | .. |
---|
606 | 733 | static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) |
---|
607 | 734 | { |
---|
608 | 735 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
609 | | - int rc; |
---|
| 736 | + int rc = 0; |
---|
610 | 737 | |
---|
611 | 738 | VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart"); |
---|
612 | 739 | vcpu->stat.deliver_restart_signal++; |
---|
613 | 740 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0); |
---|
614 | 741 | |
---|
615 | | - rc = write_guest_lc(vcpu, |
---|
616 | | - offsetof(struct lowcore, restart_old_psw), |
---|
617 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
618 | | - rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw), |
---|
619 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 742 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 743 | + vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART; |
---|
| 744 | + } else { |
---|
| 745 | + rc = write_guest_lc(vcpu, |
---|
| 746 | + offsetof(struct lowcore, restart_old_psw), |
---|
| 747 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 748 | + rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw), |
---|
| 749 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 750 | + } |
---|
620 | 751 | clear_bit(IRQ_PEND_RESTART, &li->pending_irqs); |
---|
621 | 752 | return rc ? -EFAULT : 0; |
---|
622 | 753 | } |
---|
.. | .. |
---|
658 | 789 | vcpu->stat.deliver_emergency_signal++; |
---|
659 | 790 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, |
---|
660 | 791 | cpu_addr, 0); |
---|
| 792 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 793 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 794 | + vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG; |
---|
| 795 | + vcpu->arch.sie_block->extcpuaddr = cpu_addr; |
---|
| 796 | + return 0; |
---|
| 797 | + } |
---|
661 | 798 | |
---|
662 | 799 | rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG, |
---|
663 | 800 | (u16 *)__LC_EXT_INT_CODE); |
---|
.. | .. |
---|
686 | 823 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, |
---|
687 | 824 | KVM_S390_INT_EXTERNAL_CALL, |
---|
688 | 825 | extcall.code, 0); |
---|
| 826 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 827 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 828 | + vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL; |
---|
| 829 | + vcpu->arch.sie_block->extcpuaddr = extcall.code; |
---|
| 830 | + return 0; |
---|
| 831 | + } |
---|
689 | 832 | |
---|
690 | 833 | rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL, |
---|
691 | 834 | (u16 *)__LC_EXT_INT_CODE); |
---|
.. | .. |
---|
695 | 838 | rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw, |
---|
696 | 839 | sizeof(psw_t)); |
---|
697 | 840 | return rc ? -EFAULT : 0; |
---|
| 841 | +} |
---|
| 842 | + |
---|
| 843 | +static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code) |
---|
| 844 | +{ |
---|
| 845 | + switch (code) { |
---|
| 846 | + case PGM_SPECIFICATION: |
---|
| 847 | + vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION; |
---|
| 848 | + break; |
---|
| 849 | + case PGM_OPERAND: |
---|
| 850 | + vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND; |
---|
| 851 | + break; |
---|
| 852 | + default: |
---|
| 853 | + return -EINVAL; |
---|
| 854 | + } |
---|
| 855 | + return 0; |
---|
698 | 856 | } |
---|
699 | 857 | |
---|
700 | 858 | static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) |
---|
.. | .. |
---|
717 | 875 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, |
---|
718 | 876 | pgm_info.code, 0); |
---|
719 | 877 | |
---|
| 878 | + /* PER is handled by the ultravisor */ |
---|
| 879 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 880 | + return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER); |
---|
| 881 | + |
---|
720 | 882 | switch (pgm_info.code & ~PGM_PER) { |
---|
721 | 883 | case PGM_AFX_TRANSLATION: |
---|
722 | 884 | case PGM_ASX_TRANSLATION: |
---|
.. | .. |
---|
728 | 890 | case PGM_PRIMARY_AUTHORITY: |
---|
729 | 891 | case PGM_SECONDARY_AUTHORITY: |
---|
730 | 892 | nullifying = true; |
---|
731 | | - /* fall through */ |
---|
| 893 | + fallthrough; |
---|
732 | 894 | case PGM_SPACE_SWITCH: |
---|
733 | 895 | rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, |
---|
734 | 896 | (u64 *)__LC_TRANS_EXC_CODE); |
---|
.. | .. |
---|
812 | 974 | return rc ? -EFAULT : 0; |
---|
813 | 975 | } |
---|
814 | 976 | |
---|
| 977 | +#define SCCB_MASK 0xFFFFFFF8 |
---|
| 978 | +#define SCCB_EVENT_PENDING 0x3 |
---|
| 979 | + |
---|
| 980 | +static int write_sclp(struct kvm_vcpu *vcpu, u32 parm) |
---|
| 981 | +{ |
---|
| 982 | + int rc; |
---|
| 983 | + |
---|
| 984 | + if (kvm_s390_pv_cpu_get_handle(vcpu)) { |
---|
| 985 | + vcpu->arch.sie_block->iictl = IICTL_CODE_EXT; |
---|
| 986 | + vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG; |
---|
| 987 | + vcpu->arch.sie_block->eiparams = parm; |
---|
| 988 | + return 0; |
---|
| 989 | + } |
---|
| 990 | + |
---|
| 991 | + rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); |
---|
| 992 | + rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
| 993 | + rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
| 994 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 995 | + rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
| 996 | + &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
| 997 | + rc |= put_guest_lc(vcpu, parm, |
---|
| 998 | + (u32 *)__LC_EXT_PARAMS); |
---|
| 999 | + |
---|
| 1000 | + return rc ? -EFAULT : 0; |
---|
| 1001 | +} |
---|
| 1002 | + |
---|
815 | 1003 | static int __must_check __deliver_service(struct kvm_vcpu *vcpu) |
---|
816 | 1004 | { |
---|
817 | 1005 | struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
---|
818 | 1006 | struct kvm_s390_ext_info ext; |
---|
819 | | - int rc = 0; |
---|
820 | 1007 | |
---|
821 | 1008 | spin_lock(&fi->lock); |
---|
822 | | - if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { |
---|
| 1009 | + if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) || |
---|
| 1010 | + !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) { |
---|
823 | 1011 | spin_unlock(&fi->lock); |
---|
824 | 1012 | return 0; |
---|
825 | 1013 | } |
---|
826 | 1014 | ext = fi->srv_signal; |
---|
827 | 1015 | memset(&fi->srv_signal, 0, sizeof(ext)); |
---|
828 | 1016 | clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs); |
---|
| 1017 | + clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs); |
---|
| 1018 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 1019 | + set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs); |
---|
829 | 1020 | spin_unlock(&fi->lock); |
---|
830 | 1021 | |
---|
831 | 1022 | VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x", |
---|
.. | .. |
---|
834 | 1025 | trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, |
---|
835 | 1026 | ext.ext_params, 0); |
---|
836 | 1027 | |
---|
837 | | - rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE); |
---|
838 | | - rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR); |
---|
839 | | - rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, |
---|
840 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
841 | | - rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, |
---|
842 | | - &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); |
---|
843 | | - rc |= put_guest_lc(vcpu, ext.ext_params, |
---|
844 | | - (u32 *)__LC_EXT_PARAMS); |
---|
| 1028 | + return write_sclp(vcpu, ext.ext_params); |
---|
| 1029 | +} |
---|
845 | 1030 | |
---|
846 | | - return rc ? -EFAULT : 0; |
---|
| 1031 | +static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu) |
---|
| 1032 | +{ |
---|
| 1033 | + struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; |
---|
| 1034 | + struct kvm_s390_ext_info ext; |
---|
| 1035 | + |
---|
| 1036 | + spin_lock(&fi->lock); |
---|
| 1037 | + if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) { |
---|
| 1038 | + spin_unlock(&fi->lock); |
---|
| 1039 | + return 0; |
---|
| 1040 | + } |
---|
| 1041 | + ext = fi->srv_signal; |
---|
| 1042 | + /* only clear the event bit */ |
---|
| 1043 | + fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING; |
---|
| 1044 | + clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs); |
---|
| 1045 | + spin_unlock(&fi->lock); |
---|
| 1046 | + |
---|
| 1047 | + VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event"); |
---|
| 1048 | + vcpu->stat.deliver_service_signal++; |
---|
| 1049 | + trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE, |
---|
| 1050 | + ext.ext_params, 0); |
---|
| 1051 | + |
---|
| 1052 | + return write_sclp(vcpu, SCCB_EVENT_PENDING); |
---|
847 | 1053 | } |
---|
848 | 1054 | |
---|
849 | 1055 | static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu) |
---|
.. | .. |
---|
938 | 1144 | { |
---|
939 | 1145 | int rc; |
---|
940 | 1146 | |
---|
| 1147 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 1148 | + vcpu->arch.sie_block->iictl = IICTL_CODE_IO; |
---|
| 1149 | + vcpu->arch.sie_block->subchannel_id = io->subchannel_id; |
---|
| 1150 | + vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr; |
---|
| 1151 | + vcpu->arch.sie_block->io_int_parm = io->io_int_parm; |
---|
| 1152 | + vcpu->arch.sie_block->io_int_word = io->io_int_word; |
---|
| 1153 | + return 0; |
---|
| 1154 | + } |
---|
| 1155 | + |
---|
941 | 1156 | rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID); |
---|
942 | 1157 | rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR); |
---|
943 | 1158 | rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM); |
---|
.. | .. |
---|
956 | 1171 | { |
---|
957 | 1172 | struct list_head *isc_list; |
---|
958 | 1173 | struct kvm_s390_float_interrupt *fi; |
---|
| 1174 | + struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int; |
---|
959 | 1175 | struct kvm_s390_interrupt_info *inti = NULL; |
---|
960 | 1176 | struct kvm_s390_io_info io; |
---|
961 | 1177 | u32 isc; |
---|
.. | .. |
---|
998 | 1214 | goto out; |
---|
999 | 1215 | } |
---|
1000 | 1216 | |
---|
1001 | | - if (vcpu->kvm->arch.gisa && |
---|
1002 | | - kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) { |
---|
| 1217 | + if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) { |
---|
1003 | 1218 | /* |
---|
1004 | 1219 | * in case an adapter interrupt was not delivered |
---|
1005 | 1220 | * in SIE context KVM will handle the delivery |
---|
.. | .. |
---|
1089 | 1304 | |
---|
1090 | 1305 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
---|
1091 | 1306 | { |
---|
| 1307 | + struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int; |
---|
1092 | 1308 | u64 sltime; |
---|
1093 | 1309 | |
---|
1094 | 1310 | vcpu->stat.exit_wait_state++; |
---|
.. | .. |
---|
1101 | 1317 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); |
---|
1102 | 1318 | return -EOPNOTSUPP; /* disabled wait */ |
---|
1103 | 1319 | } |
---|
| 1320 | + |
---|
| 1321 | + if (gi->origin && |
---|
| 1322 | + (gisa_get_ipm_or_restore_iam(gi) & |
---|
| 1323 | + vcpu->arch.sie_block->gcr[6] >> 24)) |
---|
| 1324 | + return 0; |
---|
1104 | 1325 | |
---|
1105 | 1326 | if (!ckc_interrupts_enabled(vcpu) && |
---|
1106 | 1327 | !cpu_timer_interrupts_enabled(vcpu)) { |
---|
.. | .. |
---|
1128 | 1349 | |
---|
1129 | 1350 | void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) |
---|
1130 | 1351 | { |
---|
1131 | | - /* |
---|
1132 | | - * We cannot move this into the if, as the CPU might be already |
---|
1133 | | - * in kvm_vcpu_block without having the waitqueue set (polling) |
---|
1134 | | - */ |
---|
1135 | 1352 | vcpu->valid_wakeup = true; |
---|
| 1353 | + kvm_vcpu_wake_up(vcpu); |
---|
| 1354 | + |
---|
1136 | 1355 | /* |
---|
1137 | | - * This is mostly to document, that the read in swait_active could |
---|
1138 | | - * be moved before other stores, leading to subtle races. |
---|
1139 | | - * All current users do not store or use an atomic like update |
---|
1140 | | - */ |
---|
1141 | | - smp_mb__after_atomic(); |
---|
1142 | | - if (swait_active(&vcpu->wq)) { |
---|
1143 | | - /* |
---|
1144 | | - * The vcpu gave up the cpu voluntarily, mark it as a good |
---|
1145 | | - * yield-candidate. |
---|
1146 | | - */ |
---|
1147 | | - vcpu->preempted = true; |
---|
1148 | | - swake_up_one(&vcpu->wq); |
---|
1149 | | - vcpu->stat.halt_wakeup++; |
---|
1150 | | - } |
---|
1151 | | - /* |
---|
1152 | | - * The VCPU might not be sleeping but is executing the VSIE. Let's |
---|
| 1356 | + * The VCPU might not be sleeping but rather executing VSIE. Let's |
---|
1153 | 1357 | * kick it, so it leaves the SIE to process the request. |
---|
1154 | 1358 | */ |
---|
1155 | 1359 | kvm_s390_vsie_kick(vcpu); |
---|
.. | .. |
---|
1250 | 1454 | case IRQ_PEND_EXT_SERVICE: |
---|
1251 | 1455 | rc = __deliver_service(vcpu); |
---|
1252 | 1456 | break; |
---|
| 1457 | + case IRQ_PEND_EXT_SERVICE_EV: |
---|
| 1458 | + rc = __deliver_service_ev(vcpu); |
---|
| 1459 | + break; |
---|
1253 | 1460 | case IRQ_PEND_PFAULT_DONE: |
---|
1254 | 1461 | rc = __deliver_pfault_done(vcpu); |
---|
1255 | 1462 | break; |
---|
.. | .. |
---|
1342 | 1549 | if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) |
---|
1343 | 1550 | return -EINVAL; |
---|
1344 | 1551 | |
---|
1345 | | - if (sclp.has_sigpif) |
---|
| 1552 | + if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu)) |
---|
1346 | 1553 | return sca_inject_ext_call(vcpu, src_id); |
---|
1347 | 1554 | |
---|
1348 | 1555 | if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) |
---|
.. | .. |
---|
1398 | 1605 | return 0; |
---|
1399 | 1606 | } |
---|
1400 | 1607 | |
---|
1401 | | -static int __inject_sigp_restart(struct kvm_vcpu *vcpu, |
---|
1402 | | - struct kvm_s390_irq *irq) |
---|
| 1608 | +static int __inject_sigp_restart(struct kvm_vcpu *vcpu) |
---|
1403 | 1609 | { |
---|
1404 | 1610 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
1405 | 1611 | |
---|
.. | .. |
---|
1533 | 1739 | |
---|
1534 | 1740 | static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid) |
---|
1535 | 1741 | { |
---|
| 1742 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
1536 | 1743 | unsigned long active_mask; |
---|
1537 | 1744 | int isc; |
---|
1538 | 1745 | |
---|
1539 | 1746 | if (schid) |
---|
1540 | 1747 | goto out; |
---|
1541 | | - if (!kvm->arch.gisa) |
---|
| 1748 | + if (!gi->origin) |
---|
1542 | 1749 | goto out; |
---|
1543 | 1750 | |
---|
1544 | | - active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32; |
---|
| 1751 | + active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32; |
---|
1545 | 1752 | while (active_mask) { |
---|
1546 | 1753 | isc = __fls(active_mask) ^ (BITS_PER_LONG - 1); |
---|
1547 | | - if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc)) |
---|
| 1754 | + if (gisa_tac_ipm_gisc(gi->origin, isc)) |
---|
1548 | 1755 | return isc; |
---|
1549 | 1756 | clear_bit_inv(isc, &active_mask); |
---|
1550 | 1757 | } |
---|
.. | .. |
---|
1567 | 1774 | struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, |
---|
1568 | 1775 | u64 isc_mask, u32 schid) |
---|
1569 | 1776 | { |
---|
| 1777 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
1570 | 1778 | struct kvm_s390_interrupt_info *inti, *tmp_inti; |
---|
1571 | 1779 | int isc; |
---|
1572 | 1780 | |
---|
.. | .. |
---|
1584 | 1792 | /* both types of interrupts present */ |
---|
1585 | 1793 | if (int_word_to_isc(inti->io.io_int_word) <= isc) { |
---|
1586 | 1794 | /* classical IO int with higher priority */ |
---|
1587 | | - kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); |
---|
| 1795 | + gisa_set_ipm_gisc(gi->origin, isc); |
---|
1588 | 1796 | goto out; |
---|
1589 | 1797 | } |
---|
1590 | 1798 | gisa_out: |
---|
.. | .. |
---|
1596 | 1804 | kvm_s390_reinject_io_int(kvm, inti); |
---|
1597 | 1805 | inti = tmp_inti; |
---|
1598 | 1806 | } else |
---|
1599 | | - kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); |
---|
| 1807 | + gisa_set_ipm_gisc(gi->origin, isc); |
---|
1600 | 1808 | out: |
---|
1601 | 1809 | return inti; |
---|
1602 | 1810 | } |
---|
1603 | | - |
---|
1604 | | -#define SCCB_MASK 0xFFFFFFF8 |
---|
1605 | | -#define SCCB_EVENT_PENDING 0x3 |
---|
1606 | 1811 | |
---|
1607 | 1812 | static int __inject_service(struct kvm *kvm, |
---|
1608 | 1813 | struct kvm_s390_interrupt_info *inti) |
---|
.. | .. |
---|
1612 | 1817 | kvm->stat.inject_service_signal++; |
---|
1613 | 1818 | spin_lock(&fi->lock); |
---|
1614 | 1819 | fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING; |
---|
| 1820 | + |
---|
| 1821 | + /* We always allow events, track them separately from the sccb ints */ |
---|
| 1822 | + if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING) |
---|
| 1823 | + set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs); |
---|
| 1824 | + |
---|
1615 | 1825 | /* |
---|
1616 | 1826 | * Early versions of the QEMU s390 bios will inject several |
---|
1617 | 1827 | * service interrupts after another without handling a |
---|
.. | .. |
---|
1685 | 1895 | |
---|
1686 | 1896 | static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) |
---|
1687 | 1897 | { |
---|
| 1898 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
1688 | 1899 | struct kvm_s390_float_interrupt *fi; |
---|
1689 | 1900 | struct list_head *list; |
---|
1690 | 1901 | int isc; |
---|
.. | .. |
---|
1692 | 1903 | kvm->stat.inject_io++; |
---|
1693 | 1904 | isc = int_word_to_isc(inti->io.io_int_word); |
---|
1694 | 1905 | |
---|
1695 | | - if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) { |
---|
| 1906 | + /* |
---|
| 1907 | + * Do not make use of gisa in protected mode. We do not use the lock |
---|
| 1908 | + * checking variant as this is just a performance optimization and we |
---|
| 1909 | + * do not hold the lock here. This is ok as the code will pick |
---|
| 1910 | + * interrupts from both "lists" for delivery. |
---|
| 1911 | + */ |
---|
| 1912 | + if (!kvm_s390_pv_get_handle(kvm) && |
---|
| 1913 | + gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) { |
---|
1696 | 1914 | VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc); |
---|
1697 | | - kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc); |
---|
| 1915 | + gisa_set_ipm_gisc(gi->origin, isc); |
---|
1698 | 1916 | kfree(inti); |
---|
1699 | 1917 | return 0; |
---|
1700 | 1918 | } |
---|
.. | .. |
---|
1726 | 1944 | */ |
---|
1727 | 1945 | static void __floating_irq_kick(struct kvm *kvm, u64 type) |
---|
1728 | 1946 | { |
---|
1729 | | - struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
---|
1730 | 1947 | struct kvm_vcpu *dst_vcpu; |
---|
1731 | 1948 | int sigcpu, online_vcpus, nr_tries = 0; |
---|
1732 | 1949 | |
---|
.. | .. |
---|
1735 | 1952 | return; |
---|
1736 | 1953 | |
---|
1737 | 1954 | /* find idle VCPUs first, then round robin */ |
---|
1738 | | - sigcpu = find_first_bit(fi->idle_mask, online_vcpus); |
---|
| 1955 | + sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus); |
---|
1739 | 1956 | if (sigcpu == online_vcpus) { |
---|
1740 | 1957 | do { |
---|
1741 | | - sigcpu = fi->next_rr_cpu; |
---|
1742 | | - fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus; |
---|
| 1958 | + sigcpu = kvm->arch.float_int.next_rr_cpu++; |
---|
| 1959 | + kvm->arch.float_int.next_rr_cpu %= online_vcpus; |
---|
1743 | 1960 | /* avoid endless loops if all vcpus are stopped */ |
---|
1744 | 1961 | if (nr_tries++ >= online_vcpus) |
---|
1745 | 1962 | return; |
---|
.. | .. |
---|
1753 | 1970 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT); |
---|
1754 | 1971 | break; |
---|
1755 | 1972 | case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: |
---|
1756 | | - if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa)) |
---|
| 1973 | + if (!(type & KVM_S390_INT_IO_AI_MASK && |
---|
| 1974 | + kvm->arch.gisa_int.origin) || |
---|
| 1975 | + kvm_s390_pv_cpu_get_handle(dst_vcpu)) |
---|
1757 | 1976 | kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); |
---|
1758 | 1977 | break; |
---|
1759 | 1978 | default: |
---|
.. | .. |
---|
1932 | 2151 | rc = __inject_sigp_stop(vcpu, irq); |
---|
1933 | 2152 | break; |
---|
1934 | 2153 | case KVM_S390_RESTART: |
---|
1935 | | - rc = __inject_sigp_restart(vcpu, irq); |
---|
| 2154 | + rc = __inject_sigp_restart(vcpu); |
---|
1936 | 2155 | break; |
---|
1937 | 2156 | case KVM_S390_INT_CLOCK_COMP: |
---|
1938 | 2157 | rc = __inject_ckc(vcpu); |
---|
.. | .. |
---|
2006 | 2225 | struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; |
---|
2007 | 2226 | int i; |
---|
2008 | 2227 | |
---|
| 2228 | + mutex_lock(&kvm->lock); |
---|
| 2229 | + if (!kvm_s390_pv_is_protected(kvm)) |
---|
| 2230 | + fi->masked_irqs = 0; |
---|
| 2231 | + mutex_unlock(&kvm->lock); |
---|
2009 | 2232 | spin_lock(&fi->lock); |
---|
2010 | 2233 | fi->pending_irqs = 0; |
---|
2011 | 2234 | memset(&fi->srv_signal, 0, sizeof(fi->srv_signal)); |
---|
.. | .. |
---|
2020 | 2243 | |
---|
2021 | 2244 | static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) |
---|
2022 | 2245 | { |
---|
| 2246 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
2023 | 2247 | struct kvm_s390_interrupt_info *inti; |
---|
2024 | 2248 | struct kvm_s390_float_interrupt *fi; |
---|
2025 | 2249 | struct kvm_s390_irq *buf; |
---|
.. | .. |
---|
2043 | 2267 | |
---|
2044 | 2268 | max_irqs = len / sizeof(struct kvm_s390_irq); |
---|
2045 | 2269 | |
---|
2046 | | - if (kvm->arch.gisa && |
---|
2047 | | - kvm_s390_gisa_get_ipm(kvm->arch.gisa)) { |
---|
| 2270 | + if (gi->origin && gisa_get_ipm(gi->origin)) { |
---|
2048 | 2271 | for (i = 0; i <= MAX_ISC; i++) { |
---|
2049 | 2272 | if (n == max_irqs) { |
---|
2050 | 2273 | /* signal userspace to try again */ |
---|
2051 | 2274 | ret = -ENOMEM; |
---|
2052 | 2275 | goto out_nolock; |
---|
2053 | 2276 | } |
---|
2054 | | - if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) { |
---|
| 2277 | + if (gisa_tac_ipm_gisc(gi->origin, i)) { |
---|
2055 | 2278 | irq = (struct kvm_s390_irq *) &buf[n]; |
---|
2056 | 2279 | irq->type = KVM_S390_INT_IO(1, 0, 0, 0); |
---|
2057 | 2280 | irq->u.io.io_int_word = isc_to_int_word(i); |
---|
.. | .. |
---|
2072 | 2295 | n++; |
---|
2073 | 2296 | } |
---|
2074 | 2297 | } |
---|
2075 | | - if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) { |
---|
| 2298 | + if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) || |
---|
| 2299 | + test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) { |
---|
2076 | 2300 | if (n == max_irqs) { |
---|
2077 | 2301 | /* signal userspace to try again */ |
---|
2078 | 2302 | ret = -ENOMEM; |
---|
.. | .. |
---|
2226 | 2450 | { |
---|
2227 | 2451 | if (id >= MAX_S390_IO_ADAPTERS) |
---|
2228 | 2452 | return NULL; |
---|
| 2453 | + id = array_index_nospec(id, MAX_S390_IO_ADAPTERS); |
---|
2229 | 2454 | return kvm->arch.adapters[id]; |
---|
2230 | 2455 | } |
---|
2231 | 2456 | |
---|
.. | .. |
---|
2239 | 2464 | (void __user *)attr->addr, sizeof(adapter_info))) |
---|
2240 | 2465 | return -EFAULT; |
---|
2241 | 2466 | |
---|
2242 | | - if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || |
---|
2243 | | - (dev->kvm->arch.adapters[adapter_info.id] != NULL)) |
---|
| 2467 | + if (adapter_info.id >= MAX_S390_IO_ADAPTERS) |
---|
| 2468 | + return -EINVAL; |
---|
| 2469 | + |
---|
| 2470 | + adapter_info.id = array_index_nospec(adapter_info.id, |
---|
| 2471 | + MAX_S390_IO_ADAPTERS); |
---|
| 2472 | + |
---|
| 2473 | + if (dev->kvm->arch.adapters[adapter_info.id] != NULL) |
---|
2244 | 2474 | return -EINVAL; |
---|
2245 | 2475 | |
---|
2246 | 2476 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
---|
2247 | 2477 | if (!adapter) |
---|
2248 | 2478 | return -ENOMEM; |
---|
2249 | 2479 | |
---|
2250 | | - INIT_LIST_HEAD(&adapter->maps); |
---|
2251 | | - init_rwsem(&adapter->maps_lock); |
---|
2252 | | - atomic_set(&adapter->nr_maps, 0); |
---|
2253 | 2480 | adapter->id = adapter_info.id; |
---|
2254 | 2481 | adapter->isc = adapter_info.isc; |
---|
2255 | 2482 | adapter->maskable = adapter_info.maskable; |
---|
.. | .. |
---|
2274 | 2501 | return ret; |
---|
2275 | 2502 | } |
---|
2276 | 2503 | |
---|
2277 | | -static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr) |
---|
2278 | | -{ |
---|
2279 | | - struct s390_io_adapter *adapter = get_io_adapter(kvm, id); |
---|
2280 | | - struct s390_map_info *map; |
---|
2281 | | - int ret; |
---|
2282 | | - |
---|
2283 | | - if (!adapter || !addr) |
---|
2284 | | - return -EINVAL; |
---|
2285 | | - |
---|
2286 | | - map = kzalloc(sizeof(*map), GFP_KERNEL); |
---|
2287 | | - if (!map) { |
---|
2288 | | - ret = -ENOMEM; |
---|
2289 | | - goto out; |
---|
2290 | | - } |
---|
2291 | | - INIT_LIST_HEAD(&map->list); |
---|
2292 | | - map->guest_addr = addr; |
---|
2293 | | - map->addr = gmap_translate(kvm->arch.gmap, addr); |
---|
2294 | | - if (map->addr == -EFAULT) { |
---|
2295 | | - ret = -EFAULT; |
---|
2296 | | - goto out; |
---|
2297 | | - } |
---|
2298 | | - ret = get_user_pages_fast(map->addr, 1, 1, &map->page); |
---|
2299 | | - if (ret < 0) |
---|
2300 | | - goto out; |
---|
2301 | | - BUG_ON(ret != 1); |
---|
2302 | | - down_write(&adapter->maps_lock); |
---|
2303 | | - if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) { |
---|
2304 | | - list_add_tail(&map->list, &adapter->maps); |
---|
2305 | | - ret = 0; |
---|
2306 | | - } else { |
---|
2307 | | - put_page(map->page); |
---|
2308 | | - ret = -EINVAL; |
---|
2309 | | - } |
---|
2310 | | - up_write(&adapter->maps_lock); |
---|
2311 | | -out: |
---|
2312 | | - if (ret) |
---|
2313 | | - kfree(map); |
---|
2314 | | - return ret; |
---|
2315 | | -} |
---|
2316 | | - |
---|
2317 | | -static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr) |
---|
2318 | | -{ |
---|
2319 | | - struct s390_io_adapter *adapter = get_io_adapter(kvm, id); |
---|
2320 | | - struct s390_map_info *map, *tmp; |
---|
2321 | | - int found = 0; |
---|
2322 | | - |
---|
2323 | | - if (!adapter || !addr) |
---|
2324 | | - return -EINVAL; |
---|
2325 | | - |
---|
2326 | | - down_write(&adapter->maps_lock); |
---|
2327 | | - list_for_each_entry_safe(map, tmp, &adapter->maps, list) { |
---|
2328 | | - if (map->guest_addr == addr) { |
---|
2329 | | - found = 1; |
---|
2330 | | - atomic_dec(&adapter->nr_maps); |
---|
2331 | | - list_del(&map->list); |
---|
2332 | | - put_page(map->page); |
---|
2333 | | - kfree(map); |
---|
2334 | | - break; |
---|
2335 | | - } |
---|
2336 | | - } |
---|
2337 | | - up_write(&adapter->maps_lock); |
---|
2338 | | - |
---|
2339 | | - return found ? 0 : -EINVAL; |
---|
2340 | | -} |
---|
2341 | | - |
---|
2342 | 2504 | void kvm_s390_destroy_adapters(struct kvm *kvm) |
---|
2343 | 2505 | { |
---|
2344 | 2506 | int i; |
---|
2345 | | - struct s390_map_info *map, *tmp; |
---|
2346 | 2507 | |
---|
2347 | | - for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) { |
---|
2348 | | - if (!kvm->arch.adapters[i]) |
---|
2349 | | - continue; |
---|
2350 | | - list_for_each_entry_safe(map, tmp, |
---|
2351 | | - &kvm->arch.adapters[i]->maps, list) { |
---|
2352 | | - list_del(&map->list); |
---|
2353 | | - put_page(map->page); |
---|
2354 | | - kfree(map); |
---|
2355 | | - } |
---|
| 2508 | + for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) |
---|
2356 | 2509 | kfree(kvm->arch.adapters[i]); |
---|
2357 | | - } |
---|
2358 | 2510 | } |
---|
2359 | 2511 | |
---|
2360 | 2512 | static int modify_io_adapter(struct kvm_device *dev, |
---|
.. | .. |
---|
2376 | 2528 | if (ret > 0) |
---|
2377 | 2529 | ret = 0; |
---|
2378 | 2530 | break; |
---|
| 2531 | + /* |
---|
| 2532 | + * The following operations are no longer needed and therefore no-ops. |
---|
| 2533 | + * The gpa to hva translation is done when an IRQ route is set up. The |
---|
| 2534 | + * set_irq code uses get_user_pages_remote() to do the actual write. |
---|
| 2535 | + */ |
---|
2379 | 2536 | case KVM_S390_IO_ADAPTER_MAP: |
---|
2380 | | - ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr); |
---|
2381 | | - break; |
---|
2382 | 2537 | case KVM_S390_IO_ADAPTER_UNMAP: |
---|
2383 | | - ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr); |
---|
| 2538 | + ret = 0; |
---|
2384 | 2539 | break; |
---|
2385 | 2540 | default: |
---|
2386 | 2541 | ret = -EINVAL; |
---|
.. | .. |
---|
2619 | 2774 | return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit; |
---|
2620 | 2775 | } |
---|
2621 | 2776 | |
---|
2622 | | -static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter, |
---|
2623 | | - u64 addr) |
---|
| 2777 | +static struct page *get_map_page(struct kvm *kvm, u64 uaddr) |
---|
2624 | 2778 | { |
---|
2625 | | - struct s390_map_info *map; |
---|
| 2779 | + struct page *page = NULL; |
---|
2626 | 2780 | |
---|
2627 | | - if (!adapter) |
---|
2628 | | - return NULL; |
---|
2629 | | - |
---|
2630 | | - list_for_each_entry(map, &adapter->maps, list) { |
---|
2631 | | - if (map->guest_addr == addr) |
---|
2632 | | - return map; |
---|
2633 | | - } |
---|
2634 | | - return NULL; |
---|
| 2781 | + mmap_read_lock(kvm->mm); |
---|
| 2782 | + get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, |
---|
| 2783 | + &page, NULL, NULL); |
---|
| 2784 | + mmap_read_unlock(kvm->mm); |
---|
| 2785 | + return page; |
---|
2635 | 2786 | } |
---|
2636 | 2787 | |
---|
2637 | 2788 | static int adapter_indicators_set(struct kvm *kvm, |
---|
.. | .. |
---|
2640 | 2791 | { |
---|
2641 | 2792 | unsigned long bit; |
---|
2642 | 2793 | int summary_set, idx; |
---|
2643 | | - struct s390_map_info *info; |
---|
| 2794 | + struct page *ind_page, *summary_page; |
---|
2644 | 2795 | void *map; |
---|
2645 | 2796 | |
---|
2646 | | - info = get_map_info(adapter, adapter_int->ind_addr); |
---|
2647 | | - if (!info) |
---|
| 2797 | + ind_page = get_map_page(kvm, adapter_int->ind_addr); |
---|
| 2798 | + if (!ind_page) |
---|
2648 | 2799 | return -1; |
---|
2649 | | - map = page_address(info->page); |
---|
2650 | | - bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap); |
---|
2651 | | - set_bit(bit, map); |
---|
2652 | | - idx = srcu_read_lock(&kvm->srcu); |
---|
2653 | | - mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); |
---|
2654 | | - set_page_dirty_lock(info->page); |
---|
2655 | | - info = get_map_info(adapter, adapter_int->summary_addr); |
---|
2656 | | - if (!info) { |
---|
2657 | | - srcu_read_unlock(&kvm->srcu, idx); |
---|
| 2800 | + summary_page = get_map_page(kvm, adapter_int->summary_addr); |
---|
| 2801 | + if (!summary_page) { |
---|
| 2802 | + put_page(ind_page); |
---|
2658 | 2803 | return -1; |
---|
2659 | 2804 | } |
---|
2660 | | - map = page_address(info->page); |
---|
2661 | | - bit = get_ind_bit(info->addr, adapter_int->summary_offset, |
---|
2662 | | - adapter->swap); |
---|
| 2805 | + |
---|
| 2806 | + idx = srcu_read_lock(&kvm->srcu); |
---|
| 2807 | + map = page_address(ind_page); |
---|
| 2808 | + bit = get_ind_bit(adapter_int->ind_addr, |
---|
| 2809 | + adapter_int->ind_offset, adapter->swap); |
---|
| 2810 | + set_bit(bit, map); |
---|
| 2811 | + mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT); |
---|
| 2812 | + set_page_dirty_lock(ind_page); |
---|
| 2813 | + map = page_address(summary_page); |
---|
| 2814 | + bit = get_ind_bit(adapter_int->summary_addr, |
---|
| 2815 | + adapter_int->summary_offset, adapter->swap); |
---|
2663 | 2816 | summary_set = test_and_set_bit(bit, map); |
---|
2664 | | - mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); |
---|
2665 | | - set_page_dirty_lock(info->page); |
---|
| 2817 | + mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT); |
---|
| 2818 | + set_page_dirty_lock(summary_page); |
---|
2666 | 2819 | srcu_read_unlock(&kvm->srcu, idx); |
---|
| 2820 | + |
---|
| 2821 | + put_page(ind_page); |
---|
| 2822 | + put_page(summary_page); |
---|
2667 | 2823 | return summary_set ? 0 : 1; |
---|
2668 | 2824 | } |
---|
2669 | 2825 | |
---|
.. | .. |
---|
2685 | 2841 | adapter = get_io_adapter(kvm, e->adapter.adapter_id); |
---|
2686 | 2842 | if (!adapter) |
---|
2687 | 2843 | return -1; |
---|
2688 | | - down_read(&adapter->maps_lock); |
---|
2689 | 2844 | ret = adapter_indicators_set(kvm, adapter, &e->adapter); |
---|
2690 | | - up_read(&adapter->maps_lock); |
---|
2691 | 2845 | if ((ret > 0) && !adapter->masked) { |
---|
2692 | 2846 | ret = kvm_s390_inject_airq(kvm, adapter); |
---|
2693 | 2847 | if (ret == 0) |
---|
.. | .. |
---|
2738 | 2892 | struct kvm_kernel_irq_routing_entry *e, |
---|
2739 | 2893 | const struct kvm_irq_routing_entry *ue) |
---|
2740 | 2894 | { |
---|
2741 | | - int ret; |
---|
| 2895 | + u64 uaddr; |
---|
2742 | 2896 | |
---|
2743 | 2897 | switch (ue->type) { |
---|
| 2898 | + /* we store the userspace addresses instead of the guest addresses */ |
---|
2744 | 2899 | case KVM_IRQ_ROUTING_S390_ADAPTER: |
---|
2745 | 2900 | e->set = set_adapter_int; |
---|
2746 | | - e->adapter.summary_addr = ue->u.adapter.summary_addr; |
---|
2747 | | - e->adapter.ind_addr = ue->u.adapter.ind_addr; |
---|
| 2901 | + uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr); |
---|
| 2902 | + if (uaddr == -EFAULT) |
---|
| 2903 | + return -EFAULT; |
---|
| 2904 | + e->adapter.summary_addr = uaddr; |
---|
| 2905 | + uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr); |
---|
| 2906 | + if (uaddr == -EFAULT) |
---|
| 2907 | + return -EFAULT; |
---|
| 2908 | + e->adapter.ind_addr = uaddr; |
---|
2748 | 2909 | e->adapter.summary_offset = ue->u.adapter.summary_offset; |
---|
2749 | 2910 | e->adapter.ind_offset = ue->u.adapter.ind_offset; |
---|
2750 | 2911 | e->adapter.adapter_id = ue->u.adapter.adapter_id; |
---|
2751 | | - ret = 0; |
---|
2752 | | - break; |
---|
| 2912 | + return 0; |
---|
2753 | 2913 | default: |
---|
2754 | | - ret = -EINVAL; |
---|
| 2914 | + return -EINVAL; |
---|
2755 | 2915 | } |
---|
2756 | | - |
---|
2757 | | - return ret; |
---|
2758 | 2916 | } |
---|
2759 | 2917 | |
---|
2760 | 2918 | int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, |
---|
.. | .. |
---|
2848 | 3006 | int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len) |
---|
2849 | 3007 | { |
---|
2850 | 3008 | int scn; |
---|
2851 | | - unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)]; |
---|
| 3009 | + DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS); |
---|
2852 | 3010 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
---|
2853 | 3011 | unsigned long pending_irqs; |
---|
2854 | 3012 | struct kvm_s390_irq irq; |
---|
.. | .. |
---|
2901 | 3059 | return n; |
---|
2902 | 3060 | } |
---|
2903 | 3061 | |
---|
| 3062 | +static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask) |
---|
| 3063 | +{ |
---|
| 3064 | + int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus); |
---|
| 3065 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3066 | + struct kvm_vcpu *vcpu; |
---|
| 3067 | + u8 vcpu_isc_mask; |
---|
| 3068 | + |
---|
| 3069 | + for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) { |
---|
| 3070 | + vcpu = kvm_get_vcpu(kvm, vcpu_idx); |
---|
| 3071 | + if (psw_ioint_disabled(vcpu)) |
---|
| 3072 | + continue; |
---|
| 3073 | + vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24); |
---|
| 3074 | + if (deliverable_mask & vcpu_isc_mask) { |
---|
| 3075 | + /* lately kicked but not yet running */ |
---|
| 3076 | + if (test_and_set_bit(vcpu_idx, gi->kicked_mask)) |
---|
| 3077 | + return; |
---|
| 3078 | + kvm_s390_vcpu_wakeup(vcpu); |
---|
| 3079 | + return; |
---|
| 3080 | + } |
---|
| 3081 | + } |
---|
| 3082 | +} |
---|
| 3083 | + |
---|
| 3084 | +static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer) |
---|
| 3085 | +{ |
---|
| 3086 | + struct kvm_s390_gisa_interrupt *gi = |
---|
| 3087 | + container_of(timer, struct kvm_s390_gisa_interrupt, timer); |
---|
| 3088 | + struct kvm *kvm = |
---|
| 3089 | + container_of(gi->origin, struct sie_page2, gisa)->kvm; |
---|
| 3090 | + u8 pending_mask; |
---|
| 3091 | + |
---|
| 3092 | + pending_mask = gisa_get_ipm_or_restore_iam(gi); |
---|
| 3093 | + if (pending_mask) { |
---|
| 3094 | + __airqs_kick_single_vcpu(kvm, pending_mask); |
---|
| 3095 | + hrtimer_forward_now(timer, ns_to_ktime(gi->expires)); |
---|
| 3096 | + return HRTIMER_RESTART; |
---|
| 3097 | + } |
---|
| 3098 | + |
---|
| 3099 | + return HRTIMER_NORESTART; |
---|
| 3100 | +} |
---|
| 3101 | + |
---|
| 3102 | +#define NULL_GISA_ADDR 0x00000000UL |
---|
| 3103 | +#define NONE_GISA_ADDR 0x00000001UL |
---|
| 3104 | +#define GISA_ADDR_MASK 0xfffff000UL |
---|
| 3105 | + |
---|
| 3106 | +static void process_gib_alert_list(void) |
---|
| 3107 | +{ |
---|
| 3108 | + struct kvm_s390_gisa_interrupt *gi; |
---|
| 3109 | + struct kvm_s390_gisa *gisa; |
---|
| 3110 | + struct kvm *kvm; |
---|
| 3111 | + u32 final, origin = 0UL; |
---|
| 3112 | + |
---|
| 3113 | + do { |
---|
| 3114 | + /* |
---|
| 3115 | + * If the NONE_GISA_ADDR is still stored in the alert list |
---|
| 3116 | + * origin, we will leave the outer loop. No further GISA has |
---|
| 3117 | + * been added to the alert list by millicode while processing |
---|
| 3118 | + * the current alert list. |
---|
| 3119 | + */ |
---|
| 3120 | + final = (origin & NONE_GISA_ADDR); |
---|
| 3121 | + /* |
---|
| 3122 | + * Cut off the alert list and store the NONE_GISA_ADDR in the |
---|
| 3123 | + * alert list origin to avoid further GAL interruptions. |
---|
| 3124 | + * A new alert list can be build up by millicode in parallel |
---|
| 3125 | + * for guests not in the yet cut-off alert list. When in the |
---|
| 3126 | + * final loop, store the NULL_GISA_ADDR instead. This will re- |
---|
| 3127 | + * enable GAL interruptions on the host again. |
---|
| 3128 | + */ |
---|
| 3129 | + origin = xchg(&gib->alert_list_origin, |
---|
| 3130 | + (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR); |
---|
| 3131 | + /* |
---|
| 3132 | + * Loop through the just cut-off alert list and start the |
---|
| 3133 | + * gisa timers to kick idle vcpus to consume the pending |
---|
| 3134 | + * interruptions asap. |
---|
| 3135 | + */ |
---|
| 3136 | + while (origin & GISA_ADDR_MASK) { |
---|
| 3137 | + gisa = (struct kvm_s390_gisa *)(u64)origin; |
---|
| 3138 | + origin = gisa->next_alert; |
---|
| 3139 | + gisa->next_alert = (u32)(u64)gisa; |
---|
| 3140 | + kvm = container_of(gisa, struct sie_page2, gisa)->kvm; |
---|
| 3141 | + gi = &kvm->arch.gisa_int; |
---|
| 3142 | + if (hrtimer_active(&gi->timer)) |
---|
| 3143 | + hrtimer_cancel(&gi->timer); |
---|
| 3144 | + hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL); |
---|
| 3145 | + } |
---|
| 3146 | + } while (!final); |
---|
| 3147 | + |
---|
| 3148 | +} |
---|
| 3149 | + |
---|
2904 | 3150 | void kvm_s390_gisa_clear(struct kvm *kvm) |
---|
2905 | 3151 | { |
---|
2906 | | - if (kvm->arch.gisa) { |
---|
2907 | | - memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa)); |
---|
2908 | | - kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa; |
---|
2909 | | - VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa); |
---|
2910 | | - } |
---|
| 3152 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3153 | + |
---|
| 3154 | + if (!gi->origin) |
---|
| 3155 | + return; |
---|
| 3156 | + gisa_clear_ipm(gi->origin); |
---|
| 3157 | + VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin); |
---|
2911 | 3158 | } |
---|
2912 | 3159 | |
---|
2913 | 3160 | void kvm_s390_gisa_init(struct kvm *kvm) |
---|
2914 | 3161 | { |
---|
2915 | | - if (css_general_characteristics.aiv) { |
---|
2916 | | - kvm->arch.gisa = &kvm->arch.sie_page2->gisa; |
---|
2917 | | - VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa); |
---|
2918 | | - kvm_s390_gisa_clear(kvm); |
---|
2919 | | - } |
---|
| 3162 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3163 | + |
---|
| 3164 | + if (!css_general_characteristics.aiv) |
---|
| 3165 | + return; |
---|
| 3166 | + gi->origin = &kvm->arch.sie_page2->gisa; |
---|
| 3167 | + gi->alert.mask = 0; |
---|
| 3168 | + spin_lock_init(&gi->alert.ref_lock); |
---|
| 3169 | + gi->expires = 50 * 1000; /* 50 usec */ |
---|
| 3170 | + hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
---|
| 3171 | + gi->timer.function = gisa_vcpu_kicker; |
---|
| 3172 | + memset(gi->origin, 0, sizeof(struct kvm_s390_gisa)); |
---|
| 3173 | + gi->origin->next_alert = (u32)(u64)gi->origin; |
---|
| 3174 | + VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin); |
---|
2920 | 3175 | } |
---|
2921 | 3176 | |
---|
2922 | 3177 | void kvm_s390_gisa_destroy(struct kvm *kvm) |
---|
2923 | 3178 | { |
---|
2924 | | - if (!kvm->arch.gisa) |
---|
| 3179 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3180 | + |
---|
| 3181 | + if (!gi->origin) |
---|
2925 | 3182 | return; |
---|
2926 | | - kvm->arch.gisa = NULL; |
---|
| 3183 | + if (gi->alert.mask) |
---|
| 3184 | + KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x", |
---|
| 3185 | + kvm, gi->alert.mask); |
---|
| 3186 | + while (gisa_in_alert_list(gi->origin)) |
---|
| 3187 | + cpu_relax(); |
---|
| 3188 | + hrtimer_cancel(&gi->timer); |
---|
| 3189 | + gi->origin = NULL; |
---|
| 3190 | +} |
---|
| 3191 | + |
---|
| 3192 | +/** |
---|
| 3193 | + * kvm_s390_gisc_register - register a guest ISC |
---|
| 3194 | + * |
---|
| 3195 | + * @kvm: the kernel vm to work with |
---|
| 3196 | + * @gisc: the guest interruption sub class to register |
---|
| 3197 | + * |
---|
| 3198 | + * The function extends the vm specific alert mask to use. |
---|
| 3199 | + * The effective IAM mask in the GISA is updated as well |
---|
| 3200 | + * in case the GISA is not part of the GIB alert list. |
---|
| 3201 | + * It will be updated latest when the IAM gets restored |
---|
| 3202 | + * by gisa_get_ipm_or_restore_iam(). |
---|
| 3203 | + * |
---|
| 3204 | + * Returns: the nonspecific ISC (NISC) the gib alert mechanism |
---|
| 3205 | + * has registered with the channel subsystem. |
---|
| 3206 | + * -ENODEV in case the vm uses no GISA |
---|
| 3207 | + * -ERANGE in case the guest ISC is invalid |
---|
| 3208 | + */ |
---|
| 3209 | +int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc) |
---|
| 3210 | +{ |
---|
| 3211 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3212 | + |
---|
| 3213 | + if (!gi->origin) |
---|
| 3214 | + return -ENODEV; |
---|
| 3215 | + if (gisc > MAX_ISC) |
---|
| 3216 | + return -ERANGE; |
---|
| 3217 | + |
---|
| 3218 | + spin_lock(&gi->alert.ref_lock); |
---|
| 3219 | + gi->alert.ref_count[gisc]++; |
---|
| 3220 | + if (gi->alert.ref_count[gisc] == 1) { |
---|
| 3221 | + gi->alert.mask |= 0x80 >> gisc; |
---|
| 3222 | + gisa_set_iam(gi->origin, gi->alert.mask); |
---|
| 3223 | + } |
---|
| 3224 | + spin_unlock(&gi->alert.ref_lock); |
---|
| 3225 | + |
---|
| 3226 | + return gib->nisc; |
---|
| 3227 | +} |
---|
| 3228 | +EXPORT_SYMBOL_GPL(kvm_s390_gisc_register); |
---|
| 3229 | + |
---|
| 3230 | +/** |
---|
| 3231 | + * kvm_s390_gisc_unregister - unregister a guest ISC |
---|
| 3232 | + * |
---|
| 3233 | + * @kvm: the kernel vm to work with |
---|
| 3234 | + * @gisc: the guest interruption sub class to register |
---|
| 3235 | + * |
---|
| 3236 | + * The function reduces the vm specific alert mask to use. |
---|
| 3237 | + * The effective IAM mask in the GISA is updated as well |
---|
| 3238 | + * in case the GISA is not part of the GIB alert list. |
---|
| 3239 | + * It will be updated latest when the IAM gets restored |
---|
| 3240 | + * by gisa_get_ipm_or_restore_iam(). |
---|
| 3241 | + * |
---|
| 3242 | + * Returns: the nonspecific ISC (NISC) the gib alert mechanism |
---|
| 3243 | + * has registered with the channel subsystem. |
---|
| 3244 | + * -ENODEV in case the vm uses no GISA |
---|
| 3245 | + * -ERANGE in case the guest ISC is invalid |
---|
| 3246 | + * -EINVAL in case the guest ISC is not registered |
---|
| 3247 | + */ |
---|
| 3248 | +int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc) |
---|
| 3249 | +{ |
---|
| 3250 | + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int; |
---|
| 3251 | + int rc = 0; |
---|
| 3252 | + |
---|
| 3253 | + if (!gi->origin) |
---|
| 3254 | + return -ENODEV; |
---|
| 3255 | + if (gisc > MAX_ISC) |
---|
| 3256 | + return -ERANGE; |
---|
| 3257 | + |
---|
| 3258 | + spin_lock(&gi->alert.ref_lock); |
---|
| 3259 | + if (gi->alert.ref_count[gisc] == 0) { |
---|
| 3260 | + rc = -EINVAL; |
---|
| 3261 | + goto out; |
---|
| 3262 | + } |
---|
| 3263 | + gi->alert.ref_count[gisc]--; |
---|
| 3264 | + if (gi->alert.ref_count[gisc] == 0) { |
---|
| 3265 | + gi->alert.mask &= ~(0x80 >> gisc); |
---|
| 3266 | + gisa_set_iam(gi->origin, gi->alert.mask); |
---|
| 3267 | + } |
---|
| 3268 | +out: |
---|
| 3269 | + spin_unlock(&gi->alert.ref_lock); |
---|
| 3270 | + |
---|
| 3271 | + return rc; |
---|
| 3272 | +} |
---|
| 3273 | +EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister); |
---|
| 3274 | + |
---|
| 3275 | +static void gib_alert_irq_handler(struct airq_struct *airq, bool floating) |
---|
| 3276 | +{ |
---|
| 3277 | + inc_irq_stat(IRQIO_GAL); |
---|
| 3278 | + process_gib_alert_list(); |
---|
| 3279 | +} |
---|
| 3280 | + |
---|
| 3281 | +static struct airq_struct gib_alert_irq = { |
---|
| 3282 | + .handler = gib_alert_irq_handler, |
---|
| 3283 | + .lsi_ptr = &gib_alert_irq.lsi_mask, |
---|
| 3284 | +}; |
---|
| 3285 | + |
---|
| 3286 | +void kvm_s390_gib_destroy(void) |
---|
| 3287 | +{ |
---|
| 3288 | + if (!gib) |
---|
| 3289 | + return; |
---|
| 3290 | + chsc_sgib(0); |
---|
| 3291 | + unregister_adapter_interrupt(&gib_alert_irq); |
---|
| 3292 | + free_page((unsigned long)gib); |
---|
| 3293 | + gib = NULL; |
---|
| 3294 | +} |
---|
| 3295 | + |
---|
| 3296 | +int kvm_s390_gib_init(u8 nisc) |
---|
| 3297 | +{ |
---|
| 3298 | + int rc = 0; |
---|
| 3299 | + |
---|
| 3300 | + if (!css_general_characteristics.aiv) { |
---|
| 3301 | + KVM_EVENT(3, "%s", "gib not initialized, no AIV facility"); |
---|
| 3302 | + goto out; |
---|
| 3303 | + } |
---|
| 3304 | + |
---|
| 3305 | + gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
---|
| 3306 | + if (!gib) { |
---|
| 3307 | + rc = -ENOMEM; |
---|
| 3308 | + goto out; |
---|
| 3309 | + } |
---|
| 3310 | + |
---|
| 3311 | + gib_alert_irq.isc = nisc; |
---|
| 3312 | + if (register_adapter_interrupt(&gib_alert_irq)) { |
---|
| 3313 | + pr_err("Registering the GIB alert interruption handler failed\n"); |
---|
| 3314 | + rc = -EIO; |
---|
| 3315 | + goto out_free_gib; |
---|
| 3316 | + } |
---|
| 3317 | + |
---|
| 3318 | + gib->nisc = nisc; |
---|
| 3319 | + if (chsc_sgib((u32)(u64)gib)) { |
---|
| 3320 | + pr_err("Associating the GIB with the AIV facility failed\n"); |
---|
| 3321 | + free_page((unsigned long)gib); |
---|
| 3322 | + gib = NULL; |
---|
| 3323 | + rc = -EIO; |
---|
| 3324 | + goto out_unreg_gal; |
---|
| 3325 | + } |
---|
| 3326 | + |
---|
| 3327 | + KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc); |
---|
| 3328 | + goto out; |
---|
| 3329 | + |
---|
| 3330 | +out_unreg_gal: |
---|
| 3331 | + unregister_adapter_interrupt(&gib_alert_irq); |
---|
| 3332 | +out_free_gib: |
---|
| 3333 | + free_page((unsigned long)gib); |
---|
| 3334 | + gib = NULL; |
---|
| 3335 | +out: |
---|
| 3336 | + return rc; |
---|
2927 | 3337 | } |
---|