.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and |
---|
3 | 4 | * initial domain support. We also handle the DSDT _PRT callbacks for GSIs |
---|
.. | .. |
---|
60 | 61 | } |
---|
61 | 62 | |
---|
62 | 63 | #ifdef CONFIG_ACPI |
---|
63 | | -static int xen_register_pirq(u32 gsi, int gsi_override, int triggering, |
---|
64 | | - bool set_pirq) |
---|
| 64 | +static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq) |
---|
65 | 65 | { |
---|
66 | | - int rc, pirq = -1, irq = -1; |
---|
| 66 | + int rc, pirq = -1, irq; |
---|
67 | 67 | struct physdev_map_pirq map_irq; |
---|
68 | 68 | int shareable = 0; |
---|
69 | 69 | char *name; |
---|
.. | .. |
---|
94 | 94 | name = "ioapic-level"; |
---|
95 | 95 | } |
---|
96 | 96 | |
---|
97 | | - if (gsi_override >= 0) |
---|
98 | | - gsi = gsi_override; |
---|
99 | | - |
---|
100 | 97 | irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name); |
---|
101 | 98 | if (irq < 0) |
---|
102 | 99 | goto out; |
---|
.. | .. |
---|
112 | 109 | if (!xen_hvm_domain()) |
---|
113 | 110 | return -1; |
---|
114 | 111 | |
---|
115 | | - return xen_register_pirq(gsi, -1 /* no GSI override */, trigger, |
---|
| 112 | + return xen_register_pirq(gsi, trigger, |
---|
116 | 113 | false /* no mapping of GSI to PIRQ */); |
---|
117 | 114 | } |
---|
118 | 115 | |
---|
119 | 116 | #ifdef CONFIG_XEN_DOM0 |
---|
120 | | -static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity) |
---|
| 117 | +static int xen_register_gsi(u32 gsi, int triggering, int polarity) |
---|
121 | 118 | { |
---|
122 | 119 | int rc, irq; |
---|
123 | 120 | struct physdev_setup_gsi setup_gsi; |
---|
.. | .. |
---|
128 | 125 | printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n", |
---|
129 | 126 | gsi, triggering, polarity); |
---|
130 | 127 | |
---|
131 | | - irq = xen_register_pirq(gsi, gsi_override, triggering, true); |
---|
| 128 | + irq = xen_register_pirq(gsi, triggering, true); |
---|
132 | 129 | |
---|
133 | 130 | setup_gsi.gsi = gsi; |
---|
134 | 131 | setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1); |
---|
.. | .. |
---|
148 | 145 | static int acpi_register_gsi_xen(struct device *dev, u32 gsi, |
---|
149 | 146 | int trigger, int polarity) |
---|
150 | 147 | { |
---|
151 | | - return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity); |
---|
| 148 | + return xen_register_gsi(gsi, trigger, polarity); /* gsi_override parameter dropped: every caller passed -1 */ |
---|
152 | 149 | } |
---|
153 | 150 | #endif |
---|
154 | 151 | #endif |
---|
.. | .. |
---|
159 | 156 | |
---|
160 | 157 | struct xen_pci_frontend_ops *xen_pci_frontend; |
---|
161 | 158 | EXPORT_SYMBOL_GPL(xen_pci_frontend); |
---|
| 159 | + |
---|
| 160 | +struct xen_msi_ops { /* MSI callbacks, selected per Xen domain type (PV/dom0/HVM) at boot */ |
---|
| 161 | + int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); /* allocate nvec MSI/MSI-X irqs for dev */ |
---|
| 162 | + void (*teardown_msi_irqs)(struct pci_dev *dev); /* release all MSI irqs of dev */ |
---|
| 163 | +}; |
---|
| 164 | + |
---|
| 165 | +static struct xen_msi_ops xen_msi_ops __ro_after_init; /* filled once by xen_setup_pci_msi() */ |
---|
162 | 166 | |
---|
163 | 167 | static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
---|
164 | 168 | { |
---|
.. | .. |
---|
375 | 379 | WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret); |
---|
376 | 380 | } |
---|
377 | 381 | } |
---|
378 | | -#endif |
---|
| 382 | +#else /* CONFIG_XEN_DOM0 */ |
---|
| 383 | +#define xen_initdom_setup_msi_irqs NULL |
---|
| 384 | +#define xen_initdom_restore_msi_irqs NULL |
---|
| 385 | +#endif /* !CONFIG_XEN_DOM0 */ |
---|
379 | 386 | |
---|
380 | 387 | static void xen_teardown_msi_irqs(struct pci_dev *dev) |
---|
381 | 388 | { |
---|
382 | 389 | struct msi_desc *msidesc; |
---|
| 390 | + int i; /* vector index within one MSI descriptor */ |
---|
383 | 391 | |
---|
384 | | - msidesc = first_pci_msi_entry(dev); |
---|
| 392 | + for_each_pci_msi_entry(msidesc, dev) { /* walk every descriptor, not just the first */ |
---|
| 393 | + if (msidesc->irq) { /* skip entries that never got an irq assigned */ |
---|
| 394 | + for (i = 0; i < msidesc->nvec_used; i++) |
---|
| 395 | + xen_destroy_irq(msidesc->irq + i); |
---|
| 396 | + } |
---|
| 397 | + } |
---|
| 398 | +} |
---|
| 399 | + |
---|
| 400 | +static void xen_pv_teardown_msi_irqs(struct pci_dev *dev) /* PV: tell pcifront to disable, then free the irqs */ |
---|
| 401 | +{ |
---|
| 402 | + struct msi_desc *msidesc = first_pci_msi_entry(dev); |
---|
| 403 | + |
---|
385 | 404 | if (msidesc->msi_attrib.is_msix) |
---|
386 | 405 | xen_pci_frontend_disable_msix(dev); |
---|
387 | 406 | else |
---|
388 | 407 | xen_pci_frontend_disable_msi(dev); |
---|
389 | 408 | |
---|
390 | | - /* Free the IRQ's and the msidesc using the generic code. */ |
---|
391 | | - default_teardown_msi_irqs(dev); |
---|
| 409 | + xen_teardown_msi_irqs(dev); /* free the irqs via the common Xen teardown path */ |
---|
392 | 410 | } |
---|
393 | 411 | |
---|
394 | | -static void xen_teardown_msi_irq(unsigned int irq) |
---|
| 412 | +static int xen_msi_domain_alloc_irqs(struct irq_domain *domain, |
---|
| 413 | + struct device *dev, int nvec) |
---|
395 | 414 | { |
---|
396 | | - xen_destroy_irq(irq); |
---|
| 415 | + int type; |
---|
| 416 | + |
---|
| 417 | + if (WARN_ON_ONCE(!dev_is_pci(dev))) /* this wrapper domain serves PCI devices only */ |
---|
| 418 | + return -EINVAL; |
---|
| 419 | + |
---|
| 420 | + if (first_msi_entry(dev)->msi_attrib.is_msix) /* MSI vs MSI-X read off the first descriptor */ |
---|
| 421 | + type = PCI_CAP_ID_MSIX; |
---|
| 422 | + else |
---|
| 423 | + type = PCI_CAP_ID_MSI; |
---|
| 424 | + |
---|
| 425 | + return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type); /* backend picked in xen_setup_pci_msi() */ |
---|
397 | 426 | } |
---|
398 | 427 | |
---|
399 | | -#endif |
---|
| 428 | +static void xen_msi_domain_free_irqs(struct irq_domain *domain, |
---|
| 429 | + struct device *dev) |
---|
| 430 | +{ |
---|
| 431 | + if (WARN_ON_ONCE(!dev_is_pci(dev))) /* this wrapper domain serves PCI devices only */ |
---|
| 432 | + return; |
---|
| 433 | + |
---|
| 434 | + xen_msi_ops.teardown_msi_irqs(to_pci_dev(dev)); /* backend picked in xen_setup_pci_msi() */ |
---|
| 435 | +} |
---|
| 436 | + |
---|
| 437 | +static struct msi_domain_ops xen_pci_msi_domain_ops = { |
---|
| 438 | + .domain_alloc_irqs = xen_msi_domain_alloc_irqs, /* replace the generic alloc/free path wholesale */ |
---|
| 439 | + .domain_free_irqs = xen_msi_domain_free_irqs, |
---|
| 440 | +}; |
---|
| 441 | + |
---|
| 442 | +static struct msi_domain_info xen_pci_msi_domain_info = { |
---|
| 443 | + .ops = &xen_pci_msi_domain_ops, /* no chip/handler data: irqs are managed via Xen pirqs */ |
---|
| 444 | +}; |
---|
| 445 | + |
---|
| 446 | +/* |
---|
| 447 | + * This irq domain is a blatant violation of the irq domain design, but |
---|
| 448 | + * disentangling XEN into real irq domains is not a job for mere mortals with |
---|
| 449 | + * limited XENology. But it's the least dangerous way for a mere mortal to |
---|
| 450 | + * get rid of the arch_*_msi_irqs() hackery in order to store the irq |
---|
| 451 | + * domain pointer in struct device. This irq domain wrappery allows doing |
---|
| 452 | + * that without breaking XEN terminally. |
---|
| 453 | + */ |
---|
| 454 | +static __init struct irq_domain *xen_create_pci_msi_domain(void) |
---|
| 455 | +{ |
---|
| 456 | + struct irq_domain *d = NULL; |
---|
| 457 | + struct fwnode_handle *fn; |
---|
| 458 | + |
---|
| 459 | + fn = irq_domain_alloc_named_fwnode("XEN-MSI"); |
---|
| 460 | + if (fn) |
---|
| 461 | + d = msi_create_irq_domain(fn, &xen_pci_msi_domain_info, NULL); /* no parent domain */ |
---|
| 462 | + |
---|
| 463 | + /* FIXME: No idea how to survive if this fails */ |
---|
| 464 | + BUG_ON(!d); |
---|
| 465 | + |
---|
| 466 | + return d; |
---|
| 467 | +} |
---|
| 468 | + |
---|
| 469 | +static __init void xen_setup_pci_msi(void) /* select MSI backend callbacks per Xen domain type */ |
---|
| 470 | +{ |
---|
| 471 | + if (xen_pv_domain()) { |
---|
| 472 | + if (xen_initial_domain()) { |
---|
| 473 | + xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs; /* NULL stubs when !CONFIG_XEN_DOM0 */ |
---|
| 474 | + x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; |
---|
| 475 | + } else { |
---|
| 476 | + xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs; |
---|
| 477 | + } |
---|
| 478 | + xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs; |
---|
| 479 | + } else if (xen_hvm_domain()) { |
---|
| 480 | + xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs; |
---|
| 481 | + xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs; |
---|
| 482 | + } else { |
---|
| 483 | + WARN_ON_ONCE(1); /* neither PV nor HVM: keep the native MSI handling intact */ |
---|
| 484 | + return; |
---|
| 485 | + } |
---|
| 486 | + |
---|
| 487 | + /* |
---|
| 488 | + * Override the PCI/MSI irq domain init function. No point |
---|
| 489 | + * in allocating the native domain and never using it. |
---|
| 490 | + */ |
---|
| 491 | + x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain; |
---|
| 492 | + /* |
---|
| 493 | + * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely |
---|
| 494 | + * controlled by the hypervisor. |
---|
| 495 | + */ |
---|
| 496 | + pci_msi_ignore_mask = 1; |
---|
| 497 | +} |
---|
| 498 | + |
---|
| 499 | +#else /* CONFIG_PCI_MSI */ |
---|
| 500 | +static inline void xen_setup_pci_msi(void) { } |
---|
| 501 | +#endif /* CONFIG_PCI_MSI */ |
---|
400 | 502 | |
---|
401 | 503 | int __init pci_xen_init(void) |
---|
402 | 504 | { |
---|
.. | .. |
---|
413 | 515 | /* Keep ACPI out of the picture */ |
---|
414 | 516 | acpi_noirq_set(); |
---|
415 | 517 | |
---|
416 | | -#ifdef CONFIG_PCI_MSI |
---|
417 | | - x86_msi.setup_msi_irqs = xen_setup_msi_irqs; |
---|
418 | | - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; |
---|
419 | | - x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; |
---|
420 | | - pci_msi_ignore_mask = 1; |
---|
421 | | -#endif |
---|
| 518 | + xen_setup_pci_msi(); |
---|
422 | 519 | return 0; |
---|
423 | 520 | } |
---|
424 | 521 | |
---|
425 | 522 | #ifdef CONFIG_PCI_MSI |
---|
426 | | -void __init xen_msi_init(void) |
---|
| 523 | +static void __init xen_hvm_msi_init(void) |
---|
427 | 524 | { |
---|
428 | 525 | if (!disable_apic) { |
---|
429 | 526 | /* |
---|
.. | .. |
---|
438 | 535 | ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC))) |
---|
439 | 536 | return; |
---|
440 | 537 | } |
---|
441 | | - |
---|
442 | | - x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; |
---|
443 | | - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; |
---|
| 538 | + xen_setup_pci_msi(); |
---|
444 | 539 | } |
---|
445 | 540 | #endif |
---|
446 | 541 | |
---|
.. | .. |
---|
463 | 558 | * We need to wait until after x2apic is initialized |
---|
464 | 559 | * before we can set MSI IRQ ops. |
---|
465 | 560 | */ |
---|
466 | | - x86_platform.apic_post_init = xen_msi_init; |
---|
| 561 | + x86_platform.apic_post_init = xen_hvm_msi_init; |
---|
467 | 562 | #endif |
---|
468 | 563 | return 0; |
---|
469 | 564 | } |
---|
.. | .. |
---|
473 | 568 | { |
---|
474 | 569 | int irq; |
---|
475 | 570 | |
---|
476 | | -#ifdef CONFIG_PCI_MSI |
---|
477 | | - x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; |
---|
478 | | - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; |
---|
479 | | - x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; |
---|
480 | | - pci_msi_ignore_mask = 1; |
---|
481 | | -#endif |
---|
| 571 | + xen_setup_pci_msi(); |
---|
482 | 572 | __acpi_register_gsi = acpi_register_gsi_xen; |
---|
483 | 573 | __acpi_unregister_gsi = NULL; |
---|
484 | 574 | /* |
---|
.. | .. |
---|
491 | 581 | if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) |
---|
492 | 582 | continue; |
---|
493 | 583 | |
---|
494 | | - xen_register_pirq(irq, -1 /* no GSI override */, |
---|
| 584 | + xen_register_pirq(irq, |
---|
495 | 585 | trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE, |
---|
496 | 586 | true /* Map GSI to PIRQ */); |
---|
497 | 587 | } |
---|