2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/pci/controller/vmd.c
@@ -40,13 +40,19 @@
 	 * membars, in order to allow proper address translation during
 	 * resource assignment to enable guest virtualization
 	 */
-	VMD_FEAT_HAS_MEMBAR_SHADOW	= (1 << 0),
+	VMD_FEAT_HAS_MEMBAR_SHADOW		= (1 << 0),
 
 	/*
 	 * Device may provide root port configuration information which limits
 	 * bus numbering
 	 */
-	VMD_FEAT_HAS_BUS_RESTRICTIONS	= (1 << 1),
+	VMD_FEAT_HAS_BUS_RESTRICTIONS		= (1 << 1),
+
+	/*
+	 * Device contains physical location shadow registers in
+	 * vendor-specific capability space
+	 */
+	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),
 };
 
 /*
@@ -98,11 +104,6 @@
 	struct irq_domain	*irq_domain;
 	struct pci_bus		*bus;
 	u8			busn_start;
-
-#ifdef CONFIG_X86_DEV_DMA_OPS
-	struct dma_map_ops	dma_ops;
-	struct dma_domain	dma_domain;
-#endif
 };
 
 static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
@@ -297,173 +298,32 @@
 	.chip = &vmd_msi_controller,
 };
 
-#ifdef CONFIG_X86_DEV_DMA_OPS
-/*
- * VMD replaces the requester ID with its own. DMA mappings for devices in a
- * VMD domain need to be mapped for the VMD, not the device requiring
- * the mapping.
- */
-static struct device *to_vmd_dev(struct device *dev)
+static int vmd_create_irq_domain(struct vmd_dev *vmd)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+	struct fwnode_handle *fn;
 
-	return &vmd->dev->dev;
+	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
+	if (!fn)
+		return -ENODEV;
+
+	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+	if (!vmd->irq_domain) {
+		irq_domain_free_fwnode(fn);
+		return -ENODEV;
+	}
+
+	return 0;
 }
 
-static const struct dma_map_ops *vmd_dma_ops(struct device *dev)
+static void vmd_remove_irq_domain(struct vmd_dev *vmd)
 {
-	return get_dma_ops(to_vmd_dev(dev));
+	if (vmd->irq_domain) {
+		struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+
+		irq_domain_remove(vmd->irq_domain);
+		irq_domain_free_fwnode(fn);
+	}
 }
-
-static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
-		       gfp_t flag, unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
-				       attrs);
-}
-
-static void vmd_free(struct device *dev, size_t size, void *vaddr,
-		     dma_addr_t addr, unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
-				      attrs);
-}
-
-static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t addr, size_t size,
-		    unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
-				      size, attrs);
-}
-
-static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
-			   void *cpu_addr, dma_addr_t addr, size_t size,
-			   unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
-					     addr, size, attrs);
-}
-
-static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction dir,
-			       unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
-					  dir, attrs);
-}
-
-static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
-			   enum dma_data_direction dir, unsigned long attrs)
-{
-	vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
-}
-
-static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction dir, unsigned long attrs)
-{
-	return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			 enum dma_data_direction dir, unsigned long attrs)
-{
-	vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
-}
-
-static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-				    size_t size, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir);
-}
-
-static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr,
-				       size_t size, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size,
-						 dir);
-}
-
-static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-				   int nents, enum dma_data_direction dir)
-{
-	vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir);
-}
-
-static int vmd_mapping_error(struct device *dev, dma_addr_t addr)
-{
-	return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr);
-}
-
-static int vmd_dma_supported(struct device *dev, u64 mask)
-{
-	return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
-}
-
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
-static u64 vmd_get_required_mask(struct device *dev)
-{
-	return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
-}
-#endif
-
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
-{
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	if (get_dma_ops(&vmd->dev->dev))
-		del_dma_domain(domain);
-}
-
-#define ASSIGN_VMD_DMA_OPS(source, dest, fn)	\
-	do {					\
-		if (source->fn)			\
-			dest->fn = vmd_##fn;	\
-	} while (0)
-
-static void vmd_setup_dma_ops(struct vmd_dev *vmd)
-{
-	const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
-	struct dma_map_ops *dest = &vmd->dma_ops;
-	struct dma_domain *domain = &vmd->dma_domain;
-
-	domain->domain_nr = vmd->sysdata.domain;
-	domain->dma_ops = dest;
-
-	if (!source)
-		return;
-	ASSIGN_VMD_DMA_OPS(source, dest, alloc);
-	ASSIGN_VMD_DMA_OPS(source, dest, free);
-	ASSIGN_VMD_DMA_OPS(source, dest, mmap);
-	ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_page);
-	ASSIGN_VMD_DMA_OPS(source, dest, map_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu);
-	ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
-	ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
-	ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
-	ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-#endif
-	add_dma_domain(domain);
-}
-#undef ASSIGN_VMD_DMA_OPS
-#else
-static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {}
-static void vmd_setup_dma_ops(struct vmd_dev *vmd) {}
-#endif
 
 static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
 				  unsigned int devfn, int reg, int len)
@@ -584,16 +444,149 @@
 	return domain + 1;
 }
 
+static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
+				resource_size_t *offset1,
+				resource_size_t *offset2)
+{
+	struct pci_dev *dev = vmd->dev;
+	u64 phys1, phys2;
+
+	if (native_hint) {
+		u32 vmlock;
+		int ret;
+
+		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
+		if (ret || vmlock == ~0)
+			return -ENODEV;
+
+		if (MB2_SHADOW_EN(vmlock)) {
+			void __iomem *membar2;
+
+			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
+			if (!membar2)
+				return -ENOMEM;
+			phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
+			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
+			pci_iounmap(dev, membar2);
+		} else
+			return 0;
+	} else {
+		/* Hypervisor-Emulated Vendor-Specific Capability */
+		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+		u32 reg, regu;
+
+		pci_read_config_dword(dev, pos + 4, &reg);
+
+		/* "SHDW" */
+		if (pos && reg == 0x53484457) {
+			pci_read_config_dword(dev, pos + 8, &reg);
+			pci_read_config_dword(dev, pos + 12, &regu);
+			phys1 = (u64) regu << 32 | reg;
+
+			pci_read_config_dword(dev, pos + 16, &reg);
+			pci_read_config_dword(dev, pos + 20, &regu);
+			phys2 = (u64) regu << 32 | reg;
+		} else
+			return 0;
+	}
+
+	*offset1 = dev->resource[VMD_MEMBAR1].start -
+			(phys1 & PCI_BASE_ADDRESS_MEM_MASK);
+	*offset2 = dev->resource[VMD_MEMBAR2].start -
+			(phys2 & PCI_BASE_ADDRESS_MEM_MASK);
+
+	return 0;
+}
+
+static int vmd_get_bus_number_start(struct vmd_dev *vmd)
+{
+	struct pci_dev *dev = vmd->dev;
+	u16 reg;
+
+	pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
+	if (BUS_RESTRICT_CAP(reg)) {
+		pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);
+
+		switch (BUS_RESTRICT_CFG(reg)) {
+		case 0:
+			vmd->busn_start = 0;
+			break;
+		case 1:
+			vmd->busn_start = 128;
+			break;
+		case 2:
+			vmd->busn_start = 224;
+			break;
+		default:
+			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
+				BUS_RESTRICT_CFG(reg));
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static irqreturn_t vmd_irq(int irq, void *data)
+{
+	struct vmd_irq_list *irqs = data;
+	struct vmd_irq *vmdirq;
+	int idx;
+
+	idx = srcu_read_lock(&irqs->srcu);
+	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
+		generic_handle_irq(vmdirq->virq);
+	srcu_read_unlock(&irqs->srcu, idx);
+
+	return IRQ_HANDLED;
+}
+
+static int vmd_alloc_irqs(struct vmd_dev *vmd)
+{
+	struct pci_dev *dev = vmd->dev;
+	int i, err;
+
+	vmd->msix_count = pci_msix_vec_count(dev);
+	if (vmd->msix_count < 0)
+		return -ENODEV;
+
+	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
+						PCI_IRQ_MSIX);
+	if (vmd->msix_count < 0)
+		return vmd->msix_count;
+
+	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
+				 GFP_KERNEL);
+	if (!vmd->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < vmd->msix_count; i++) {
+		err = init_srcu_struct(&vmd->irqs[i].srcu);
+		if (err)
+			return err;
+
+		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
+		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+				       vmd_irq, IRQF_NO_THREAD,
+				       "vmd", &vmd->irqs[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
 {
 	struct pci_sysdata *sd = &vmd->sysdata;
-	struct fwnode_handle *fn;
 	struct resource *res;
 	u32 upper_bits;
 	unsigned long flags;
 	LIST_HEAD(resources);
 	resource_size_t offset[2] = {0};
 	resource_size_t membar2_offset = 0x2000;
+	struct pci_bus *child;
+	int ret;
 
 	/*
 	 * Shadow registers may exist in certain VMD device ids which allow
@@ -602,42 +595,24 @@
 	 * or 0, depending on an enable bit in the VMD device.
 	 */
 	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
-		u32 vmlock;
-		int ret;
-
 		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
-		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
-		if (ret || vmlock == ~0)
-			return -ENODEV;
-
-		if (MB2_SHADOW_EN(vmlock)) {
-			void __iomem *membar2;
-
-			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
-			if (!membar2)
-				return -ENOMEM;
-			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
-					(readq(membar2 + MB2_SHADOW_OFFSET) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
-					(readq(membar2 + MB2_SHADOW_OFFSET + 8) &
-					 PCI_BASE_ADDRESS_MEM_MASK);
-			pci_iounmap(vmd->dev, membar2);
-		}
+		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
+		if (ret)
+			return ret;
+	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
+		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
+		if (ret)
+			return ret;
 	}
 
 	/*
 	 * Certain VMD devices may have a root port configuration option which
-	 * limits the bus range to between 0-127 or 128-255
+	 * limits the bus range to between 0-127, 128-255, or 224-255
 	 */
 	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
-		u32 vmcap, vmconfig;
-
-		pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
-		pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
-		if (BUS_RESTRICT_CAP(vmcap) &&
-		    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
-			vmd->busn_start = 128;
+		ret = vmd_get_bus_number_start(vmd);
+		if (ret)
+			return ret;
 	}
 
 	res = &vmd->dev->resource[VMD_CFGBAR];
@@ -658,7 +633,7 @@
 	 * 32-bit resources. __pci_assign_resource() enforces that
 	 * artificial restriction to make sure everything will fit.
 	 *
-	 * The only way we could use a 64-bit non-prefechable MEMBAR is
+	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
 	 * if its address is <4GB so that we can convert it to a 32-bit
 	 * resource. To be visible to the host OS, all VMD endpoints must
 	 * be initially configured by platform BIOS, which includes setting
@@ -691,65 +666,61 @@
 		.parent = res,
 	};
 
-	sd->vmd_domain = true;
+	sd->vmd_dev = vmd->dev;
 	sd->domain = vmd_find_free_domain();
 	if (sd->domain < 0)
 		return sd->domain;
 
 	sd->node = pcibus_to_node(vmd->dev->bus);
 
-	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
-	if (!fn)
-		return -ENODEV;
+	ret = vmd_create_irq_domain(vmd);
+	if (ret)
+		return ret;
 
-	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
-						    x86_vector_domain);
-	if (!vmd->irq_domain) {
-		irq_domain_free_fwnode(fn);
-		return -ENODEV;
-	}
+	/*
+	 * Override the irq domain bus token so the domain can be distinguished
+	 * from a regular PCI/MSI domain.
+	 */
+	irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
 
 	pci_add_resource(&resources, &vmd->resources[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
 	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
 
 	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
-					&vmd_ops, sd, &resources);
+				       &vmd_ops, sd, &resources);
 	if (!vmd->bus) {
 		pci_free_resource_list(&resources);
-		irq_domain_remove(vmd->irq_domain);
-		irq_domain_free_fwnode(fn);
+		vmd_remove_irq_domain(vmd);
 		return -ENODEV;
 	}
 
 	vmd_attach_resources(vmd);
-	vmd_setup_dma_ops(vmd);
-	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
-	pci_rescan_bus(vmd->bus);
+	if (vmd->irq_domain)
+		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
+
+	pci_scan_child_bus(vmd->bus);
+	pci_assign_unassigned_bus_resources(vmd->bus);
+
+	/*
+	 * VMD root buses are virtual and don't return true on pci_is_pcie()
+	 * and will fail pcie_bus_configure_settings() early. It can instead be
+	 * run on each of the real root ports.
+	 */
+	list_for_each_entry(child, &vmd->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(vmd->bus);
 
 	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
 		       "domain"), "Can't create symlink to domain\n");
 	return 0;
 }
 
-static irqreturn_t vmd_irq(int irq, void *data)
-{
-	struct vmd_irq_list *irqs = data;
-	struct vmd_irq *vmdirq;
-	int idx;
-
-	idx = srcu_read_lock(&irqs->srcu);
-	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
-		generic_handle_irq(vmdirq->virq);
-	srcu_read_unlock(&irqs->srcu, idx);
-
-	return IRQ_HANDLED;
-}
-
 static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct vmd_dev *vmd;
-	int i, err;
+	int err;
 
 	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
 		return -ENOMEM;
@@ -772,32 +743,9 @@
 	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
 		return -ENODEV;
 
-	vmd->msix_count = pci_msix_vec_count(dev);
-	if (vmd->msix_count < 0)
-		return -ENODEV;
-
-	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
-						PCI_IRQ_MSIX);
-	if (vmd->msix_count < 0)
-		return vmd->msix_count;
-
-	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
-				 GFP_KERNEL);
-	if (!vmd->irqs)
-		return -ENOMEM;
-
-	for (i = 0; i < vmd->msix_count; i++) {
-		err = init_srcu_struct(&vmd->irqs[i].srcu);
-		if (err)
-			return err;
-
-		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
-		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
-				       vmd_irq, IRQF_NO_THREAD,
-				       "vmd", &vmd->irqs[i]);
-		if (err)
-			return err;
-	}
+	err = vmd_alloc_irqs(vmd);
+	if (err)
+		return err;
 
 	spin_lock_init(&vmd->cfg_lock);
 	pci_set_drvdata(dev, vmd);
@@ -821,16 +769,13 @@
 static void vmd_remove(struct pci_dev *dev)
 {
 	struct vmd_dev *vmd = pci_get_drvdata(dev);
-	struct fwnode_handle *fn = vmd->irq_domain->fwnode;
 
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
 	vmd_cleanup_srcu(vmd);
-	vmd_teardown_dma_ops(vmd);
 	vmd_detach_resources(vmd);
-	irq_domain_remove(vmd->irq_domain);
-	irq_domain_free_fwnode(fn);
+	vmd_remove_irq_domain(vmd);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -841,9 +786,8 @@
 	int i;
 
 	for (i = 0; i < vmd->msix_count; i++)
-                devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
 
-	pci_save_state(pdev);
 	return 0;
 }
 
@@ -861,19 +805,26 @@
 		return err;
 	}
 
-	pci_restore_state(pdev);
 	return 0;
 }
 #endif
 static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
 
 static const struct pci_device_id vmd_ids[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
 		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
 				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
-		.driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
 	{0,}
 };
 MODULE_DEVICE_TABLE(pci, vmd_ids);