2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/powerpc/kernel/dma-iommu.c
@@ -6,7 +6,8 @@
  * busses using the iommu infrastructure
  */
 
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/pci.h>
 #include <asm/iommu.h>
 
 /*
@@ -44,7 +45,7 @@
 				     unsigned long attrs)
 {
 	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
-			      size, device_to_mask(dev), direction, attrs);
+			      size, dma_get_mask(dev), direction, attrs);
 }
 
 
@@ -62,7 +63,7 @@
 			    unsigned long attrs)
 {
 	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
-				device_to_mask(dev), direction, attrs);
+				dma_get_mask(dev), direction, attrs);
 }
 
 static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
@@ -73,14 +74,29 @@
 			direction, attrs);
 }
 
+static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_controller *phb = pci_bus_to_host(pdev->bus);
+
+	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+		return false;
+	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
+}
+
 /* We support DMA to/from any memory page via the iommu */
 int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
+	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
+		dev->dma_ops_bypass = true;
+		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+		return 1;
+	}
+
 	if (!tbl) {
-		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
-			", table unavailable\n", mask);
+		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
 		return 0;
 	}
 
@@ -89,14 +105,27 @@
 		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
 				mask, tbl->it_offset << tbl->it_page_shift);
 		return 0;
-	} else
-		return 1;
+	}
+
+	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
+	dev->dma_ops_bypass = false;
+	return 1;
 }
 
-static u64 dma_iommu_get_required_mask(struct device *dev)
+u64 dma_iommu_get_required_mask(struct device *dev)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
+
+	if (dev_is_pci(dev)) {
+		u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+		if (dma_iommu_dma_supported(dev, bypass_mask)) {
+			dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+			return bypass_mask;
+		}
+	}
+
 	if (!tbl)
 		return 0;
 
@@ -107,21 +136,17 @@
 	return mask;
 }
 
-int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == IOMMU_MAPPING_ERROR;
-}
-
-struct dma_map_ops dma_iommu_ops = {
+const struct dma_map_ops dma_iommu_ops = {
 	.alloc			= dma_iommu_alloc_coherent,
 	.free			= dma_iommu_free_coherent,
-	.mmap			= dma_nommu_mmap_coherent,
 	.map_sg			= dma_iommu_map_sg,
 	.unmap_sg		= dma_iommu_unmap_sg,
 	.dma_supported		= dma_iommu_dma_supported,
 	.map_page		= dma_iommu_map_page,
 	.unmap_page		= dma_iommu_unmap_page,
 	.get_required_mask	= dma_iommu_get_required_mask,
-	.mapping_error		= dma_iommu_mapping_error,
+	.mmap			= dma_common_mmap,
+	.get_sgtable		= dma_common_get_sgtable,
+	.alloc_pages		= dma_common_alloc_pages,
+	.free_pages		= dma_common_free_pages,
 };
-EXPORT_SYMBOL(dma_iommu_ops);
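
Note: the dma_iommu_bypass_supported() helper added above defers to the PHB's controller_ops.iommu_bypass_supported callback. The sketch below is a minimal, hypothetical example of how a platform might provide that hook; the function names, the setup helper, and the 1 TiB direct-window check are illustrative assumptions, not part of this patch.

/* Illustrative only -- not taken from the patch above. */
#include <linux/pci.h>
#include <asm/pci-bridge.h>

/* Assumed size of the platform's direct-mapped DMA window (hypothetical). */
#define EXAMPLE_DIRECT_WINDOW_SIZE	(1ULL << 40)

static bool example_phb_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
{
	/*
	 * Bypass (dev->dma_ops_bypass) is only safe when the device's DMA
	 * mask covers the whole direct-mapped window, so every address that
	 * dma-direct hands out stays reachable by the device.
	 */
	return mask >= EXAMPLE_DIRECT_WINDOW_SIZE - 1;
}

static void example_phb_setup(struct pci_controller *phb)
{
	/* Registering the hook lets dma_iommu_dma_supported() enable bypass. */
	phb->controller_ops.iommu_bypass_supported =
			example_phb_iommu_bypass_supported;
}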