```diff
@@ -6,7 +6,8 @@
  * busses using the iommu infrastructure
  */
 
-#include <linux/export.h>
+#include <linux/dma-direct.h>
+#include <linux/pci.h>
 #include <asm/iommu.h>
 
 /*
```
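The two new includes feed the bypass logic added further down: `<linux/dma-direct.h>` declares `dma_direct_get_required_mask()`, and `<linux/pci.h>` supplies `dev_is_pci()` and `to_pci_dev()`. `<linux/export.h>` can go because the `EXPORT_SYMBOL(dma_iommu_ops)` at the end of the file is dropped by this same patch.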
```diff
@@ -44,7 +45,7 @@
 				     unsigned long attrs)
 {
 	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
-			      size, device_to_mask(dev), direction, attrs);
+			      size, dma_get_mask(dev), direction, attrs);
 }
 
 
```
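`device_to_mask()` was a powerpc-local helper; `dma_get_mask()` is the generic accessor from `<linux/dma-mapping.h>`, returning the mask the driver set (and falling back to a 32-bit mask if none was set). A minimal driver-side sketch of where that mask comes from, with hypothetical function names:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Hypothetical driver fragment: set the DMA mask, then map one page.
 * The mask set here is what dma_get_mask(dev) hands to iommu_map_page()
 * in the hunk above, bounding which IOMMU table entries may be used.
 */
static int demo_map_one_page(struct pci_dev *pdev, struct page *page)
{
        struct device *dev = &pdev->dev;
        dma_addr_t handle;

        /* Declare how many address bits the device can drive. */
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return -EIO;

        handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... program 'handle' into the device and run the transfer ... */

        dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
        return 0;
}
```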
```diff
@@ -62,7 +63,7 @@
 			unsigned long attrs)
 {
 	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
-				device_to_mask(dev), direction, attrs);
+				dma_get_mask(dev), direction, attrs);
 }
 
 static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
```
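The same substitution, applied to the scatterlist path. For reference, a hedged sketch of how a caller reaches this entry point through the generic API (names invented for illustration):

```c
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical fragment: map a small scatterlist. dma_map_sg() lands in
 * dma_iommu_map_sg() above when the device sits behind the IOMMU, with
 * dma_get_mask() constraining the allocated DMA window.
 */
static int demo_map_sg(struct device *dev, struct page **pages, int n)
{
        struct scatterlist sgl[4];
        int i, mapped;

        if (n > ARRAY_SIZE(sgl))
                return -EINVAL;

        sg_init_table(sgl, n);
        for (i = 0; i < n; i++)
                sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);

        mapped = dma_map_sg(dev, sgl, n, DMA_FROM_DEVICE);
        if (mapped == 0)
                return -ENOMEM;

        /* ... feed sg_dma_address()/sg_dma_len() of each entry to the HW ... */

        dma_unmap_sg(dev, sgl, n, DMA_FROM_DEVICE);
        return 0;
}
```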
```diff
@@ -73,14 +74,29 @@
 			   direction, attrs);
 }
 
+static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_controller *phb = pci_bus_to_host(pdev->bus);
+
+	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+		return false;
+	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
+}
+
 /* We support DMA to/from any memory page via the iommu */
 int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
+	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
+		dev->dma_ops_bypass = true;
+		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+		return 1;
+	}
+
 	if (!tbl) {
-		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
-			", table unavailable\n", mask);
+		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
 		return 0;
 	}
 
```
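`dma_iommu_bypass_supported()` delegates the policy decision to the PCI host controller through the new `controller_ops.iommu_bypass_supported` hook; `iommu_fixed_is_weak` is a pre-existing arch flag (weakly ordered fixed-window DMA) under which bypass is refused outright. What a platform-side implementation might look like, as a sketch with invented names, assuming a 1:1 window covering all of RAM at a fixed bus offset:

```c
#include <linux/memblock.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>	/* powerpc: struct pci_controller */

/* Assumed bus offset of the direct window; invented for illustration. */
#define DEMO_DMA_BASE	0x0800000000000000ULL

/*
 * Hypothetical hook: allow bypass only if the device's mask can reach
 * every address in a direct window that spans all of system memory.
 */
static bool demo_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
{
        u64 top = DEMO_DMA_BASE + memblock_end_of_DRAM();

        return mask >= top - 1;
}

/*
 * Wired up at controller init, e.g.:
 *	phb->controller_ops.iommu_bypass_supported =
 *		demo_iommu_bypass_supported;
 */
```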
```diff
@@ -89,14 +105,27 @@
 		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
 			 mask, tbl->it_offset << tbl->it_page_shift);
 		return 0;
-	} else
-		return 1;
+	}
+
+	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
+	dev->dma_ops_bypass = false;
+	return 1;
 }
 
-static u64 dma_iommu_get_required_mask(struct device *dev)
+u64 dma_iommu_get_required_mask(struct device *dev)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
+
+	if (dev_is_pci(dev)) {
+		u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+		if (dma_iommu_dma_supported(dev, bypass_mask)) {
+			dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+			return bypass_mask;
+		}
+	}
+
 	if (!tbl)
 		return 0;
 
```
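Nothing in this file acts on `dev->dma_ops_bypass` directly: it is consumed by the generic mapping layer, which routes `dma_map_*` calls straight to dma-direct when the flag is set and the device's mask is large enough, and through `dma_iommu_ops` otherwise. Roughly, as a simplified paraphrase of the dispatch in kernel/dma/mapping.c (not the verbatim source; it ignores `dev->bus_dma_limit` among other details):

```c
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>	/* get_dma_ops(); header location varies by version */

/* Simplified paraphrase of the core's bypass check, for illustration. */
static inline bool demo_go_direct(struct device *dev, u64 mask,
                                  const struct dma_map_ops *ops)
{
        if (!ops)
                return true;		/* no ops installed: always direct */
        if (!dev->dma_ops_bypass)
                return false;		/* bypass not granted by dma_supported */
        /* bypass only if the mask covers everything dma-direct may hand out */
        return mask >= dma_direct_get_required_mask(dev);
}

/* Simplified paraphrase of dma_map_page_attrs() dispatch. */
static dma_addr_t demo_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (demo_go_direct(dev, *dev->dma_mask, ops))
                return dma_direct_map_page(dev, page, offset, size, dir, attrs);
        return ops->map_page(dev, page, offset, size, dir, attrs);
}
```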
```diff
@@ -107,21 +136,17 @@
 	return mask;
 }
 
-int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == IOMMU_MAPPING_ERROR;
-}
-
-struct dma_map_ops dma_iommu_ops = {
+const struct dma_map_ops dma_iommu_ops = {
 	.alloc			= dma_iommu_alloc_coherent,
 	.free			= dma_iommu_free_coherent,
-	.mmap			= dma_nommu_mmap_coherent,
 	.map_sg			= dma_iommu_map_sg,
 	.unmap_sg		= dma_iommu_unmap_sg,
 	.dma_supported		= dma_iommu_dma_supported,
 	.map_page		= dma_iommu_map_page,
 	.unmap_page		= dma_iommu_unmap_page,
 	.get_required_mask	= dma_iommu_get_required_mask,
-	.mapping_error		= dma_iommu_mapping_error,
+	.mmap			= dma_common_mmap,
+	.get_sgtable		= dma_common_get_sgtable,
+	.alloc_pages		= dma_common_alloc_pages,
+	.free_pages		= dma_common_free_pages,
 };
-EXPORT_SYMBOL(dma_iommu_ops);
```
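With `.mapping_error` gone, failures are reported through the generic `DMA_MAPPING_ERROR` encoding, and the `dma_common_*`/`const` changes bring the table in line with the shared dma-mapping helpers. With the `EXPORT_SYMBOL` removed, `dma_iommu_ops` is referenced only from built-in arch code, which attaches it per device. A sketch of that wiring, with the outer function invented for illustration (`set_dma_ops()` and `set_iommu_table_base()` are the real helpers):

```c
#include <linux/dma-map-ops.h>	/* set_dma_ops(); header location varies by version */
#include <linux/pci.h>
#include <asm/iommu.h>		/* dma_iommu_ops, set_iommu_table_base() */

/*
 * Hypothetical device-setup fragment: attach the shared iommu ops and
 * the device's translation table, as a bus setup path would.
 */
static void demo_pci_dma_dev_setup(struct pci_dev *pdev,
                                   struct iommu_table *tbl)
{
        set_iommu_table_base(&pdev->dev, tbl);
        set_dma_ops(&pdev->dev, &dma_iommu_ops);
}
```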