@@ -16,6 +16,7 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
+#include <linux/dma-map-ops.h>
 #include <asm/iommu-common.h>

 #include <asm/iommu.h>
@@ -73,6 +74,11 @@
         p->npages = 0;
 }

+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+        return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
 /* Interrupts must be disabled. */
 static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
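The new iommu_use_atu() helper centralizes a test that every call site below
previously open-coded as mask <= DMA_BIT_MASK(32): the sun4v ATU (Address
Translation Unit) provides 64-bit DMA translation, so it is only worth using
when the platform actually exposed one (iommu->atu != NULL) and the device's
DMA mask exceeds 32 bits. A minimal usage sketch, with a hypothetical caller
mirroring the call sites in the hunks that follow:

    /* Hypothetical caller: pick the legacy 32-bit IOTSB or the ATU
     * table based on the device's DMA mask. */
    struct iommu_map_table *tbl;

    if (iommu_use_atu(iommu, *dev->dma_mask))
            tbl = &iommu->atu->tbl;     /* 64-bit ATU translation */
    else
            tbl = &iommu->tbl;          /* legacy 32-bit IOMMU */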
@@ -92,7 +98,7 @@
         prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

         while (npages != 0) {
-                if (mask <= DMA_BIT_MASK(32)) {
+                if (!iommu_use_atu(pbm->iommu, mask)) {
                         num = pci_sun4v_iommu_map(devhandle,
                                                   HV_PCI_TSBID(0, entry),
                                                   npages,
@@ -179,7 +185,6 @@
         unsigned long flags, order, first_page, npages, n;
         unsigned long prot = 0;
         struct iommu *iommu;
-        struct atu *atu;
         struct iommu_map_table *tbl;
         struct page *page;
         void *ret;
@@ -205,13 +210,11 @@
         memset((char *)first_page, 0, PAGE_SIZE << order);

         iommu = dev->archdata.iommu;
-        atu = iommu->atu;
-
         mask = dev->coherent_dma_mask;
-        if (mask <= DMA_BIT_MASK(32))
+        if (!iommu_use_atu(iommu, mask))
                 tbl = &iommu->tbl;
         else
-                tbl = &atu->tbl;
+                tbl = &iommu->atu->tbl;

         entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
                                       (unsigned long)(-1), 0);
@@ -333,7 +336,7 @@
         atu = iommu->atu;
         devhandle = pbm->devhandle;

-        if (dvma <= DMA_BIT_MASK(32)) {
+        if (!iommu_use_atu(iommu, dvma)) {
                 tbl = &iommu->tbl;
                 iotsb_num = 0; /* we don't care for legacy iommu */
         } else {
@@ -374,7 +377,7 @@
         npages >>= IO_PAGE_SHIFT;

         mask = *dev->dma_mask;
-        if (mask <= DMA_BIT_MASK(32))
+        if (!iommu_use_atu(iommu, mask))
                 tbl = &iommu->tbl;
         else
                 tbl = &atu->tbl;
@@ -414,12 +417,12 @@
 bad:
         if (printk_ratelimit())
                 WARN_ON(1);
-        return SPARC_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;

 iommu_map_fail:
         local_irq_restore(flags);
         iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
-        return SPARC_MAPPING_ERROR;
+        return DMA_MAPPING_ERROR;
 }

 static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
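With the arch-private SPARC_MAPPING_ERROR sentinel gone, both failure paths
return the generic DMA_MAPPING_ERROR (~(dma_addr_t)0) from
<linux/dma-mapping.h>, which dma_mapping_error() compares against directly;
this is what lets the .mapping_error callback be dropped from sun4v_dma_ops
further down. A hedged sketch of how a consuming driver sees such a failure
(the driver context here is assumed, not part of this diff):

    /* The generic helper checks for DMA_MAPPING_ERROR itself, so no
     * per-bus .mapping_error hook is involved anymore. */
    dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, addr))
            return -ENOMEM;     /* mapping failed */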
@@ -506,11 +509,10 @@
         iommu_batch_start(dev, prot, ~0UL);

         max_seg_size = dma_get_max_seg_size(dev);
-        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+        seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);

         mask = *dev->dma_mask;
-        if (mask <= DMA_BIT_MASK(32))
+        if (!iommu_use_atu(iommu, mask))
                 tbl = &iommu->tbl;
         else
                 tbl = &atu->tbl;
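dma_get_seg_boundary_nr_pages() folds the old two-line ALIGN()-and-shift into
one generic helper that also copes with a NULL device. For a boundary mask of
the usual 2^n - 1 form it should reduce to (boundary >> page_shift) + 1, the
same page count the removed expression computed. A rough equivalence sketch
(local names are illustrative only; dev is assumed in scope):

    unsigned long boundary = dma_get_seg_boundary(dev);
    /* Removed open-coded form: */
    unsigned long old_form = ALIGN(boundary + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
    /* What the helper computes, roughly: */
    unsigned long new_form = (boundary >> IO_PAGE_SHIFT) + 1;
    /* old_form == new_form whenever boundary is 2^n - 1 with n >= IO_PAGE_SHIFT. */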
@@ -592,7 +594,7 @@

         if (outcount < incount) {
                 outs = sg_next(outs);
-                outs->dma_address = SPARC_MAPPING_ERROR;
+                outs->dma_address = DMA_MAPPING_ERROR;
                 outs->dma_length = 0;
         }

@@ -609,7 +611,7 @@
                         iommu_tbl_range_free(tbl, vaddr, npages,
                                              IOMMU_ERROR_CODE);
                         /* XXX demap? XXX */
-                        s->dma_address = SPARC_MAPPING_ERROR;
+                        s->dma_address = DMA_MAPPING_ERROR;
                         s->dma_length = 0;
                 }
                 if (s == outs)
@@ -674,23 +676,12 @@
 static int dma_4v_supported(struct device *dev, u64 device_mask)
 {
         struct iommu *iommu = dev->archdata.iommu;
-        u64 dma_addr_mask = iommu->dma_addr_mask;

-        if (device_mask > DMA_BIT_MASK(32)) {
-                if (iommu->atu)
-                        dma_addr_mask = iommu->atu->dma_addr_mask;
-                else
-                        return 0;
-        }
-
-        if ((device_mask & dma_addr_mask) == dma_addr_mask)
+        if (ali_sound_dma_hack(dev, device_mask))
                 return 1;
-        return pci64_dma_supported(to_pci_dev(dev), device_mask);
-}
-
-static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return dma_addr == SPARC_MAPPING_ERROR;
+        if (device_mask < iommu->dma_addr_mask)
+                return 0;
+        return 1;
 }

 static const struct dma_map_ops sun4v_dma_ops = {
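dma_4v_supported() is reduced to two checks: the ALi sound-chip DMA quirk
formerly reached through pci64_dma_supported() is now the shared
ali_sound_dma_hack() helper, and any mask at least as wide as the legacy
IOMMU window is accepted, presumably because device DMA goes through the
IOMMU (or the ATU, when present) translation tables either way. A sketch of
how a driver would exercise this path at probe time (pdev is a hypothetical
pci_dev, not from this diff):

    /* dma_set_mask_and_coherent() reaches dma_4v_supported() through
     * the .dma_supported hook in sun4v_dma_ops. */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
            dev_warn(&pdev->dev, "64-bit DMA mask rejected\n");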
@@ -701,7 +692,6 @@
         .map_sg = dma_4v_map_sg,
         .unmap_sg = dma_4v_unmap_sg,
         .dma_supported = dma_4v_supported,
-        .mapping_error = dma_4v_mapping_error,
 };

 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
---|