```diff
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
 /*
- * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ * ARCH specific callbacks for generic noncoherent DMA ops
  * - hardware IOC not available (or "dma-coherent" not set for device in DT)
  * - But still handle both coherent and non-coherent requests from caller
  *
  * For DMA coherent hardware (IOC) generic code suffices
  */
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	unsigned long order = get_order(size);
-	struct page *page;
-	phys_addr_t paddr;
-	void *kvaddr;
-	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
-
-	/*
-	 * __GFP_HIGHMEM flag is cleared by upper layer functions
-	 * (in include/linux/dma-mapping.h) so we should never get a
-	 * __GFP_HIGHMEM here.
-	 */
-	BUG_ON(gfp & __GFP_HIGHMEM);
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		return NULL;
-
-	/* This is linear addr (0x8000_0000 based) */
-	paddr = page_to_phys(page);
-
-	*dma_handle = paddr;
-
-	/*
-	 * A coherent buffer needs MMU mapping to enforce non-cachability.
-	 * kvaddr is kernel Virtual address (0x7000_0000 based).
-	 */
-	if (need_coh) {
-		kvaddr = ioremap_nocache(paddr, size);
-		if (kvaddr == NULL) {
-			__free_pages(page, order);
-			return NULL;
-		}
-	} else {
-		kvaddr = (void *)(u32)paddr;
-	}
-
 	/*
 	 * Evict any existing L1 and/or L2 lines for the backing page
 	 * in case it was used earlier as a normal "cached" page.
...
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
-	if (need_coh)
-		dma_cache_wback_inv(paddr, size);
-
-	return kvaddr;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	phys_addr_t paddr = dma_handle;
-	struct page *page = virt_to_page(paddr);
-
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		iounmap((void __force __iomem *)vaddr);
-
-	__free_pages(page, get_order(size));
-}
-
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = __phys_to_pfn(dma_addr);
-	unsigned long off = vma->vm_pgoff;
-	int ret = -ENXIO;
-
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
-	if (off < count && user_count <= (count - off)) {
-		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
-				      user_count << PAGE_SHIFT,
-				      vma->vm_page_prot);
-	}
-
-	return ret;
+	dma_cache_wback_inv(page_to_phys(page), size);
 }
 
 /*
...
  * upper layer functions (in include/linux/dma-mapping.h)
  */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
...
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
...
 }
 
 /*
- * Plug in coherent or noncoherent dma ops
+ * Plug in direct dma map ops.
  */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
...
 	/*
 	 * IOC hardware snoops all DMA traffic keeping the caches consistent
 	 * with memory - eliding need for any explicit cache maintenance of
-	 * DMA buffers - so we can use dma_direct cache ops.
+	 * DMA buffers.
 	 */
-	if (is_isa_arcv2() && ioc_enable && coherent) {
-		set_dma_ops(dev, &dma_direct_ops);
-		dev_info(dev, "use dma_direct_ops cache ops\n");
-	} else {
-		set_dma_ops(dev, &dma_noncoherent_ops);
-		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
-	}
+	if (is_isa_arcv2() && ioc_enable && coherent)
+		dev->dma_coherent = true;
+
+	dev_info(dev, "use %scoherent DMA ops\n",
+		 dev->dma_coherent ? "" : "non");
 }
```
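For orientation, here is a minimal sketch of how a consumer now reaches the hooks that remain after this conversion. It is not part of the patch; the driver function `mydrv_do_io` and its buffer names are hypothetical. The patch's own comments establish that coherent allocation moved into the generic noncoherent DMA code, which (on a device not marked coherent) is expected to call `arch_dma_prep_coherent()` to evict stale cache lines for a freshly allocated buffer, while streaming mappings funnel into `arch_sync_dma_for_device()`/`arch_sync_dma_for_cpu()`.

```c
/*
 * Hypothetical driver snippet, for illustration only; it exercises the
 * call paths that remain in the arch code after this patch.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static int mydrv_do_io(struct device *dev, void *buf, size_t len)
{
	dma_addr_t coh_handle, stream_handle;
	void *cpu;
	int ret = 0;

	/*
	 * Coherent buffer: served by the generic DMA code. On a
	 * non-coherent ARC device this is where arch_dma_prep_coherent()
	 * evicts stale L1/L2 lines for the backing page; the removed
	 * arch_dma_alloc()/arch_dma_free()/arch_dma_mmap() are no
	 * longer the arch's responsibility.
	 */
	cpu = dma_alloc_coherent(dev, len, &coh_handle, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/*
	 * Streaming mapping: dma_map_single() leads to
	 * arch_sync_dma_for_device() (cache writeback and/or invalidate,
	 * depending on direction) unless arch_setup_dma_ops() marked the
	 * device coherent via dev->dma_coherent.
	 */
	stream_handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, stream_handle)) {
		ret = -ENOMEM;
		goto free_coh;
	}

	/* ... program the device and wait for the transfer here ... */

	/* Unmapping syncs the CPU view via arch_sync_dma_for_cpu(). */
	dma_unmap_single(dev, stream_handle, len, DMA_BIDIRECTIONAL);
free_coh:
	dma_free_coherent(dev, len, cpu, coh_handle);
	return ret;
}
```

The design choice visible in the last hunk is that per-device coherence becomes a flag (`dev->dma_coherent`) consulted by the common code, instead of swapping whole ops tables (`dma_direct_ops` vs. `dma_noncoherent_ops`) per device.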