@@ -1,17 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on linux/arch/arm/mm/dma-mapping.c
  *
  * Copyright (C) 2000-2004 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 #include <linux/scatterlist.h>
 
 #include <asm/cachetype.h>
@@ -22,7 +19,7 @@
 #include "dma.h"
 
 /*
- * dma_direct_ops is used if
+ * The generic direct mapping code is used if
  * - MMU/MPU is off
  * - cpu is v7m w/o cache support
  * - device is coherent
@@ -39,17 +36,7 @@
                                  unsigned long attrs)
 
 {
-        void *ret;
-
-        /*
-         * Try generic allocator first if we are advertised that
-         * consistency is not required.
-         */
-
-        if (attrs & DMA_ATTR_NON_CONSISTENT)
-                return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-
-        ret = dma_alloc_from_global_coherent(size, dma_handle);
+        void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
 
         /*
         * dma_alloc_from_global_coherent() may fail because:
@@ -69,16 +56,9 @@
                                void *cpu_addr, dma_addr_t dma_addr,
                                unsigned long attrs)
 {
-        if (attrs & DMA_ATTR_NON_CONSISTENT) {
-                dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
-        } else {
-                int ret = dma_release_from_global_coherent(get_order(size),
-                                                           cpu_addr);
+        int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
 
-                WARN_ON_ONCE(ret == 0);
-        }
-
-        return;
+        WARN_ON_ONCE(ret == 0);
 }
 
 static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
@@ -89,8 +69,9 @@
 
         if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                 return ret;
-
-        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+                return ret;
+        return -ENXIO;
 }
 
 
@@ -196,6 +177,8 @@
 const struct dma_map_ops arm_nommu_dma_ops = {
         .alloc                  = arm_nommu_dma_alloc,
         .free                   = arm_nommu_dma_free,
+        .alloc_pages            = dma_direct_alloc_pages,
+        .free_pages             = dma_direct_free_pages,
         .mmap                   = arm_nommu_dma_mmap,
         .map_page               = arm_nommu_dma_map_page,
         .unmap_page             = arm_nommu_dma_unmap_page,
@@ -208,16 +191,9 @@
 };
 EXPORT_SYMBOL(arm_nommu_dma_ops);
 
-static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
-{
-        return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
-}
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                         const struct iommu_ops *iommu, bool coherent)
 {
-        const struct dma_map_ops *dma_ops;
-
         if (IS_ENABLED(CONFIG_CPU_V7M)) {
                 /*
                  * Cache support for v7m is optional, so can be treated as
@@ -233,11 +209,6 @@
                 dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
         }
 
-        dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
-
-        set_dma_ops(dev, dma_ops);
-}
-
-void arch_teardown_dma_ops(struct device *dev)
-{
+        if (!dev->archdata.dma_coherent)
+                set_dma_ops(dev, &arm_nommu_dma_ops);
 }