.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * linux/arch/arm/mm/dma-mapping.c
3 | 4 | *
4 | 5 | * Copyright (C) 2000-2004 Russell King
5 | 6 | *
6 | | - * This program is free software; you can redistribute it and/or modify
7 | | - * it under the terms of the GNU General Public License version 2 as
8 | | - * published by the Free Software Foundation.
9 | | - *
10 | 7 | * DMA uncached mapping support.
11 | 8 | */
12 | | -#include <linux/bootmem.h>
13 | 9 | #include <linux/module.h>
14 | 10 | #include <linux/mm.h>
15 | 11 | #include <linux/genalloc.h>
.. | ..
19 | 15 | #include <linux/init.h>
20 | 16 | #include <linux/device.h>
21 | 17 | #include <linux/dma-direct.h>
22 | | -#include <linux/dma-mapping.h>
23 | | -#include <linux/dma-contiguous.h>
| 18 | +#include <linux/dma-map-ops.h>
24 | 19 | #include <linux/highmem.h>
25 | 20 | #include <linux/memblock.h>
26 | 21 | #include <linux/slab.h>
.. | ..
29 | 24 | #include <linux/vmalloc.h>
30 | 25 | #include <linux/sizes.h>
31 | 26 | #include <linux/cma.h>
32 | | -#include <linux/swiotlb.h>
33 | 27 |
34 | 28 | #include <asm/memory.h>
35 | 29 | #include <asm/highmem.h>
.. | ..
39 | 33 | #include <asm/dma-iommu.h>
40 | 34 | #include <asm/mach/map.h>
41 | 35 | #include <asm/system_info.h>
42 | | -#include <asm/dma-contiguous.h>
| 36 | +#include <xen/swiotlb-xen.h>
43 | 37 |
44 | 38 | #include "dma.h"
45 | 39 | #include "mm.h"
.. | ..
132 | 126 | unsigned long offset, size_t size, enum dma_data_direction dir,
133 | 127 | unsigned long attrs)
134 | 128 | {
135 | | -#ifdef CONFIG_SWIOTLB
136 | | - if (unlikely(!dma_capable(dev, phys_to_dma(dev, page_to_phys(page) +
137 | | - offset), size)))
138 | | - return swiotlb_map_page(dev, page, offset, size, dir, attrs);
139 | | -#endif
140 | | -
141 | 129 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
142 | 130 | __dma_page_cpu_to_dev(page, offset, size, dir);
143 | 131 | return pfn_to_dma(dev, page_to_pfn(page)) + offset;
.. | ..
170 | 158 | if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
171 | 159 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
172 | 160 | handle & ~PAGE_MASK, size, dir);
173 | | -
174 | | -#ifdef CONFIG_SWIOTLB
175 | | - if (unlikely(is_swiotlb_buffer(dma_to_phys(dev, handle))))
176 | | - swiotlb_tbl_unmap_single(dev, dma_to_phys(dev, handle), size,
177 | | - dir, attrs);
178 | | -#endif
179 | 161 | }
180 | 162 |
181 | 163 | static void arm_dma_sync_single_for_cpu(struct device *dev,
.. | ..
183 | 165 | {
184 | 166 | unsigned int offset = handle & (PAGE_SIZE - 1);
185 | 167 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
186 | | -
187 | 168 | __dma_page_dev_to_cpu(page, offset, size, dir);
188 | | -#ifdef CONFIG_SWIOTLB
189 | | - if (unlikely(is_swiotlb_buffer(dma_to_phys(dev, handle))))
190 | | - swiotlb_tbl_sync_single(dev, dma_to_phys(dev, handle), size,
191 | | - dir, SYNC_FOR_CPU);
192 | | -#endif
193 | 169 | }
194 | 170 |
195 | 171 | static void arm_dma_sync_single_for_device(struct device *dev,
.. | ..
197 | 173 | {
198 | 174 | unsigned int offset = handle & (PAGE_SIZE - 1);
199 | 175 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
200 | | -
201 | | -#ifdef CONFIG_SWIOTLB
202 | | - if (unlikely(is_swiotlb_buffer(dma_to_phys(dev, handle))))
203 | | - swiotlb_tbl_sync_single(dev, dma_to_phys(dev, handle), size,
204 | | - dir, SYNC_FOR_DEVICE);
205 | | -#endif
206 | 176 | __dma_page_cpu_to_dev(page, offset, size, dir);
207 | 177 | }
208 | 178 |
209 | | -static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
| 179 | +/*
| 180 | + * Return whether the given device DMA address mask can be supported
| 181 | + * properly. For example, if your device can only drive the low 24-bits
| 182 | + * during bus mastering, then you would pass 0x00ffffff as the mask
| 183 | + * to this function.
| 184 | + */
| 185 | +static int arm_dma_supported(struct device *dev, u64 mask)
210 | 186 | {
211 | | - return dma_addr == ARM_MAPPING_ERROR;
| 187 | + unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
| 188 | +
| 189 | + /*
| 190 | + * Translate the device's DMA mask to a PFN limit. This
| 191 | + * PFN number includes the page which we can DMA to.
| 192 | + */
| 193 | + return dma_to_pfn(dev, mask) >= max_dma_pfn;
212 | 194 | }
213 | 195 |
214 | 196 | const struct dma_map_ops arm_dma_ops = {
215 | 197 | .alloc = arm_dma_alloc,
216 | 198 | .free = arm_dma_free,
| 199 | + .alloc_pages = dma_direct_alloc_pages,
| 200 | + .free_pages = dma_direct_free_pages,
217 | 201 | .mmap = arm_dma_mmap,
218 | 202 | .get_sgtable = arm_dma_get_sgtable,
219 | 203 | .map_page = arm_dma_map_page,
220 | 204 | .unmap_page = arm_dma_unmap_page,
221 | 205 | .map_sg = arm_dma_map_sg,
222 | 206 | .unmap_sg = arm_dma_unmap_sg,
| 207 | + .map_resource = dma_direct_map_resource,
223 | 208 | .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
224 | 209 | .sync_single_for_device = arm_dma_sync_single_for_device,
225 | 210 | .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
226 | 211 | .sync_sg_for_device = arm_dma_sync_sg_for_device,
227 | | - .mapping_error = arm_dma_mapping_error,
228 | 212 | .dma_supported = arm_dma_supported,
| 213 | + .get_required_mask = dma_direct_get_required_mask,
229 | 214 | };
230 | 215 | EXPORT_SYMBOL(arm_dma_ops);
231 | 216 |
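[Illustrative aside, not part of the patch: the new arm_dma_supported() above is wired in as the ops->dma_supported hook, so a driver exercises it indirectly through the generic mask helpers rather than calling it directly. A minimal sketch for the 24-bit example in the comment, with a hypothetical platform device "pdev":]

	/* Ask for a 24-bit DMA mask; a non-zero return means DMA is unusable. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(24)))
		dev_warn(&pdev->dev, "no suitable DMA available\n");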
.. | ..
240 | 225 | const struct dma_map_ops arm_coherent_dma_ops = {
241 | 226 | .alloc = arm_coherent_dma_alloc,
242 | 227 | .free = arm_coherent_dma_free,
| 228 | + .alloc_pages = dma_direct_alloc_pages,
| 229 | + .free_pages = dma_direct_free_pages,
243 | 230 | .mmap = arm_coherent_dma_mmap,
244 | 231 | .get_sgtable = arm_dma_get_sgtable,
245 | 232 | .map_page = arm_coherent_dma_map_page,
246 | 233 | .map_sg = arm_dma_map_sg,
247 | | - .mapping_error = arm_dma_mapping_error,
| 234 | + .map_resource = dma_direct_map_resource,
248 | 235 | .dma_supported = arm_dma_supported,
| 236 | + .get_required_mask = dma_direct_get_required_mask,
249 | 237 | };
250 | 238 | EXPORT_SYMBOL(arm_coherent_dma_ops);
251 | | -
252 | | -static int __dma_supported(struct device *dev, u64 mask, bool warn)
253 | | -{
254 | | - unsigned long max_dma_pfn;
255 | | -
256 | | - /*
257 | | - * If the mask allows for more memory than we can address,
258 | | - * and we actually have that much memory, then we must
259 | | - * indicate that DMA to this device is not supported.
260 | | - */
261 | | - if (sizeof(mask) != sizeof(dma_addr_t) &&
262 | | - mask > (dma_addr_t)~0 &&
263 | | - dma_to_pfn(dev, ~0) < max_pfn - 1) {
264 | | - if (warn) {
265 | | - dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
266 | | - mask);
267 | | - dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
268 | | - }
269 | | - return 0;
270 | | - }
271 | | -
272 | | - max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
273 | | -
274 | | - /*
275 | | - * Translate the device's DMA mask to a PFN limit. This
276 | | - * PFN number includes the page which we can DMA to.
277 | | - */
278 | | - if (dma_to_pfn(dev, mask) < max_dma_pfn) {
279 | | - if (warn)
280 | | - dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
281 | | - mask,
282 | | - dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
283 | | - max_dma_pfn + 1);
284 | | - return 0;
285 | | - }
286 | | -
287 | | - return 1;
288 | | -}
289 | | -
290 | | -static u64 get_coherent_dma_mask(struct device *dev)
291 | | -{
292 | | - u64 mask = (u64)DMA_BIT_MASK(32);
293 | | -
294 | | - if (dev) {
295 | | - mask = dev->coherent_dma_mask;
296 | | -
297 | | - /*
298 | | - * Sanity check the DMA mask - it must be non-zero, and
299 | | - * must be able to be satisfied by a DMA allocation.
300 | | - */
301 | | - if (mask == 0) {
302 | | - dev_warn(dev, "coherent DMA mask is unset\n");
303 | | - return 0;
304 | | - }
305 | | -
306 | | - if (!__dma_supported(dev, mask, true))
307 | | - return 0;
308 | | - }
309 | | -
310 | | - return mask;
311 | | -}
312 | 239 |
313 | 240 | static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
314 | 241 | {
.. | ..
387 | 314 | static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
388 | 315 | pgprot_t prot, struct page **ret_page,
389 | 316 | const void *caller, bool want_vaddr);
390 | | -
391 | | -static void *
392 | | -__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
393 | | - const void *caller)
394 | | -{
395 | | - /*
396 | | - * DMA allocation can be mapped to user space, so lets
397 | | - * set VM_USERMAP flags too.
398 | | - */
399 | | - return dma_common_contiguous_remap(page, size,
400 | | - VM_ARM_DMA_CONSISTENT | VM_USERMAP,
401 | | - prot, caller);
402 | | -}
403 | | -
404 | | -static void __dma_free_remap(void *cpu_addr, size_t size)
405 | | -{
406 | | - dma_common_free_remap(cpu_addr, size,
407 | | - VM_ARM_DMA_CONSISTENT | VM_USERMAP, false);
408 | | -}
409 | 317 |
410 | 318 | #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
411 | 319 | static struct gen_pool *atomic_pool __ro_after_init;
.. | ..
528 | 436 | }
529 | 437 | }
530 | 438 |
531 | | -static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
532 | | - void *data)
| 439 | +static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
533 | 440 | {
534 | 441 | struct page *page = virt_to_page(addr);
535 | 442 | pgprot_t prot = *(pgprot_t *)data;
.. | ..
563 | 470 | if (!want_vaddr)
564 | 471 | goto out;
565 | 472 |
566 | | - ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
| 473 | + ptr = dma_common_contiguous_remap(page, size, prot, caller);
567 | 474 | if (!ptr) {
568 | 475 | __dma_free_buffer(page, size);
569 | 476 | return NULL;
.. | ..
597 | 504 |
598 | 505 | static bool __in_atomic_pool(void *start, size_t size)
599 | 506 | {
600 | | - return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
| 507 | + return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
601 | 508 | }
602 | 509 |
603 | 510 | static int __free_from_pool(void *start, size_t size)
.. | ..
630 | 537 | goto out;
631 | 538 |
632 | 539 | if (PageHighMem(page)) {
633 | | - ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
| 540 | + ptr = dma_common_contiguous_remap(page, size, prot, caller);
634 | 541 | if (!ptr) {
635 | 542 | dma_release_from_contiguous(dev, page, count);
636 | 543 | return NULL;
.. | ..
650 | 557 | {
651 | 558 | if (want_vaddr) {
652 | 559 | if (PageHighMem(page))
653 | | - __dma_free_remap(cpu_addr, size);
| 560 | + dma_common_free_remap(cpu_addr, size);
654 | 561 | else
655 | 562 | __dma_remap(page, size, PAGE_KERNEL);
656 | 563 | }
.. | ..
742 | 649 | static void remap_allocator_free(struct arm_dma_free_args *args)
743 | 650 | {
744 | 651 | if (args->want_vaddr)
745 | | - __dma_free_remap(args->cpu_addr, args->size);
| 652 | + dma_common_free_remap(args->cpu_addr, args->size);
746 | 653 |
747 | 654 | __dma_free_buffer(args->page, args->size);
748 | 655 | }
.. | ..
756 | 663 | gfp_t gfp, pgprot_t prot, bool is_coherent,
757 | 664 | unsigned long attrs, const void *caller)
758 | 665 | {
759 | | - u64 mask = get_coherent_dma_mask(dev);
| 666 | + u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
760 | 667 | struct page *page = NULL;
761 | 668 | void *addr;
762 | 669 | bool allowblock, cma;
.. | ..
780 | 687 | }
781 | 688 | #endif
782 | 689 |
783 | | - if (!mask)
784 | | - return NULL;
785 | | -
786 | 690 | buf = kzalloc(sizeof(*buf),
787 | 691 | gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
788 | 692 | if (!buf)
.. | ..
801 | 705 | gfp &= ~(__GFP_COMP);
802 | 706 | args.gfp = gfp;
803 | 707 |
804 | | - *handle = ARM_MAPPING_ERROR;
| 708 | + *handle = DMA_MAPPING_ERROR;
805 | 709 | allowblock = gfpflags_allow_blocking(gfp);
806 | 710 | cma = allowblock ? dev_get_cma_area(dev) : false;
807 | 711 |
.. | ..
930 | 834 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
931 | 835 | }
932 | 836 |
933 | | -/*
934 | | - * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
935 | | - * that the intention is to allow exporting memory allocated via the
936 | | - * coherent DMA APIs through the dma_buf API, which only accepts a
937 | | - * scattertable. This presents a couple of problems:
938 | | - * 1. Not all memory allocated via the coherent DMA APIs is backed by
939 | | - * a struct page
940 | | - * 2. Passing coherent DMA memory into the streaming APIs is not allowed
941 | | - * as we will try to flush the memory through a different alias to that
942 | | - * actually being used (and the flushes are redundant.)
943 | | - */
944 | 837 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
945 | 838 | void *cpu_addr, dma_addr_t handle, size_t size,
946 | 839 | unsigned long attrs)
.. | ..
1166 | 1059 | dir);
1167 | 1060 | }
1168 | 1061 |
1169 | | -/*
1170 | | - * Return whether the given device DMA address mask can be supported
1171 | | - * properly. For example, if your device can only drive the low 24-bits
1172 | | - * during bus mastering, then you would pass 0x00ffffff as the mask
1173 | | - * to this function.
1174 | | - */
1175 | | -int arm_dma_supported(struct device *dev, u64 mask)
1176 | | -{
1177 | | - return __dma_supported(dev, mask, false);
1178 | | -}
1179 | | -
1180 | 1062 | static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
1181 | 1063 | {
| 1064 | + /*
| 1065 | + * When CONFIG_ARM_LPAE is set, physical address can extend above
| 1066 | + * 32-bits, which then can't be addressed by devices that only support
| 1067 | + * 32-bit DMA.
| 1068 | + * Use the generic dma-direct / swiotlb ops code in that case, as that
| 1069 | + * handles bounce buffering for us.
| 1070 | + */
| 1071 | + if (IS_ENABLED(CONFIG_ARM_LPAE))
| 1072 | + return NULL;
1182 | 1073 | return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
1183 | 1074 | }
1184 | 1075 |
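[Illustrative aside, not part of the patch: returning NULL from arm_get_dma_map_ops() matters because the common mapping code of this kernel generation treats a device without dma_map_ops as a dma-direct device, and the dma-direct path is the one that can bounce buffers through swiotlb when a physical address exceeds the device's mask. Roughly, and simplified from the generic header rather than this file:]

	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		if (dev->dma_ops)
			return dev->dma_ops;
		return get_arch_dma_ops(dev->bus);	/* NULL => dma-direct / swiotlb */
	}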
.. | ..
1224 | 1115 | count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1225 | 1116 | align = (1 << order) - 1;
1226 | 1117 |
1227 | | - /* workaround for avoid va hole */
1228 | | - align = 0;
1229 | | -
1230 | 1118 | spin_lock_irqsave(&mapping->lock, flags);
1231 | 1119 | for (i = 0; i < mapping->nr_bitmaps; i++) {
1232 | 1120 | start = bitmap_find_next_zero_area(mapping->bitmaps[i],
.. | ..
1247 | 1135 | if (i == mapping->nr_bitmaps) {
1248 | 1136 | if (extend_iommu_mapping(mapping)) {
1249 | 1137 | spin_unlock_irqrestore(&mapping->lock, flags);
1250 | | - return ARM_MAPPING_ERROR;
| 1138 | + return DMA_MAPPING_ERROR;
1251 | 1139 | }
1252 | 1140 |
1253 | 1141 | start = bitmap_find_next_zero_area(mapping->bitmaps[i],
.. | ..
1255 | 1143 |
1256 | 1144 | if (start > mapping->bits) {
1257 | 1145 | spin_unlock_irqrestore(&mapping->lock, flags);
1258 | | - return ARM_MAPPING_ERROR;
| 1146 | + return DMA_MAPPING_ERROR;
1259 | 1147 | }
1260 | 1148 |
1261 | 1149 | bitmap_set(mapping->bitmaps[i], start, count);
.. | ..
1416 | 1304 | }
1417 | 1305 |
1418 | 1306 | /*
1419 | | - * Create a CPU mapping for a specified pages
1420 | | - */
1421 | | -static void *
1422 | | -__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1423 | | - const void *caller)
1424 | | -{
1425 | | - return dma_common_pages_remap(pages, size,
1426 | | - VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
1427 | | -}
1428 | | -
1429 | | -/*
1430 | 1307 | * Create a mapping in device IO address space for specified pages
1431 | 1308 | */
1432 | 1309 | static dma_addr_t
.. | ..
1439 | 1316 | int i;
1440 | 1317 |
1441 | 1318 | dma_addr = __alloc_iova(mapping, size);
1442 | | - if (dma_addr == ARM_MAPPING_ERROR)
| 1319 | + if (dma_addr == DMA_MAPPING_ERROR)
1443 | 1320 | return dma_addr;
1444 | 1321 |
1445 | 1322 | iova = dma_addr;
.. | ..
1466 | 1343 | fail:
1467 | 1344 | iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1468 | 1345 | __free_iova(mapping, dma_addr, size);
1469 | | - return ARM_MAPPING_ERROR;
| 1346 | + return DMA_MAPPING_ERROR;
1470 | 1347 | }
1471 | 1348 |
1472 | 1349 | static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
.. | ..
1498 | 1375 |
1499 | 1376 | static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
1500 | 1377 | {
1501 | | - struct vm_struct *area;
1502 | | -
1503 | 1378 | if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1504 | 1379 | return __atomic_get_pages(cpu_addr);
1505 | 1380 |
1506 | 1381 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1507 | 1382 | return cpu_addr;
1508 | 1383 |
1509 | | - area = find_vm_area(cpu_addr);
1510 | | - if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1511 | | - return area->pages;
1512 | | - return NULL;
| 1384 | + return dma_common_find_pages(cpu_addr);
1513 | 1385 | }
1514 | 1386 |
1515 | 1387 | static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
.. | ..
1527 | 1399 | return NULL;
1528 | 1400 |
1529 | 1401 | *handle = __iommu_create_mapping(dev, &page, size, attrs);
1530 | | - if (*handle == ARM_MAPPING_ERROR)
| 1402 | + if (*handle == DMA_MAPPING_ERROR)
1531 | 1403 | goto err_mapping;
1532 | 1404 |
1533 | 1405 | return addr;
.. | ..
1555 | 1427 | struct page **pages;
1556 | 1428 | void *addr = NULL;
1557 | 1429 |
1558 | | - *handle = ARM_MAPPING_ERROR;
| 1430 | + *handle = DMA_MAPPING_ERROR;
1559 | 1431 | size = PAGE_ALIGN(size);
1560 | 1432 |
1561 | 1433 | if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
.. | ..
1576 | 1448 | return NULL;
1577 | 1449 |
1578 | 1450 | *handle = __iommu_create_mapping(dev, pages, size, attrs);
1579 | | - if (*handle == ARM_MAPPING_ERROR)
| 1451 | + if (*handle == DMA_MAPPING_ERROR)
1580 | 1452 | goto err_buffer;
1581 | 1453 |
1582 | 1454 | if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
1583 | 1455 | return pages;
1584 | 1456 |
1585 | | - addr = __iommu_alloc_remap(pages, size, gfp, prot,
| 1457 | + addr = dma_common_pages_remap(pages, size, prot,
1586 | 1458 | __builtin_return_address(0));
1587 | 1459 | if (!addr)
1588 | 1460 | goto err_mapping;
.. | ..
1612 | 1484 | void *cpu_addr, dma_addr_t dma_addr, size_t size,
1613 | 1485 | unsigned long attrs)
1614 | 1486 | {
1615 | | - unsigned long uaddr = vma->vm_start;
1616 | | - unsigned long usize = vma->vm_end - vma->vm_start;
1617 | 1487 | struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1618 | 1488 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1619 | | - unsigned long off = vma->vm_pgoff;
| 1489 | + int err;
1620 | 1490 |
1621 | 1491 | if (!pages)
1622 | 1492 | return -ENXIO;
1623 | 1493 |
1624 | | - if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
| 1494 | + if (vma->vm_pgoff >= nr_pages)
1625 | 1495 | return -ENXIO;
1626 | 1496 |
1627 | | - pages += off;
| 1497 | + err = vm_map_pages(vma, pages, nr_pages);
| 1498 | + if (err)
| 1499 | + pr_err("Remapping memory failed: %d\n", err);
1628 | 1500 |
1629 | | - do {
1630 | | - int ret = vm_insert_page(vma, uaddr, *pages++);
1631 | | - if (ret) {
1632 | | - pr_err("Remapping memory failed: %d\n", ret);
1633 | | - return ret;
1634 | | - }
1635 | | - uaddr += PAGE_SIZE;
1636 | | - usize -= PAGE_SIZE;
1637 | | - } while (usize > 0);
1638 | | -
1639 | | - return 0;
| 1501 | + return err;
1640 | 1502 | }
1641 | 1503 | static int arm_iommu_mmap_attrs(struct device *dev,
1642 | 1504 | struct vm_area_struct *vma, void *cpu_addr,
.. | ..
1658 | 1520 | * free a page as defined by the above mapping.
1659 | 1521 | * Must not be called with IRQs disabled.
1660 | 1522 | */
1661 | | -void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
| 1523 | +static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1662 | 1524 | dma_addr_t handle, unsigned long attrs, int coherent_flag)
1663 | 1525 | {
1664 | 1526 | struct page **pages;
.. | ..
1675 | 1537 | return;
1676 | 1538 | }
1677 | 1539 |
1678 | | - if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
1679 | | - dma_common_free_remap(cpu_addr, size,
1680 | | - VM_ARM_DMA_CONSISTENT | VM_USERMAP, false);
1681 | | - }
| 1540 | + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
| 1541 | + dma_common_free_remap(cpu_addr, size);
1682 | 1542 |
1683 | 1543 | __iommu_remove_mapping(dev, handle, size);
1684 | 1544 | __iommu_free_buffer(dev, pages, size, attrs);
1685 | 1545 | }
1686 | 1546 |
1687 | | -void arm_iommu_free_attrs(struct device *dev, size_t size,
1688 | | - void *cpu_addr, dma_addr_t handle, unsigned long attrs)
| 1547 | +static void arm_iommu_free_attrs(struct device *dev, size_t size,
| 1548 | + void *cpu_addr, dma_addr_t handle,
| 1549 | + unsigned long attrs)
1689 | 1550 | {
1690 | 1551 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
1691 | 1552 | }
1692 | 1553 |
1693 | | -void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
| 1554 | +static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
1694 | 1555 | void *cpu_addr, dma_addr_t handle, unsigned long attrs)
1695 | 1556 | {
1696 | 1557 | __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
.. | ..
1726 | 1587 | int prot;
1727 | 1588 |
1728 | 1589 | size = PAGE_ALIGN(size);
1729 | | - *handle = ARM_MAPPING_ERROR;
| 1590 | + *handle = DMA_MAPPING_ERROR;
1730 | 1591 |
1731 | 1592 | iova_base = iova = __alloc_iova(mapping, size);
1732 | | - if (iova == ARM_MAPPING_ERROR)
| 1593 | + if (iova == DMA_MAPPING_ERROR)
1733 | 1594 | return -ENOMEM;
1734 | 1595 |
1735 | 1596 | for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
.. | ..
1769 | 1630 | for (i = 1; i < nents; i++) {
1770 | 1631 | s = sg_next(s);
1771 | 1632 |
1772 | | - s->dma_address = ARM_MAPPING_ERROR;
| 1633 | + s->dma_address = DMA_MAPPING_ERROR;
1773 | 1634 | s->dma_length = 0;
1774 | 1635 |
1775 | 1636 | if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
.. | ..
1814 | 1675 | * possible) and tagged with the appropriate dma address and length. They are
1815 | 1676 | * obtained via sg_dma_{address,length}.
1816 | 1677 | */
1817 | | -int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
| 1678 | +static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1818 | 1679 | int nents, enum dma_data_direction dir, unsigned long attrs)
1819 | 1680 | {
1820 | 1681 | return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
.. | ..
1832 | 1693 | * tagged with the appropriate dma address and length. They are obtained via
1833 | 1694 | * sg_dma_{address,length}.
1834 | 1695 | */
1835 | | -int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
| 1696 | +static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1836 | 1697 | int nents, enum dma_data_direction dir, unsigned long attrs)
1837 | 1698 | {
1838 | 1699 | return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
.. | ..
1865 | 1726 | * Unmap a set of streaming mode DMA translations. Again, CPU access
1866 | 1727 | * rules concerning calls here are the same as for dma_unmap_single().
1867 | 1728 | */
1868 | | -void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1869 | | - int nents, enum dma_data_direction dir,
| 1729 | +static void arm_coherent_iommu_unmap_sg(struct device *dev,
| 1730 | + struct scatterlist *sg, int nents, enum dma_data_direction dir,
1870 | 1731 | unsigned long attrs)
1871 | 1732 | {
1872 | 1733 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
.. | ..
1882 | 1743 | * Unmap a set of streaming mode DMA translations. Again, CPU access
1883 | 1744 | * rules concerning calls here are the same as for dma_unmap_single().
1884 | 1745 | */
1885 | | -void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1886 | | - enum dma_data_direction dir,
1887 | | - unsigned long attrs)
| 1746 | +static void arm_iommu_unmap_sg(struct device *dev,
| 1747 | + struct scatterlist *sg, int nents,
| 1748 | + enum dma_data_direction dir,
| 1749 | + unsigned long attrs)
1888 | 1750 | {
1889 | 1751 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1890 | 1752 | }
.. | ..
1896 | 1758 | * @nents: number of buffers to map (returned from dma_map_sg)
1897 | 1759 | * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1898 | 1760 | */
1899 | | -void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
| 1761 | +static void arm_iommu_sync_sg_for_cpu(struct device *dev,
| 1762 | + struct scatterlist *sg,
1900 | 1763 | int nents, enum dma_data_direction dir)
1901 | 1764 | {
1902 | 1765 | struct scatterlist *s;
.. | ..
1914 | 1777 | * @nents: number of buffers to map (returned from dma_map_sg)
1915 | 1778 | * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1916 | 1779 | */
1917 | | -void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
| 1780 | +static void arm_iommu_sync_sg_for_device(struct device *dev,
| 1781 | + struct scatterlist *sg,
1918 | 1782 | int nents, enum dma_data_direction dir)
1919 | 1783 | {
1920 | 1784 | struct scatterlist *s;
.. | ..
1944 | 1808 | int ret, prot, len = PAGE_ALIGN(size + offset);
1945 | 1809 |
1946 | 1810 | dma_addr = __alloc_iova(mapping, len);
1947 | | - if (dma_addr == ARM_MAPPING_ERROR)
| 1811 | + if (dma_addr == DMA_MAPPING_ERROR)
1948 | 1812 | return dma_addr;
1949 | 1813 |
1950 | 1814 | prot = __dma_info_to_prot(dir, attrs);
.. | ..
1956 | 1820 | return dma_addr + offset;
1957 | 1821 | fail:
1958 | 1822 | __free_iova(mapping, dma_addr, len);
1959 | | - return ARM_MAPPING_ERROR;
| 1823 | + return DMA_MAPPING_ERROR;
1960 | 1824 | }
1961 | 1825 |
1962 | 1826 | /**
.. | ..
2050 | 1914 | size_t len = PAGE_ALIGN(size + offset);
2051 | 1915 |
2052 | 1916 | dma_addr = __alloc_iova(mapping, len);
2053 | | - if (dma_addr == ARM_MAPPING_ERROR)
| 1917 | + if (dma_addr == DMA_MAPPING_ERROR)
2054 | 1918 | return dma_addr;
2055 | 1919 |
2056 | 1920 | prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
.. | ..
2062 | 1926 | return dma_addr + offset;
2063 | 1927 | fail:
2064 | 1928 | __free_iova(mapping, dma_addr, len);
2065 | | - return ARM_MAPPING_ERROR;
| 1929 | + return DMA_MAPPING_ERROR;
2066 | 1930 | }
2067 | 1931 |
2068 | 1932 | /**
.. | ..
2116 | 1980 | __dma_page_cpu_to_dev(page, offset, size, dir);
2117 | 1981 | }
2118 | 1982 |
2119 | | -const struct dma_map_ops iommu_ops = {
| 1983 | +static const struct dma_map_ops iommu_ops = {
2120 | 1984 | .alloc = arm_iommu_alloc_attrs,
2121 | 1985 | .free = arm_iommu_free_attrs,
2122 | 1986 | .mmap = arm_iommu_mmap_attrs,
.. | ..
2135 | 1999 | .map_resource = arm_iommu_map_resource,
2136 | 2000 | .unmap_resource = arm_iommu_unmap_resource,
2137 | 2001 |
2138 | | - .mapping_error = arm_dma_mapping_error,
2139 | 2002 | .dma_supported = arm_dma_supported,
2140 | 2003 | };
2141 | 2004 |
2142 | | -const struct dma_map_ops iommu_coherent_ops = {
| 2005 | +static const struct dma_map_ops iommu_coherent_ops = {
2143 | 2006 | .alloc = arm_coherent_iommu_alloc_attrs,
2144 | 2007 | .free = arm_coherent_iommu_free_attrs,
2145 | 2008 | .mmap = arm_coherent_iommu_mmap_attrs,
.. | ..
2154 | 2017 | .map_resource = arm_iommu_map_resource,
2155 | 2018 | .unmap_resource = arm_iommu_unmap_resource,
2156 | 2019 |
2157 | | - .mapping_error = arm_dma_mapping_error,
2158 | 2020 | .dma_supported = arm_dma_supported,
2159 | 2021 | };
2160 | 2022 |
.. | ..
2316 | 2178 | * @dev: valid struct device pointer
2317 | 2179 | *
2318 | 2180 | * Detaches the provided device from a previously attached map.
2319 | | - * This voids the dma operations (dma_map_ops pointer)
| 2181 | + * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
2320 | 2182 | */
2321 | 2183 | void arm_iommu_detach_device(struct device *dev)
2322 | 2184 | {
.. | ..
2398 | 2260 | const struct dma_map_ops *dma_ops;
2399 | 2261 |
2400 | 2262 | dev->archdata.dma_coherent = coherent;
| 2263 | +#ifdef CONFIG_SWIOTLB
| 2264 | + dev->dma_coherent = coherent;
| 2265 | +#endif
2401 | 2266 |
2402 | 2267 | /*
2403 | 2268 | * Don't override the dma_ops if they have already been set. Ideally
.. | ..
2415 | 2280 | set_dma_ops(dev, dma_ops);
2416 | 2281 |
2417 | 2282 | #ifdef CONFIG_XEN
2418 | | - if (xen_initial_domain()) {
2419 | | - dev->archdata.dev_dma_ops = dev->dma_ops;
2420 | | - dev->dma_ops = xen_dma_ops;
2421 | | - }
| 2283 | + if (xen_initial_domain())
| 2284 | + dev->dma_ops = &xen_swiotlb_dma_ops;
2422 | 2285 | #endif
2423 | 2286 | dev->archdata.dma_ops_setup = true;
2424 | 2287 | }
2425 | | -EXPORT_SYMBOL_GPL(arch_setup_dma_ops);
2426 | 2288 |
2427 | 2289 | void arch_teardown_dma_ops(struct device *dev)
2428 | 2290 | {
.. | ..
2433 | 2295 | /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
2434 | 2296 | set_dma_ops(dev, NULL);
2435 | 2297 | }
| 2298 | +
| 2299 | +#ifdef CONFIG_SWIOTLB
| 2300 | +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
| 2301 | + enum dma_data_direction dir)
| 2302 | +{
| 2303 | + __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
| 2304 | + size, dir);
| 2305 | +}
| 2306 | +
| 2307 | +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
| 2308 | + enum dma_data_direction dir)
| 2309 | +{
| 2310 | + __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
| 2311 | + size, dir);
| 2312 | +}
| 2313 | +
| 2314 | +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
| 2315 | + gfp_t gfp, unsigned long attrs)
| 2316 | +{
| 2317 | + return __dma_alloc(dev, size, dma_handle, gfp,
| 2318 | + __get_dma_pgprot(attrs, PAGE_KERNEL), false,
| 2319 | + attrs, __builtin_return_address(0));
| 2320 | +}
| 2321 | +
| 2322 | +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
| 2323 | + dma_addr_t dma_handle, unsigned long attrs)
| 2324 | +{
| 2325 | + __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
| 2326 | +}
| 2327 | +#endif /* CONFIG_SWIOTLB */
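[Illustrative aside, not part of the patch: with CONFIG_SWIOTLB enabled, the streaming API on a non-coherent device now reaches the arch_sync_dma_for_device() and arch_sync_dma_for_cpu() hooks above through the generic dma-direct code rather than through the arm_dma_ops callbacks. A typical driver-side sequence that ends up in those hooks, with hypothetical dev/buf/len, looks like:]

	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... the device reads the buffer ... */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);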
---|