forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/arm/mm/dma-mapping.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/arch/arm/mm/dma-mapping.c
  *
  * Copyright (C) 2000-2004 Russell King
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  * DMA uncached mapping support.
  */
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/genalloc.h>
@@ -19,8 +15,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-direct.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
@@ -29,7 +24,6 @@
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
 #include <linux/cma.h>
-#include <linux/swiotlb.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -39,7 +33,7 @@
 #include <asm/dma-iommu.h>
 #include <asm/mach/map.h>
 #include <asm/system_info.h>
-#include <asm/dma-contiguous.h>
+#include <xen/swiotlb-xen.h>
 
 #include "dma.h"
 #include "mm.h"
@@ -132,12 +126,6 @@
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
-#ifdef CONFIG_SWIOTLB
-	if (unlikely(!dma_capable(dev, phys_to_dma(dev, page_to_phys(page) +
-				  offset), size)))
-		return swiotlb_map_page(dev, page, offset, size, dir, attrs);
-#endif
-
 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
@@ -170,12 +158,6 @@
 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
-
-#ifdef CONFIG_SWIOTLB
-	if (unlikely(is_swiotlb_buffer(dma_to_phys(dev, handle))))
-		swiotlb_tbl_unmap_single(dev, dma_to_phys(dev, handle), size,
-					 dir, attrs);
-#endif
 }
 
 static void arm_dma_sync_single_for_cpu(struct device *dev,
@@ -183,13 +165,7 @@
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
-#ifdef CONFIG_SWIOTLB
-	if (unlikely(is_swiotlb_buffer(dma_to_phys(dev, handle))))
-		swiotlb_tbl_sync_single(dev, dma_to_phys(dev, handle), size,
-					dir, SYNC_FOR_CPU);
-#endif
 }
 
 static void arm_dma_sync_single_for_device(struct device *dev,
@@ -197,35 +173,44 @@
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-
-#ifdef CONFIG_SWIOTLB
-	if (unlikely(is_swiotlb_buffer(dma_to_phys(dev, handle))))
-		swiotlb_tbl_sync_single(dev, dma_to_phys(dev, handle), size,
-					dir, SYNC_FOR_DEVICE);
-#endif
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
-static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static int arm_dma_supported(struct device *dev, u64 mask)
 {
-	return dma_addr == ARM_MAPPING_ERROR;
+	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit. This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	return dma_to_pfn(dev, mask) >= max_dma_pfn;
 }
 
 const struct dma_map_ops arm_dma_ops = {
 	.alloc = arm_dma_alloc,
 	.free = arm_dma_free,
+	.alloc_pages = dma_direct_alloc_pages,
+	.free_pages = dma_direct_free_pages,
 	.mmap = arm_dma_mmap,
 	.get_sgtable = arm_dma_get_sgtable,
 	.map_page = arm_dma_map_page,
 	.unmap_page = arm_dma_unmap_page,
 	.map_sg = arm_dma_map_sg,
 	.unmap_sg = arm_dma_unmap_sg,
+	.map_resource = dma_direct_map_resource,
 	.sync_single_for_cpu = arm_dma_sync_single_for_cpu,
 	.sync_single_for_device = arm_dma_sync_single_for_device,
 	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = arm_dma_sync_sg_for_device,
-	.mapping_error = arm_dma_mapping_error,
 	.dma_supported = arm_dma_supported,
+	.get_required_mask = dma_direct_get_required_mask,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
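Note: with .mapping_error gone, arm_dma_supported() is the only mask policy hook left in these ops; it answers the mask negotiation a driver performs through the generic DMA API. A minimal caller-side sketch (hypothetical pdev, not part of this patch):

	/* Request 32-bit streaming and coherent masks; the core forwards the
	 * query to the ops' dma_supported hook and fails if it can't be met. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
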
@@ -240,75 +225,17 @@
 const struct dma_map_ops arm_coherent_dma_ops = {
 	.alloc = arm_coherent_dma_alloc,
 	.free = arm_coherent_dma_free,
+	.alloc_pages = dma_direct_alloc_pages,
+	.free_pages = dma_direct_free_pages,
 	.mmap = arm_coherent_dma_mmap,
 	.get_sgtable = arm_dma_get_sgtable,
 	.map_page = arm_coherent_dma_map_page,
 	.map_sg = arm_dma_map_sg,
-	.mapping_error = arm_dma_mapping_error,
+	.map_resource = dma_direct_map_resource,
 	.dma_supported = arm_dma_supported,
+	.get_required_mask = dma_direct_get_required_mask,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
-
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
-{
-	unsigned long max_dma_pfn;
-
-	/*
-	 * If the mask allows for more memory than we can address,
-	 * and we actually have that much memory, then we must
-	 * indicate that DMA to this device is not supported.
-	 */
-	if (sizeof(mask) != sizeof(dma_addr_t) &&
-	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
-		if (warn) {
-			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
-				 mask);
-			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
-		}
-		return 0;
-	}
-
-	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit. This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-		if (warn)
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 max_dma_pfn + 1);
-		return 0;
-	}
-
-	return 1;
-}
-
-static u64 get_coherent_dma_mask(struct device *dev)
-{
-	u64 mask = (u64)DMA_BIT_MASK(32);
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			return 0;
-		}
-
-		if (!__dma_supported(dev, mask, true))
-			return 0;
-	}
-
-	return mask;
-}
 
 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
 {
@@ -387,25 +314,6 @@
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 				  pgprot_t prot, struct page **ret_page,
 				  const void *caller, bool want_vaddr);
-
-static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
-	const void *caller)
-{
-	/*
-	 * DMA allocation can be mapped to user space, so lets
-	 * set VM_USERMAP flags too.
-	 */
-	return dma_common_contiguous_remap(page, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-			prot, caller);
-}
-
-static void __dma_free_remap(void *cpu_addr, size_t size)
-{
-	dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP, false);
-}
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
 static struct gen_pool *atomic_pool __ro_after_init;
@@ -528,8 +436,7 @@
 	}
 }
 
-static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
-			void *data)
+static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
 {
 	struct page *page = virt_to_page(addr);
 	pgprot_t prot = *(pgprot_t *)data;
@@ -563,7 +470,7 @@
 	if (!want_vaddr)
 		goto out;
 
-	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+	ptr = dma_common_contiguous_remap(page, size, prot, caller);
 	if (!ptr) {
 		__dma_free_buffer(page, size);
 		return NULL;
@@ -597,7 +504,7 @@
 
 static bool __in_atomic_pool(void *start, size_t size)
 {
-	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
 }
 
 static int __free_from_pool(void *start, size_t size)
@@ -630,7 +537,7 @@
 		goto out;
 
 	if (PageHighMem(page)) {
-		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+		ptr = dma_common_contiguous_remap(page, size, prot, caller);
 		if (!ptr) {
 			dma_release_from_contiguous(dev, page, count);
 			return NULL;
@@ -650,7 +557,7 @@
 {
 	if (want_vaddr) {
 		if (PageHighMem(page))
-			__dma_free_remap(cpu_addr, size);
+			dma_common_free_remap(cpu_addr, size);
 		else
 			__dma_remap(page, size, PAGE_KERNEL);
 	}
@@ -742,7 +649,7 @@
 static void remap_allocator_free(struct arm_dma_free_args *args)
 {
 	if (args->want_vaddr)
-		__dma_free_remap(args->cpu_addr, args->size);
+		dma_common_free_remap(args->cpu_addr, args->size);
 
 	__dma_free_buffer(args->page, args->size);
 }
@@ -756,7 +663,7 @@
 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
 			 unsigned long attrs, const void *caller)
 {
-	u64 mask = get_coherent_dma_mask(dev);
+	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 	struct page *page = NULL;
 	void *addr;
 	bool allowblock, cma;
@@ -780,9 +687,6 @@
 	}
 #endif
 
-	if (!mask)
-		return NULL;
-
 	buf = kzalloc(sizeof(*buf),
 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
@@ -801,7 +705,7 @@
 	gfp &= ~(__GFP_COMP);
 	args.gfp = gfp;
 
-	*handle = ARM_MAPPING_ERROR;
+	*handle = DMA_MAPPING_ERROR;
 	allowblock = gfpflags_allow_blocking(gfp);
 	cma = allowblock ? dev_get_cma_area(dev) : false;
 
@@ -930,17 +834,6 @@
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
-/*
- * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
- * that the intention is to allow exporting memory allocated via the
- * coherent DMA APIs through the dma_buf API, which only accepts a
- * scattertable. This presents a couple of problems:
- * 1. Not all memory allocated via the coherent DMA APIs is backed by
- *    a struct page
- * 2. Passing coherent DMA memory into the streaming APIs is not allowed
- *    as we will try to flush the memory through a different alias to that
- *    actually being used (and the flushes are redundant.)
- */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
@@ -1166,19 +1059,17 @@
 			dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-int arm_dma_supported(struct device *dev, u64 mask)
-{
-	return __dma_supported(dev, mask, false);
-}
-
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
+	/*
+	 * When CONFIG_ARM_LPAE is set, physical address can extend above
+	 * 32-bits, which then can't be addressed by devices that only support
+	 * 32-bit DMA.
+	 * Use the generic dma-direct / swiotlb ops code in that case, as that
+	 * handles bounce buffering for us.
+	 */
+	if (IS_ENABLED(CONFIG_ARM_LPAE))
+		return NULL;
 	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
 }
 
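Note: a NULL return from arm_get_dma_map_ops() leaves dev->dma_ops unset, so the core falls back to the generic dma-direct path (with swiotlb bounce buffering where needed) instead of arm_dma_ops. The common-code dispatch looks roughly like the sketch below (simplified, for illustration only; check the tree this SDK is based on for the exact form):

	/* simplified shape of get_dma_ops() from the common DMA headers */
	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		if (dev->dma_ops)
			return dev->dma_ops;		/* e.g. iommu_ops set via arm_iommu_attach_device() */
		return get_arch_dma_ops(dev->bus);	/* NULL here means "use dma-direct" */
	}
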
@@ -1224,9 +1115,6 @@
 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	align = (1 << order) - 1;
 
-	/* workaround for avoid va hole */
-	align = 0;
-
 	spin_lock_irqsave(&mapping->lock, flags);
 	for (i = 0; i < mapping->nr_bitmaps; i++) {
 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1247,7 +1135,7 @@
 	if (i == mapping->nr_bitmaps) {
 		if (extend_iommu_mapping(mapping)) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return ARM_MAPPING_ERROR;
+			return DMA_MAPPING_ERROR;
 		}
 
 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1255,7 +1143,7 @@
 
 		if (start > mapping->bits) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return ARM_MAPPING_ERROR;
+			return DMA_MAPPING_ERROR;
 		}
 
 		bitmap_set(mapping->bitmaps[i], start, count);
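Note: every former ARM_MAPPING_ERROR return now uses the generic DMA_MAPPING_ERROR sentinel, which is why the per-ops .mapping_error callbacks could be dropped; callers keep detecting failures the usual way. A minimal caller-side sketch (hypothetical buffer, not part of this patch):

	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))	/* checks for DMA_MAPPING_ERROR */
		return -ENOMEM;
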
@@ -1416,17 +1304,6 @@
 }
 
 /*
- * Create a CPU mapping for a specified pages
- */
-static void *
-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
-		    const void *caller)
-{
-	return dma_common_pages_remap(pages, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
-}
-
-/*
  * Create a mapping in device IO address space for specified pages
  */
 static dma_addr_t
@@ -1439,7 +1316,7 @@
 	int i;
 
 	dma_addr = __alloc_iova(mapping, size);
-	if (dma_addr == ARM_MAPPING_ERROR)
+	if (dma_addr == DMA_MAPPING_ERROR)
 		return dma_addr;
 
 	iova = dma_addr;
@@ -1466,7 +1343,7 @@
 fail:
 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
 	__free_iova(mapping, dma_addr, size);
-	return ARM_MAPPING_ERROR;
+	return DMA_MAPPING_ERROR;
 }
 
 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1498,18 +1375,13 @@
 
 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 {
-	struct vm_struct *area;
-
 	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
 		return __atomic_get_pages(cpu_addr);
 
 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
 		return cpu_addr;
 
-	area = find_vm_area(cpu_addr);
-	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
-		return area->pages;
-	return NULL;
+	return dma_common_find_pages(cpu_addr);
 }
 
 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
@@ -1527,7 +1399,7 @@
 		return NULL;
 
 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
-	if (*handle == ARM_MAPPING_ERROR)
+	if (*handle == DMA_MAPPING_ERROR)
 		goto err_mapping;
 
 	return addr;
@@ -1555,7 +1427,7 @@
 	struct page **pages;
 	void *addr = NULL;
 
-	*handle = ARM_MAPPING_ERROR;
+	*handle = DMA_MAPPING_ERROR;
 	size = PAGE_ALIGN(size);
 
 	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1576,13 +1448,13 @@
 		return NULL;
 
 	*handle = __iommu_create_mapping(dev, pages, size, attrs);
-	if (*handle == ARM_MAPPING_ERROR)
+	if (*handle == DMA_MAPPING_ERROR)
 		goto err_buffer;
 
 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
 		return pages;
 
-	addr = __iommu_alloc_remap(pages, size, gfp, prot,
+	addr = dma_common_pages_remap(pages, size, prot,
 			__builtin_return_address(0));
 	if (!addr)
 		goto err_mapping;
@@ -1612,31 +1484,21 @@
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		    unsigned long attrs)
 {
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long off = vma->vm_pgoff;
+	int err;
 
 	if (!pages)
 		return -ENXIO;
 
-	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+	if (vma->vm_pgoff >= nr_pages)
 		return -ENXIO;
 
-	pages += off;
+	err = vm_map_pages(vma, pages, nr_pages);
+	if (err)
+		pr_err("Remapping memory failed: %d\n", err);
 
-	do {
-		int ret = vm_insert_page(vma, uaddr, *pages++);
-		if (ret) {
-			pr_err("Remapping memory failed: %d\n", ret);
-			return ret;
-		}
-		uaddr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
-
-	return 0;
+	return err;
 }
 static int arm_iommu_mmap_attrs(struct device *dev,
 		struct vm_area_struct *vma, void *cpu_addr,
@@ -1658,7 +1520,7 @@
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
-void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	dma_addr_t handle, unsigned long attrs, int coherent_flag)
 {
 	struct page **pages;
@@ -1675,22 +1537,21 @@
 		return;
 	}
 
-	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
-		dma_common_free_remap(cpu_addr, size,
-			VM_ARM_DMA_CONSISTENT | VM_USERMAP, false);
-	}
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
+		dma_common_free_remap(cpu_addr, size);
 
 	__iommu_remove_mapping(dev, handle, size);
 	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
-void arm_iommu_free_attrs(struct device *dev, size_t size,
-		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
+static void arm_iommu_free_attrs(struct device *dev, size_t size,
+				 void *cpu_addr, dma_addr_t handle,
+				 unsigned long attrs)
 {
 	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
 }
 
-void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
+static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
 	    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
 	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
@@ -1726,10 +1587,10 @@
 	int prot;
 
 	size = PAGE_ALIGN(size);
-	*handle = ARM_MAPPING_ERROR;
+	*handle = DMA_MAPPING_ERROR;
 
 	iova_base = iova = __alloc_iova(mapping, size);
-	if (iova == ARM_MAPPING_ERROR)
+	if (iova == DMA_MAPPING_ERROR)
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1769,7 +1630,7 @@
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
 
-		s->dma_address = ARM_MAPPING_ERROR;
+		s->dma_address = DMA_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1814,7 +1675,7 @@
  * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
-int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 	int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
@@ -1832,7 +1693,7 @@
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 	int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
@@ -1865,8 +1726,8 @@
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
-void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
+static void arm_coherent_iommu_unmap_sg(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir,
 		unsigned long attrs)
 {
 	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
@@ -1882,9 +1743,10 @@
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
-void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction dir,
-			unsigned long attrs)
+static void arm_iommu_unmap_sg(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir,
+			unsigned long attrs)
 {
 	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
 }
@@ -1896,7 +1758,8 @@
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
-void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+static void arm_iommu_sync_sg_for_cpu(struct device *dev,
+			struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *s;
@@ -1914,7 +1777,8 @@
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
-void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+static void arm_iommu_sync_sg_for_device(struct device *dev,
+			struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
 	struct scatterlist *s;
@@ -1944,7 +1808,7 @@
 	int ret, prot, len = PAGE_ALIGN(size + offset);
 
 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == ARM_MAPPING_ERROR)
+	if (dma_addr == DMA_MAPPING_ERROR)
 		return dma_addr;
 
 	prot = __dma_info_to_prot(dir, attrs);
@@ -1956,7 +1820,7 @@
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return ARM_MAPPING_ERROR;
+	return DMA_MAPPING_ERROR;
 }
 
 /**
@@ -2050,7 +1914,7 @@
 	size_t len = PAGE_ALIGN(size + offset);
 
 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == ARM_MAPPING_ERROR)
+	if (dma_addr == DMA_MAPPING_ERROR)
 		return dma_addr;
 
 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2062,7 +1926,7 @@
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return ARM_MAPPING_ERROR;
+	return DMA_MAPPING_ERROR;
 }
 
 /**
@@ -2116,7 +1980,7 @@
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
-const struct dma_map_ops iommu_ops = {
+static const struct dma_map_ops iommu_ops = {
 	.alloc = arm_iommu_alloc_attrs,
 	.free = arm_iommu_free_attrs,
 	.mmap = arm_iommu_mmap_attrs,
@@ -2135,11 +1999,10 @@
 	.map_resource = arm_iommu_map_resource,
 	.unmap_resource = arm_iommu_unmap_resource,
 
-	.mapping_error = arm_dma_mapping_error,
 	.dma_supported = arm_dma_supported,
 };
 
-const struct dma_map_ops iommu_coherent_ops = {
+static const struct dma_map_ops iommu_coherent_ops = {
 	.alloc = arm_coherent_iommu_alloc_attrs,
 	.free = arm_coherent_iommu_free_attrs,
 	.mmap = arm_coherent_iommu_mmap_attrs,
@@ -2154,7 +2017,6 @@
 	.map_resource = arm_iommu_map_resource,
 	.unmap_resource = arm_iommu_unmap_resource,
 
-	.mapping_error = arm_dma_mapping_error,
 	.dma_supported = arm_dma_supported,
 };
 
@@ -2316,7 +2178,7 @@
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
+ * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
 */
 void arm_iommu_detach_device(struct device *dev)
 {
@@ -2398,6 +2260,9 @@
 	const struct dma_map_ops *dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
+#ifdef CONFIG_SWIOTLB
+	dev->dma_coherent = coherent;
+#endif
 
 	/*
 	 * Don't override the dma_ops if they have already been set. Ideally
@@ -2415,14 +2280,11 @@
 	set_dma_ops(dev, dma_ops);
 
 #ifdef CONFIG_XEN
-	if (xen_initial_domain()) {
-		dev->archdata.dev_dma_ops = dev->dma_ops;
-		dev->dma_ops = xen_dma_ops;
-	}
+	if (xen_initial_domain())
+		dev->dma_ops = &xen_swiotlb_dma_ops;
 #endif
 	dev->archdata.dma_ops_setup = true;
 }
-EXPORT_SYMBOL_GPL(arch_setup_dma_ops);
 
 void arch_teardown_dma_ops(struct device *dev)
 {
@@ -2433,3 +2295,33 @@
 	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
 	set_dma_ops(dev, NULL);
 }
+
+#ifdef CONFIG_SWIOTLB
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+			      size, dir);
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+			      size, dir);
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
+{
+	return __dma_alloc(dev, size, dma_handle, gfp,
+			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
+			   attrs, __builtin_return_address(0));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
+}
+#endif /* CONFIG_SWIOTLB */
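
Note: the new arch_sync_dma_for_device()/arch_sync_dma_for_cpu() hooks are what the generic dma-direct/swiotlb code calls around streaming transfers to non-coherent devices, replacing the open-coded swiotlb calls removed near the top of this patch. Roughly, the map side in kernel/dma/direct.c behaves like the sketch below (simplified, for illustration only):

	/* simplified shape of dma_direct_map_page() for a non-coherent device */
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_capable(dev, dma_addr, size, true)))
		return swiotlb_map(dev, phys, size, dir, attrs);	/* bounce buffer */

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);	/* cache maintenance */
	return dma_addr;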