forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 748e4f3d702def1a4bff191e0cf93b6a05340f01
kernel/arch/arm/mm/dma-mapping-nommu.c
@@ -1,17 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Based on linux/arch/arm/mm/dma-mapping.c
  *
  * Copyright (C) 2000-2004 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 #include <linux/scatterlist.h>
 
 #include <asm/cachetype.h>
@@ -22,7 +19,7 @@
 #include "dma.h"
 
 /*
- * dma_direct_ops is used if
+ * The generic direct mapping code is used if
  *  - MMU/MPU is off
  *  - cpu is v7m w/o cache support
  *  - device is coherent
@@ -39,17 +36,7 @@
                                  unsigned long attrs)
 
 {
-        void *ret;
-
-        /*
-         * Try generic allocator first if we are advertised that
-         * consistency is not required.
-         */
-
-        if (attrs & DMA_ATTR_NON_CONSISTENT)
-                return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-
-        ret = dma_alloc_from_global_coherent(size, dma_handle);
+        void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
 
         /*
          * dma_alloc_from_global_coherent() may fail because:
@@ -69,16 +56,9 @@
                                void *cpu_addr, dma_addr_t dma_addr,
                                unsigned long attrs)
 {
-        if (attrs & DMA_ATTR_NON_CONSISTENT) {
-                dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
-        } else {
-                int ret = dma_release_from_global_coherent(get_order(size),
-                                                           cpu_addr);
+        int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
 
-                WARN_ON_ONCE(ret == 0);
-        }
-
-        return;
+        WARN_ON_ONCE(ret == 0);
 }
 
 static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
@@ -89,8 +69,9 @@
 
         if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                 return ret;
-
-        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+                return ret;
+        return -ENXIO;
 }
 
 
@@ -196,6 +177,8 @@
 const struct dma_map_ops arm_nommu_dma_ops = {
         .alloc                  = arm_nommu_dma_alloc,
         .free                   = arm_nommu_dma_free,
+        .alloc_pages            = dma_direct_alloc_pages,
+        .free_pages             = dma_direct_free_pages,
         .mmap                   = arm_nommu_dma_mmap,
         .map_page               = arm_nommu_dma_map_page,
         .unmap_page             = arm_nommu_dma_unmap_page,
@@ -208,16 +191,9 @@
 };
 EXPORT_SYMBOL(arm_nommu_dma_ops);
 
-static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
-{
-        return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
-}
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                         const struct iommu_ops *iommu, bool coherent)
 {
-        const struct dma_map_ops *dma_ops;
-
         if (IS_ENABLED(CONFIG_CPU_V7M)) {
                 /*
                  * Cache support for v7m is optional, so can be treated as
@@ -233,11 +209,6 @@
                 dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
         }
 
-        dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
-
-        set_dma_ops(dev, dma_ops);
-}
-
-void arch_teardown_dma_ops(struct device *dev)
-{
+        if (!dev->archdata.dma_coherent)
+                set_dma_ops(dev, &arm_nommu_dma_ops);
 }
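
For context, a minimal consumer sketch, not part of this commit: after this change, non-coherent devices on a !MMU ARM kernel get arm_nommu_dma_ops while coherent devices get no ops at all and fall through to the generic direct mapping code, so a driver keeps using the ordinary DMA API either way. The driver and device names below are hypothetical, and the code assumes a coherent pool has been declared (e.g. via a reserved-memory node); it is an illustration of the call path, not the commit author's code.

// Hypothetical demo driver; names invented for illustration.
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int dma_demo_probe(struct platform_device *pdev)
{
        dma_addr_t dma_handle;
        void *vaddr;

        /*
         * On a non-coherent device this lands in arm_nommu_dma_alloc()
         * and is served from the global coherent pool; on a coherent
         * device it goes straight to the generic direct mapping code.
         */
        vaddr = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma_handle, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        dev_info(&pdev->dev, "coherent buffer at bus address %pad\n",
                 &dma_handle);

        dma_free_coherent(&pdev->dev, SZ_4K, vaddr, dma_handle);
        return 0;
}

static struct platform_driver dma_demo_driver = {
        .probe  = dma_demo_probe,
        .driver = { .name = "dma-demo" },
};
module_platform_driver(dma_demo_driver);
MODULE_LICENSE("GPL");

Note that if no coherent pool exists, dma_alloc_coherent() here returns NULL rather than falling back, matching the failure cases listed in the comment above dma_alloc_from_global_coherent() in arm_nommu_dma_alloc().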