forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 748e4f3d702def1a4bff191e0cf93b6a05340f01
kernel/arch/arc/mm/dma.c
@@ -1,61 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
 /*
- * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ * ARCH specific callbacks for generic noncoherent DMA ops
  * - hardware IOC not available (or "dma-coherent" not set for device in DT)
  * - But still handle both coherent and non-coherent requests from caller
  *
  * For DMA coherent hardware (IOC) generic code suffices
  */
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	unsigned long order = get_order(size);
-	struct page *page;
-	phys_addr_t paddr;
-	void *kvaddr;
-	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
-
-	/*
-	 * __GFP_HIGHMEM flag is cleared by upper layer functions
-	 * (in include/linux/dma-mapping.h) so we should never get a
-	 * __GFP_HIGHMEM here.
-	 */
-	BUG_ON(gfp & __GFP_HIGHMEM);
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		return NULL;
-
-	/* This is linear addr (0x8000_0000 based) */
-	paddr = page_to_phys(page);
-
-	*dma_handle = paddr;
-
-	/*
-	 * A coherent buffer needs MMU mapping to enforce non-cachability.
-	 * kvaddr is kernel Virtual address (0x7000_0000 based).
-	 */
-	if (need_coh) {
-		kvaddr = ioremap_nocache(paddr, size);
-		if (kvaddr == NULL) {
-			__free_pages(page, order);
-			return NULL;
-		}
-	} else {
-		kvaddr = (void *)(u32)paddr;
-	}
-
 	/*
 	 * Evict any existing L1 and/or L2 lines for the backing page
 	 * in case it was used earlier as a normal "cached" page.
@@ -66,47 +27,7 @@
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
-	if (need_coh)
-		dma_cache_wback_inv(paddr, size);
-
-	return kvaddr;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	phys_addr_t paddr = dma_handle;
-	struct page *page = virt_to_page(paddr);
-
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		iounmap((void __force __iomem *)vaddr);
-
-	__free_pages(page, get_order(size));
-}
-
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = __phys_to_pfn(dma_addr);
-	unsigned long off = vma->vm_pgoff;
-	int ret = -ENXIO;
-
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
-	if (off < count && user_count <= (count - off)) {
-		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
-				      user_count << PAGE_SHIFT,
-				      vma->vm_page_prot);
-	}
-
-	return ret;
+	dma_cache_wback_inv(page_to_phys(page), size);
 }
 
 /*
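
With arch_dma_alloc(), arch_dma_free() and arch_dma_mmap() gone, coherent buffers on ARC are allocated, remapped uncached and mmap'ed by the generic dma-direct code, which calls the arch_dma_prep_coherent() hook above on the freshly allocated pages. The driver-facing API is unchanged; a minimal sketch, assuming a bound struct device (the function name and one-page buffer size are illustrative):

/*
 * Sketch only, not part of this commit: on a non-IOC ARC device the
 * allocation below reaches arch_dma_prep_coherent() via kernel/dma/.
 */
#include <linux/dma-mapping.h>

static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t dma;
	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;

	/* cpu: uncached kernel mapping; dma: bus address for the device */
	dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
	return 0;
}
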
@@ -127,8 +48,8 @@
  * upper layer functions (in include/linux/dma-mapping.h)
  */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
@@ -148,8 +69,8 @@
 	}
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
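
Both sync hooks lose their struct device argument: the cache maintenance they perform depends only on the physical range and the transfer direction, which is all the generic streaming-DMA code now passes. A hedged sketch of how a driver ends up in these hooks on a non-coherent device (function and buffer names are illustrative):

/*
 * Sketch only: dma_map_single(..., DMA_TO_DEVICE) lands in
 * arch_sync_dma_for_device() (cache writeback), while the unmap/sync
 * side of a DMA_FROM_DEVICE transfer lands in arch_sync_dma_for_cpu()
 * (cache invalidate).
 */
#include <linux/dma-mapping.h>

static int example_streaming_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device reads the buffer via DMA ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
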
@@ -167,7 +88,7 @@
 }
 
 /*
- * Plug in coherent or noncoherent dma ops
+ * Plug in direct dma map ops.
  */
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
@@ -175,13 +96,11 @@
 	/*
 	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
-	 * DMA buffers - so we can use dma_direct cache ops.
+	 * DMA buffers.
 	 */
-	if (is_isa_arcv2() && ioc_enable && coherent) {
-		set_dma_ops(dev, &dma_direct_ops);
-		dev_info(dev, "use dma_direct_ops cache ops\n");
-	} else {
-		set_dma_ops(dev, &dma_noncoherent_ops);
-		dev_info(dev, "use dma_noncoherent_ops cache ops\n");
-	}
+	if (is_isa_arcv2() && ioc_enable && coherent)
+		dev->dma_coherent = true;
+
+	dev_info(dev, "use %scoherent DMA ops\n",
+		 dev->dma_coherent ? "" : "non");
 }
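
Per-device coherence is now recorded in the generic dev->dma_coherent flag rather than by plugging in a separate dma_map_ops table; generic code queries the flag through dev_is_dma_coherent(). A simplified sketch of the resulting check (the wrapper below is illustrative; the helper comes from <linux/dma-map-ops.h>):

/*
 * Sketch only: once arch_setup_dma_ops() above has run, generic DMA code
 * skips explicit cache maintenance exactly when the device is coherent,
 * i.e. ARCv2 IOC is enabled and the device's DT node says "dma-coherent".
 */
#include <linux/dma-map-ops.h>

static bool example_needs_cache_maintenance(struct device *dev)
{
	return !dev_is_dma_coherent(dev);
}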