forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/sh/kernel/dma-coherent.c
@@ -1,69 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2004 - 2007  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
  */
 #include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/module.h>
+#include <linux/dma-map-ops.h>
 #include <asm/cacheflush.h>
 #include <asm/addrspace.h>
 
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	void *ret, *ret_nocache;
-	int order = get_order(size);
-
-	gfp |= __GFP_ZERO;
-
-	ret = (void *)__get_free_pages(gfp, order);
-	if (!ret)
-		return NULL;
-
-	/*
-	 * Pages from the page allocator may have data present in
-	 * cache. So flush the cache before using uncached memory.
-	 */
-	arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
-			DMA_BIDIRECTIONAL);
-
-	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
-	if (!ret_nocache) {
-		free_pages((unsigned long)ret, order);
-		return NULL;
-	}
-
-	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
-
-	*dma_handle = virt_to_phys(ret);
-	if (!WARN_ON(!dev))
-		*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
-
-	return ret_nocache;
+	__flush_purge_region(page_address(page), size);
 }
 
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	int order = get_order(size);
-	unsigned long pfn = (dma_handle >> PAGE_SHIFT);
-	int k;
-
-	if (!WARN_ON(!dev))
-		pfn += dev->dma_pfn_offset;
-
-	for (k = 0; k < (1 << order); k++)
-		__free_pages(pfn_to_page(pfn + k), 0);
-
-	iounmap(vaddr);
-}
-
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
 
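
With this change the SuperH port stops carrying its own coherent allocator. arch_dma_alloc()/arch_dma_free() are deleted and the generic DMA layer in kernel/dma/ takes over: it allocates the pages, calls the new arch_dma_prep_coherent() hook to purge stale cache lines, and hands out an uncached view of the buffer. The dev->dma_pfn_offset adjustment the old code applied by hand is likewise handled generically, arch_sync_dma_for_device() drops its unused struct device argument to match the current cross-arch prototype, and <linux/dma-noncoherent.h> gives way to <linux/dma-map-ops.h>. Driver code should be unaffected, since it reaches this path through dma_alloc_coherent(). A minimal sketch of that consumer path follows; demo_probe() and its platform device are illustrative assumptions, not part of this SDK:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Hypothetical probe routine, for illustration only. */
static int demo_probe(struct platform_device *pdev)
{
	dma_addr_t dma_handle;
	void *buf;

	/*
	 * Unchanged by this commit: the generic allocator grabs the
	 * pages, purges them via arch_dma_prep_coherent(), and returns
	 * an uncached mapping plus the bus address for the device.
	 */
	buf = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program dma_handle into the device and run the transfer ... */

	dma_free_coherent(&pdev->dev, SZ_4K, buf, dma_handle);
	return 0;
}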