forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/arch/microblaze/kernel/dma.c
@@ -8,14 +8,13 @@
  */
 
 #include <linux/device.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
 #include <linux/gfp.h>
-#include <linux/dma-debug.h>
 #include <linux/export.h>
 #include <linux/bug.h>
 #include <asm/cacheflush.h>
 
-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
 		enum dma_data_direction direction)
 {
 	switch (direction) {
@@ -31,36 +30,14 @@
 	}
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	__dma_sync(dev, paddr, size, dir);
+	__dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	__dma_sync(dev, paddr, size, dir);
-}
-
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t handle, size_t size,
-		unsigned long attrs)
-{
-#ifdef CONFIG_MMU
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
-
-	if (off >= count || user_count > (count - off))
-		return -ENXIO;
-
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	pfn = consistent_virt_to_pfn(cpu_addr);
-	return remap_pfn_range(vma, vma->vm_start, pfn + off,
-			vma->vm_end - vma->vm_start, vma->vm_page_prot);
-#else
-	return -ENXIO;
-#endif
+	__dma_sync(paddr, size, dir);
 }
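
For reference, a minimal sketch of how the sync path reads after this change: the dev argument is gone from __dma_sync and both arch_sync_dma hooks, and the dma-noncoherent.h/dma-debug.h includes are replaced by dma-map-ops.h. The hunks above elide the body of __dma_sync, so the flush_dcache_range()/invalidate_dcache_range() calls below are an assumption inferred from the visible switch (direction) dispatch and the <asm/cacheflush.h> include; they are not part of this diff.

#include <linux/dma-map-ops.h>
#include <linux/bug.h>
#include <asm/cacheflush.h>

static void __dma_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* CPU filled the buffer: write dirty cache lines back (assumed body). */
		flush_dcache_range(paddr, paddr + size);
		break;
	case DMA_FROM_DEVICE:
		/* Device filled the buffer: drop stale cache lines (assumed body). */
		invalidate_dcache_range(paddr, paddr + size);
		break;
	default:
		BUG();
	}
}

/* Both hooks now take only the physical range and the transfer direction. */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync(paddr, size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync(paddr, size, dir);
}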