hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/nios2/mm/dma-mapping.c
....@@ -18,8 +18,8 @@
1818 #include <linux/cache.h>
1919 #include <asm/cacheflush.h>
2020
21
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
22
- size_t size, enum dma_data_direction dir)
21
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
22
+ enum dma_data_direction dir)
2323 {
2424 void *vaddr = phys_to_virt(paddr);
2525
....@@ -42,8 +42,8 @@
4242 }
4343 }
4444
45
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
46
- size_t size, enum dma_data_direction dir)
45
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
46
+ enum dma_data_direction dir)
4747 {
4848 void *vaddr = phys_to_virt(paddr);
4949
....@@ -60,32 +60,18 @@
6060 }
6161 }
6262
63
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
64
-		gfp_t gfp, unsigned long attrs)
63
+void arch_dma_prep_coherent(struct page *page, size_t size)
6564
 {
66
-	void *ret;
65
+	/* Kernel (cached) mapping of the page backing the coherent buffer. */
+	unsigned long start = (unsigned long)page_address(page);
6766
68
-	/* optimized page clearing */
69
-	gfp |= __GFP_ZERO;
70
-
71
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
72
-		gfp |= GFP_DMA;
73
-
74
-	ret = (void *) __get_free_pages(gfp, get_order(size));
75
-	if (ret != NULL) {
76
-		*dma_handle = virt_to_phys(ret);
77
-		flush_dcache_range((unsigned long) ret,
78
-			(unsigned long) ret + size);
79
-		ret = UNCAC_ADDR(ret);
80
-	}
81
-
82
-	return ret;
67
+	/*
+	 * Flush the data cache over the whole buffer, as the old
+	 * arch_dma_alloc() did before handing out the uncached alias.
+	 */
+	flush_dcache_range(start, start + size);
8368
 }
8469
85
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
86
- dma_addr_t dma_handle, unsigned long attrs)
70
+void *arch_dma_set_uncached(void *ptr, size_t size)
8771 {
88
- unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
72
+ unsigned long addr = (unsigned long)ptr;
8973
90
- free_pages(addr, get_order(size));
74
+ addr |= CONFIG_NIOS2_IO_REGION_BASE;
75
+
76
+ return (void *)ptr;
9177 }