forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/mips/mm/dma-noncoherent.c
@@ -5,34 +5,13 @@
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
 #include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/highmem.h>
 
 #include <asm/cache.h>
 #include <asm/cpu-type.h>
 #include <asm/dma-coherence.h>
 #include <asm/io.h>
-
-#ifdef CONFIG_DMA_PERDEV_COHERENT
-static inline int dev_is_coherent(struct device *dev)
-{
-	return dev->archdata.dma_coherent;
-}
-#else
-static inline int dev_is_coherent(struct device *dev)
-{
-	switch (coherentio) {
-	default:
-	case IO_COHERENCE_DEFAULT:
-		return hw_coherentio;
-	case IO_COHERENCE_ENABLED:
-		return 1;
-	case IO_COHERENCE_DISABLED:
-		return 0;
-	}
-}
-#endif /* CONFIG_DMA_PERDEV_COHERENT */
 
 /*
  * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
@@ -47,15 +26,13 @@
  * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
  * SGI IP32 aka O2.
  */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
 {
-	if (dev_is_coherent(dev))
-		return false;
-
 	switch (boot_cpu_type()) {
 	case CPU_R10000:
 	case CPU_R12000:
 	case CPU_BMIPS5000:
+	case CPU_LOONGSON2EF:
 		return true;
 	default:
 		/*
@@ -67,81 +44,44 @@
 	}
 }
 
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
-	void *ret;
-
-	ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-	if (!ret)
-		return NULL;
-
-	if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
-		dma_cache_wback_inv((unsigned long) ret, size);
-		ret = (void *)UNCAC_ADDR(ret);
-	}
-
-	return ret;
+	dma_cache_wback_inv((unsigned long)page_address(page), size);
 }
 
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_addr, unsigned long attrs)
+void *arch_dma_set_uncached(void *addr, size_t size)
 {
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev))
-		cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr);
-	dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
+	return (void *)(__pa(addr) + UNCAC_BASE);
 }
 
-int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long addr = (unsigned long)cpu_addr;
-	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
-	int ret = -ENXIO;
-
-	if (!dev_is_coherent(dev))
-		addr = CAC_ADDR(addr);
-
-	pfn = page_to_pfn(virt_to_page((void *)addr));
-
-	if (attrs & DMA_ATTR_WRITE_COMBINE)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	else
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
-	if (off < count && user_count <= (count - off)) {
-		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
-				      user_count << PAGE_SHIFT,
-				      vma->vm_page_prot);
-	}
-
-	return ret;
-}
-
-static inline void dma_sync_virt(void *addr, size_t size,
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		dma_cache_wback((unsigned long)addr, size);
 		break;
-
 	case DMA_FROM_DEVICE:
 		dma_cache_inv((unsigned long)addr, size);
 		break;
-
 	case DMA_BIDIRECTIONAL:
 		dma_cache_wback_inv((unsigned long)addr, size);
 		break;
+	default:
+		BUG();
+	}
+}
 
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+		enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		dma_cache_inv((unsigned long)addr, size);
+		break;
 	default:
 		BUG();
 	}
@@ -153,7 +93,7 @@
  * configured then the bulk of this loop gets optimized out.
  */
 static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+		enum dma_data_direction dir, bool for_device)
 {
 	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
 	unsigned long offset = paddr & ~PAGE_MASK;
@@ -161,48 +101,45 @@
 
 	do {
 		size_t len = left;
+		void *addr;
 
 		if (PageHighMem(page)) {
-			void *addr;
-
-			if (offset + len > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset >> PAGE_SHIFT;
-					offset &= ~PAGE_MASK;
-				}
+			if (offset + len > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
+		}
 
-			addr = kmap_atomic(page);
-			dma_sync_virt(addr + offset, len, dir);
-			kunmap_atomic(addr);
-		} else
-			dma_sync_virt(page_address(page) + offset, size, dir);
+		addr = kmap_atomic(page);
+		if (for_device)
+			dma_sync_virt_for_device(addr + offset, len, dir);
+		else
+			dma_sync_virt_for_cpu(addr + offset, len, dir);
+		kunmap_atomic(addr);
+
 		offset = 0;
 		page++;
 		left -= len;
 	} while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	if (!dev_is_coherent(dev))
-		dma_sync_phys(paddr, size, dir);
+	dma_sync_phys(paddr, size, dir, true);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir)
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
-	if (cpu_needs_post_dma_flush(dev))
-		dma_sync_phys(paddr, size, dir);
+	if (cpu_needs_post_dma_flush())
+		dma_sync_phys(paddr, size, dir, false);
 }
+#endif
 
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+		const struct iommu_ops *iommu, bool coherent)
 {
-	BUG_ON(direction == DMA_NONE);
-
-	if (!dev_is_coherent(dev))
-		dma_sync_virt(vaddr, size, direction);
+	dev->dma_coherent = coherent;
 }
+#endif
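
Note (not part of the patch): a minimal sketch of how these arch hooks get exercised from a driver's streaming-DMA receive path. dma_map_single()/dma_sync_single_for_cpu()/dma_unmap_single() are the generic DMA API; with dma-direct on a non-coherent MIPS platform they end up in arch_sync_dma_for_device() and arch_sync_dma_for_cpu() above. The function name example_rx and the dev/buf/len parameters are hypothetical placeholders.

#include <linux/dma-mapping.h>

/* Illustrative only: one DMA_FROM_DEVICE transfer through the arch hooks. */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Maps the buffer for the device; on non-coherent MIPS this calls
	 * arch_sync_dma_for_device(), i.e. dma_sync_phys(..., true). */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... the device DMAs data into buf here ... */

	/* Hands the buffer back to the CPU; this is where
	 * arch_sync_dma_for_cpu() runs, and cpu_needs_post_dma_flush()
	 * decides whether a post-DMA invalidate is needed on parts such as
	 * R10000/R12000/BMIPS5000/Loongson-2EF that may have speculatively
	 * refilled cache lines during the transfer. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU reads buf ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}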