2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/kernel/dma/coherent.c
@@ -7,14 +7,14 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 
 struct dma_coherent_mem {
 	void		*virt_base;
 	dma_addr_t	device_base;
 	unsigned long	pfn_base;
 	int		size;
-	int		flags;
 	unsigned long	*bitmap;
 	spinlock_t	spinlock;
 	bool		use_dev_dma_pfn_offset;
@@ -29,28 +29,20 @@
 	return NULL;
 }
 
-dma_addr_t dma_get_device_base(struct device *dev,
-			       struct dma_coherent_mem *mem)
+static inline dma_addr_t dma_get_device_base(struct device *dev,
+		struct dma_coherent_mem * mem)
 {
 	if (mem->use_dev_dma_pfn_offset)
-		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
-	else
-		return mem->device_base;
+		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
+	return mem->device_base;
 }
-EXPORT_SYMBOL_GPL(dma_get_device_base);
 
-unsigned long dma_get_size(struct dma_coherent_mem *mem)
-{
-	return mem->size << PAGE_SHIFT;
-}
-EXPORT_SYMBOL_GPL(dma_get_size);
-
-static int dma_init_coherent_memory(
-	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-	struct dma_coherent_mem **mem)
+static int dma_init_coherent_memory(phys_addr_t phys_addr,
+		dma_addr_t device_addr, size_t size,
+		struct dma_coherent_mem **mem)
 {
 	struct dma_coherent_mem *dma_mem = NULL;
-	void __iomem *mem_base = NULL;
+	void *mem_base = NULL;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
 	int ret;
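
Annotation: the rewritten dma_get_device_base() folds the open-coded pfn
arithmetic into the generic phys_to_dma() translation provided via
<linux/dma-direct.h>. Below is a minimal userspace model of why the two
computations agree for a constant-offset DMA window; fake_dev,
phys_to_dma_model() and all addresses are illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* Hypothetical stand-in for the device's DMA offset bookkeeping. */
struct fake_dev {
	unsigned long dma_pfn_offset;
};

/* Model of PFN_PHYS(): page frame number -> CPU physical address. */
static phys_addr_t pfn_phys(unsigned long pfn)
{
	return (phys_addr_t)pfn << PAGE_SHIFT;
}

/* Model of phys_to_dma() when the window is a constant offset. */
static dma_addr_t phys_to_dma_model(const struct fake_dev *dev,
				    phys_addr_t paddr)
{
	return paddr - pfn_phys(dev->dma_pfn_offset);
}

int main(void)
{
	struct fake_dev dev = { .dma_pfn_offset = 0x80000 };
	unsigned long pfn_base = 0x90000;

	/* Old form: subtract the pfn offset, then shift. */
	dma_addr_t before = (dma_addr_t)(pfn_base - dev.dma_pfn_offset)
				<< PAGE_SHIFT;
	/* New form: translate the full physical address. */
	dma_addr_t after = phys_to_dma_model(&dev, pfn_phys(pfn_base));

	printf("before=%#llx after=%#llx\n",	/* both 0x10000000 */
	       (unsigned long long)before, (unsigned long long)after);
	return 0;
}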
@@ -80,7 +72,6 @@
 	dma_mem->device_base = device_addr;
 	dma_mem->pfn_base = PFN_DOWN(phys_addr);
 	dma_mem->size = pages;
-	dma_mem->flags = flags;
 	spin_lock_init(&dma_mem->spinlock);
 
 	*mem = dma_mem;
@@ -93,7 +84,7 @@
 	return ret;
 }
 
-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
 {
 	if (!mem)
 		return;
@@ -116,59 +107,48 @@
 	return 0;
 }
 
+/*
+ * Declare a region of memory to be handed out by dma_alloc_coherent() when it
+ * is asked for coherent memory for this device. This shall only be used
+ * from platform code, usually based on the device tree description.
+ *
+ * phys_addr is the CPU physical address to which the memory is currently
+ * assigned (this will be ioremapped so the CPU can access the region).
+ *
+ * device_addr is the DMA address the device needs to be programmed with to
+ * actually address this memory (this will be handed out as the dma_addr_t in
+ * dma_alloc_coherent()).
+ *
+ * size is the size of the area (must be a multiple of PAGE_SIZE).
+ *
+ * As a simplification for the platforms, only *one* such region of memory may
+ * be declared per device.
+ */
 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags)
+		dma_addr_t device_addr, size_t size)
 {
 	struct dma_coherent_mem *mem;
 	int ret;
 
-	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
 	if (ret)
 		return ret;
 
 	ret = dma_assign_coherent_memory(dev, mem);
 	if (ret)
-		dma_release_coherent_memory(mem);
+		_dma_release_coherent_memory(mem);
 	return ret;
 }
-EXPORT_SYMBOL(dma_declare_coherent_memory);
 
-void dma_release_declared_memory(struct device *dev)
+void dma_release_coherent_memory(struct device *dev)
 {
-	struct dma_coherent_mem *mem = dev->dma_mem;
-
-	if (!mem)
-		return;
-	dma_release_coherent_memory(mem);
-	dev->dma_mem = NULL;
+	if (dev)
+		_dma_release_coherent_memory(dev->dma_mem);
 }
-EXPORT_SYMBOL(dma_release_declared_memory);
 
-void *dma_mark_declared_memory_occupied(struct device *dev,
-					dma_addr_t device_addr, size_t size)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-	unsigned long flags;
-	int pos, err;
-
-	size += device_addr & ~PAGE_MASK;
-
-	if (!mem)
-		return ERR_PTR(-EINVAL);
-
-	spin_lock_irqsave(&mem->spinlock, flags);
-	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
-	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
-	spin_unlock_irqrestore(&mem->spinlock, flags);
-
-	if (err != 0)
-		return ERR_PTR(err);
-	return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-		ssize_t size, dma_addr_t *dma_handle)
+static void *__dma_alloc_from_coherent(struct device *dev,
+				       struct dma_coherent_mem *mem,
+				       ssize_t size, dma_addr_t *dma_handle)
 {
 	int order = get_order(size);
 	unsigned long flags;
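
Annotation: with the flags argument gone, declaring a per-device pool
reduces to a single four-argument call. A hedged platform-code sketch
follows; the device, addresses and size are invented, only the
dma_declare_coherent_memory() signature comes from this patch.

#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

/*
 * Sketch only: hand one device a fixed window of on-chip SRAM so that
 * dma_alloc_coherent() for this device is served from that window.
 * 0x40000000 is a made-up CPU physical address (it will be ioremapped),
 * 0x0 is the bus address the device is programmed with, and SZ_1M is a
 * multiple of PAGE_SIZE as the comment above requires.
 */
static int example_declare_sram_pool(struct device *dev)
{
	return dma_declare_coherent_memory(dev, 0x40000000, 0x0, SZ_1M);
}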
@@ -177,7 +157,7 @@
 
 	spin_lock_irqsave(&mem->spinlock, flags);
 
-	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
 		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
@@ -187,8 +167,9 @@
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-	ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = dma_get_device_base(dev, mem) +
+			((dma_addr_t)pageno << PAGE_SHIFT);
+	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	memset(ret, 0, size);
 	return ret;
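
Annotation: the new (dma_addr_t) casts are not cosmetic. mem->size is an
int page count, so the old int-typed shift overflows once a pool reaches
2 GiB, and valid requests are then rejected by the size check above. A
userspace sketch of the failure mode, assuming 32-bit int and 4 KiB
pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t dma_addr_t;

int main(void)
{
	int pages = 0x100000;	/* 1M pages: a 4 GiB pool */

	/*
	 * Old expression: the shift happens in (signed) int, which
	 * overflows (undefined behaviour); on common compilers the
	 * value wraps to 0, so any size > 0 looks too big.
	 */
	long long broken = pages << PAGE_SHIFT;

	/* New expression: widen to dma_addr_t before shifting. */
	dma_addr_t fixed = (dma_addr_t)pages << PAGE_SHIFT;

	printf("broken=%lld fixed=%#llx\n",
	       broken, (unsigned long long)fixed);
	return 0;
}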
@@ -219,33 +200,25 @@
 	if (!mem)
 		return 0;
 
-	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
-	if (*ret)
-		return 1;
-
-	/*
-	 * In the case where the allocation can not be satisfied from the
-	 * per-device area, try to fall back to generic memory if the
-	 * constraints allow it.
-	 */
-	return mem->flags & DMA_MEMORY_EXCLUSIVE;
+	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
+	return 1;
 }
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+				     dma_addr_t *dma_handle)
 {
 	if (!dma_coherent_default_memory)
 		return NULL;
 
-	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
-			dma_handle);
+	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
+					 dma_handle);
 }
 
 static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
 		int order, void *vaddr)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr <
-	   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+	   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
 		unsigned long flags;
 
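Annotation: dropping DMA_MEMORY_EXCLUSIVE makes a present per-device
pool unconditionally authoritative: dma_alloc_from_dev_coherent() now
returns 1 whenever dev->dma_mem exists, even when *ret is NULL because
the pool was full, and the old fall-back-to-generic-memory path is gone.
A hedged sketch of the resulting caller contract; my_alloc() and
my_alloc_fallback() are placeholders, only the return-value semantics
come from the patch.

/* Assumes kernel context and the usual DMA headers. */
static void *my_alloc_fallback(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp);

static void *my_alloc(struct device *dev, size_t size,
		      dma_addr_t *dma_handle, gfp_t gfp)
{
	void *cpu_addr;

	/* Nonzero return means "a pool exists and has answered":
	 * cpu_addr is the allocation on success or NULL if the pool
	 * could not satisfy the request; do not fall back. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* Zero return means no per-device pool: use the generic path. */
	return my_alloc_fallback(dev, size, dma_handle, gfp);
}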
@@ -275,7 +248,6 @@
 
 	return __dma_release_from_coherent(mem, order, vaddr);
 }
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
 
 int dma_release_from_global_coherent(int order, void *vaddr)
 {
@@ -290,10 +262,10 @@
 		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
-	   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+	   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
 		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-		int user_count = vma_pages(vma);
+		unsigned long user_count = vma_pages(vma);
 		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 		*ret = -ENXIO;
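
Annotation: user_count switching from int to unsigned long matches the
return type of vma_pages() and keeps the partial-mmap bounds check
well-defined for large mappings. A userspace model of that check; the
function name and the values are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Model of the window test __dma_mmap_from_coherent() applies: the
 * user's request (vm_pgoff = off, vma_pages = user_count) must sit
 * inside the count pages of the underlying buffer. */
static bool mmap_fits(unsigned long off, unsigned long user_count,
		      unsigned long count)
{
	return off < count && user_count <= count - off;
}

int main(void)
{
	printf("%d\n", mmap_fits(1, 3, 4));	/* 1: pages 1..3 of 4 */
	printf("%d\n", mmap_fits(2, 3, 4));	/* 0: runs past the end */
	return 0;
}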
@@ -330,7 +302,6 @@
 
 	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
 }
-EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
 
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 		size_t size, int *ret)
@@ -359,8 +330,7 @@
 
 	if (!mem) {
 		ret = dma_init_coherent_memory(rmem->base, rmem->base,
-					       rmem->size,
-					       DMA_MEMORY_EXCLUSIVE, &mem);
+					       rmem->size, &mem);
 		if (ret) {
 			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
 			       &rmem->base, (unsigned long)rmem->size / SZ_1M);