@@ -7,14 +7,14 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-map-ops.h>
 
 struct dma_coherent_mem {
 	void *virt_base;
 	dma_addr_t device_base;
 	unsigned long pfn_base;
 	int size;
-	int flags;
 	unsigned long *bitmap;
 	spinlock_t spinlock;
 	bool use_dev_dma_pfn_offset;
@@ -29,28 +29,20 @@
 	return NULL;
 }
 
-dma_addr_t dma_get_device_base(struct device *dev,
-			       struct dma_coherent_mem *mem)
+static inline dma_addr_t dma_get_device_base(struct device *dev,
+					     struct dma_coherent_mem * mem)
 {
 	if (mem->use_dev_dma_pfn_offset)
-		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
-	else
-		return mem->device_base;
+		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
+	return mem->device_base;
 }
-EXPORT_SYMBOL_GPL(dma_get_device_base);
 
-unsigned long dma_get_size(struct dma_coherent_mem *mem)
-{
-	return mem->size << PAGE_SHIFT;
-}
-EXPORT_SYMBOL_GPL(dma_get_size);
-
-static int dma_init_coherent_memory(
-	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-	struct dma_coherent_mem **mem)
+static int dma_init_coherent_memory(phys_addr_t phys_addr,
+				    dma_addr_t device_addr, size_t size,
+				    struct dma_coherent_mem **mem)
 {
 	struct dma_coherent_mem *dma_mem = NULL;
-	void __iomem *mem_base = NULL;
+	void *mem_base = NULL;
 	int pages = size >> PAGE_SHIFT;
 	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
 	int ret;
@@ -80,7 +72,6 @@
 	dma_mem->device_base = device_addr;
 	dma_mem->pfn_base = PFN_DOWN(phys_addr);
 	dma_mem->size = pages;
-	dma_mem->flags = flags;
 	spin_lock_init(&dma_mem->spinlock);
 
 	*mem = dma_mem;
@@ -93,7 +84,7 @@
 	return ret;
 }
 
-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
 {
 	if (!mem)
 		return;
@@ -116,59 +107,48 @@
 	return 0;
 }
 
+/*
+ * Declare a region of memory to be handed out by dma_alloc_coherent() when it
+ * is asked for coherent memory for this device. This shall only be used
+ * from platform code, usually based on the device tree description.
+ *
+ * phys_addr is the CPU physical address to which the memory is currently
+ * assigned (this will be ioremapped so the CPU can access the region).
+ *
+ * device_addr is the DMA address the device needs to be programmed with to
+ * actually address this memory (this will be handed out as the dma_addr_t in
+ * dma_alloc_coherent()).
+ *
+ * size is the size of the area (must be a multiple of PAGE_SIZE).
+ *
+ * As a simplification for the platforms, only *one* such region of memory may
+ * be declared per device.
+ */
 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-		dma_addr_t device_addr, size_t size, int flags)
+		dma_addr_t device_addr, size_t size)
 {
 	struct dma_coherent_mem *mem;
 	int ret;
 
-	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+	ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem);
 	if (ret)
 		return ret;
 
 	ret = dma_assign_coherent_memory(dev, mem);
 	if (ret)
-		dma_release_coherent_memory(mem);
+		_dma_release_coherent_memory(mem);
 	return ret;
 }
-EXPORT_SYMBOL(dma_declare_coherent_memory);
 
-void dma_release_declared_memory(struct device *dev)
+void dma_release_coherent_memory(struct device *dev)
 {
-	struct dma_coherent_mem *mem = dev->dma_mem;
-
-	if (!mem)
-		return;
-	dma_release_coherent_memory(mem);
-	dev->dma_mem = NULL;
+	if (dev)
+		_dma_release_coherent_memory(dev->dma_mem);
 }
-EXPORT_SYMBOL(dma_release_declared_memory);
 
-void *dma_mark_declared_memory_occupied(struct device *dev,
-		dma_addr_t device_addr, size_t size)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-	unsigned long flags;
-	int pos, err;
-
-	size += device_addr & ~PAGE_MASK;
-
-	if (!mem)
-		return ERR_PTR(-EINVAL);
-
-	spin_lock_irqsave(&mem->spinlock, flags);
-	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
-	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
-	spin_unlock_irqrestore(&mem->spinlock, flags);
-
-	if (err != 0)
-		return ERR_PTR(err);
-	return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-		ssize_t size, dma_addr_t *dma_handle)
+static void *__dma_alloc_from_coherent(struct device *dev,
+				       struct dma_coherent_mem *mem,
+				       ssize_t size, dma_addr_t *dma_handle)
 {
 	int order = get_order(size);
 	unsigned long flags;
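A minimal, hypothetical usage sketch for the API documented in the comment block added in the hunk above: after this patch, dma_declare_coherent_memory() takes only the device, the CPU physical address, the device DMA address, and the size (the flags argument is gone). The device, addresses, and size below are invented for illustration and are not part of the patch; the header location is an assumption based on the includes this patch switches to.

#include <linux/device.h>
#include <linux/dma-map-ops.h>	/* assumed home of the declaration after this series */
#include <linux/sizes.h>

/* Hypothetical platform code: hand 1 MiB of on-chip memory at CPU physical
 * 0x90000000 (seen by the device at bus address 0x0) to one device, so its
 * dma_alloc_coherent() calls are served from that region.
 */
static int example_declare_coherent(struct device *dev)
{
	return dma_declare_coherent_memory(dev, 0x90000000, 0x00000000, SZ_1M);
}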
@@ -177,7 +157,7 @@
 
 	spin_lock_irqsave(&mem->spinlock, flags);
 
-	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
 		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
@@ -187,8 +167,9 @@
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-	ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = dma_get_device_base(dev, mem) +
+			((dma_addr_t)pageno << PAGE_SHIFT);
+	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	memset(ret, 0, size);
 	return ret;
@@ -219,33 +200,25 @@
 	if (!mem)
 		return 0;
 
-	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
-	if (*ret)
-		return 1;
-
-	/*
-	 * In the case where the allocation can not be satisfied from the
-	 * per-device area, try to fall back to generic memory if the
-	 * constraints allow it.
-	 */
-	return mem->flags & DMA_MEMORY_EXCLUSIVE;
+	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
+	return 1;
 }
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+				     dma_addr_t *dma_handle)
 {
 	if (!dma_coherent_default_memory)
 		return NULL;
 
-	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
-			dma_handle);
+	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
+					 dma_handle);
 }
 
 static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
 				       int order, void *vaddr)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr <
-	   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+	   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
 		unsigned long flags;
 
@@ -275,7 +248,6 @@
 
 	return __dma_release_from_coherent(mem, order, vaddr);
 }
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
 
 int dma_release_from_global_coherent(int order, void *vaddr)
 {
@@ -290,10 +262,10 @@
 		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
 		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-		int user_count = vma_pages(vma);
+		unsigned long user_count = vma_pages(vma);
 		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 		*ret = -ENXIO;
@@ -330,7 +302,6 @@
 
 	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
 }
-EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
 
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
 				  size_t size, int *ret)
@@ -359,8 +330,7 @@
 
 	if (!mem) {
 		ret = dma_init_coherent_memory(rmem->base, rmem->base,
-					       rmem->size,
-					       DMA_MEMORY_EXCLUSIVE, &mem);
+					       rmem->size, &mem);
 		if (ret) {
 			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
 				&rmem->base, (unsigned long)rmem->size / SZ_1M);