@@ -12,7 +12,7 @@
 #include <linux/cma.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
@@ -21,7 +21,6 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <uapi/linux/dma-heap.h>
 
 
 struct cma_heap {
@@ -39,8 +38,6 @@
 	pgoff_t pagecount;
 	int vmap_cnt;
 	void *vaddr;
-
-	bool uncached;
 };
 
 struct dma_heap_attachment {
@@ -48,8 +45,6 @@
 	struct sg_table table;
 	struct list_head list;
 	bool mapped;
-
-	bool uncached;
 };
 
 static int cma_heap_attach(struct dma_buf *dmabuf,
@@ -76,7 +71,6 @@
 	INIT_LIST_HEAD(&a->list);
 	a->mapped = false;
 
-	a->uncached = buffer->uncached;
 	attachment->priv = a;
 
 	mutex_lock(&buffer->lock);
@@ -108,9 +102,6 @@
 	int attrs = attachment->dma_map_attrs;
 	int ret;
 
-	if (a->uncached)
-		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-
 	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
 	if (ret)
 		return ERR_PTR(-ENOMEM);
@@ -126,93 +117,49 @@
 	int attrs = attachment->dma_map_attrs;
 
 	a->mapped = false;
-
-	if (a->uncached)
-		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-
 	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
-static int
-cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
-		enum dma_data_direction direction,
-		unsigned int offset,
-		unsigned int len)
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+		enum dma_data_direction direction)
 {
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	if (buffer->uncached)
-		return 0;
-
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
 		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
 	}
-	if (list_empty(&buffer->attachments)) {
-		phys_addr_t phys = page_to_phys(buffer->cma_pages);
-
-		dma_sync_single_for_cpu(dma_heap_get_dev(buffer->heap->heap),
-				phys + offset,
-				len,
-				direction);
-	}
 	mutex_unlock(&buffer->lock);
 
 	return 0;
 }
 
-static int
-cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
-		enum dma_data_direction direction,
-		unsigned int offset,
-		unsigned int len)
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+		enum dma_data_direction direction)
 {
 	struct cma_heap_buffer *buffer = dmabuf->priv;
 	struct dma_heap_attachment *a;
 
+	mutex_lock(&buffer->lock);
+
 	if (buffer->vmap_cnt)
 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-	if (buffer->uncached)
-		return 0;
-
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		if (!a->mapped)
 			continue;
 		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
 	}
-	if (list_empty(&buffer->attachments)) {
-		phys_addr_t phys = page_to_phys(buffer->cma_pages);
-
-		dma_sync_single_for_device(dma_heap_get_dev(buffer->heap->heap),
-				phys + offset,
-				len,
-				direction);
-	}
 	mutex_unlock(&buffer->lock);
 
 	return 0;
-}
-
-static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-		enum dma_data_direction dir)
-{
-	return cma_heap_dma_buf_begin_cpu_access_partial(dmabuf, dir, 0,
-			dmabuf->size);
-}
-
-static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-		enum dma_data_direction dir)
-{
-	return cma_heap_dma_buf_end_cpu_access_partial(dmabuf, dir, 0,
-			dmabuf->size);
 }
 
 static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
@@ -240,9 +187,6 @@
 	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
 		return -EINVAL;
 
-	if (buffer->uncached)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
 	vma->vm_ops = &dma_heap_vm_ops;
 	vma->vm_private_data = buffer;
 
@@ -252,12 +196,8 @@
 static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
 {
 	void *vaddr;
-	pgprot_t pgprot = PAGE_KERNEL;
 
-	if (buffer->uncached)
-		pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
 	if (!vaddr)
 		return ERR_PTR(-ENOMEM);
 
@@ -324,18 +264,16 @@
 	.unmap_dma_buf = cma_heap_unmap_dma_buf,
 	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
 	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
-	.begin_cpu_access_partial = cma_heap_dma_buf_begin_cpu_access_partial,
-	.end_cpu_access_partial = cma_heap_dma_buf_end_cpu_access_partial,
 	.mmap = cma_heap_mmap,
 	.vmap = cma_heap_vmap,
 	.vunmap = cma_heap_vunmap,
 	.release = cma_heap_dma_buf_release,
 };
 
-static struct dma_buf *cma_heap_do_allocate(struct dma_heap *heap,
+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
 		unsigned long len,
 		unsigned long fd_flags,
-		unsigned long heap_flags, bool uncached)
+		unsigned long heap_flags)
 {
 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
 	struct cma_heap_buffer *buffer;
@@ -347,13 +285,10 @@
 	struct dma_buf *dmabuf;
 	int ret = -ENOMEM;
 	pgoff_t pg;
-	dma_addr_t dma;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer)
 		return ERR_PTR(-ENOMEM);
-
-	buffer->uncached = uncached;
 
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
@@ -414,13 +349,6 @@
 		goto free_pages;
 	}
 
-	if (buffer->uncached) {
-		dma = dma_map_page(dma_heap_get_dev(heap), buffer->cma_pages, 0,
-				buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
-		dma_unmap_page(dma_heap_get_dev(heap), dma,
-				buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
-	}
-
 	return dmabuf;
 
 free_pages:
@@ -433,107 +361,14 @@
 	return ERR_PTR(ret);
 }
 
-static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
-		unsigned long len,
-		unsigned long fd_flags,
-		unsigned long heap_flags)
-{
-	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
-}
-
-#if IS_ENABLED(CONFIG_NO_GKI)
-static int cma_heap_get_phys(struct dma_heap *heap,
-		struct dma_heap_phys_data *phys)
-{
-	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
-	struct cma_heap_buffer *buffer;
-	struct dma_buf *dmabuf;
-
-	phys->paddr = (__u64)-1;
-
-	if (IS_ERR_OR_NULL(phys))
-		return -EINVAL;
-
-	dmabuf = dma_buf_get(phys->fd);
-	if (IS_ERR_OR_NULL(dmabuf))
-		return -EBADFD;
-
-	buffer = dmabuf->priv;
-	if (IS_ERR_OR_NULL(buffer))
-		goto err;
-
-	if (buffer->heap != cma_heap)
-		goto err;
-
-	phys->paddr = page_to_phys(buffer->cma_pages);
-
-err:
-	dma_buf_put(dmabuf);
-
-	return (phys->paddr == (__u64)-1) ? -EINVAL : 0;
-}
-#endif
-
 static const struct dma_heap_ops cma_heap_ops = {
 	.allocate = cma_heap_allocate,
-#if IS_ENABLED(CONFIG_NO_GKI)
-	.get_phys = cma_heap_get_phys,
-#endif
 };
-
-static struct dma_buf *cma_uncached_heap_allocate(struct dma_heap *heap,
-		unsigned long len,
-		unsigned long fd_flags,
-		unsigned long heap_flags)
-{
-	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
-}
-
-static struct dma_buf *cma_uncached_heap_not_initialized(struct dma_heap *heap,
-		unsigned long len,
-		unsigned long fd_flags,
-		unsigned long heap_flags)
-{
-	pr_info("heap %s not initialized\n", dma_heap_get_name(heap));
-	return ERR_PTR(-EBUSY);
-}
-
-static struct dma_heap_ops cma_uncached_heap_ops = {
-	.allocate = cma_uncached_heap_not_initialized,
-};
-
-static int set_heap_dev_dma(struct device *heap_dev)
-{
-	int err = 0;
-
-	if (!heap_dev)
-		return -EINVAL;
-
-	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));
-
-	if (!heap_dev->dma_parms) {
-		heap_dev->dma_parms = devm_kzalloc(heap_dev,
-				sizeof(*heap_dev->dma_parms),
-				GFP_KERNEL);
-		if (!heap_dev->dma_parms)
-			return -ENOMEM;
-
-		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
-		if (err) {
-			devm_kfree(heap_dev, heap_dev->dma_parms);
-			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
-			return err;
-		}
-	}
-
-	return 0;
-}
 
 static int __add_cma_heap(struct cma *cma, void *data)
 {
-	struct cma_heap *cma_heap, *cma_uncached_heap;
+	struct cma_heap *cma_heap;
 	struct dma_heap_export_info exp_info;
-	int ret;
 
 	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
 	if (!cma_heap)
@@ -546,47 +381,13 @@
 
 	cma_heap->heap = dma_heap_add(&exp_info);
 	if (IS_ERR(cma_heap->heap)) {
-		ret = PTR_ERR(cma_heap->heap);
-		goto free_cma_heap;
+		int ret = PTR_ERR(cma_heap->heap);
+
+		kfree(cma_heap);
+		return ret;
 	}
-
-	cma_uncached_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
-	if (!cma_uncached_heap) {
-		ret = -ENOMEM;
-		goto put_cma_heap;
-	}
-
-	cma_uncached_heap->cma = cma;
-
-	exp_info.name = "cma-uncached";
-	exp_info.ops = &cma_uncached_heap_ops;
-	exp_info.priv = cma_uncached_heap;
-
-	cma_uncached_heap->heap = dma_heap_add(&exp_info);
-	if (IS_ERR(cma_uncached_heap->heap)) {
-		ret = PTR_ERR(cma_uncached_heap->heap);
-		goto free_uncached_cma_heap;
-	}
-
-	ret = set_heap_dev_dma(dma_heap_get_dev(cma_uncached_heap->heap));
-	if (ret)
-		goto put_uncached_cma_heap;
-
-	mb(); /* make sure we only set allocate after dma_mask is set */
-	cma_uncached_heap_ops.allocate = cma_uncached_heap_allocate;
 
 	return 0;
-
-put_uncached_cma_heap:
-	dma_heap_put(cma_uncached_heap->heap);
-free_uncached_cma_heap:
-	kfree(cma_uncached_heap);
-put_cma_heap:
-	dma_heap_put(cma_heap->heap);
-free_cma_heap:
-	kfree(cma_heap);
-
-	return ret;
 }
 
 static int add_default_cma_heap(void)
---|