forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-02-19 1c055e55a242a33e574e48be530e06770a210dcd
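This change strips the Rockchip-specific extensions out of the CMA dma-buf heap and brings the file back in line with upstream cma_heap.c: the separate "cma-uncached" heap, the per-buffer uncached flag, the partial begin/end CPU-access callbacks and the CONFIG_NO_GKI get_phys hook are removed, and the legacy <linux/dma-contiguous.h> include is replaced by <linux/dma-map-ops.h>. Only the cached "cma" heap remains registered. For orientation, a minimal userspace allocation against that heap could look like the sketch below; it assumes the standard dma-heap uAPI (/dev/dma_heap/cma and DMA_HEAP_IOCTL_ALLOC) and is not part of this commit.

    /* Hypothetical usage sketch, not part of this commit: allocate a cached
     * CMA buffer from the remaining "cma" heap via the generic dma-heap ioctl. */
    #include <fcntl.h>
    #include <stddef.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    static int cma_heap_alloc(size_t len)
    {
            struct dma_heap_allocation_data data;
            int heap_fd, ret;

            heap_fd = open("/dev/dma_heap/cma", O_RDONLY | O_CLOEXEC);
            if (heap_fd < 0)
                    return -1;

            memset(&data, 0, sizeof(data));
            data.len = len;                     /* requested size in bytes */
            data.fd_flags = O_RDWR | O_CLOEXEC; /* flags for the returned dma-buf fd */
            data.heap_flags = 0;                /* the CMA heap defines no extra flags */

            ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
            close(heap_fd);

            return ret < 0 ? -1 : (int)data.fd; /* dma-buf fd on success */
    }
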
kernel/drivers/dma-buf/heaps/cma_heap.c
@@ -12,7 +12,7 @@
 #include <linux/cma.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
@@ -21,7 +21,6 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <uapi/linux/dma-heap.h>
 
 
 struct cma_heap {
@@ -39,8 +38,6 @@
         pgoff_t pagecount;
         int vmap_cnt;
         void *vaddr;
-
-        bool uncached;
 };
 
 struct dma_heap_attachment {
@@ -48,8 +45,6 @@
         struct sg_table table;
         struct list_head list;
         bool mapped;
-
-        bool uncached;
 };
 
 static int cma_heap_attach(struct dma_buf *dmabuf,
@@ -76,7 +71,6 @@
         INIT_LIST_HEAD(&a->list);
         a->mapped = false;
 
-        a->uncached = buffer->uncached;
         attachment->priv = a;
 
         mutex_lock(&buffer->lock);
@@ -108,9 +102,6 @@
         int attrs = attachment->dma_map_attrs;
         int ret;
 
-        if (a->uncached)
-                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-
         ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
         if (ret)
                 return ERR_PTR(-ENOMEM);
@@ -126,93 +117,49 @@
         int attrs = attachment->dma_map_attrs;
 
         a->mapped = false;
-
-        if (a->uncached)
-                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-
         dma_unmap_sgtable(attachment->dev, table, direction, attrs);
 }
 
-static int
-cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
-                                          enum dma_data_direction direction,
-                                          unsigned int offset,
-                                          unsigned int len)
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                                             enum dma_data_direction direction)
 {
         struct cma_heap_buffer *buffer = dmabuf->priv;
         struct dma_heap_attachment *a;
 
+        mutex_lock(&buffer->lock);
+
         if (buffer->vmap_cnt)
                 invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-        if (buffer->uncached)
-                return 0;
-
-        mutex_lock(&buffer->lock);
         list_for_each_entry(a, &buffer->attachments, list) {
                 if (!a->mapped)
                         continue;
                 dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
         }
-        if (list_empty(&buffer->attachments)) {
-                phys_addr_t phys = page_to_phys(buffer->cma_pages);
-
-                dma_sync_single_for_cpu(dma_heap_get_dev(buffer->heap->heap),
-                                        phys + offset,
-                                        len,
-                                        direction);
-        }
         mutex_unlock(&buffer->lock);
 
         return 0;
 }
 
-static int
-cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
-                                        enum dma_data_direction direction,
-                                        unsigned int offset,
-                                        unsigned int len)
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+                                           enum dma_data_direction direction)
 {
         struct cma_heap_buffer *buffer = dmabuf->priv;
         struct dma_heap_attachment *a;
 
+        mutex_lock(&buffer->lock);
+
         if (buffer->vmap_cnt)
                 flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-        if (buffer->uncached)
-                return 0;
-
-        mutex_lock(&buffer->lock);
         list_for_each_entry(a, &buffer->attachments, list) {
                 if (!a->mapped)
                         continue;
                 dma_sync_sgtable_for_device(a->dev, &a->table, direction);
         }
-        if (list_empty(&buffer->attachments)) {
-                phys_addr_t phys = page_to_phys(buffer->cma_pages);
-
-                dma_sync_single_for_device(dma_heap_get_dev(buffer->heap->heap),
-                                           phys + offset,
-                                           len,
-                                           direction);
-        }
         mutex_unlock(&buffer->lock);
 
         return 0;
-}
-
-static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-                                             enum dma_data_direction dir)
-{
-        return cma_heap_dma_buf_begin_cpu_access_partial(dmabuf, dir, 0,
-                                                         dmabuf->size);
-}
-
-static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-                                           enum dma_data_direction dir)
-{
-        return cma_heap_dma_buf_end_cpu_access_partial(dmabuf, dir, 0,
-                                                       dmabuf->size);
 }
 
 static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
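
With the uncached early-return and the *_partial variants gone, begin/end CPU access now always takes the buffer lock and syncs every mapped attachment over the whole buffer; there is no offset/length fast path any more. Callers that relied on the partial ops can fall back to the generic dma-buf sync ioctl, which lands in the full-buffer callbacks kept above. A rough userspace sketch, assuming only the standard DMA_BUF_IOCTL_SYNC uAPI (not code from this diff):

    /* Hypothetical example: bracket CPU writes to an mmap'ed dma-buf with the
     * generic sync ioctl; the kernel side ends up in begin/end_cpu_access. */
    #include <stddef.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/dma-buf.h>

    static int cpu_fill_buffer(int dmabuf_fd, void *mapped, size_t len)
    {
            struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE };

            if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
                    return -1;

            memset(mapped, 0, len);         /* CPU access happens here */

            sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
            return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
    }
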
@@ -240,9 +187,6 @@
         if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                 return -EINVAL;
 
-        if (buffer->uncached)
-                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
         vma->vm_ops = &dma_heap_vm_ops;
         vma->vm_private_data = buffer;
 
@@ -252,12 +196,8 @@
 static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
 {
         void *vaddr;
-        pgprot_t pgprot = PAGE_KERNEL;
 
-        if (buffer->uncached)
-                pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
+        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
         if (!vaddr)
                 return ERR_PTR(-ENOMEM);
 
@@ -324,18 +264,16 @@
         .unmap_dma_buf = cma_heap_unmap_dma_buf,
         .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
         .end_cpu_access = cma_heap_dma_buf_end_cpu_access,
-        .begin_cpu_access_partial = cma_heap_dma_buf_begin_cpu_access_partial,
-        .end_cpu_access_partial = cma_heap_dma_buf_end_cpu_access_partial,
         .mmap = cma_heap_mmap,
         .vmap = cma_heap_vmap,
         .vunmap = cma_heap_vunmap,
         .release = cma_heap_dma_buf_release,
 };
 
-static struct dma_buf *cma_heap_do_allocate(struct dma_heap *heap,
+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
                                          unsigned long len,
                                          unsigned long fd_flags,
-                                         unsigned long heap_flags, bool uncached)
+                                         unsigned long heap_flags)
 {
         struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
         struct cma_heap_buffer *buffer;
@@ -347,13 +285,10 @@
         struct dma_buf *dmabuf;
         int ret = -ENOMEM;
         pgoff_t pg;
-        dma_addr_t dma;
 
         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
         if (!buffer)
                 return ERR_PTR(-ENOMEM);
-
-        buffer->uncached = uncached;
 
         INIT_LIST_HEAD(&buffer->attachments);
         mutex_init(&buffer->lock);
@@ -414,13 +349,6 @@
                 goto free_pages;
         }
 
-        if (buffer->uncached) {
-                dma = dma_map_page(dma_heap_get_dev(heap), buffer->cma_pages, 0,
-                                   buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
-                dma_unmap_page(dma_heap_get_dev(heap), dma,
-                               buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
-        }
-
         return dmabuf;
 
 free_pages:
@@ -433,107 +361,14 @@
         return ERR_PTR(ret);
 }
 
-static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
-                                         unsigned long len,
-                                         unsigned long fd_flags,
-                                         unsigned long heap_flags)
-{
-        return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
-}
-
-#if IS_ENABLED(CONFIG_NO_GKI)
-static int cma_heap_get_phys(struct dma_heap *heap,
-                             struct dma_heap_phys_data *phys)
-{
-        struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
-        struct cma_heap_buffer *buffer;
-        struct dma_buf *dmabuf;
-
-        phys->paddr = (__u64)-1;
-
-        if (IS_ERR_OR_NULL(phys))
-                return -EINVAL;
-
-        dmabuf = dma_buf_get(phys->fd);
-        if (IS_ERR_OR_NULL(dmabuf))
-                return -EBADFD;
-
-        buffer = dmabuf->priv;
-        if (IS_ERR_OR_NULL(buffer))
-                goto err;
-
-        if (buffer->heap != cma_heap)
-                goto err;
-
-        phys->paddr = page_to_phys(buffer->cma_pages);
-
-err:
-        dma_buf_put(dmabuf);
-
-        return (phys->paddr == (__u64)-1) ? -EINVAL : 0;
-}
-#endif
-
 static const struct dma_heap_ops cma_heap_ops = {
         .allocate = cma_heap_allocate,
-#if IS_ENABLED(CONFIG_NO_GKI)
-        .get_phys = cma_heap_get_phys,
-#endif
 };
-
-static struct dma_buf *cma_uncached_heap_allocate(struct dma_heap *heap,
-                                                  unsigned long len,
-                                                  unsigned long fd_flags,
-                                                  unsigned long heap_flags)
-{
-        return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
-}
-
-static struct dma_buf *cma_uncached_heap_not_initialized(struct dma_heap *heap,
-                                                         unsigned long len,
-                                                         unsigned long fd_flags,
-                                                         unsigned long heap_flags)
-{
-        pr_info("heap %s not initialized\n", dma_heap_get_name(heap));
-        return ERR_PTR(-EBUSY);
-}
-
-static struct dma_heap_ops cma_uncached_heap_ops = {
-        .allocate = cma_uncached_heap_not_initialized,
-};
-
-static int set_heap_dev_dma(struct device *heap_dev)
-{
-        int err = 0;
-
-        if (!heap_dev)
-                return -EINVAL;
-
-        dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));
-
-        if (!heap_dev->dma_parms) {
-                heap_dev->dma_parms = devm_kzalloc(heap_dev,
-                                                   sizeof(*heap_dev->dma_parms),
-                                                   GFP_KERNEL);
-                if (!heap_dev->dma_parms)
-                        return -ENOMEM;
-
-                err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
-                if (err) {
-                        devm_kfree(heap_dev, heap_dev->dma_parms);
-                        dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
-                        return err;
-                }
-        }
-
-        return 0;
-}
 
 static int __add_cma_heap(struct cma *cma, void *data)
 {
-        struct cma_heap *cma_heap, *cma_uncached_heap;
+        struct cma_heap *cma_heap;
         struct dma_heap_export_info exp_info;
-        int ret;
 
         cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
         if (!cma_heap)
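
The hunk above also drops the vendor get_phys hook (guarded by CONFIG_NO_GKI) along with the uncached-heap plumbing, so a buffer's physical address can no longer be queried straight from the heap. A kernel consumer that needs a DMA address for the contiguous buffer would instead go through the regular dma-buf attach/map path; a minimal sketch under that assumption (generic dma-buf API, not code from this commit):

    /* Hypothetical example: resolve the DMA address of a heap buffer through
     * the standard dma-buf kernel API instead of the removed get_phys hook.
     * "dev" is the importing device; the caller keeps the returned objects
     * and later unmaps/detaches/puts them in reverse order. */
    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static dma_addr_t example_map_heap_buffer(struct device *dev, int fd,
                                              struct dma_buf **dbuf,
                                              struct dma_buf_attachment **att,
                                              struct sg_table **sgt)
    {
            *dbuf = dma_buf_get(fd);
            if (IS_ERR(*dbuf))
                    return 0;

            *att = dma_buf_attach(*dbuf, dev);
            if (IS_ERR(*att))
                    goto put_buf;

            *sgt = dma_buf_map_attachment(*att, DMA_BIDIRECTIONAL);
            if (IS_ERR(*sgt))
                    goto detach;

            /* CMA heap buffers are physically contiguous, so one entry suffices. */
            return sg_dma_address((*sgt)->sgl);

    detach:
            dma_buf_detach(*dbuf, *att);
    put_buf:
            dma_buf_put(*dbuf);
            return 0;
    }
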
@@ -546,47 +381,13 @@
 
         cma_heap->heap = dma_heap_add(&exp_info);
         if (IS_ERR(cma_heap->heap)) {
-                ret = PTR_ERR(cma_heap->heap);
-                goto free_cma_heap;
+                int ret = PTR_ERR(cma_heap->heap);
+
+                kfree(cma_heap);
+                return ret;
         }
-
-        cma_uncached_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
-        if (!cma_uncached_heap) {
-                ret = -ENOMEM;
-                goto put_cma_heap;
-        }
-
-        cma_uncached_heap->cma = cma;
-
-        exp_info.name = "cma-uncached";
-        exp_info.ops = &cma_uncached_heap_ops;
-        exp_info.priv = cma_uncached_heap;
-
-        cma_uncached_heap->heap = dma_heap_add(&exp_info);
-        if (IS_ERR(cma_uncached_heap->heap)) {
-                ret = PTR_ERR(cma_uncached_heap->heap);
-                goto free_uncached_cma_heap;
-        }
-
-        ret = set_heap_dev_dma(dma_heap_get_dev(cma_uncached_heap->heap));
-        if (ret)
-                goto put_uncached_cma_heap;
-
-        mb(); /* make sure we only set allocate after dma_mask is set */
-        cma_uncached_heap_ops.allocate = cma_uncached_heap_allocate;
 
         return 0;
-
-put_uncached_cma_heap:
-        dma_heap_put(cma_uncached_heap->heap);
-free_uncached_cma_heap:
-        kfree(cma_uncached_heap);
-put_cma_heap:
-        dma_heap_put(cma_heap->heap);
-free_cma_heap:
-        kfree(cma_heap);
-
-        return ret;
 }
 
 static int add_default_cma_heap(void)