forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -53,10 +53,10 @@
 	unsigned int i;
 	unsigned long size = 0;
 
-	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+	for_each_sgtable_dma_sg(sgt, s, i) {
 		if (sg_dma_address(s) != expected)
 			break;
-		expected = sg_dma_address(s) + sg_dma_len(s);
+		expected += sg_dma_len(s);
 		size += sg_dma_len(s);
 	}
 	return size;
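
Note: for_each_sgtable_dma_sg() walks the DMA-mapped side of an sg_table (sgt->nents entries after mapping), so the caller no longer passes sgt->sgl and a count by hand, and the running expected address can simply be advanced by sg_dma_len(). A minimal sketch of how this contiguity check works with the new helper (illustrative only, not the exact file contents):

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Sketch: walk the DMA-mapped entries and stop at the first address gap. */
static unsigned long contiguous_dma_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned long size = 0;
	unsigned int i;

	/* Replaces for_each_sg(sgt->sgl, s, sgt->nents, i). */
	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}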
@@ -95,12 +95,10 @@
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	/* DMABUF exporter will flush the cache for us */
-	if (!sgt || buf->db_attach)
+	if (!sgt)
 		return;
 
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-			       buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dc_finish(void *buf_priv)
@@ -108,11 +106,10 @@
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	/* DMABUF exporter will flush the cache for us */
-	if (!sgt || buf->db_attach)
+	if (!sgt)
 		return;
 
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 /*********************************************/
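
Note: dma_sync_sgtable_for_device()/dma_sync_sgtable_for_cpu() are thin wrappers that always sync the CPU-side list (sgt->orig_nents entries), removing the nents/orig_nents confusion of the open-coded calls they replace. A rough sketch of the prepare()/finish() ownership pattern, assuming non-coherent memory that actually needs the maintenance (hypothetical helper names):

#include <linux/dma-mapping.h>

/* Hand the buffer to the device; equivalent to
 * dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir). */
static void buf_prepare_for_dma(struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_sync_sgtable_for_device(dev, sgt, dir);
}

/* Hand the buffer back to the CPU once DMA has completed. */
static void buf_finish_after_dma(struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	dma_sync_sgtable_for_cpu(dev, sgt, dir);
}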
@@ -149,8 +146,7 @@
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
-	if (attrs)
-		buf->attrs = attrs;
+	buf->attrs = attrs;
 	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
 				      GFP_KERNEL | gfp_flags, buf->attrs);
 	if (!buf->cookie) {
@@ -185,12 +181,6 @@
 		printk(KERN_ERR "No buffer to map\n");
 		return -EINVAL;
 	}
-
-	/*
-	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
-	 * map whole buffer
-	 */
-	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
 			buf->dma_addr, buf->size, buf->attrs);
@@ -273,8 +263,14 @@
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		/*
+		 * Cache sync can be skipped here, as the vb2_dc memory is
+		 * allocated from device coherent memory, which means the
+		 * memory locations do not require any explicit cache
+		 * maintenance prior or after being used by the device.
+		 */
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -299,15 +295,17 @@
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		attach->dma_dir = DMA_NONE;
 	}
 
-	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	/*
+	 * mapping to the client with new direction, no cache sync
+	 * required see comment in vb2_dc_dmabuf_ops_detach()
+	 */
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
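
Note: the error convention changes here. dma_map_sg() returned the number of mapped entries (0 on failure) and the caller stored it in sgt->nents itself, whereas dma_map_sgtable() returns 0 on success or a negative errno and updates sgt->nents internally. DMA_ATTR_SKIP_CPU_SYNC suppresses cache maintenance on map/unmap, which is safe here per the comment in vb2_dc_dmabuf_ops_detach(). A hedged usage sketch (hypothetical helper, not part of this file):

#include <linux/dma-mapping.h>

/* Sketch: map an sg_table for an importing device without CPU cache sync. */
static int map_for_importer(struct device *dev, struct sg_table *sgt,
			    enum dma_data_direction dir)
{
	int ret;

	/* 0 on success, negative errno on failure; sgt->nents is set inside. */
	ret = dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ret;

	/* ... hand sgt to the importer ... */

	dma_unmap_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}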
@@ -332,11 +330,18 @@
 	vb2_dc_put(dbuf->priv);
 }
 
-static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+static int
+vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+				   enum dma_data_direction direction)
 {
-	struct vb2_dc_buf *buf = dbuf->priv;
+	return 0;
+}
 
-	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+static int
+vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+				 enum dma_data_direction direction)
+{
+	return 0;
 }
 
 static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
@@ -357,7 +362,8 @@
 	.detach = vb2_dc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
-	.map = vb2_dc_dmabuf_ops_kmap,
+	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
 	.vmap = vb2_dc_dmabuf_ops_vmap,
 	.mmap = vb2_dc_dmabuf_ops_mmap,
 	.release = vb2_dc_dmabuf_ops_release,
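
Note: the page-granular .map (kmap) callback is dropped from the ops table; importers that need CPU access now bracket it with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() (no-ops here since the backing memory is coherent) and use the vmap op for a kernel mapping. A rough importer-side sketch (hypothetical importer code, not part of this file):

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

/* Sketch: CPU access to an imported dma-buf via the begin/end hooks. */
static int importer_cpu_read(struct dma_buf *dbuf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... read the buffer through a vmap()/mmap() mapping ... */

	return dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
}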
@@ -428,8 +434,8 @@
 		 * No need to sync to CPU, it's already synced to the CPU
 		 * since the finish() memop will have been called before this.
 		 */
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		pages = frame_vector_pages(buf->vec);
 		/* sgt should exist only if vector contains pages... */
 		BUG_ON(IS_ERR(pages));
@@ -439,41 +445,13 @@
 			set_page_dirty_lock(pages[i]);
 		sg_free_table(sgt);
 		kfree(sgt);
+	} else {
+		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
+				   buf->dma_dir, 0);
 	}
 	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
-
-/*
- * For some kind of reserved memory there might be no struct page available,
- * so all that can be done to support such 'pages' is to try to convert
- * pfn to dma address or at the last resort just assume that
- * dma address == physical address (like it has been assumed in earlier version
- * of videobuf2-dma-contig
- */
-
-#ifdef __arch_pfn_to_dma
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
-}
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	return (dma_addr_t)__pfn_to_bus(pfn);
-}
-#elif defined(__pfn_to_phys)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	return (dma_addr_t)__pfn_to_phys(pfn);
-}
-#else
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	/* really, we cannot do anything better at this point */
-	return (dma_addr_t)(pfn) << PAGE_SHIFT;
-}
-#endif
 
 static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 				unsigned long size, enum dma_data_direction dma_dir)
@@ -509,8 +487,7 @@
 	buf->dma_dir = dma_dir;
 
 	offset = lower_32_bits(offset_in_page(vaddr));
-	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
-					       dma_dir == DMA_BIDIRECTIONAL);
+	vec = vb2_create_framevec(vaddr, size);
 	if (IS_ERR(vec)) {
 		ret = PTR_ERR(vec);
 		goto fail_buf;
528505 for (i = 1; i < n_pages; i++)
529506 if (nums[i-1] + 1 != nums[i])
530507 goto fail_pfnvec;
531
- buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
508
+ buf->dma_addr = dma_map_resource(buf->dev,
509
+ __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
510
+ if (dma_mapping_error(buf->dev, buf->dma_addr)) {
511
+ ret = -ENOMEM;
512
+ goto fail_pfnvec;
513
+ }
532514 goto out;
533515 }
534516
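
Note: the removed vb2_dc_pfn_to_dma() guesswork (see the deleted block at old lines 446-476 above) is replaced by dma_map_resource(), which creates a proper, IOMMU-aware DMA mapping for physical ranges that have no struct page; the matching dma_unmap_resource() was added to vb2_dc_put_userptr() earlier in this patch. A hedged sketch of the pattern (hypothetical helper name; PFN_PHYS() is the generic equivalent of the __pfn_to_phys() used above):

#include <linux/dma-mapping.h>
#include <linux/pfn.h>

/* Sketch: map a pfn-contiguous, struct-page-less range for DMA.
 * The caller checks the result with dma_mapping_error() and later
 * releases it with dma_unmap_resource(dev, addr, size, dir, 0). */
static dma_addr_t map_pfn_range(struct device *dev, unsigned long pfn,
				size_t size, enum dma_data_direction dir)
{
	return dma_map_resource(dev, PFN_PHYS(pfn), size, dir, 0);
}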
@@ -550,9 +532,8 @@
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (sgt->nents <= 0) {
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
 		goto fail_sgt_init;
@@ -574,8 +555,7 @@
 	return buf;
 
 fail_map_sg:
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
 	sg_free_table(sgt);
@@ -622,8 +602,8 @@
 	/* checking if dmabuf is big enough to store contiguous chunk */
 	contig_size = vb2_dc_get_contiguous_size(sgt);
 	if (contig_size < buf->size) {
-		pr_err("contiguous chunk is too small %lu/%lu b\n",
-		       contig_size, buf->size);
+		pr_err("contiguous chunk is too small %lu/%lu\n",
+		       contig_size, buf->size);
 		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
 		return -EFAULT;
 	}
@@ -755,9 +735,8 @@
 int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
 {
 	if (!dev->dma_parms) {
-		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
-		if (!dev->dma_parms)
-			return -ENOMEM;
+		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
+		return -ENODEV;
 	}
 	if (dma_get_max_seg_size(dev) < size)
 		return dma_set_max_seg_size(dev, size);
@@ -765,21 +744,6 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
-
-/*
- * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
- * @dev: device for configuring DMA parameters
- *
- * This function releases resources allocated to configure DMA parameters
- * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
- * device drivers on driver remove.
- */
-void vb2_dma_contig_clear_max_seg_size(struct device *dev)
-{
-	kfree(dev->dma_parms);
-	dev->dma_parms = NULL;
-}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
 
 MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
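
Note on the last two hunks: vb2_dma_contig_set_max_seg_size() no longer allocates dev->dma_parms on the fly, presumably because newer kernels provide dma_parms from the bus/driver core for platform and PCI devices, and the matching vb2_dma_contig_clear_max_seg_size() helper (which only freed that allocation) is removed outright. A NULL dma_parms therefore now indicates a device setup problem and yields -ENODEV; drivers keep calling the setter at probe time and simply drop the clear call on remove. Typical usage, roughly (hypothetical probe helper; SZ_4M is an arbitrary example limit):

#include <linux/sizes.h>
#include <media/videobuf2-dma-contig.h>

/* Sketch: raise the DMA segment size limit so vb2-dma-contig can
 * express a large contiguous buffer as a single segment. */
static int example_dma_setup(struct device *dev)
{
	/* Returns -ENODEV if the bus code did not provide dev->dma_parms. */
	return vb2_dma_contig_set_max_seg_size(dev, SZ_4M);
}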