forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2010 Samsung Electronics
  *
- * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@
 	int i;
 
 	order = get_order(size);
-	/* Dont over allocate*/
+	/* Don't over allocate*/
 	if ((PAGE_SIZE << order) > size)
 		order--;
 
@@ -120,6 +120,11 @@
 	buf->num_pages = size >> PAGE_SHIFT;
 	buf->dma_sgt = &buf->sg_table;
 
+	/*
+	 * NOTE: dma-sg allocates memory using the page allocator directly, so
+	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
+	 * attributes passed from the upper layer.
+	 */
 	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
 				    GFP_KERNEL | __GFP_ZERO);
 	if (!buf->pages)
@@ -142,9 +147,8 @@
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC))
 		goto fail_map;
 
 	buf->handler.refcount = &buf->refcount;
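Note: this hunk (and the matching ones below) follows the sgtable-wrapper conversion pattern: dma_map_sgtable() keeps the nents bookkeeping inside struct sg_table and reports failure as a negative errno, where the old dma_map_sg_attrs() call made the caller store the count and treat a return of 0 as failure. A minimal sketch of the calling-convention difference, using a made-up helper (map_for_dma) rather than code from this driver:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hedged sketch of the conversion applied throughout this patch;
 * map_for_dma() is a made-up helper, not a function in this driver.
 */
static int map_for_dma(struct device *dev, struct sg_table *sgt,
		       enum dma_data_direction dir)
{
	/*
	 * Old style: the caller stored the mapped entry count itself and
	 * treated a return value of 0 as failure:
	 *
	 *	sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
	 *				      dir, DMA_ATTR_SKIP_CPU_SYNC);
	 *	if (!sgt->nents)
	 *		return -EIO;
	 *	return 0;
	 */

	/*
	 * New style: dma_map_sgtable() updates sgt->nents internally and
	 * returns 0 on success or a negative errno on failure.
	 */
	return dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
}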
@@ -180,8 +184,8 @@
 	if (refcount_dec_and_test(&buf->refcount)) {
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
@@ -198,12 +202,7 @@
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	/* DMABUF exporter will flush the cache for us */
-	if (buf->db_attach)
-		return;
-
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-			       buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dma_sg_finish(void *buf_priv)
@@ -211,11 +210,7 @@
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	/* DMABUF exporter will flush the cache for us */
-	if (buf->db_attach)
-		return;
-
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
@@ -239,8 +234,7 @@
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
-	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
-					       dma_dir == DMA_BIDIRECTIONAL);
+	vec = vb2_create_framevec(vaddr, size);
 	if (IS_ERR(vec))
 		goto userptr_fail_pfnvec;
 	buf->vec = vec;
@@ -259,9 +253,8 @@
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC))
 		goto userptr_fail_map;
 
 	return buf;
@@ -287,8 +280,7 @@
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-			   DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
@@ -310,8 +302,7 @@
 		if (buf->db_attach)
 			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
 		else
-			buf->vaddr = vm_map_ram(buf->pages,
-						buf->num_pages, -1, PAGE_KERNEL);
+			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
 	}
 
 	/* add offset in case userptr is not page-aligned */
@@ -328,28 +319,18 @@
 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
-	int i = 0;
+	int err;
 
 	if (!buf) {
 		printk(KERN_ERR "No memory to map\n");
 		return -EINVAL;
 	}
 
-	do {
-		int ret;
-
-		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
-		if (ret) {
-			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
-			return ret;
-		}
-
-		uaddr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
-
+	err = vm_map_pages(vma, buf->pages, buf->num_pages);
+	if (err) {
+		printk(KERN_ERR "Remapping memory, error: %d\n", err);
+		return err;
+	}
 
 	/*
 	 * Use common vm_area operations to track buffer refcount.
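Note: vm_map_pages() inserts the whole page array into the VMA in one call (it walks the pages with vm_insert_page() internally, treats vma->vm_pgoff as an index into the array and returns -ENXIO when the VMA does not fit), which is what lets the hunk above drop the hand-rolled loop and its size accounting. A hedged sketch of an mmap handler built on it, with made-up names (demo_buf, demo_mmap):

#include <linux/mm.h>

/*
 * Hedged sketch of an mmap handler using vm_map_pages(); struct demo_buf
 * and demo_mmap() are made-up names for illustration only.
 */
struct demo_buf {
	struct page **pages;
	unsigned int num_pages;
};

static int demo_mmap(struct demo_buf *buf, struct vm_area_struct *vma)
{
	/*
	 * vm_map_pages() maps buf->pages into the VMA starting at the page
	 * index given by vma->vm_pgoff and fails with -ENXIO if the VMA is
	 * larger than the remaining pages, so no manual loop or size
	 * bookkeeping is needed here.
	 */
	return vm_map_pages(vma, buf->pages, buf->num_pages);
}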
@@ -422,8 +403,7 @@
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -448,15 +428,12 @@
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 		attach->dma_dir = DMA_NONE;
 	}
 
 	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
@@ -481,11 +458,26 @@
 	vb2_dma_sg_put(dbuf->priv);
 }
 
-static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+static int
+vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+				       enum dma_data_direction direction)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
 
-	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
+}
+
+static int
+vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+				     enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
 }
 
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
@@ -506,7 +498,8 @@
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
-	.map = vb2_dma_sg_dmabuf_ops_kmap,
+	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
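Note: the last two hunks swap the page-based kmap callback for begin_cpu_access/end_cpu_access hooks that bracket CPU accesses with cache synchronisation. A hedged sketch of the importer-side sequence that exercises these hooks through the core dma-buf API; cpu_read_example() and the DMA_FROM_DEVICE direction are placeholders, not code from this tree:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

/*
 * Hedged importer-side sketch: dma_buf_begin_cpu_access() ends up in the
 * exporter's .begin_cpu_access hook (the new
 * vb2_dma_sg_dmabuf_ops_begin_cpu_access above), and
 * dma_buf_end_cpu_access() in .end_cpu_access. cpu_read_example() is a
 * made-up name.
 */
static int cpu_read_example(struct dma_buf *dbuf)
{
	int ret;

	ret = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU reads of the buffer contents go here ... */

	return dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
}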