@@ -53,10 +53,10 @@
 	unsigned int i;
 	unsigned long size = 0;
 
-	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+	for_each_sgtable_dma_sg(sgt, s, i) {
 		if (sg_dma_address(s) != expected)
 			break;
-		expected = sg_dma_address(s) + sg_dma_len(s);
+		expected += sg_dma_len(s);
 		size += sg_dma_len(s);
 	}
 	return size;
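
The `for_each_sgtable_dma_sg()` helper removes the recurring `nents` vs. `orig_nents` confusion: it always walks the DMA-mapped entries of the table. In `<linux/scatterlist.h>` it is a thin macro over `for_each_sg()`, roughly:

```c
/*
 * Iterate over the DMA-mapped entries of an sg_table. After mapping,
 * sgt->nents is the number of DMA segments (which an IOMMU may have
 * merged), while sgt->orig_nents is still the number of CPU pages.
 */
#define for_each_sgtable_dma_sg(sgt, sg, i) \
	for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
```

The `expected += sg_dma_len(s)` simplification is behavior-preserving: the loop only reaches that statement when `sg_dma_address(s) == expected`.
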
@@ -95,12 +95,10 @@
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	/* DMABUF exporter will flush the cache for us */
-	if (!sgt || buf->db_attach)
+	if (!sgt)
 		return;
 
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-			       buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dc_finish(void *buf_priv)
@@ -108,11 +106,10 @@
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	/* DMABUF exporter will flush the cache for us */
-	if (!sgt || buf->db_attach)
+	if (!sgt)
 		return;
 
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 /*********************************************/
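
`dma_sync_sgtable_for_device()` and `dma_sync_sgtable_for_cpu()` are equally thin wrappers in `<linux/dma-mapping.h>`; note that they sync `orig_nents` entries, exactly as the open-coded calls did:

```c
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
```

The dropped `buf->db_attach` early-return is a separate behavioral change: judging from this diff alone, the decision to skip cache maintenance for DMABUF-imported buffers presumably moved out of the allocator, so `prepare()`/`finish()` here only bail out when there is no scatterlist at all.
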
@@ -149,8 +146,7 @@
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
-	if (attrs)
-		buf->attrs = attrs;
+	buf->attrs = attrs;
 	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
 				      GFP_KERNEL | gfp_flags, buf->attrs);
 	if (!buf->cookie) {
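
The dropped `if (attrs)` guard was redundant: `buf` comes from a zeroing allocation just above, so unconditionally assigning `attrs` (even when it is 0) is equivalent.
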
@@ -185,12 +181,6 @@
 		printk(KERN_ERR "No buffer to map\n");
 		return -EINVAL;
 	}
-
-	/*
-	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
-	 * map whole buffer
-	 */
-	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
 			     buf->dma_addr, buf->size, buf->attrs);
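
Removing the `vma->vm_pgoff = 0` reset means `dma_mmap_attrs()` now sees the caller-supplied page offset. This hunk alone does not show why that became safe; presumably the vb2 core validates and normalizes `vm_pgoff` before invoking the allocator's mmap op.
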
@@ -273,8 +263,14 @@
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		/*
+		 * Cache sync can be skipped here, as the vb2_dc memory is
+		 * allocated from device coherent memory, which means the
+		 * memory locations do not require any explicit cache
+		 * maintenance prior or after being used by the device.
+		 */
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
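
`dma_unmap_sgtable()` follows the same pattern, unmapping the `orig_nents` entries originally handed to the mapping call:

```c
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
```

Passing `DMA_ATTR_SKIP_CPU_SYNC` here is what the new comment justifies: the exporter's memory is coherent, so the unmap does not need to write back or invalidate CPU caches.
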
@@ -299,15 +295,17 @@
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		attach->dma_dir = DMA_NONE;
 	}
 
-	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	/*
+	 * Mapping to the client with new direction: no cache sync is
+	 * required, see the comment in vb2_dc_dmabuf_ops_detach().
+	 */
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
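
`dma_map_sgtable()` folds the `nents` bookkeeping and the error check into one call: it returns 0 on success or a negative errno, and stores the number of mapped DMA segments in `sgt->nents` itself. At the time of this conversion it was roughly the following inline:

```c
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}
```

This is why the open-coded `sgt->nents = dma_map_sg(...)` assignment and the `!sgt->nents` test disappear from the callers.
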
@@ -332,11 +330,18 @@
 	vb2_dc_put(dbuf->priv);
 }
 
-static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+static int
+vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+				   enum dma_data_direction direction)
 {
-	struct vb2_dc_buf *buf = dbuf->priv;
+	return 0;
+}
 
-	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
+static int
+vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+				 enum dma_data_direction direction)
+{
+	return 0;
 }
 
 static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
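
The empty `begin_cpu_access`/`end_cpu_access` implementations are correct for this exporter because vb2-dma-contig buffers live in coherent memory: there is nothing to flush or invalidate around CPU access. The kmap-style `.map` op was on its way out of the dma-buf API; vmap plus explicit CPU-access bracketing replaces it. Importers still use the standard bracketing; a minimal, hypothetical importer-side sequence:

```c
/* Hypothetical importer: dbuf was exported by vb2-dma-contig. */
int err = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
if (err)
	return err;
/* ... CPU reads the frame contents ... */
dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
```
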
@@ -357,7 +362,8 @@
 	.detach = vb2_dc_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dc_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
-	.map = vb2_dc_dmabuf_ops_kmap,
+	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
 	.vmap = vb2_dc_dmabuf_ops_vmap,
 	.mmap = vb2_dc_dmabuf_ops_mmap,
 	.release = vb2_dc_dmabuf_ops_release,
@@ -428,8 +434,8 @@
 	 * No need to sync to CPU, it's already synced to the CPU
 	 * since the finish() memop will have been called before this.
 	 */
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+			  DMA_ATTR_SKIP_CPU_SYNC);
 	pages = frame_vector_pages(buf->vec);
 	/* sgt should exist only if vector contains pages... */
 	BUG_ON(IS_ERR(pages));
@@ -439,41 +445,13 @@
 			set_page_dirty_lock(pages[i]);
 		sg_free_table(sgt);
 		kfree(sgt);
+	} else {
+		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
+				   buf->dma_dir, 0);
 	}
 	vb2_destroy_framevec(buf->vec);
 	kfree(buf);
 }
-
-/*
- * For some kind of reserved memory there might be no struct page available,
- * so all that can be done to support such 'pages' is to try to convert
- * pfn to dma address or at the last resort just assume that
- * dma address == physical address (like it has been assumed in earlier version
- * of videobuf2-dma-contig
- */
-
-#ifdef __arch_pfn_to_dma
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
-}
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	return (dma_addr_t)__pfn_to_bus(pfn);
-}
-#elif defined(__pfn_to_phys)
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	return (dma_addr_t)__pfn_to_phys(pfn);
-}
-#else
-static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
-{
-	/* really, we cannot do anything better at this point */
-	return (dma_addr_t)(pfn) << PAGE_SHIFT;
-}
-#endif
 
 static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 		unsigned long size, enum dma_data_direction dma_dir)
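
The new `else` branch is the teardown for the `dma_map_resource()` mapping created in `vb2_dc_get_userptr()` below: for pfn-only ranges there is no scatterlist to free, but the DMA mapping itself must now be undone. The deleted `vb2_dc_pfn_to_dma()` fallbacks never created a real mapping, so the old code had nothing to release, and the addresses they produced were not guaranteed to be valid device addresses on IOMMU systems.
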
@@ -509,8 +487,7 @@
 	buf->dma_dir = dma_dir;
 
 	offset = lower_32_bits(offset_in_page(vaddr));
-	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
-					       dma_dir == DMA_BIDIRECTIONAL);
+	vec = vb2_create_framevec(vaddr, size);
 	if (IS_ERR(vec)) {
 		ret = PTR_ERR(vec);
 		goto fail_buf;
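
`vb2_create_framevec()` loses its write-direction argument here; from the call site alone one can only tell that the callee now decides how to pin the pages itself, since this file no longer derives a write flag from `dma_dir`.
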
@@ -528,7 +505,12 @@
 		for (i = 1; i < n_pages; i++)
 			if (nums[i-1] + 1 != nums[i])
 				goto fail_pfnvec;
-		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
+		buf->dma_addr = dma_map_resource(buf->dev,
+				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
+		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
+			ret = -ENOMEM;
+			goto fail_pfnvec;
+		}
 		goto out;
 	}
 
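
`dma_map_resource()` maps a raw physical range (memory without `struct page`, e.g. carved-out or MMIO regions) through the DMA API, so it goes through the IOMMU when there is one, which the old pfn arithmetic never could. The prototypes in `<linux/dma-mapping.h>`:

```c
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
```

Failure is reported through `dma_mapping_error()`, as the new error path shows.
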
@@ -550,9 +532,8 @@
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (sgt->nents <= 0) {
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
 		goto fail_sgt_init;
@@ -574,8 +555,7 @@
 	return buf;
 
 fail_map_sg:
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
 	sg_free_table(sgt);
@@ -622,8 +602,8 @@
 	/* checking if dmabuf is big enough to store contiguous chunk */
 	contig_size = vb2_dc_get_contiguous_size(sgt);
 	if (contig_size < buf->size) {
-		pr_err("contiguous chunk is too small %lu/%lu b\n",
-		       contig_size, buf->size);
+		pr_err("contiguous chunk is too small %lu/%lu\n",
+		       contig_size, buf->size);
 		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
 		return -EFAULT;
 	}
@@ -755,9 +735,8 @@
 int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
 {
 	if (!dev->dma_parms) {
-		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
-		if (!dev->dma_parms)
-			return -ENOMEM;
+		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
+		return -ENODEV;
 	}
 	if (dma_get_max_seg_size(dev) < size)
 		return dma_set_max_seg_size(dev, size);
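
After this change the helper no longer allocates `dev->dma_parms`: it expects the bus code (platform, PCI, ...) to have provided it and only raises the maximum segment size. Driver usage is unchanged; a typical probe-time sketch:

```c
/* Sketch: let the DMA API merge the whole buffer into one segment. */
ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
	return ret;
```
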
@@ -765,21 +744,6 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
-
-/*
- * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
- * @dev: device for configuring DMA parameters
- *
- * This function releases resources allocated to configure DMA parameters
- * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
- * device drivers on driver remove.
- */
-void vb2_dma_contig_clear_max_seg_size(struct device *dev)
-{
-	kfree(dev->dma_parms);
-	dev->dma_parms = NULL;
-}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
 
 MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
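
With no allocation left in `vb2_dma_contig_set_max_seg_size()`, the matching `vb2_dma_contig_clear_max_seg_size()` teardown becomes pointless and is deleted outright; callers simply drop the call from their remove paths.
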
---|