@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * NVIDIA Tegra DRM GEM helper functions
  *
@@ -7,14 +8,13 @@
  * Based on the GEM/CMA helpers
  *
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/dma-buf.h>
 #include <linux/iommu.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
 #include <drm/tegra_drm.h>
 
 #include "drm.h"
@@ -24,20 +24,109 @@
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
-	drm_gem_object_put_unlocked(&obj->gem);
+	drm_gem_object_put(&obj->gem);
 }
 
-static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+/* XXX move this into lib/scatterlist.c? */
+static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
+				  unsigned int nents, gfp_t gfp_mask)
+{
+	struct scatterlist *dst;
+	unsigned int i;
+	int err;
+
+	err = sg_alloc_table(sgt, nents, gfp_mask);
+	if (err < 0)
+		return err;
+
+	dst = sgt->sgl;
+
+	for (i = 0; i < nents; i++) {
+		sg_set_page(dst, sg_page(sg), sg->length, 0);
+		dst = sg_next(dst);
+		sg = sg_next(sg);
+	}
+
+	return 0;
+}
+
+static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+				     dma_addr_t *phys)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+	struct sg_table *sgt;
+	int err;
 
-	*sgt = obj->sgt;
+	/*
+	 * If we've manually mapped the buffer object through the IOMMU, make
+	 * sure to return the IOVA address of our mapping.
+	 *
+	 * Similarly, for buffers that have been allocated by the DMA API the
+	 * physical address can be used for devices that are not attached to
+	 * an IOMMU. For these devices, callers must pass a valid pointer via
+	 * the @phys argument.
+	 *
+	 * Imported buffers were also already mapped at import time, so the
+	 * existing mapping can be reused.
+	 */
+	if (phys) {
+		*phys = obj->iova;
+		return NULL;
+	}
 
-	return obj->paddr;
+	/*
+	 * If we don't have a mapping for this buffer yet, return an SG table
+	 * so that host1x can do the mapping for us via the DMA API.
+	 */
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	if (obj->pages) {
+		/*
+		 * If the buffer object was allocated from the explicit IOMMU
+		 * API code paths, construct an SG table from the pages.
+		 */
+		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
+						0, obj->gem.size, GFP_KERNEL);
+		if (err < 0)
+			goto free;
+	} else if (obj->sgt) {
+		/*
+		 * If the buffer object already has an SG table but no pages
+		 * were allocated for it, it means the buffer was imported and
+		 * the SG table needs to be copied to avoid overwriting any
+		 * other potential users of the original SG table.
+		 */
+		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
+					     obj->sgt->orig_nents, GFP_KERNEL);
+		if (err < 0)
+			goto free;
+	} else {
+		/*
+		 * If the buffer object had no pages allocated and if it was
+		 * not imported, it had to be allocated with the DMA API, so
+		 * the DMA API helper can be used.
+		 */
+		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
+				      obj->gem.size);
+		if (err < 0)
+			goto free;
+	}
+
+	return sgt;
+
+free:
+	kfree(sgt);
+	return ERR_PTR(err);
 }
 
-static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
 {
+	if (sgt) {
+		sg_free_table(sgt);
+		kfree(sgt);
+	}
 }
 
 static void *tegra_bo_mmap(struct host1x_bo *bo)
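
The reworked pin operation above supports two calling conventions, selected by the @phys argument. The sketch below is illustrative and not part of the patch: it assumes the host1x_bo_pin() wrapper forwards directly to the .pin implementation shown above, and the example_* function names are hypothetical.

/* Hypothetical callers, for illustration only. */

/* Convention 1: the buffer is already mapped (IOMMU, DMA API or
 * import time); pass a valid @phys pointer and the existing IOVA
 * comes back, with NULL returned in place of an SG table. */
static int example_pin_premapped(struct device *dev, struct host1x_bo *bo,
				 dma_addr_t *iova)
{
	struct sg_table *sgt = host1x_bo_pin(dev, bo, iova);

	return IS_ERR(sgt) ? PTR_ERR(sgt) : 0;
}

/* Convention 2: pass NULL for @phys to receive an SG table and let
 * the caller (host1x, in practice) map it through the DMA API.
 * Error unwinding (sg_free_table()/kfree()) is omitted for brevity. */
static int example_pin_and_map(struct device *dev, struct host1x_bo *bo,
			       dma_addr_t *iova)
{
	struct sg_table *sgt;
	int err;

	sgt = host1x_bo_pin(dev, bo, NULL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (err < 0)
		return err;

	/* The device-visible address is now in the mapped table. */
	*iova = sg_dma_address(sgt->sgl);

	return 0;
}
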
@@ -65,32 +154,6 @@
 	vunmap(addr);
 }
 
-static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
-{
-	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
-	if (obj->vaddr)
-		return obj->vaddr + page * PAGE_SIZE;
-	else if (obj->gem.import_attach)
-		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
-	else
-		return vmap(obj->pages + page, 1, VM_MAP,
-			    pgprot_writecombine(PAGE_KERNEL));
-}
-
-static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
-			    void *addr)
-{
-	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
-	if (obj->vaddr)
-		return;
-	else if (obj->gem.import_attach)
-		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
-	else
-		vunmap(addr);
-}
-
 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
@@ -107,8 +170,6 @@
 	.unpin = tegra_bo_unpin,
 	.mmap = tegra_bo_mmap,
 	.munmap = tegra_bo_munmap,
-	.kmap = tegra_bo_kmap,
-	.kunmap = tegra_bo_kunmap,
 };
 
 static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
@@ -133,10 +194,9 @@
 		goto unlock;
 	}
 
-	bo->paddr = bo->mm->start;
+	bo->iova = bo->mm->start;
 
-	bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
-				bo->sgt->nents, prot);
+	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
 	if (!bo->size) {
 		dev_err(tegra->drm->dev, "failed to map buffer\n");
 		err = -ENOMEM;
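
iommu_map_sgtable() replaces the open-coded iommu_map_sg() call and still returns the number of bytes mapped, which is why the !bo->size check continues to work. For reference, the helper is approximately the following inline wrapper; the key detail is that it walks orig_nents (the allocation-time entry count) rather than the DMA-mapped nents:

static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
				       unsigned long iova,
				       struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}
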
@@ -161,7 +221,7 @@
 		return 0;
 
 	mutex_lock(&tegra->mm_lock);
-	iommu_unmap(tegra->domain, bo->paddr, bo->size);
+	iommu_unmap(tegra->domain, bo->iova, bo->size);
 	drm_mm_remove_node(bo->mm);
 	mutex_unlock(&tegra->mm_lock);
 
@@ -203,13 +263,12 @@
 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 {
 	if (bo->pages) {
-		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-			     DMA_FROM_DEVICE);
+		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
 		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
 		sg_free_table(bo->sgt);
 		kfree(bo->sgt);
 	} else if (bo->vaddr) {
-		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
 	}
 }
 
@@ -223,18 +282,15 @@
 
 	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
-	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
 	if (IS_ERR(bo->sgt)) {
 		err = PTR_ERR(bo->sgt);
 		goto put_pages;
 	}
 
-	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-			 DMA_FROM_DEVICE);
-	if (err == 0) {
-		err = -EFAULT;
+	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
+	if (err)
 		goto free_sgt;
-	}
 
 	return 0;
 
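
Besides shrinking the call site, the hunk above fixes up the error convention: dma_map_sg() returns the number of entries mapped, with 0 meaning failure, so the old code had to fabricate -EFAULT. dma_map_sgtable() returns 0 on success or a negative errno and records the mapped entry count in sgt->nents itself. A fragment contrasting the two conventions (not a complete function):

/* Old convention: entry count on success, 0 on failure. */
err = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_FROM_DEVICE);
if (err == 0)
	err = -EFAULT;	/* the caller must invent an error code */

/* New convention: 0 on success, negative errno on failure;
 * sgt->nents is updated internally. */
err = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
if (err < 0)
	return err;
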
@@ -264,7 +320,7 @@
 	} else {
 		size_t size = bo->gem.size;
 
-		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
 					 GFP_KERNEL | __GFP_NOWARN);
 		if (!bo->vaddr) {
 			dev_err(drm->dev,
@@ -324,7 +380,7 @@
 		return ERR_PTR(err);
 	}
 
-	drm_gem_object_put_unlocked(&bo->gem);
+	drm_gem_object_put(&bo->gem);
 
 	return bo;
 }
@@ -359,13 +415,6 @@
 		err = tegra_bo_iommu_map(tegra, bo);
 		if (err < 0)
 			goto detach;
-	} else {
-		if (bo->sgt->nents > 1) {
-			err = -EINVAL;
-			goto detach;
-		}
-
-		bo->paddr = sg_dma_address(bo->sgt->sgl);
 	}
 
 	bo->gem.import_attach = attach;
@@ -461,7 +510,7 @@
 		vma->vm_flags &= ~VM_PFNMAP;
 		vma->vm_pgoff = 0;
 
-		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
 				  gem->size);
 		if (err < 0) {
 			drm_gem_vm_close(vma);
@@ -508,24 +557,17 @@
 		return NULL;
 
 	if (bo->pages) {
-		struct scatterlist *sg;
-		unsigned int i;
-
-		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
-			goto free;
-
-		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
-			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
-		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
+					      0, gem->size, GFP_KERNEL) < 0)
 			goto free;
 	} else {
-		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
+				    gem->size) < 0)
 			goto free;
-
-		sg_dma_address(sgt->sgl) = bo->paddr;
-		sg_dma_len(sgt->sgl) = gem->size;
 	}
+
+	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
+		goto free;
 
 	return sgt;
 
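
One behavioral difference hides in the hunk above: the removed loop emitted exactly one PAGE_SIZE entry per page, whereas sg_alloc_table_from_pages() may coalesce physically contiguous pages into a single, larger scatterlist entry. Consumers must therefore not assume one entry per page; the sgtable iterators handle this naturally. A small sketch:

/* Walk the allocation-time entries; lengths may exceed PAGE_SIZE. */
struct scatterlist *sg;
unsigned int i;

for_each_sgtable_sg(sgt, sg, i)
	pr_debug("entry %u: %u bytes\n", i, sg->length);
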
@@ -543,7 +585,7 @@
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
 	if (bo->pages)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 
 	sg_free_table(sgt);
 	kfree(sgt);
@@ -562,8 +604,7 @@
 	struct drm_device *drm = gem->dev;
 
 	if (bo->pages)
-		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-				    DMA_FROM_DEVICE);
+		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
 
 	return 0;
 }
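
dma_sync_sgtable_for_cpu(), used here, and dma_sync_sgtable_for_device(), used in the next hunk, are thin wrappers over the sg-based sync calls. Approximately, and again passing orig_nents as the DMA API expects:

static inline void dma_sync_sgtable_for_cpu(struct device *dev,
					    struct sg_table *sgt,
					    enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}
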
@@ -576,20 +617,9 @@
 	struct drm_device *drm = gem->dev;
 
 	if (bo->pages)
-		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-				       DMA_TO_DEVICE);
+		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
 
 	return 0;
-}
-
-static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
-{
-	return NULL;
-}
-
-static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
-				   void *addr)
-{
 }
 
 static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
@@ -622,27 +652,24 @@
 	.release = tegra_gem_prime_release,
 	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
 	.end_cpu_access = tegra_gem_prime_end_cpu_access,
-	.map = tegra_gem_prime_kmap,
-	.unmap = tegra_gem_prime_kunmap,
 	.mmap = tegra_gem_prime_mmap,
 	.vmap = tegra_gem_prime_vmap,
 	.vunmap = tegra_gem_prime_vunmap,
 };
 
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
-				       struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
 				       int flags)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
 	exp_info.exp_name = KBUILD_MODNAME;
-	exp_info.owner = drm->driver->fops->owner;
+	exp_info.owner = gem->dev->driver->fops->owner;
 	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
 	exp_info.size = gem->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem;
 
-	return drm_gem_dmabuf_export(drm, &exp_info);
+	return drm_gem_dmabuf_export(gem->dev, &exp_info);
 }
 
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
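
The export callback drops its drm_device argument because the GEM object already carries it (gem->dev). A driver wires the new-style callback into struct drm_driver exactly as before; a minimal sketch, with the hypothetical name example_driver and all unrelated fields elided:

static struct drm_driver example_driver = {
	/* ... feature flags, fops and other callbacks elided ... */
	.gem_prime_export = tegra_gem_prime_export,
};
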
---|