2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/tegra/gem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * NVIDIA Tegra DRM GEM helper functions
  *
@@ -7,14 +8,13 @@
  * Based on the GEM/CMA helpers
  *
  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/dma-buf.h>
 #include <linux/iommu.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
 #include <drm/tegra_drm.h>
 
 #include "drm.h"
@@ -24,20 +24,109 @@
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
-	drm_gem_object_put_unlocked(&obj->gem);
+	drm_gem_object_put(&obj->gem);
 }
 
-static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+/* XXX move this into lib/scatterlist.c? */
+static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
+				  unsigned int nents, gfp_t gfp_mask)
+{
+	struct scatterlist *dst;
+	unsigned int i;
+	int err;
+
+	err = sg_alloc_table(sgt, nents, gfp_mask);
+	if (err < 0)
+		return err;
+
+	dst = sgt->sgl;
+
+	for (i = 0; i < nents; i++) {
+		sg_set_page(dst, sg_page(sg), sg->length, 0);
+		dst = sg_next(dst);
+		sg = sg_next(sg);
+	}
+
+	return 0;
+}
+
+static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+				     dma_addr_t *phys)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+	struct sg_table *sgt;
+	int err;
 
-	*sgt = obj->sgt;
+	/*
+	 * If we've manually mapped the buffer object through the IOMMU, make
+	 * sure to return the IOVA address of our mapping.
+	 *
+	 * Similarly, for buffers that have been allocated by the DMA API the
+	 * physical address can be used for devices that are not attached to
+	 * an IOMMU. For these devices, callers must pass a valid pointer via
+	 * the @phys argument.
+	 *
+	 * Imported buffers were also already mapped at import time, so the
+	 * existing mapping can be reused.
+	 */
+	if (phys) {
+		*phys = obj->iova;
+		return NULL;
+	}
 
-	return obj->paddr;
+	/*
+	 * If we don't have a mapping for this buffer yet, return an SG table
+	 * so that host1x can do the mapping for us via the DMA API.
+	 */
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return ERR_PTR(-ENOMEM);
+
+	if (obj->pages) {
+		/*
+		 * If the buffer object was allocated from the explicit IOMMU
+		 * API code paths, construct an SG table from the pages.
+		 */
+		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
+						0, obj->gem.size, GFP_KERNEL);
+		if (err < 0)
+			goto free;
+	} else if (obj->sgt) {
+		/*
+		 * If the buffer object already has an SG table but no pages
+		 * were allocated for it, it means the buffer was imported and
+		 * the SG table needs to be copied to avoid overwriting any
+		 * other potential users of the original SG table.
+		 */
+		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
					     obj->sgt->orig_nents, GFP_KERNEL);
+		if (err < 0)
+			goto free;
+	} else {
+		/*
+		 * If the buffer object had no pages allocated and if it was
+		 * not imported, it had to be allocated with the DMA API, so
+		 * the DMA API helper can be used.
+		 */
+		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
+		if (err < 0)
+			goto free;
+	}
+
+	return sgt;
+
+free:
+	kfree(sgt);
+	return ERR_PTR(err);
 }
 
-static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
 {
+	if (sgt) {
+		sg_free_table(sgt);
+		kfree(sgt);
+	}
 }
 
 static void *tegra_bo_mmap(struct host1x_bo *bo)
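
The reworked pin contract is easiest to see from the caller's side. A minimal sketch, assuming a hypothetical caller that invokes the host1x_bo ops directly (real users go through host1x's wrapper helpers):

	struct sg_table *sgt;
	dma_addr_t phys;
	int err;

	/* Already-mapped buffer (IOMMU, DMA API or import): fetch the address. */
	sgt = bo->ops->pin(dev, bo, &phys);	/* returns NULL, fills phys */

	/* Alternatively, no mapping yet: get an SG table and map it ourselves. */
	sgt = bo->ops->pin(dev, bo, NULL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (err)
		goto unpin;
	/* ... program sg_dma_address()/sg_dma_len() into hardware ... */
	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
unpin:
	bo->ops->unpin(dev, sgt);		/* frees the table again */
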
@@ -65,32 +154,6 @@
 	vunmap(addr);
 }
 
-static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
-{
-	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
-	if (obj->vaddr)
-		return obj->vaddr + page * PAGE_SIZE;
-	else if (obj->gem.import_attach)
-		return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
-	else
-		return vmap(obj->pages + page, 1, VM_MAP,
-			    pgprot_writecombine(PAGE_KERNEL));
-}
-
-static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
-			    void *addr)
-{
-	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
-
-	if (obj->vaddr)
-		return;
-	else if (obj->gem.import_attach)
-		dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
-	else
-		vunmap(addr);
-}
-
 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
 {
 	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
@@ -107,8 +170,6 @@
 	.unpin = tegra_bo_unpin,
 	.mmap = tegra_bo_mmap,
 	.munmap = tegra_bo_munmap,
-	.kmap = tegra_bo_kmap,
-	.kunmap = tegra_bo_kunmap,
 };
 
 static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
@@ -133,10 +194,9 @@
 		goto unlock;
 	}
 
-	bo->paddr = bo->mm->start;
+	bo->iova = bo->mm->start;
 
-	bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
-				bo->sgt->nents, prot);
+	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
 	if (!bo->size) {
 		dev_err(tegra->drm->dev, "failed to map buffer\n");
 		err = -ENOMEM;
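
iommu_map_sgtable() is a thin convenience wrapper around iommu_map_sg(). As defined in <linux/iommu.h> around the time of this change, it is roughly:

	static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
					       unsigned long iova,
					       struct sg_table *sgt, int prot)
	{
		return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
				    prot);
	}

Using orig_nents here matters: for a not-yet-DMA-mapped table the CPU-side entry count is the correct one to walk, whereas nents describes the DMA-mapped view.
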
@@ -161,7 +221,7 @@
 		return 0;
 
 	mutex_lock(&tegra->mm_lock);
-	iommu_unmap(tegra->domain, bo->paddr, bo->size);
+	iommu_unmap(tegra->domain, bo->iova, bo->size);
 	drm_mm_remove_node(bo->mm);
 	mutex_unlock(&tegra->mm_lock);
 
@@ -203,13 +263,12 @@
 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 {
 	if (bo->pages) {
-		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-			     DMA_FROM_DEVICE);
+		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
 		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
 		sg_free_table(bo->sgt);
 		kfree(bo->sgt);
 	} else if (bo->vaddr) {
-		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
 	}
 }
 
@@ -223,18 +282,15 @@
 
 	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
-	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
 	if (IS_ERR(bo->sgt)) {
 		err = PTR_ERR(bo->sgt);
 		goto put_pages;
 	}
 
-	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-			 DMA_FROM_DEVICE);
-	if (err == 0) {
-		err = -EFAULT;
+	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
+	if (err)
 		goto free_sgt;
-	}
 
 	return 0;
 
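
The dma_map_sgtable() conversion also cleans up the error convention. dma_map_sg() returns the number of mapped entries and 0 on failure, forcing callers to invent an errno; dma_map_sgtable() returns 0 on success or a negative errno, and records the mapped entry count in sgt->nents itself. Roughly:

	/* before: 0 signals failure, errno made up by the caller */
	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (nents == 0)
		return -EFAULT;

	/* after: real errno propagated, sgt->nents updated internally */
	err = dma_map_sgtable(dev, sgt, dir, 0);
	if (err)
		return err;
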
@@ -264,7 +320,7 @@
 	} else {
 		size_t size = bo->gem.size;
 
-		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
 		if (!bo->vaddr) {
 			dev_err(drm->dev,
@@ -324,7 +380,7 @@
 		return ERR_PTR(err);
 	}
 
-	drm_gem_object_put_unlocked(&bo->gem);
+	drm_gem_object_put(&bo->gem);
 
 	return bo;
 }
@@ -359,13 +415,6 @@
 		err = tegra_bo_iommu_map(tegra, bo);
 		if (err < 0)
 			goto detach;
-	} else {
-		if (bo->sgt->nents > 1) {
-			err = -EINVAL;
-			goto detach;
-		}
-
-		bo->paddr = sg_dma_address(bo->sgt->sgl);
 	}
 
 	bo->gem.import_attach = attach;
@@ -461,7 +510,7 @@
 	vma->vm_flags &= ~VM_PFNMAP;
 	vma->vm_pgoff = 0;
 
-	err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+	err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
			  gem->size);
 	if (err < 0) {
 		drm_gem_vm_close(vma);
@@ -508,24 +557,17 @@
 		return NULL;
 
 	if (bo->pages) {
-		struct scatterlist *sg;
-		unsigned int i;
-
-		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
-			goto free;
-
-		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
-			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
-		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
 			goto free;
 	} else {
-		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
 			goto free;
-
-		sg_dma_address(sgt->sgl) = bo->paddr;
-		sg_dma_len(sgt->sgl) = gem->size;
 	}
+
+	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
+		goto free;
 
 	return sgt;
 
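
The old map_dma_buf path for DMA-API allocations hand-rolled a single-entry table, bypassing the DMA API entirely, so the importing device got no cache maintenance and no IOMMU mapping of its own. The removed pattern was:

	sg_dma_address(sgt->sgl) = bo->paddr;
	sg_dma_len(sgt->sgl) = gem->size;

which encodes the exporter's own mapping into the importer's table. Building an unmapped table first (via sg_alloc_table_from_pages() or dma_get_sgtable()) and then calling dma_map_sgtable() once gives the importer a properly maintained mapping instead.
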
@@ -543,7 +585,7 @@
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
 	if (bo->pages)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 
 	sg_free_table(sgt);
 	kfree(sgt);
@@ -562,8 +604,7 @@
 	struct drm_device *drm = gem->dev;
 
 	if (bo->pages)
-		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-				    DMA_FROM_DEVICE);
+		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);
 
 	return 0;
 }
@@ -576,20 +617,9 @@
 	struct drm_device *drm = gem->dev;
 
 	if (bo->pages)
-		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
-				       DMA_TO_DEVICE);
+		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);
 
 	return 0;
-}
-
-static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
-{
-	return NULL;
-}
-
-static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
-				   void *addr)
-{
 }
 
 static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
@@ -622,27 +652,24 @@
 	.release = tegra_gem_prime_release,
 	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
 	.end_cpu_access = tegra_gem_prime_end_cpu_access,
-	.map = tegra_gem_prime_kmap,
-	.unmap = tegra_gem_prime_kunmap,
 	.mmap = tegra_gem_prime_mmap,
 	.vmap = tegra_gem_prime_vmap,
 	.vunmap = tegra_gem_prime_vunmap,
 };
 
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
-				       struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
 	exp_info.exp_name = KBUILD_MODNAME;
-	exp_info.owner = drm->driver->fops->owner;
+	exp_info.owner = gem->dev->driver->fops->owner;
 	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
 	exp_info.size = gem->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem;
 
-	return drm_gem_dmabuf_export(drm, &exp_info);
+	return drm_gem_dmabuf_export(gem->dev, &exp_info);
 }
 
 struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
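
With the drm_device parameter gone, the export helper derives the device from the GEM object itself and can be wired straight into the driver's gem_prime_export hook, which dropped the same parameter in the DRM core. A hookup sketch, with field names as in the mainline tegra driver of this era, trimmed to the relevant lines:

	static struct drm_driver tegra_drm_driver = {
		/* ... */
		.gem_prime_export = tegra_gem_prime_export,
		.gem_prime_import = tegra_gem_prime_import,
		/* ... */
	};
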