forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/drm/msm/msm_gem_vma.c
--- a/kernel/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/kernel/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2016 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "msm_drv.h"
@@ -28,6 +17,7 @@
 	drm_mm_takedown(&aspace->mm);
 	if (aspace->mmu)
 		aspace->mmu->funcs->destroy(aspace->mmu);
+	put_pid(aspace->pid);
 	kfree(aspace);
 }
 
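The put_pid() added to the destroy path above has to balance a get_pid()
taken wherever the address space is tied to its owning process; that call
site is outside this file and not shown by the patch. A minimal sketch of
the assumed pairing:

	/* assumed setter elsewhere in the driver, not part of this patch */
	aspace->pid = get_pid(task_pid(current));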
@@ -38,20 +28,84 @@
 	kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt)
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace)
 {
-	if (!aspace || !vma->iova)
+	if (!IS_ERR_OR_NULL(aspace))
+		kref_get(&aspace->kref);
+
+	return aspace;
+}
+
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	unsigned size = vma->node.size << PAGE_SHIFT;
+
+	/* Print a message if we try to purge a vma in use */
+	if (WARN_ON(vma->inuse > 0))
 		return;
 
-	if (aspace->mmu) {
-		unsigned size = vma->node.size << PAGE_SHIFT;
-		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
+	/* Don't do anything if the memory isn't mapped */
+	if (!vma->mapped)
+		return;
+
+	if (aspace->mmu)
+		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+	vma->mapped = false;
+}
+
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (!WARN_ON(!vma->iova))
+		vma->inuse--;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int prot,
+		struct sg_table *sgt, int npages)
+{
+	unsigned size = npages << PAGE_SHIFT;
+	int ret = 0;
+
+	if (WARN_ON(!vma->iova))
+		return -EINVAL;
+
+	/* Increase the usage counter */
+	vma->inuse++;
+
+	if (vma->mapped)
+		return 0;
+
+	vma->mapped = true;
+
+	if (aspace && aspace->mmu)
+		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+				size, prot);
+
+	if (ret) {
+		vma->mapped = false;
+		vma->inuse--;
 	}
 
+	return ret;
+}
+
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (WARN_ON(vma->inuse > 0 || vma->mapped))
+		return;
+
 	spin_lock(&aspace->lock);
-	drm_mm_remove_node(&vma->node);
+	if (vma->iova)
+		drm_mm_remove_node(&vma->node);
 	spin_unlock(&aspace->lock);
 
 	vma->iova = 0;
@@ -59,45 +113,40 @@
 	msm_gem_address_space_put(aspace);
 }
 
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages,
+		u64 range_start, u64 range_end)
 {
 	int ret;
 
-	spin_lock(&aspace->lock);
-	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
-		spin_unlock(&aspace->lock);
-		return 0;
-	}
+	if (WARN_ON(vma->iova))
+		return -EBUSY;
 
-	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+	spin_lock(&aspace->lock);
+	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
+		0, range_start, range_end, 0);
 	spin_unlock(&aspace->lock);
 
 	if (ret)
 		return ret;
 
 	vma->iova = vma->node.start << PAGE_SHIFT;
+	vma->mapped = false;
 
-	if (aspace->mmu) {
-		unsigned size = npages << PAGE_SHIFT;
-		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-				size, IOMMU_READ | IOMMU_WRITE);
-	}
-
-	/* Get a reference to the aspace to keep it around */
 	kref_get(&aspace->kref);
 
-	return ret;
+	return 0;
 }
 
 struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
-		const char *name)
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+		u64 va_start, u64 size)
 {
 	struct msm_gem_address_space *aspace;
-	u64 size = domain->geometry.aperture_end -
-		domain->geometry.aperture_start;
+
+	if (IS_ERR(mmu))
+		return ERR_CAST(mmu);
 
 	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
 	if (!aspace)
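Taken together, the hunks above split the old map/unmap pair into an
explicit lifecycle: msm_gem_init_vma() reserves the iova, msm_gem_map_vma()
installs the pages and takes a use count, msm_gem_unmap_vma() drops the
count, msm_gem_purge_vma() tears down the IOMMU mapping, and
msm_gem_close_vma() releases the iova. A minimal sketch of a caller,
assuming the driver's internal headers are in scope and that it already
holds an sg_table and page count (function names here are illustrative,
not from the patch):

	/* sketch only: assumes "msm_gem.h" and <linux/iommu.h> are included */
	static int example_pin(struct msm_gem_address_space *aspace,
			struct msm_gem_vma *vma, struct sg_table *sgt,
			int npages)
	{
		int ret;

		/* reserve an iova; 0..U64_MAX places no bound on the range */
		ret = msm_gem_init_vma(aspace, vma, npages, 0, U64_MAX);
		if (ret)
			return ret;

		/* map the pages; takes a use count and sets vma->mapped */
		ret = msm_gem_map_vma(aspace, vma,
				IOMMU_READ | IOMMU_WRITE, sgt, npages);
		if (ret)
			/* map_vma already undid inuse/mapped on failure */
			msm_gem_close_vma(aspace, vma);

		return ret;
	}

	static void example_unpin(struct msm_gem_address_space *aspace,
			struct msm_gem_vma *vma)
	{
		msm_gem_unmap_vma(aspace, vma);	/* drop the use count... */
		msm_gem_purge_vma(aspace, vma);	/* ...really unmap... */
		msm_gem_close_vma(aspace, vma);	/* ...and release the iova */
	}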
@@ -105,10 +154,9 @@
 
 	spin_lock_init(&aspace->lock);
 	aspace->name = name;
-	aspace->mmu = msm_iommu_new(dev, domain);
+	aspace->mmu = mmu;
 
-	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
-		size >> PAGE_SHIFT);
+	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
 
 	kref_init(&aspace->kref);
 
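With the constructor change in the last two hunks, callers now build the
MMU object themselves and pass it in, and because
msm_gem_address_space_create() forwards an IS_ERR() value via ERR_CAST(),
the result of msm_iommu_new() (the call the old body made internally) can
be passed straight through without a separate error check. A hedged
example; the "gpu" name and the window starting at SZ_16M follow common
usage, but the exact range is an assumption:

	struct msm_gem_address_space *aspace;

	aspace = msm_gem_address_space_create(msm_iommu_new(dev, domain),
			"gpu", SZ_16M, SZ_4G - SZ_16M);
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);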