forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
--- a/kernel/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/kernel/drivers/gpu/drm/v3d/v3d_bo.c
@@ -25,162 +25,6 @@
 #include "v3d_drv.h"
 #include "uapi/drm/v3d_drm.h"
 
-/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
- * it for DMA.
- */
-static int
-v3d_bo_get_pages(struct v3d_bo *bo)
-{
-        struct drm_gem_object *obj = &bo->base;
-        struct drm_device *dev = obj->dev;
-        int npages = obj->size >> PAGE_SHIFT;
-        int ret = 0;
-
-        mutex_lock(&bo->lock);
-        if (bo->pages_refcount++ != 0)
-                goto unlock;
-
-        if (!obj->import_attach) {
-                bo->pages = drm_gem_get_pages(obj);
-                if (IS_ERR(bo->pages)) {
-                        ret = PTR_ERR(bo->pages);
-                        goto unlock;
-                }
-
-                bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
-                if (IS_ERR(bo->sgt)) {
-                        ret = PTR_ERR(bo->sgt);
-                        goto put_pages;
-                }
-
-                /* Map the pages for use by the GPU. */
-                dma_map_sg(dev->dev, bo->sgt->sgl,
-                           bo->sgt->nents, DMA_BIDIRECTIONAL);
-        } else {
-                bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
-                if (!bo->pages)
-                        goto put_pages;
-
-                drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
-                                                 NULL, npages);
-
-                /* Note that dma-bufs come in mapped. */
-        }
-
-        mutex_unlock(&bo->lock);
-
-        return 0;
-
-put_pages:
-        drm_gem_put_pages(obj, bo->pages, true, true);
-        bo->pages = NULL;
-unlock:
-        bo->pages_refcount--;
-        mutex_unlock(&bo->lock);
-        return ret;
-}
-
-static void
-v3d_bo_put_pages(struct v3d_bo *bo)
-{
-        struct drm_gem_object *obj = &bo->base;
-
-        mutex_lock(&bo->lock);
-        if (--bo->pages_refcount == 0) {
-                if (!obj->import_attach) {
-                        dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
-                                     bo->sgt->nents, DMA_BIDIRECTIONAL);
-                        sg_free_table(bo->sgt);
-                        kfree(bo->sgt);
-                        drm_gem_put_pages(obj, bo->pages, true, true);
-                } else {
-                        kfree(bo->pages);
-                }
-        }
-        mutex_unlock(&bo->lock);
-}
-
-static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
-                                           size_t unaligned_size)
-{
-        struct v3d_dev *v3d = to_v3d_dev(dev);
-        struct drm_gem_object *obj;
-        struct v3d_bo *bo;
-        size_t size = roundup(unaligned_size, PAGE_SIZE);
-        int ret;
-
-        if (size == 0)
-                return ERR_PTR(-EINVAL);
-
-        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-        if (!bo)
-                return ERR_PTR(-ENOMEM);
-        obj = &bo->base;
-
-        INIT_LIST_HEAD(&bo->vmas);
-        INIT_LIST_HEAD(&bo->unref_head);
-        mutex_init(&bo->lock);
-
-        ret = drm_gem_object_init(dev, obj, size);
-        if (ret)
-                goto free_bo;
-
-        spin_lock(&v3d->mm_lock);
-        ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
-                                         obj->size >> PAGE_SHIFT,
-                                         GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
-        spin_unlock(&v3d->mm_lock);
-        if (ret)
-                goto free_obj;
-
-        return bo;
-
-free_obj:
-        drm_gem_object_release(obj);
-free_bo:
-        kfree(bo);
-        return ERR_PTR(ret);
-}
-
-struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
-                             size_t unaligned_size)
-{
-        struct v3d_dev *v3d = to_v3d_dev(dev);
-        struct drm_gem_object *obj;
-        struct v3d_bo *bo;
-        int ret;
-
-        bo = v3d_bo_create_struct(dev, unaligned_size);
-        if (IS_ERR(bo))
-                return bo;
-        obj = &bo->base;
-
-        bo->resv = &bo->_resv;
-        reservation_object_init(bo->resv);
-
-        ret = v3d_bo_get_pages(bo);
-        if (ret)
-                goto free_mm;
-
-        v3d_mmu_insert_ptes(bo);
-
-        mutex_lock(&v3d->bo_lock);
-        v3d->bo_stats.num_allocated++;
-        v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
-        mutex_unlock(&v3d->bo_lock);
-
-        return bo;
-
-free_mm:
-        spin_lock(&v3d->mm_lock);
-        drm_mm_remove_node(&bo->node);
-        spin_unlock(&v3d->mm_lock);
-
-        drm_gem_object_release(obj);
-        kfree(bo);
-        return ERR_PTR(ret);
-}
-
 /* Called DRM core on the last userspace/kernel unreference of the
  * BO.
  */
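
Note: everything deleted above — the manual page pinning, the hand-built sg table, the private reservation object, and (further down) the custom fault and mmap handlers — is subsumed by the drm_gem_shmem helpers. That works because struct v3d_bo now embeds struct drm_gem_shmem_object rather than a bare struct drm_gem_object. The matching v3d_bo.h change is outside this file; based on the upstream commit it is roughly this sketch:

    struct v3d_bo {
            struct drm_gem_shmem_object base;  /* pages, sgt and resv live here now */

            struct drm_mm_node node;           /* offset in the GPU's page tables */

            /* List entry for the BO's position in
             * v3d_exec_info->unref_list
             */
            struct list_head unref_head;
    };

The extra level of embedding is why accessors in the hunks below change from &bo->base to &bo->base.base.
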
@@ -189,92 +33,116 @@
         struct v3d_dev *v3d = to_v3d_dev(obj->dev);
         struct v3d_bo *bo = to_v3d_bo(obj);
 
+        v3d_mmu_remove_ptes(bo);
+
         mutex_lock(&v3d->bo_lock);
         v3d->bo_stats.num_allocated--;
         v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
         mutex_unlock(&v3d->bo_lock);
 
-        reservation_object_fini(&bo->_resv);
-
-        v3d_bo_put_pages(bo);
-
-        if (obj->import_attach)
-                drm_prime_gem_destroy(obj, bo->sgt);
-
-        v3d_mmu_remove_ptes(bo);
         spin_lock(&v3d->mm_lock);
         drm_mm_remove_node(&bo->node);
         spin_unlock(&v3d->mm_lock);
 
-        mutex_destroy(&bo->lock);
+        /* GPU execution may have dirtied any pages in the BO. */
+        bo->base.pages_mark_dirty_on_put = true;
 
-        drm_gem_object_release(obj);
-        kfree(bo);
+        drm_gem_shmem_free_object(obj);
 }
 
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
+static const struct drm_gem_object_funcs v3d_gem_funcs = {
+        .free = v3d_free_object,
+        .print_info = drm_gem_shmem_print_info,
+        .pin = drm_gem_shmem_pin,
+        .unpin = drm_gem_shmem_unpin,
+        .get_sg_table = drm_gem_shmem_get_sg_table,
+        .vmap = drm_gem_shmem_vmap,
+        .vunmap = drm_gem_shmem_vunmap,
+        .mmap = drm_gem_shmem_mmap,
+};
+
+/* gem_create_object function for allocating a BO struct and doing
+ * early setup.
+ */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
 {
+        struct v3d_bo *bo;
+        struct drm_gem_object *obj;
+
+        if (size == 0)
+                return NULL;
+
+        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+        if (!bo)
+                return NULL;
+        obj = &bo->base.base;
+
+        obj->funcs = &v3d_gem_funcs;
+
+        INIT_LIST_HEAD(&bo->unref_head);
+
+        return &bo->base.base;
+}
+
+static int
+v3d_bo_create_finish(struct drm_gem_object *obj)
+{
+        struct v3d_dev *v3d = to_v3d_dev(obj->dev);
         struct v3d_bo *bo = to_v3d_bo(obj);
-
-        return bo->resv;
-}
-
-static void
-v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
-{
-        vma->vm_flags &= ~VM_PFNMAP;
-        vma->vm_flags |= VM_MIXEDMAP;
-        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-}
-
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
-{
-        struct vm_area_struct *vma = vmf->vma;
-        struct drm_gem_object *obj = vma->vm_private_data;
-        struct v3d_bo *bo = to_v3d_bo(obj);
-        pfn_t pfn;
-        pgoff_t pgoff;
-
-        /* We don't use vmf->pgoff since that has the fake offset: */
-        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-        pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
-
-        return vmf_insert_mixed(vma, vmf->address, pfn);
-}
-
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
-{
+        struct sg_table *sgt;
         int ret;
 
-        ret = drm_gem_mmap(filp, vma);
+        /* So far we pin the BO in the MMU for its lifetime, so use
+         * shmem's helper for getting a lifetime sgt.
+         */
+        sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+        if (IS_ERR(sgt))
+                return PTR_ERR(sgt);
+
+        spin_lock(&v3d->mm_lock);
+        /* Allocate the object's space in the GPU's page tables.
+         * Inserting PTEs will happen later, but the offset is for the
+         * lifetime of the BO.
+         */
+        ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+                                         obj->size >> PAGE_SHIFT,
+                                         GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+        spin_unlock(&v3d->mm_lock);
         if (ret)
                 return ret;
 
-        v3d_set_mmap_vma_flags(vma);
+        /* Track stats for /debug/dri/n/bo_stats. */
+        mutex_lock(&v3d->bo_lock);
+        v3d->bo_stats.num_allocated++;
+        v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+        mutex_unlock(&v3d->bo_lock);
 
-        return ret;
-}
-
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
-        int ret;
-
-        ret = drm_gem_mmap_obj(obj, obj->size, vma);
-        if (ret < 0)
-                return ret;
-
-        v3d_set_mmap_vma_flags(vma);
+        v3d_mmu_insert_ptes(bo);
 
         return 0;
 }
 
-struct sg_table *
-v3d_prime_get_sg_table(struct drm_gem_object *obj)
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+                             size_t unaligned_size)
 {
-        struct v3d_bo *bo = to_v3d_bo(obj);
-        int npages = obj->size >> PAGE_SHIFT;
+        struct drm_gem_shmem_object *shmem_obj;
+        struct v3d_bo *bo;
+        int ret;
 
-        return drm_prime_pages_to_sg(bo->pages, npages);
+        shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+        if (IS_ERR(shmem_obj))
+                return ERR_CAST(shmem_obj);
+        bo = to_v3d_bo(&shmem_obj->base);
+
+        ret = v3d_bo_create_finish(&shmem_obj->base);
+        if (ret)
+                goto free_obj;
+
+        return bo;
+
+free_obj:
+        drm_gem_shmem_free_object(&shmem_obj->base);
+        return ERR_PTR(ret);
 }
 
 struct drm_gem_object *
@@ -283,20 +151,17 @@
                              struct sg_table *sgt)
 {
         struct drm_gem_object *obj;
-        struct v3d_bo *bo;
+        int ret;
 
-        bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
-        if (IS_ERR(bo))
-                return ERR_CAST(bo);
-        obj = &bo->base;
+        obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+        if (IS_ERR(obj))
+                return obj;
 
-        bo->resv = attach->dmabuf->resv;
-
-        bo->sgt = sgt;
-        obj->import_attach = attach;
-        v3d_bo_get_pages(bo);
-
-        v3d_mmu_insert_ptes(bo);
+        ret = v3d_bo_create_finish(obj);
+        if (ret) {
+                drm_gem_shmem_free_object(obj);
+                return ERR_PTR(ret);
+        }
 
         return obj;
 }
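
Note: v3d_create_object() and v3d_prime_import_sg_table() above only take effect once the DRM core is told about them. In the same series, v3d_drv.c routes GEM allocation and prime import through these entry points; a rough sketch based on the upstream commit (not part of this file's diff):

    static struct drm_driver v3d_drm_driver = {
            /* ... feature flags, fops, ioctls ... */
            .gem_create_object = v3d_create_object,
            .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
            .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
            .gem_prime_import_sg_table = v3d_prime_import_sg_table,
            .gem_prime_mmap = drm_gem_prime_mmap,
    };

With .gem_create_object set, drm_gem_shmem_create() allocates the larger v3d_bo through it, so the driver keeps its drm_mm node and unref_head while the shmem layer owns the pages; pin, vmap, mmap, and sg-table handling are dispatched through v3d_gem_funcs installed on the object itself.
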
@@ -319,8 +184,8 @@
 
         args->offset = bo->node.start << PAGE_SHIFT;
 
-        ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
-        drm_gem_object_put_unlocked(&bo->base);
+        ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+        drm_gem_object_put(&bo->base.base);
 
         return ret;
 }
@@ -330,7 +195,6 @@
 {
         struct drm_v3d_mmap_bo *args = data;
         struct drm_gem_object *gem_obj;
-        int ret;
 
         if (args->flags != 0) {
                 DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
@@ -343,12 +207,10 @@
                 return -ENOENT;
         }
 
-        ret = drm_gem_create_mmap_offset(gem_obj);
-        if (ret == 0)
-                args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
-        drm_gem_object_put_unlocked(gem_obj);
+        args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+        drm_gem_object_put(gem_obj);
 
-        return ret;
+        return 0;
 }
 
 int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
@@ -367,6 +229,6 @@
 
         args->offset = bo->node.start << PAGE_SHIFT;
 
-        drm_gem_object_put_unlocked(gem_obj);
+        drm_gem_object_put(gem_obj);
         return 0;
 }
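
Note: the mmap_bo simplification works because drm_gem_shmem_create() already sets up the fake mmap offset when the BO is allocated, so the ioctl shrinks to a plain lookup that cannot fail once the handle resolves. Nothing changes for userspace; a minimal sketch of the intended flow, assuming the stock v3d UAPI header (<drm/v3d_drm.h>) and eliding error handling:

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/v3d_drm.h>

    /* Hypothetical helper: create a BO of 'size' bytes and CPU-map it. */
    static void *v3d_bo_map_new(int drm_fd, __u32 size)
    {
            struct drm_v3d_create_bo create = { .size = size };
            struct drm_v3d_mmap_bo map = { 0 };

            ioctl(drm_fd, DRM_IOCTL_V3D_CREATE_BO, &create); /* GEM handle + GPU offset */

            map.handle = create.handle;
            ioctl(drm_fd, DRM_IOCTL_V3D_MMAP_BO, &map);      /* fake offset for mmap */

            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        drm_fd, map.offset);
    }
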