2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0+
 
+#include <linux/dma-buf.h>
 #include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
+#include <drm/drm_prime.h>
 
 #include "vkms_drv.h"
 
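Note: the three new includes back the additions below: <linux/dma-buf.h> supplies struct dma_buf_attachment for the PRIME import path, <linux/vmalloc.h> supplies vmap()/vunmap(), and <drm/drm_prime.h> supplies drm_prime_sg_to_page_addr_arrays().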
@@ -31,20 +34,22 @@
 	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
 						   gem);
 
-	kvfree(gem->pages);
+	WARN_ON(gem->pages);
+	WARN_ON(gem->vaddr);
+
 	mutex_destroy(&gem->pages_lock);
 	drm_gem_object_release(obj);
 	kfree(gem);
 }
 
-int vkms_gem_fault(struct vm_fault *vmf)
+vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct vkms_gem_object *obj = vma->vm_private_data;
 	unsigned long vaddr = vmf->address;
 	pgoff_t page_offset;
 	loff_t num_pages;
-	int ret;
+	vm_fault_t ret = VM_FAULT_SIGBUS;
 
 	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
 	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
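Note: replacing kvfree(gem->pages) with WARN_ON(gem->pages) and WARN_ON(gem->vaddr) changes the lifetime contract. With the refcounted vmap/vunmap helpers added below, both fields must already be NULL by the time the object is freed, so a non-NULL value here now flags a missing vkms_gem_vunmap() instead of being silently cleaned up.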
@@ -52,7 +57,6 @@
 	if (page_offset > num_pages)
 		return VM_FAULT_SIGBUS;
 
-	ret = -ENOENT;
 	mutex_lock(&obj->pages_lock);
 	if (obj->pages) {
 		get_page(obj->pages[page_offset]);
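Note: vkms_gem_fault() now has the vm_fault_t return type required by the typed fault infrastructure, and seeding ret with VM_FAULT_SIGBUS makes the separate ret = -ENOENT assignment redundant; the handler only clears ret once a page has actually been found. For context, a minimal sketch of how such a typed handler is wired up. The table below is an assumption modeled on the usual GEM pattern (in vkms it lives in vkms_drv.c, outside this diff):

/* Sketch (assumed, not part of this diff): typed fault handler wiring. */
static const struct vm_operations_struct vkms_gem_vm_ops = {
	.fault = vkms_gem_fault,	/* returns vm_fault_t */
	.open = drm_gem_vm_open,	/* standard GEM mmap refcount hooks */
	.close = drm_gem_vm_close,
};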
@@ -137,35 +141,108 @@
 	args->size = gem_obj->size;
 	args->pitch = pitch;
 
-	drm_gem_object_put_unlocked(gem_obj);
+	drm_gem_object_put(gem_obj);
 
 	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
 
 	return 0;
 }
 
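Note: drm_gem_object_put_unlocked() was renamed to drm_gem_object_put() when the old struct_mutex-locked variant was retired from the GEM API; this is a mechanical rename with no behavioral change.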
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
-		  u32 handle, u64 *offset)
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
 {
-	struct drm_gem_object *obj;
-	int ret;
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
 
-	obj = drm_gem_object_lookup(file, handle);
-	if (!obj)
-		return -ENOENT;
+	if (!vkms_obj->pages) {
+		struct page **pages = drm_gem_get_pages(gem_obj);
 
-	if (!obj->filp) {
-		ret = -EINVAL;
-		goto unref;
+		if (IS_ERR(pages))
+			return pages;
+
+		if (cmpxchg(&vkms_obj->pages, NULL, pages))
+			drm_gem_put_pages(gem_obj, pages, false, true);
 	}
 
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
-		goto unref;
+	return vkms_obj->pages;
+}
 
-	*offset = drm_vma_node_offset_addr(&obj->vma_node);
-unref:
-	drm_gem_object_put_unlocked(obj);
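Note: _get_pages() takes the place of vkms_dumb_map() at this spot in the file. The helper pins the shmem-backed pages lazily and tolerates concurrent callers: each racer gets its own array from drm_gem_get_pages(), exactly one wins the cmpxchg() that installs vkms_obj->pages, and every loser immediately returns its duplicate via drm_gem_put_pages(). Dropping vkms_dumb_map() is safe because the DRM core falls back to the generic drm_gem_dumb_map_offset() when a driver leaves its dumb_map_offset hook unset (this assumes the matching vkms_drv.c change, which is outside this file).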
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
 
+	mutex_lock(&vkms_obj->pages_lock);
+	if (vkms_obj->vmap_count < 1) {
+		WARN_ON(vkms_obj->vaddr);
+		WARN_ON(vkms_obj->pages);
+		mutex_unlock(&vkms_obj->pages_lock);
+		return;
+	}
+
+	vkms_obj->vmap_count--;
+
+	if (vkms_obj->vmap_count == 0) {
+		vunmap(vkms_obj->vaddr);
+		vkms_obj->vaddr = NULL;
+		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+		vkms_obj->pages = NULL;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+	int ret = 0;
+
+	mutex_lock(&vkms_obj->pages_lock);
+
+	if (!vkms_obj->vaddr) {
+		unsigned int n_pages = obj->size >> PAGE_SHIFT;
+		struct page **pages = _get_pages(vkms_obj);
+
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto out;
+		}
+
+		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+		if (!vkms_obj->vaddr)
+			goto err_vmap;
+	}
+
+	vkms_obj->vmap_count++;
+	goto out;
+
+err_vmap:
+	ret = -ENOMEM;
+	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+	vkms_obj->pages = NULL;
+out:
+	mutex_unlock(&vkms_obj->pages_lock);
 	return ret;
 }
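Note: vkms_gem_vmap()/vkms_gem_vunmap() form a refcounted pair under pages_lock: the first vmap builds the kernel mapping, later calls just bump vmap_count, and the mapping plus page references are torn down only when the count returns to zero. A hedged usage sketch follows; the consumer below is illustrative (loosely modeled on a CRC-style worker) and its names are not taken from this diff:

/* Illustrative consumer (assumed, not part of this diff). */
#include <linux/crc32.h>

static int vkms_example_sum_fb(struct drm_gem_object *obj, u32 *crc)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = vkms_gem_vmap(obj);

	if (ret)
		return ret;

	/* vaddr stays valid until the matching vunmap drops the count */
	*crc = crc32_le(0, vkms_obj->vaddr, obj->size);

	vkms_gem_vunmap(obj);
	return 0;
}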
+
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+			   struct dma_buf_attachment *attach,
+			   struct sg_table *sg)
+{
+	struct vkms_gem_object *obj;
+	int npages;
+
+	obj = __vkms_gem_create(dev, attach->dmabuf->size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+	DRM_DEBUG_PRIME("Importing %d pages\n", npages);
+
+	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+	if (!obj->pages) {
+		vkms_gem_free_object(&obj->gem);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+	return &obj->gem;
+}
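Note: the import helper allocates the object, then lets drm_prime_sg_to_page_addr_arrays() flatten the scatter-gather table into obj->pages (the NULL argument skips the optional DMA-address array, which vkms does not need). For it to be reachable, the driver has to advertise it; the wiring below is an assumption about the matching vkms_drv.c change, shown only to place this function in context:

/* Assumed drm_driver wiring (vkms_drv.c, outside this diff; fields
 * abbreviated). Exact feature flags and hooks depend on the kernel
 * version; some releases of this era also required DRIVER_PRIME. */
static struct drm_driver vkms_driver = {
	.driver_features		= DRIVER_MODESET | DRIVER_ATOMIC |
					  DRIVER_GEM,
	.dumb_create			= vkms_dumb_create,
	.gem_free_object_unlocked	= vkms_gem_free_object,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table	= vkms_prime_import_sg_table,
	/* .fops, .name, .major, .minor, ... */
};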