2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -31,16 +31,15 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_gpu.h>
 
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
 #include <drm/drm_atomic.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_fb_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_module.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/virtgpu_drm.h>
 
 #define DRIVER_NAME "virtio_gpu"
 #define DRIVER_DESC "virtio GPU"
@@ -49,6 +48,10 @@
 #define DRIVER_MAJOR 0
 #define DRIVER_MINOR 1
 #define DRIVER_PATCHLEVEL 0
+
+#define UUID_INITIALIZING 0
+#define UUID_INITIALIZED 1
+#define UUID_INITIALIZATION_FAILED 2
 
 struct virtio_gpu_object_params {
 	uint32_t format;
@@ -68,21 +71,32 @@
 };
 
 struct virtio_gpu_object {
-	struct drm_gem_object gem_base;
+	struct drm_gem_shmem_object base;
 	uint32_t hw_res_handle;
-
-	struct sg_table *pages;
-	uint32_t mapped;
-	void *vmap;
 	bool dumb;
-	struct ttm_place placement_code;
-	struct ttm_placement placement;
-	struct ttm_buffer_object tbo;
-	struct ttm_bo_kmap_obj kmap;
 	bool created;
+
+	int uuid_state;
+	uuid_t uuid;
 };
 #define gem_to_virtio_gpu_obj(gobj) \
-	container_of((gobj), struct virtio_gpu_object, gem_base)
+	container_of((gobj), struct virtio_gpu_object, base.base)
+
+struct virtio_gpu_object_shmem {
+	struct virtio_gpu_object base;
+	struct sg_table *pages;
+	uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+	container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
+struct virtio_gpu_object_array {
+	struct ww_acquire_ctx ticket;
+	struct list_head next;
+	u32 nents, total;
+	struct drm_gem_object *objs[];
+};
 
 struct virtio_gpu_vbuffer;
 struct virtio_gpu_device;
@@ -103,8 +117,6 @@
 	struct virtio_gpu_fence_driver *drv;
 	struct list_head node;
 };
-#define to_virtio_fence(x) \
-	container_of(x, struct virtio_gpu_fence, f)
 
 struct virtio_gpu_vbuffer {
 	char *buf;
@@ -115,9 +127,10 @@
 
 	char *resp_buf;
 	int resp_size;
-
 	virtio_gpu_resp_cb resp_cb;
+	void *resp_cb_data;
 
+	struct virtio_gpu_object_array *objs;
 	struct list_head list;
 };
 
@@ -131,31 +144,17 @@
 	struct edid *edid;
 	int cur_x;
 	int cur_y;
-	bool enabled;
+	bool needs_modeset;
 };
 #define drm_crtc_to_virtio_gpu_output(x) \
 	container_of(x, struct virtio_gpu_output, crtc)
-#define drm_connector_to_virtio_gpu_output(x) \
-	container_of(x, struct virtio_gpu_output, conn)
-#define drm_encoder_to_virtio_gpu_output(x) \
-	container_of(x, struct virtio_gpu_output, enc)
 
 struct virtio_gpu_framebuffer {
 	struct drm_framebuffer base;
-	int x1, y1, x2, y2; /* dirty rect */
-	spinlock_t dirty_lock;
-	uint32_t hw_res_handle;
 	struct virtio_gpu_fence *fence;
 };
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)
-
-struct virtio_gpu_mman {
-	struct ttm_bo_global_ref bo_global_ref;
-	struct drm_global_reference mem_global_ref;
-	bool mem_global_referenced;
-	struct ttm_bo_device bdev;
-};
 
 struct virtio_gpu_queue {
 	struct virtqueue *vq;
@@ -185,15 +184,14 @@
 
 	struct virtio_device *vdev;
 
-	struct virtio_gpu_mman mman;
-
 	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
 	uint32_t num_scanouts;
 
 	struct virtio_gpu_queue ctrlq;
 	struct virtio_gpu_queue cursorq;
 	struct kmem_cache *vbufs;
-	bool vqs_ready;
+
+	atomic_t pending_commands;
 
 	struct ida resource_ida;
 
@@ -208,48 +206,46 @@
 
 	bool has_virgl_3d;
 	bool has_edid;
+	bool has_indirect;
+	bool has_resource_assign_uuid;
 
 	struct work_struct config_changed_work;
+
+	struct work_struct obj_free_work;
+	spinlock_t obj_free_lock;
+	struct list_head obj_free_list;
 
 	struct virtio_gpu_drv_capset *capsets;
 	uint32_t num_capsets;
 	struct list_head cap_cache;
+
+	/* protects resource state when exporting */
+	spinlock_t resource_export_lock;
 };
 
 struct virtio_gpu_fpriv {
 	uint32_t ctx_id;
+	bool context_created;
+	struct mutex context_lock;
 };
 
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
 #define DRM_VIRTIO_NUM_IOCTLS 10
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
-				    struct list_head *head);
-void virtio_gpu_unref_list(struct list_head *head);
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
 
-/* virtio_kms.c */
+/* virtgpu_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
 void virtio_gpu_deinit(struct drm_device *dev);
+void virtio_gpu_release(struct drm_device *dev);
 int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
-/* virtio_gem.c */
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
-int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_gem_create(struct drm_file *file,
-			  struct drm_device *dev,
-			  struct virtio_gpu_object_params *params,
-			  struct drm_gem_object **obj_p,
-			  uint32_t *handle_p);
+/* virtgpu_gem.c */
 int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file);
 void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file);
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
-			struct virtio_gpu_object_params *params,
-			struct virtio_gpu_fence *fence);
 int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args);
@@ -257,24 +253,35 @@
				struct drm_device *dev,
				uint32_t handle, uint64_t *offset_p);
 
-/* virtio_fb */
-int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
-			     struct drm_clip_rect *clips,
-			     unsigned int num_clips);
-/* virtio vg */
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+			      struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+				struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+				       struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_work(struct work_struct *work);
+
+/* virtgpu_vq.c */
 int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
+				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id);
+				   struct virtio_gpu_object *bo);
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
					uint64_t offset,
-					__le32 width, __le32 height,
-					__le32 x, __le32 y,
+					uint32_t width, uint32_t height,
+					uint32_t x, uint32_t y,
+					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
@@ -284,11 +291,10 @@
				   uint32_t scanout_id, uint32_t resource_id,
				   uint32_t width, uint32_t height,
				   uint32_t x, uint32_t y);
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
-			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj);
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj,
+			      struct virtio_gpu_mem_entry *ents,
+			      unsigned int nents);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -305,28 +311,32 @@
			    uint32_t id);
 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
-					    uint32_t resource_id);
+					    struct virtio_gpu_object_array *objs);
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
-					    uint32_t resource_id);
+					    struct virtio_gpu_object_array *objs);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence *fence);
+			   uint32_t ctx_id,
+			   struct virtio_gpu_object_array *objs,
+			   struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
-					  uint32_t resource_id, uint32_t ctx_id,
+					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
-					  struct virtio_gpu_box *box,
+					  struct drm_virtgpu_3d_box *box,
+					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
-					struct virtio_gpu_box *box,
+					struct drm_virtgpu_3d_box *box,
+					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
+				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
@@ -335,27 +345,23 @@
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
 void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
-/* virtio_gpu_display.c */
-int virtio_gpu_framebuffer_init(struct drm_device *dev,
-				struct virtio_gpu_framebuffer *vgfb,
-				const struct drm_mode_fb_cmd2 *mode_cmd,
-				struct drm_gem_object *obj);
-void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
+
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_object_array *objs);
+
+/* virtgpu_display.c */
+int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
 void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
 
-/* virtio_gpu_plane.c */
+/* virtgpu_plane.c */
 uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index);
 
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
-/* virtio_gpu_fence.c */
-bool virtio_fence_signaled(struct dma_fence *f);
+/* virtgpu_fence.c */
 struct virtio_gpu_fence *virtio_gpu_fence_alloc(
	struct virtio_gpu_device *vgdev);
 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -364,74 +370,29 @@
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
				    u64 last_seq);
 
-/* virtio_gpu_object */
+/* virtgpu_object.c */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+						size_t size);
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
-				   struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
 
 /* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+					 int flags);
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+						struct dma_buf *buf);
+int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
+			       uuid_t *uuid);
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-			   struct vm_area_struct *vma);
 
-static inline struct virtio_gpu_object*
-virtio_gpu_object_ref(struct virtio_gpu_object *bo)
-{
-	ttm_bo_get(&bo->tbo);
-	return bo;
-}
-
-static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
-{
-	struct ttm_buffer_object *tbo;
-
-	if ((*bo) == NULL)
-		return;
-	tbo = &((*bo)->tbo);
-	ttm_bo_put(tbo);
-	*bo = NULL;
-}
-
-static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
-{
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
-}
-
-static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
-					    bool no_wait)
-{
-	int r;
-
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-	if (unlikely(r != 0)) {
-		if (r != -ERESTARTSYS) {
-			struct virtio_gpu_device *qdev =
-				bo->gem_base.dev->dev_private;
-			dev_err(qdev->dev, "%p reserve failed\n", bo);
-		}
-		return r;
-	}
-	return 0;
-}
-
-static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
-{
-	ttm_bo_unreserve(&bo->tbo);
-}
-
-/* virgl debufs */
-int virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_debugfs.c */
+void virtio_gpu_debugfs_init(struct drm_minor *minor);
 
 #endif
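
Usage note (not part of the diff): the virtio_gpu_object_array helpers declared above replace the removed TTM validate/unref lists. The sketch below is illustrative only; virtio_gpu_do_submit() and its error labels are hypothetical names introduced here to show the intended call order of the helpers exposed by this header, not actual driver code.

/*
 * Illustrative sketch, assuming the declarations in virtgpu_drv.h above.
 * virtio_gpu_do_submit() is a hypothetical caller, not part of the driver.
 */
#include "virtgpu_drv.h"

static int virtio_gpu_do_submit(struct virtio_gpu_device *vgdev,
				struct drm_file *file,
				u32 *handles, u32 nents,
				void *cmd, u32 cmd_size, u32 ctx_id)
{
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;

	/* Look up the GEM handles once; the array holds the references. */
	objs = virtio_gpu_array_from_handles(file, handles, nents);
	if (!objs)
		return -ENOENT;

	/* Reserve all objects; the ww_acquire ticket lives in the array. */
	ret = virtio_gpu_array_lock_resv(objs);
	if (ret)
		goto err_put;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * The command takes ownership of objs; the virtqueue side is expected
	 * to attach the fence and release the array once the host has
	 * processed the buffer (e.g. via virtio_gpu_array_put_free_delayed()).
	 */
	virtio_gpu_cmd_submit(vgdev, cmd, cmd_size, ctx_id, objs, fence);
	virtio_gpu_notify(vgdev);
	dma_fence_put(&fence->f);	/* drop the submitter's fence reference */
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put:
	virtio_gpu_array_put_free(objs);
	return ret;
}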