2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -26,18 +26,30 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
-#include "virtgpu_trace.h"
+#include <linux/dma-mapping.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
+
+#include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
 
 #define MAX_INLINE_CMD_SIZE   96
 #define MAX_INLINE_RESP_SIZE  24
 #define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE \
			       + MAX_INLINE_RESP_SIZE)
+
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+			      const struct drm_virtgpu_3d_box *src)
+{
+	dst->x = cpu_to_le32(src->x);
+	dst->y = cpu_to_le32(src->y);
+	dst->z = cpu_to_le32(src->z);
+	dst->w = cpu_to_le32(src->w);
+	dst->h = cpu_to_le32(src->h);
+	dst->d = cpu_to_le32(src->d);
+}
 
 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 {
@@ -79,11 +91,10 @@
 {
	struct virtio_gpu_vbuffer *vbuf;
 
-	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
-	if (!vbuf)
-		return ERR_PTR(-ENOMEM);
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
 
-	BUG_ON(size > MAX_INLINE_CMD_SIZE);
+	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;
 
@@ -97,21 +108,14 @@
	return vbuf;
 }
 
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_vbuffer **vbuffer_p,
-				  int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
 {
-	struct virtio_gpu_vbuffer *vbuf;
-
-	vbuf = virtio_gpu_get_vbuf(vgdev, size,
-				   sizeof(struct virtio_gpu_ctrl_hdr),
-				   NULL, NULL);
-	if (IS_ERR(vbuf)) {
-		*vbuffer_p = NULL;
-		return ERR_CAST(vbuf);
-	}
-	*vbuffer_p = vbuf;
-	return vbuf->buf;
+	/* this assumes a vbuf contains a command that starts with a
+	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+	 * virtqueues.
+	 */
+	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
 }
 
 static struct virtio_gpu_update_cursor*
@@ -141,12 +145,27 @@
 
	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
-	if (IS_ERR(vbuf)) {
-		*vbuffer_p = NULL;
-		return ERR_CAST(vbuf);
-	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
+}
+
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_vbuffer **vbuffer_p,
+				  int size)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer **vbuffer_p,
+				     int size,
+				     virtio_gpu_resp_cb cb)
+{
+	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+					 sizeof(struct virtio_gpu_ctrl_hdr),
+					 NULL);
 }
 
 static void free_vbuf(struct virtio_gpu_device *vgdev,
@@ -191,18 +210,18 @@
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);
 
-	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
 
		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
 
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
-			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
-				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
-				DRM_ERROR("response 0x%x (command 0x%x)\n",
-					  le32_to_cpu(resp->type),
-					  le32_to_cpu(cmd->type));
+				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+						      le32_to_cpu(resp->type),
+						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
@@ -218,14 +237,18 @@
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
-
-		list_del(&entry->list);
-		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);
 
	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
+
+	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+		if (entry->objs)
+			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
+		list_del(&entry->list);
+		free_vbuf(vgdev, entry);
+	}
 }
 
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -273,7 +296,7 @@
		return NULL;
	}
 
-	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
@@ -291,125 +314,150 @@
	return sgt;
 }
 
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf,
-					       struct scatterlist *vout)
-		__releases(&vgdev->ctrlq.qlock)
-		__acquires(&vgdev->ctrlq.qlock)
+static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer *vbuf,
+				     struct virtio_gpu_fence *fence,
+				     int elemcnt,
+				     struct scatterlist **sgs,
+				     int outcnt,
+				     int incnt)
 {
	struct virtqueue *vq = vgdev->ctrlq.vq;
-	struct scatterlist *sgs[3], vcmd, vresp;
-	int outcnt = 0, incnt = 0;
-	int ret;
+	int ret, idx;
 
-	if (!vgdev->vqs_ready)
-		return -ENODEV;
-
-	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
-	sgs[outcnt + incnt] = &vcmd;
-	outcnt++;
-
-	if (vout) {
-		sgs[outcnt + incnt] = vout;
-		outcnt++;
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		if (fence && vbuf->objs)
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		free_vbuf(vgdev, vbuf);
+		return -1;
	}
 
-	if (vbuf->resp_size) {
-		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
-		sgs[outcnt + incnt] = &vresp;
-		incnt++;
-	}
-
-retry:
-	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
-	if (ret == -ENOSPC) {
-		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
-		spin_lock(&vgdev->ctrlq.qlock);
-		goto retry;
-	} else {
-		trace_virtio_gpu_cmd_queue(vq,
-					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
-
-		virtqueue_kick(vq);
-	}
-
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
-}
-
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf,
-					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence *fence)
-{
-	struct virtqueue *vq = vgdev->ctrlq.vq;
-	struct scatterlist *vout = NULL, sg;
-	struct sg_table *sgt = NULL;
-	int rc;
-	int outcnt = 0;
-
-	if (vbuf->data_size) {
-		if (is_vmalloc_addr(vbuf->data_buf)) {
-			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
-					     &outcnt);
-			if (!sgt)
-				return -ENOMEM;
-			vout = sgt->sgl;
-		} else {
-			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
-			vout = &sg;
-			outcnt = 1;
-		}
-	}
+	if (vgdev->has_indirect)
+		elemcnt = 1;
 
 again:
	spin_lock(&vgdev->ctrlq.qlock);
 
-	/*
-	 * Make sure we have enouth space in the virtqueue. If not
-	 * wait here until we have.
-	 *
-	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
-	 * to wait for free space, which can result in fence ids being
-	 * submitted out-of-order.
-	 */
-	if (vq->num_free < 2 + outcnt) {
+	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+		virtio_gpu_notify(vgdev);
+		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}
 
-	if (hdr && fence)
-		virtio_gpu_fence_emit(vgdev, hdr, fence);
-	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
+	/* now that the position of the vbuf in the virtqueue is known, we can
+	 * finally set the fence id
+	 */
+	if (fence) {
+		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+				      fence);
+		if (vbuf->objs) {
+			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+			virtio_gpu_array_unlock_resv(vbuf->objs);
+		}
+	}
+
+	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+	WARN_ON(ret);
+
+	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+	atomic_inc(&vgdev->pending_commands);
+
	spin_unlock(&vgdev->ctrlq.qlock);
+
+	drm_dev_exit(idx);
+	return 0;
+}
+
+static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					       struct virtio_gpu_vbuffer *vbuf,
+					       struct virtio_gpu_fence *fence)
+{
+	struct scatterlist *sgs[3], vcmd, vout, vresp;
+	struct sg_table *sgt = NULL;
+	int elemcnt = 0, outcnt = 0, incnt = 0, ret;
+
+	/* set up vcmd */
+	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+	elemcnt++;
+	sgs[outcnt] = &vcmd;
+	outcnt++;
+
+	/* set up vout */
+	if (vbuf->data_size) {
+		if (is_vmalloc_addr(vbuf->data_buf)) {
+			int sg_ents;
+			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+					     &sg_ents);
+			if (!sgt) {
+				if (fence && vbuf->objs)
+					virtio_gpu_array_unlock_resv(vbuf->objs);
+				return -1;
+			}
+
+			elemcnt += sg_ents;
+			sgs[outcnt] = sgt->sgl;
+		} else {
+			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+			elemcnt++;
+			sgs[outcnt] = &vout;
+		}
+		outcnt++;
+	}
+
+	/* set up vresp */
+	if (vbuf->resp_size) {
+		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+		elemcnt++;
+		sgs[outcnt + incnt] = &vresp;
+		incnt++;
+	}
+
+	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+					incnt);
 
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
+	return ret;
+}
 
-	return rc;
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
+{
+	bool notify;
+
+	if (!atomic_read(&vgdev->pending_commands))
+		return;
+
+	spin_lock(&vgdev->ctrlq.qlock);
+	atomic_set(&vgdev->pending_commands, 0);
+	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+	spin_unlock(&vgdev->ctrlq.qlock);
+
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
 {
-	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
 }
 
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
-				   struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
 {
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
-	int ret;
-	int outcnt;
+	int idx, ret, outcnt;
+	bool notify;
 
-	if (!vgdev->vqs_ready)
-		return -ENODEV;
+	if (!drm_dev_enter(vgdev->ddev, &idx)) {
+		free_vbuf(vgdev, vbuf);
+		return;
+	}
 
	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
@@ -425,16 +473,17 @@
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
-					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+					   virtio_gpu_vbuf_ctrl_hdr(vbuf));
 
-		virtqueue_kick(vq);
+		notify = virtqueue_kick_prepare(vq);
	}
 
	spin_unlock(&vgdev->cursorq.qlock);
 
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
+	if (notify)
+		virtqueue_notify(vq);
+
+	drm_dev_exit(idx);
 }
 
 /* just create gem objects for userspace and long lived objects,
@@ -445,6 +494,7 @@
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
+				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
 {
	struct virtio_gpu_resource_create_2d *cmd_p;
@@ -452,6 +502,7 @@
 
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -459,39 +510,39 @@
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
 }
 
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_object *bo;
+
+	bo = vbuf->resp_cb_data;
+	vbuf->resp_cb_data = NULL;
+
+	virtio_gpu_cleanup_object(bo);
+}
+
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
-				   uint32_t resource_id)
+				   struct virtio_gpu_object *bo)
 {
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
+	int ret;
 
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-}
-
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-						   uint32_t resource_id,
-						   struct virtio_gpu_fence *fence)
-{
-	struct virtio_gpu_resource_detach_backing *cmd_p;
-	struct virtio_gpu_vbuffer *vbuf;
-
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-	memset(cmd_p, 0, sizeof(*cmd_p));
-
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	vbuf->resp_cb_data = bo;
+	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	if (ret < 0)
+		virtio_gpu_cleanup_object(bo);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -538,33 +589,35 @@
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
					uint64_t offset,
-					__le32 width, __le32 height,
-					__le32 x, __le32 y,
+					uint32_t width, uint32_t height,
+					uint32_t x, uint32_t y,
+					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-	if (use_dma_api)
-		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
-				       DMA_TO_DEVICE);
+	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+					    shmem->pages, DMA_TO_DEVICE);
 
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
-	cmd_p->r.width = width;
-	cmd_p->r.height = height;
-	cmd_p->r.x = x;
-	cmd_p->r.y = y;
+	cmd_p->r.width = cpu_to_le32(width);
+	cmd_p->r.height = cpu_to_le32(height);
+	cmd_p->r.x = cpu_to_le32(x);
+	cmd_p->r.y = cpu_to_le32(y);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 static void
@@ -587,7 +640,7 @@
	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -887,34 +940,37 @@
 
 void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
-					    uint32_t resource_id)
+					    struct virtio_gpu_object_array *objs)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
 
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
 }
 
 void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
-					    uint32_t resource_id)
+					    struct virtio_gpu_object_array *objs)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
 
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
@@ -922,6 +978,7 @@
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
+				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
 {
	struct virtio_gpu_resource_create_3d *cmd_p;
@@ -929,6 +986,7 @@
 
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
+	vbuf->objs = objs;
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -944,64 +1002,74 @@
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
	bo->created = true;
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
-					struct virtio_gpu_box *box,
+					struct drm_virtgpu_3d_box *box,
+					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
	if (use_dma_api)
-		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
-				       bo->pages->sgl, bo->pages->nents,
-				       DMA_TO_DEVICE);
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+					    shmem->pages, DMA_TO_DEVICE);
 
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	vbuf->objs = objs;
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
-	cmd_p->box = *box;
+	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
-					  uint32_t resource_id, uint32_t ctx_id,
+					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
-					  struct virtio_gpu_box *box,
+					  struct drm_virtgpu_3d_box *box,
+					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
 
	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
 
+	vbuf->objs = objs;
+
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-	cmd_p->box = *box;
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
+			   uint32_t ctx_id,
+			   struct virtio_gpu_object_array *objs,
+			   struct virtio_gpu_fence *fence)
 {
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
@@ -1011,85 +1079,22 @@
 
	vbuf->data_buf = data;
	vbuf->data_size = data_size;
+	vbuf->objs = objs;
 
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);
 
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
 
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
-			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence)
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj,
+			      struct virtio_gpu_mem_entry *ents,
+			      unsigned int nents)
 {
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_mem_entry *ents;
-	struct scatterlist *sg;
-	int si, nents;
-
-	if (WARN_ON_ONCE(!obj->created))
-		return -EINVAL;
-
-	if (!obj->pages) {
-		int ret;
-
-		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
-		if (ret)
-			return ret;
-	}
-
-	if (use_dma_api) {
-		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-					 obj->pages->sgl, obj->pages->nents,
-					 DMA_TO_DEVICE);
-		nents = obj->mapped;
-	} else {
-		nents = obj->pages->nents;
-	}
-
-	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
-			     GFP_KERNEL);
-	if (!ents) {
-		DRM_ERROR("failed to allocate ent list\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(obj->pages->sgl, sg, nents, si) {
-		ents[si].addr = cpu_to_le64(use_dma_api
-					    ? sg_dma_address(sg)
-					    : sg_phys(sg));
-		ents[si].length = cpu_to_le32(sg->length);
-		ents[si].padding = 0;
-	}
-
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
-					       ents, nents,
-					       fence);
-	return 0;
-}
-
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-	if (use_dma_api && obj->mapped) {
-		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-		dma_fence_wait(&fence->f, true);
-		dma_fence_put(&fence->f);
-
-		/* ... then tear down iommu mappings */
-		dma_unmap_sg(vgdev->vdev->dev.parent,
-			     obj->pages->sgl, obj->mapped,
-			     DMA_TO_DEVICE);
-		obj->mapped = 0;
-	} else {
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-	}
+					       ents, nents, NULL);
 }
 
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -1103,3 +1108,58 @@
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
 }
+
+static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
+					    struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_object *obj =
+		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+	struct virtio_gpu_resp_resource_uuid *resp =
+		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
+	uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+	spin_lock(&vgdev->resource_export_lock);
+	WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+
+	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
+	    obj->uuid_state == UUID_INITIALIZING) {
+		memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
+		obj->uuid_state = UUID_INITIALIZED;
+	} else {
+		obj->uuid_state = UUID_INITIALIZATION_FAILED;
+	}
+	spin_unlock(&vgdev->resource_export_lock);
+
+	wake_up_all(&vgdev->resp_wq);
+}
+
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_object_array *objs)
+{
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+	struct virtio_gpu_resource_assign_uuid *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	struct virtio_gpu_resp_resource_uuid *resp_buf;
+
+	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+	if (!resp_buf) {
+		spin_lock(&vgdev->resource_export_lock);
+		bo->uuid_state = UUID_INITIALIZATION_FAILED;
+		spin_unlock(&vgdev->resource_export_lock);
+		virtio_gpu_array_put_free(objs);
+		return -ENOMEM;
+	}
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
+		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+	vbuf->objs = objs;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}