From bedbef8ad3e75a304af6361af235302bcc61d06b Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 14 May 2024 06:39:01 +0000
Subject: [PATCH] Modify kernel path
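
Besides moving the file under kernel/drivers/gpu/drm/virtio/, this
syncs virtgpu_vq.c with the newer upstream virtio-gpu implementation:

- allocate vbufs with __GFP_NOFAIL and drop the ERR_PTR handling from
  the command allocation helpers
- replace the vqs_ready flag with drm_dev_enter()/drm_dev_exit()
- emit fences under ctrlq.qlock, once the vbuf's position in the
  virtqueue is known, and attach them to the vbuf's object array
- batch control-queue kicks: submissions only mark pending_commands,
  and virtio_gpu_notify() issues a single notify for the batch
- track GEM objects on the vbuffer (vbuf->objs) and release them after
  dequeue via virtio_gpu_array_put_free_delayed()
- convert userspace drm_virtgpu_3d_box to the little-endian wire
  format with convert_to_hw_box()
- unref resources through a response callback (virtio_gpu_cmd_unref_cb)
  instead of detach-backing plus fence wait
- add VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID support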
---
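Notes (below the cut, not part of the commit message): after this
change the control queue is no longer kicked on every submission. A
queued command only increments vgdev->pending_commands; the ring is
flushed with one virtqueue kick when virtio_gpu_notify() is called
(virtio_gpu_queue_ctrl_sgs() also calls it itself before sleeping on a
full ring). A minimal sketch of the resulting call pattern follows;
w, h, and objs are hypothetical values, and only virtio_gpu_notify()
and the new virtio_gpu_cmd_transfer_to_host_2d() signature come from
this patch:

    virtio_gpu_cmd_transfer_to_host_2d(vgdev, 0, w, h, 0, 0, objs, NULL);
    virtio_gpu_notify(vgdev); /* one notify for the whole command batch */
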
kernel/drivers/gpu/drm/virtio/virtgpu_vq.c | 566 +++++++++++++++++++++++++++++++-------------------------
 1 file changed, 313 insertions(+), 253 deletions(-)
diff --git a/kernel/drivers/gpu/drm/virtio/virtgpu_vq.c b/kernel/drivers/gpu/drm/virtio/virtgpu_vq.c
index 66011cb..e98a29d 100644
--- a/kernel/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/kernel/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -26,18 +26,30 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
-#include "virtgpu_trace.h"
+#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
+
+#include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
#define MAX_INLINE_CMD_SIZE 96
#define MAX_INLINE_RESP_SIZE 24
#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
+
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+ const struct drm_virtgpu_3d_box *src)
+{
+ dst->x = cpu_to_le32(src->x);
+ dst->y = cpu_to_le32(src->y);
+ dst->z = cpu_to_le32(src->z);
+ dst->w = cpu_to_le32(src->w);
+ dst->h = cpu_to_le32(src->h);
+ dst->d = cpu_to_le32(src->d);
+}
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
@@ -79,11 +91,10 @@
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
- if (!vbuf)
- return ERR_PTR(-ENOMEM);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
- BUG_ON(size > MAX_INLINE_CMD_SIZE);
+ BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+ size < sizeof(struct virtio_gpu_ctrl_hdr));
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
vbuf->size = size;
@@ -97,21 +108,14 @@
return vbuf;
}
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer **vbuffer_p,
- int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_vbuffer *vbuf;
-
- vbuf = virtio_gpu_get_vbuf(vgdev, size,
- sizeof(struct virtio_gpu_ctrl_hdr),
- NULL, NULL);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
- *vbuffer_p = vbuf;
- return vbuf->buf;
+ /* this assumes a vbuf contains a command that starts with a
+ * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+ * virtqueues.
+ */
+ return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
static struct virtio_gpu_update_cursor*
@@ -141,12 +145,27 @@
vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
resp_size, resp_buf, cb);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
*vbuffer_p = vbuf;
return (struct virtio_gpu_command *)vbuf->buf;
+}
+
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size,
+ virtio_gpu_resp_cb cb)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
}
static void free_vbuf(struct virtio_gpu_device *vgdev,
@@ -191,18 +210,18 @@
} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
spin_unlock(&vgdev->ctrlq.qlock);
- list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
- if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+ if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
- cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
- DRM_ERROR("response 0x%x (command 0x%x)\n",
- le32_to_cpu(resp->type),
- le32_to_cpu(cmd->type));
+ cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+ DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+ le32_to_cpu(resp->type),
+ le32_to_cpu(cmd->type));
} else
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
@@ -218,14 +237,18 @@
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
-
- list_del(&entry->list);
- free_vbuf(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
if (fence_id)
virtio_gpu_fence_event_process(vgdev, fence_id);
+
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ if (entry->objs)
+ virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
+ list_del(&entry->list);
+ free_vbuf(vgdev, entry);
+ }
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -273,7 +296,7 @@
return NULL;
}
- for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+ for_each_sgtable_sg(sgt, sg, i) {
pg = vmalloc_to_page(data);
if (!pg) {
sg_free_table(sgt);
@@ -291,125 +314,150 @@
return sgt;
}
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct scatterlist *vout)
- __releases(&vgdev->ctrlq.qlock)
- __acquires(&vgdev->ctrlq.qlock)
+static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vresp;
- int outcnt = 0, incnt = 0;
- int ret;
+ int ret, idx;
- if (!vgdev->vqs_ready)
- return -ENODEV;
-
- sg_init_one(&vcmd, vbuf->buf, vbuf->size);
- sgs[outcnt + incnt] = &vcmd;
- outcnt++;
-
- if (vout) {
- sgs[outcnt + incnt] = vout;
- outcnt++;
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ free_vbuf(vgdev, vbuf);
+ return -1;
}
- if (vbuf->resp_size) {
- sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
- sgs[outcnt + incnt] = &vresp;
- incnt++;
- }
-
-retry:
- ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
- if (ret == -ENOSPC) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
- spin_lock(&vgdev->ctrlq.qlock);
- goto retry;
- } else {
- trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
-
- virtqueue_kick(vq);
- }
-
- if (!ret)
- ret = vq->num_free;
- return ret;
-}
-
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence *fence)
-{
- struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *vout = NULL, sg;
- struct sg_table *sgt = NULL;
- int rc;
- int outcnt = 0;
-
- if (vbuf->data_size) {
- if (is_vmalloc_addr(vbuf->data_buf)) {
- sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
- &outcnt);
- if (!sgt)
- return -ENOMEM;
- vout = sgt->sgl;
- } else {
- sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
- vout = &sg;
- outcnt = 1;
- }
- }
+ if (vgdev->has_indirect)
+ elemcnt = 1;
again:
spin_lock(&vgdev->ctrlq.qlock);
- /*
- * Make sure we have enouth space in the virtqueue. If not
- * wait here until we have.
- *
- * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
- * to wait for free space, which can result in fence ids being
- * submitted out-of-order.
- */
- if (vq->num_free < 2 + outcnt) {
+ if (vq->num_free < elemcnt) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+ virtio_gpu_notify(vgdev);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
goto again;
}
- if (hdr && fence)
- virtio_gpu_fence_emit(vgdev, hdr, fence);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
+ /* now that the position of the vbuf in the virtqueue is known, we can
+ * finally set the fence id
+ */
+ if (fence) {
+ virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ fence);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
+ }
+
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ WARN_ON(ret);
+
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+ atomic_inc(&vgdev->pending_commands);
+
spin_unlock(&vgdev->ctrlq.qlock);
+
+ drm_dev_exit(idx);
+ return 0;
+}
+
+static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence)
+{
+ struct scatterlist *sgs[3], vcmd, vout, vresp;
+ struct sg_table *sgt = NULL;
+ int elemcnt = 0, outcnt = 0, incnt = 0, ret;
+
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vout */
+ if (vbuf->data_size) {
+ if (is_vmalloc_addr(vbuf->data_buf)) {
+ int sg_ents;
+ sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+ &sg_ents);
+ if (!sgt) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ return -1;
+ }
+
+ elemcnt += sg_ents;
+ sgs[outcnt] = sgt->sgl;
+ } else {
+ sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+ elemcnt++;
+ sgs[outcnt] = &vout;
+ }
+ outcnt++;
+ }
+
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
+ }
+
+ ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+ incnt);
if (sgt) {
sg_free_table(sgt);
kfree(sgt);
}
+ return ret;
+}
- return rc;
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
+{
+ bool notify;
+
+ if (!atomic_read(&vgdev->pending_commands))
+ return;
+
+ spin_lock(&vgdev->ctrlq.qlock);
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
}
static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
- return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+ return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
- int ret;
- int outcnt;
+ int idx, ret, outcnt;
+ bool notify;
- if (!vgdev->vqs_ready)
- return -ENODEV;
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ free_vbuf(vgdev, vbuf);
+ return;
+ }
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -425,16 +473,17 @@
goto retry;
} else {
trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ virtio_gpu_vbuf_ctrl_hdr(vbuf));
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
spin_unlock(&vgdev->cursorq.qlock);
- if (!ret)
- ret = vq->num_free;
- return ret;
+ if (notify)
+ virtqueue_notify(vq);
+
+ drm_dev_exit(idx);
}
/* just create gem objects for userspace and long lived objects,
@@ -445,6 +494,7 @@
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
@@ -452,6 +502,7 @@
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -459,39 +510,39 @@
cmd_p->width = cpu_to_le32(params->width);
cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
bo->created = true;
}
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_object *bo;
+
+ bo = vbuf->resp_cb_data;
+ vbuf->resp_cb_data = NULL;
+
+ virtio_gpu_cleanup_object(bo);
+}
+
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+ struct virtio_gpu_object *bo)
{
struct virtio_gpu_resource_unref *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
+ int ret;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+ virtio_gpu_cmd_unref_cb);
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-}
-
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_resource_detach_backing *cmd_p;
- struct virtio_gpu_vbuffer *vbuf;
-
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- memset(cmd_p, 0, sizeof(*cmd_p));
-
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
- cmd_p->resource_id = cpu_to_le32(resource_id);
-
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ vbuf->resp_cb_data = bo;
+ ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ if (ret < 0)
+ virtio_gpu_cleanup_object(bo);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -538,33 +589,35 @@
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
- dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
- DMA_TO_DEVICE);
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ shmem->pages, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
- cmd_p->r.width = width;
- cmd_p->r.height = height;
- cmd_p->r.x = x;
- cmd_p->r.y = y;
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
@@ -587,7 +640,7 @@
vbuf->data_buf = ents;
vbuf->data_size = sizeof(*ents) * nents;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -887,34 +940,37 @@
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -922,6 +978,7 @@
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
@@ -929,6 +986,7 @@
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -944,64 +1002,74 @@
cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
- dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
- DMA_TO_DEVICE);
+ dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ shmem->pages, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->box = *box;
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
- cmd_p->box = *box;
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence)
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -1011,85 +1079,22 @@
vbuf->data_buf = data;
vbuf->data_size = data_size;
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->size = cpu_to_le32(data_size);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence)
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_mem_entry *ents;
- struct scatterlist *sg;
- int si, nents;
-
- if (WARN_ON_ONCE(!obj->created))
- return -EINVAL;
-
- if (!obj->pages) {
- int ret;
-
- ret = virtio_gpu_object_get_sg_table(vgdev, obj);
- if (ret)
- return ret;
- }
-
- if (use_dma_api) {
- obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->pages->nents,
- DMA_TO_DEVICE);
- nents = obj->mapped;
- } else {
- nents = obj->pages->nents;
- }
-
- /* gets freed when the ring has consumed it */
- ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
- GFP_KERNEL);
- if (!ents) {
- DRM_ERROR("failed to allocate ent list\n");
- return -ENOMEM;
- }
-
- for_each_sg(obj->pages->sgl, sg, nents, si) {
- ents[si].addr = cpu_to_le64(use_dma_api
- ? sg_dma_address(sg)
- : sg_phys(sg));
- ents[si].length = cpu_to_le32(sg->length);
- ents[si].padding = 0;
- }
-
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
- ents, nents,
- fence);
- return 0;
-}
-
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj)
-{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
- if (use_dma_api && obj->mapped) {
- struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
- /* detach backing and wait for the host process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
- dma_fence_wait(&fence->f, true);
- dma_fence_put(&fence->f);
-
- /* ... then tear down iommu mappings */
- dma_unmap_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->mapped,
- DMA_TO_DEVICE);
- obj->mapped = 0;
- } else {
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
- }
+ ents, nents, NULL);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -1103,3 +1108,58 @@
memcpy(cur_p, &output->cursor, sizeof(output->cursor));
virtio_gpu_queue_cursor(vgdev, vbuf);
}
+
+static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_object *obj =
+ gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+ struct virtio_gpu_resp_resource_uuid *resp =
+ (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
+ uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+ spin_lock(&vgdev->resource_export_lock);
+ WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+
+ if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
+ obj->uuid_state == UUID_INITIALIZING) {
+ memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
+ obj->uuid_state = UUID_INITIALIZED;
+ } else {
+ obj->uuid_state = UUID_INITIALIZATION_FAILED;
+ }
+ spin_unlock(&vgdev->resource_export_lock);
+
+ wake_up_all(&vgdev->resp_wq);
+}
+
+int
+virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_resource_assign_uuid *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_resp_resource_uuid *resp_buf;
+
+ resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+ if (!resp_buf) {
+ spin_lock(&vgdev->resource_export_lock);
+ bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ spin_unlock(&vgdev->resource_export_lock);
+ virtio_gpu_array_put_free(objs);
+ return -ENOMEM;
+ }
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+ vbuf->objs = objs;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
--
Gitblit v1.6.2