.. | ..
26 | 26 | * OTHER DEALINGS IN THE SOFTWARE.
27 | 27 | */
28 | 28 |
29 | | -#include <drm/drmP.h>
30 | | -#include "virtgpu_drv.h"
31 | | -#include "virtgpu_trace.h"
| 29 | +#include <linux/dma-mapping.h>
32 | 30 | #include <linux/virtio.h>
33 | 31 | #include <linux/virtio_config.h>
34 | 32 | #include <linux/virtio_ring.h>
| 33 | +
| 34 | +#include "virtgpu_drv.h"
| 35 | +#include "virtgpu_trace.h"
35 | 36 |
36 | 37 | #define MAX_INLINE_CMD_SIZE 96
37 | 38 | #define MAX_INLINE_RESP_SIZE 24
38 | 39 | #define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
39 | 40 | + MAX_INLINE_CMD_SIZE \
40 | 41 | + MAX_INLINE_RESP_SIZE)
| 42 | +
| 43 | +static void convert_to_hw_box(struct virtio_gpu_box *dst,
| 44 | + const struct drm_virtgpu_3d_box *src)
| 45 | +{
| 46 | + dst->x = cpu_to_le32(src->x);
| 47 | + dst->y = cpu_to_le32(src->y);
| 48 | + dst->z = cpu_to_le32(src->z);
| 49 | + dst->w = cpu_to_le32(src->w);
| 50 | + dst->h = cpu_to_le32(src->h);
| 51 | + dst->d = cpu_to_le32(src->d);
| 52 | +}
41 | 53 |
42 | 54 | void virtio_gpu_ctrl_ack(struct virtqueue *vq)
43 | 55 | {
.. | ..
79 | 91 | {
80 | 92 | struct virtio_gpu_vbuffer *vbuf;
81 | 93 |
82 | | - vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
83 | | - if (!vbuf)
84 | | - return ERR_PTR(-ENOMEM);
| 94 | + vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
85 | 95 |
86 | | - BUG_ON(size > MAX_INLINE_CMD_SIZE);
| 96 | + BUG_ON(size > MAX_INLINE_CMD_SIZE ||
| 97 | + size < sizeof(struct virtio_gpu_ctrl_hdr));
87 | 98 | vbuf->buf = (void *)vbuf + sizeof(*vbuf);
88 | 99 | vbuf->size = size;
89 | 100 |
.. | ..
97 | 108 | return vbuf;
98 | 109 | }
99 | 110 |
100 | | -static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
101 | | - struct virtio_gpu_vbuffer **vbuffer_p,
102 | | - int size)
| 111 | +static struct virtio_gpu_ctrl_hdr *
| 112 | +virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
103 | 113 | {
104 | | - struct virtio_gpu_vbuffer *vbuf;
105 | | -
106 | | - vbuf = virtio_gpu_get_vbuf(vgdev, size,
107 | | - sizeof(struct virtio_gpu_ctrl_hdr),
108 | | - NULL, NULL);
109 | | - if (IS_ERR(vbuf)) {
110 | | - *vbuffer_p = NULL;
111 | | - return ERR_CAST(vbuf);
112 | | - }
113 | | - *vbuffer_p = vbuf;
114 | | - return vbuf->buf;
| 114 | + /* this assumes a vbuf contains a command that starts with a
| 115 | + * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
| 116 | + * virtqueues.
| 117 | + */
| 118 | + return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
115 | 119 | }
116 | 120 |
117 | 121 | static struct virtio_gpu_update_cursor*
.. | ..
141 | 145 |
142 | 146 | vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
143 | 147 | resp_size, resp_buf, cb);
144 | | - if (IS_ERR(vbuf)) {
145 | | - *vbuffer_p = NULL;
146 | | - return ERR_CAST(vbuf);
147 | | - }
148 | 148 | *vbuffer_p = vbuf;
149 | 149 | return (struct virtio_gpu_command *)vbuf->buf;
| 150 | +}
| 151 | +
| 152 | +static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
| 153 | + struct virtio_gpu_vbuffer **vbuffer_p,
| 154 | + int size)
| 155 | +{
| 156 | + return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
| 157 | + sizeof(struct virtio_gpu_ctrl_hdr),
| 158 | + NULL);
| 159 | +}
| 160 | +
| 161 | +static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
| 162 | + struct virtio_gpu_vbuffer **vbuffer_p,
| 163 | + int size,
| 164 | + virtio_gpu_resp_cb cb)
| 165 | +{
| 166 | + return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
| 167 | + sizeof(struct virtio_gpu_ctrl_hdr),
| 168 | + NULL);
150 | 169 | }
151 | 170 |
152 | 171 | static void free_vbuf(struct virtio_gpu_device *vgdev,
.. | ..
191 | 210 | } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
192 | 211 | spin_unlock(&vgdev->ctrlq.qlock);
193 | 212 |
194 | | - list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
| 213 | + list_for_each_entry(entry, &reclaim_list, list) {
195 | 214 | resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
196 | 215 |
197 | 216 | trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
198 | 217 |
199 | 218 | if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
200 | | - if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
| 219 | + if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
201 | 220 | struct virtio_gpu_ctrl_hdr *cmd;
202 | | - cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
203 | | - DRM_ERROR("response 0x%x (command 0x%x)\n",
204 | | - le32_to_cpu(resp->type),
205 | | - le32_to_cpu(cmd->type));
| 221 | + cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
| 222 | + DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
| 223 | + le32_to_cpu(resp->type),
| 224 | + le32_to_cpu(cmd->type));
206 | 225 | } else
207 | 226 | DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
208 | 227 | }
.. | ..
218 | 237 | }
219 | 238 | if (entry->resp_cb)
220 | 239 | entry->resp_cb(vgdev, entry);
221 | | -
222 | | - list_del(&entry->list);
223 | | - free_vbuf(vgdev, entry);
224 | 240 | }
225 | 241 | wake_up(&vgdev->ctrlq.ack_queue);
226 | 242 |
227 | 243 | if (fence_id)
228 | 244 | virtio_gpu_fence_event_process(vgdev, fence_id);
| 245 | +
| 246 | + list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
| 247 | + if (entry->objs)
| 248 | + virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
| 249 | + list_del(&entry->list);
| 250 | + free_vbuf(vgdev, entry);
| 251 | + }
229 | 252 | }
230 | 253 |
231 | 254 | void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
.. | ..
273 | 296 | return NULL;
274 | 297 | }
275 | 298 |
276 | | - for_each_sg(sgt->sgl, sg, *sg_ents, i) {
| 299 | + for_each_sgtable_sg(sgt, sg, i) {
277 | 300 | pg = vmalloc_to_page(data);
278 | 301 | if (!pg) {
279 | 302 | sg_free_table(sgt);
.. | ..
291 | 314 | return sgt;
292 | 315 | }
293 | 316 |
294 | | -static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
295 | | - struct virtio_gpu_vbuffer *vbuf,
296 | | - struct scatterlist *vout)
297 | | - __releases(&vgdev->ctrlq.qlock)
298 | | - __acquires(&vgdev->ctrlq.qlock)
| 317 | +static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
| 318 | + struct virtio_gpu_vbuffer *vbuf,
| 319 | + struct virtio_gpu_fence *fence,
| 320 | + int elemcnt,
| 321 | + struct scatterlist **sgs,
| 322 | + int outcnt,
| 323 | + int incnt)
299 | 324 | {
300 | 325 | struct virtqueue *vq = vgdev->ctrlq.vq;
301 | | - struct scatterlist *sgs[3], vcmd, vresp;
302 | | - int outcnt = 0, incnt = 0;
303 | | - int ret;
| 326 | + int ret, idx;
304 | 327 |
305 | | - if (!vgdev->vqs_ready)
306 | | - return -ENODEV;
307 | | -
308 | | - sg_init_one(&vcmd, vbuf->buf, vbuf->size);
309 | | - sgs[outcnt + incnt] = &vcmd;
310 | | - outcnt++;
311 | | -
312 | | - if (vout) {
313 | | - sgs[outcnt + incnt] = vout;
314 | | - outcnt++;
| 328 | + if (!drm_dev_enter(vgdev->ddev, &idx)) {
| 329 | + if (fence && vbuf->objs)
| 330 | + virtio_gpu_array_unlock_resv(vbuf->objs);
| 331 | + free_vbuf(vgdev, vbuf);
| 332 | + return -1;
315 | 333 | }
316 | 334 |
317 | | - if (vbuf->resp_size) {
318 | | - sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
319 | | - sgs[outcnt + incnt] = &vresp;
320 | | - incnt++;
321 | | - }
322 | | -
323 | | -retry:
324 | | - ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
325 | | - if (ret == -ENOSPC) {
326 | | - spin_unlock(&vgdev->ctrlq.qlock);
327 | | - wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
328 | | - spin_lock(&vgdev->ctrlq.qlock);
329 | | - goto retry;
330 | | - } else {
331 | | - trace_virtio_gpu_cmd_queue(vq,
332 | | - (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
333 | | -
334 | | - virtqueue_kick(vq);
335 | | - }
336 | | -
337 | | - if (!ret)
338 | | - ret = vq->num_free;
339 | | - return ret;
340 | | -}
341 | | -
342 | | -static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
343 | | - struct virtio_gpu_vbuffer *vbuf,
344 | | - struct virtio_gpu_ctrl_hdr *hdr,
345 | | - struct virtio_gpu_fence *fence)
346 | | -{
347 | | - struct virtqueue *vq = vgdev->ctrlq.vq;
348 | | - struct scatterlist *vout = NULL, sg;
349 | | - struct sg_table *sgt = NULL;
350 | | - int rc;
351 | | - int outcnt = 0;
352 | | -
353 | | - if (vbuf->data_size) {
354 | | - if (is_vmalloc_addr(vbuf->data_buf)) {
355 | | - sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
356 | | - &outcnt);
357 | | - if (!sgt)
358 | | - return -ENOMEM;
359 | | - vout = sgt->sgl;
360 | | - } else {
361 | | - sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
362 | | - vout = &sg;
363 | | - outcnt = 1;
364 | | - }
365 | | - }
| 335 | + if (vgdev->has_indirect)
| 336 | + elemcnt = 1;
366 | 337 |
367 | 338 | again:
368 | 339 | spin_lock(&vgdev->ctrlq.qlock);
369 | 340 |
370 | | - /*
371 | | - * Make sure we have enouth space in the virtqueue. If not
372 | | - * wait here until we have.
373 | | - *
374 | | - * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
375 | | - * to wait for free space, which can result in fence ids being
376 | | - * submitted out-of-order.
377 | | - */
378 | | - if (vq->num_free < 2 + outcnt) {
| 341 | + if (vq->num_free < elemcnt) {
379 | 342 | spin_unlock(&vgdev->ctrlq.qlock);
380 | | - wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
| 343 | + virtio_gpu_notify(vgdev);
| 344 | + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
381 | 345 | goto again;
382 | 346 | }
383 | 347 |
384 | | - if (hdr && fence)
385 | | - virtio_gpu_fence_emit(vgdev, hdr, fence);
386 | | - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
| 348 | + /* now that the position of the vbuf in the virtqueue is known, we can
| 349 | + * finally set the fence id
| 350 | + */
| 351 | + if (fence) {
| 352 | + virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
| 353 | + fence);
| 354 | + if (vbuf->objs) {
| 355 | + virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
| 356 | + virtio_gpu_array_unlock_resv(vbuf->objs);
| 357 | + }
| 358 | + }
| 359 | +
| 360 | + ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
| 361 | + WARN_ON(ret);
| 362 | +
| 363 | + trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
| 364 | +
| 365 | + atomic_inc(&vgdev->pending_commands);
| 366 | +
387 | 367 | spin_unlock(&vgdev->ctrlq.qlock);
| 368 | +
| 369 | + drm_dev_exit(idx);
| 370 | + return 0;
| 371 | +}
| 372 | +
| 373 | +static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
| 374 | + struct virtio_gpu_vbuffer *vbuf,
| 375 | + struct virtio_gpu_fence *fence)
| 376 | +{
| 377 | + struct scatterlist *sgs[3], vcmd, vout, vresp;
| 378 | + struct sg_table *sgt = NULL;
| 379 | + int elemcnt = 0, outcnt = 0, incnt = 0, ret;
| 380 | +
| 381 | + /* set up vcmd */
| 382 | + sg_init_one(&vcmd, vbuf->buf, vbuf->size);
| 383 | + elemcnt++;
| 384 | + sgs[outcnt] = &vcmd;
| 385 | + outcnt++;
| 386 | +
| 387 | + /* set up vout */
| 388 | + if (vbuf->data_size) {
| 389 | + if (is_vmalloc_addr(vbuf->data_buf)) {
| 390 | + int sg_ents;
| 391 | + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
| 392 | + &sg_ents);
| 393 | + if (!sgt) {
| 394 | + if (fence && vbuf->objs)
| 395 | + virtio_gpu_array_unlock_resv(vbuf->objs);
| 396 | + return -1;
| 397 | + }
| 398 | +
| 399 | + elemcnt += sg_ents;
| 400 | + sgs[outcnt] = sgt->sgl;
| 401 | + } else {
| 402 | + sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
| 403 | + elemcnt++;
| 404 | + sgs[outcnt] = &vout;
| 405 | + }
| 406 | + outcnt++;
| 407 | + }
| 408 | +
| 409 | + /* set up vresp */
| 410 | + if (vbuf->resp_size) {
| 411 | + sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
| 412 | + elemcnt++;
| 413 | + sgs[outcnt + incnt] = &vresp;
| 414 | + incnt++;
| 415 | + }
| 416 | +
| 417 | + ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
| 418 | + incnt);
388 | 419 |
389 | 420 | if (sgt) {
390 | 421 | sg_free_table(sgt);
391 | 422 | kfree(sgt);
392 | 423 | }
| 424 | + return ret;
| 425 | +}
393 | 426 |
394 | | - return rc;
| 427 | +void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
| 428 | +{
| 429 | + bool notify;
| 430 | +
| 431 | + if (!atomic_read(&vgdev->pending_commands))
| 432 | + return;
| 433 | +
| 434 | + spin_lock(&vgdev->ctrlq.qlock);
| 435 | + atomic_set(&vgdev->pending_commands, 0);
| 436 | + notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
| 437 | + spin_unlock(&vgdev->ctrlq.qlock);
| 438 | +
| 439 | + if (notify)
| 440 | + virtqueue_notify(vgdev->ctrlq.vq);
395 | 441 | }
396 | 442 |
397 | 443 | static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
398 | 444 | struct virtio_gpu_vbuffer *vbuf)
399 | 445 | {
400 | | - return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
| 446 | + return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
401 | 447 | }
402 | 448 |
403 | | -static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
404 | | - struct virtio_gpu_vbuffer *vbuf)
| 449 | +static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
| 450 | + struct virtio_gpu_vbuffer *vbuf)
405 | 451 | {
406 | 452 | struct virtqueue *vq = vgdev->cursorq.vq;
407 | 453 | struct scatterlist *sgs[1], ccmd;
408 | | - int ret;
409 | | - int outcnt;
| 454 | + int idx, ret, outcnt;
| 455 | + bool notify;
410 | 456 |
411 | | - if (!vgdev->vqs_ready)
412 | | - return -ENODEV;
| 457 | + if (!drm_dev_enter(vgdev->ddev, &idx)) {
| 458 | + free_vbuf(vgdev, vbuf);
| 459 | + return;
| 460 | + }
413 | 461 |
414 | 462 | sg_init_one(&ccmd, vbuf->buf, vbuf->size);
415 | 463 | sgs[0] = &ccmd;
.. | ..
425 | 473 | goto retry;
426 | 474 | } else {
427 | 475 | trace_virtio_gpu_cmd_queue(vq,
428 | | - (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
| 476 | + virtio_gpu_vbuf_ctrl_hdr(vbuf));
429 | 477 |
430 | | - virtqueue_kick(vq);
| 478 | + notify = virtqueue_kick_prepare(vq);
431 | 479 | }
432 | 480 |
433 | 481 | spin_unlock(&vgdev->cursorq.qlock);
434 | 482 |
435 | | - if (!ret)
436 | | - ret = vq->num_free;
437 | | - return ret;
| 483 | + if (notify)
| 484 | + virtqueue_notify(vq);
| 485 | +
| 486 | + drm_dev_exit(idx);
438 | 487 | }
439 | 488 |
440 | 489 | /* just create gem objects for userspace and long lived objects,
.. | ..
445 | 494 | void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
446 | 495 | struct virtio_gpu_object *bo,
447 | 496 | struct virtio_gpu_object_params *params,
| 497 | + struct virtio_gpu_object_array *objs,
448 | 498 | struct virtio_gpu_fence *fence)
449 | 499 | {
450 | 500 | struct virtio_gpu_resource_create_2d *cmd_p;
.. | ..
452 | 502 |
453 | 503 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
454 | 504 | memset(cmd_p, 0, sizeof(*cmd_p));
| 505 | + vbuf->objs = objs;
455 | 506 |
456 | 507 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
457 | 508 | cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
.. | ..
459 | 510 | cmd_p->width = cpu_to_le32(params->width);
460 | 511 | cmd_p->height = cpu_to_le32(params->height);
461 | 512 |
462 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 513 | + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
463 | 514 | bo->created = true;
464 | 515 | }
465 | 516 |
| 517 | +static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
| 518 | + struct virtio_gpu_vbuffer *vbuf)
| 519 | +{
| 520 | + struct virtio_gpu_object *bo;
| 521 | +
| 522 | + bo = vbuf->resp_cb_data;
| 523 | + vbuf->resp_cb_data = NULL;
| 524 | +
| 525 | + virtio_gpu_cleanup_object(bo);
| 526 | +}
| 527 | +
466 | 528 | void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
467 | | - uint32_t resource_id)
| 529 | + struct virtio_gpu_object *bo)
468 | 530 | {
469 | 531 | struct virtio_gpu_resource_unref *cmd_p;
470 | 532 | struct virtio_gpu_vbuffer *vbuf;
| 533 | + int ret;
471 | 534 |
472 | | - cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
| 535 | + cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
| 536 | + virtio_gpu_cmd_unref_cb);
473 | 537 | memset(cmd_p, 0, sizeof(*cmd_p));
474 | 538 |
475 | 539 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
476 | | - cmd_p->resource_id = cpu_to_le32(resource_id);
| 540 | + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
477 | 541 |
478 | | - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
479 | | -}
480 | | -
481 | | -static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
482 | | - uint32_t resource_id,
483 | | - struct virtio_gpu_fence *fence)
484 | | -{
485 | | - struct virtio_gpu_resource_detach_backing *cmd_p;
486 | | - struct virtio_gpu_vbuffer *vbuf;
487 | | -
488 | | - cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
489 | | - memset(cmd_p, 0, sizeof(*cmd_p));
490 | | -
491 | | - cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
492 | | - cmd_p->resource_id = cpu_to_le32(resource_id);
493 | | -
494 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 542 | + vbuf->resp_cb_data = bo;
| 543 | + ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
| 544 | + if (ret < 0)
| 545 | + virtio_gpu_cleanup_object(bo);
495 | 546 | }
496 | 547 |
497 | 548 | void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
.. | ..
538 | 589 | }
539 | 590 |
540 | 591 | void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
541 | | - struct virtio_gpu_object *bo,
542 | 592 | uint64_t offset,
543 | | - __le32 width, __le32 height,
544 | | - __le32 x, __le32 y,
| 593 | + uint32_t width, uint32_t height,
| 594 | + uint32_t x, uint32_t y,
| 595 | + struct virtio_gpu_object_array *objs,
545 | 596 | struct virtio_gpu_fence *fence)
546 | 597 | {
| 598 | + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
547 | 599 | struct virtio_gpu_transfer_to_host_2d *cmd_p;
548 | 600 | struct virtio_gpu_vbuffer *vbuf;
549 | | - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
| 601 | + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
| 602 | + struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
550 | 603 |
551 | | - if (use_dma_api)
552 | | - dma_sync_sg_for_device(vgdev->vdev->dev.parent,
553 | | - bo->pages->sgl, bo->pages->nents,
554 | | - DMA_TO_DEVICE);
| 604 | + if (virtio_gpu_is_shmem(bo) && use_dma_api)
| 605 | + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
| 606 | + shmem->pages, DMA_TO_DEVICE);
555 | 607 |
556 | 608 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
557 | 609 | memset(cmd_p, 0, sizeof(*cmd_p));
| 610 | + vbuf->objs = objs;
558 | 611 |
559 | 612 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
560 | 613 | cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
561 | 614 | cmd_p->offset = cpu_to_le64(offset);
562 | | - cmd_p->r.width = width;
563 | | - cmd_p->r.height = height;
564 | | - cmd_p->r.x = x;
565 | | - cmd_p->r.y = y;
| 615 | + cmd_p->r.width = cpu_to_le32(width);
| 616 | + cmd_p->r.height = cpu_to_le32(height);
| 617 | + cmd_p->r.x = cpu_to_le32(x);
| 618 | + cmd_p->r.y = cpu_to_le32(y);
566 | 619 |
567 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 620 | + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
568 | 621 | }
569 | 622 |
570 | 623 | static void
.. | ..
587 | 640 | vbuf->data_buf = ents;
588 | 641 | vbuf->data_size = sizeof(*ents) * nents;
589 | 642 |
590 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 643 | + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
591 | 644 | }
592 | 645 |
593 | 646 | static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
.. | ..
887 | 940 |
888 | 941 | void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
889 | 942 | uint32_t ctx_id,
890 | | - uint32_t resource_id)
| 943 | + struct virtio_gpu_object_array *objs)
891 | 944 | {
| 945 | + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
892 | 946 | struct virtio_gpu_ctx_resource *cmd_p;
893 | 947 | struct virtio_gpu_vbuffer *vbuf;
894 | 948 |
895 | 949 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
896 | 950 | memset(cmd_p, 0, sizeof(*cmd_p));
| 951 | + vbuf->objs = objs;
897 | 952 |
898 | 953 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
899 | 954 | cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
900 | | - cmd_p->resource_id = cpu_to_le32(resource_id);
| 955 | + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
901 | 956 | virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
902 | | -
903 | 957 | }
904 | 958 |
905 | 959 | void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
906 | 960 | uint32_t ctx_id,
907 | | - uint32_t resource_id)
| 961 | + struct virtio_gpu_object_array *objs)
908 | 962 | {
| 963 | + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
909 | 964 | struct virtio_gpu_ctx_resource *cmd_p;
910 | 965 | struct virtio_gpu_vbuffer *vbuf;
911 | 966 |
912 | 967 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
913 | 968 | memset(cmd_p, 0, sizeof(*cmd_p));
| 969 | + vbuf->objs = objs;
914 | 970 |
915 | 971 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
916 | 972 | cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
917 | | - cmd_p->resource_id = cpu_to_le32(resource_id);
| 973 | + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
918 | 974 | virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
919 | 975 | }
920 | 976 |
.. | ..
922 | 978 | virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
923 | 979 | struct virtio_gpu_object *bo,
924 | 980 | struct virtio_gpu_object_params *params,
| 981 | + struct virtio_gpu_object_array *objs,
925 | 982 | struct virtio_gpu_fence *fence)
926 | 983 | {
927 | 984 | struct virtio_gpu_resource_create_3d *cmd_p;
.. | ..
929 | 986 |
930 | 987 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
931 | 988 | memset(cmd_p, 0, sizeof(*cmd_p));
| 989 | + vbuf->objs = objs;
932 | 990 |
933 | 991 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
934 | 992 | cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
.. | ..
944 | 1002 | cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
945 | 1003 | cmd_p->flags = cpu_to_le32(params->flags);
946 | 1004 |
947 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 1005 | + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
| 1006 | +
948 | 1007 | bo->created = true;
949 | 1008 | }
950 | 1009 |
951 | 1010 | void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
952 | | - struct virtio_gpu_object *bo,
953 | 1011 | uint32_t ctx_id,
954 | 1012 | uint64_t offset, uint32_t level,
955 | | - struct virtio_gpu_box *box,
| 1013 | + struct drm_virtgpu_3d_box *box,
| 1014 | + struct virtio_gpu_object_array *objs,
956 | 1015 | struct virtio_gpu_fence *fence)
957 | 1016 | {
| 1017 | + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
958 | 1018 | struct virtio_gpu_transfer_host_3d *cmd_p;
959 | 1019 | struct virtio_gpu_vbuffer *vbuf;
960 | | - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
| 1020 | + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
| 1021 | + struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
961 | 1022 |
962 | 1023 | if (use_dma_api)
963 | | - dma_sync_sg_for_device(vgdev->vdev->dev.parent,
964 | | - bo->pages->sgl, bo->pages->nents,
965 | | - DMA_TO_DEVICE);
| 1024 | + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
| 1025 | + shmem->pages, DMA_TO_DEVICE);
966 | 1026 |
967 | 1027 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
968 | 1028 | memset(cmd_p, 0, sizeof(*cmd_p));
| 1029 | +
| 1030 | + vbuf->objs = objs;
969 | 1031 |
970 | 1032 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
971 | 1033 | cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
972 | 1034 | cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
973 | | - cmd_p->box = *box;
| 1035 | + convert_to_hw_box(&cmd_p->box, box);
974 | 1036 | cmd_p->offset = cpu_to_le64(offset);
975 | 1037 | cmd_p->level = cpu_to_le32(level);
976 | 1038 |
977 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 1039 | + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
978 | 1040 | }
979 | 1041 |
980 | 1042 | void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
981 | | - uint32_t resource_id, uint32_t ctx_id,
| 1043 | + uint32_t ctx_id,
982 | 1044 | uint64_t offset, uint32_t level,
983 | | - struct virtio_gpu_box *box,
| 1045 | + struct drm_virtgpu_3d_box *box,
| 1046 | + struct virtio_gpu_object_array *objs,
984 | 1047 | struct virtio_gpu_fence *fence)
985 | 1048 | {
| 1049 | + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
986 | 1050 | struct virtio_gpu_transfer_host_3d *cmd_p;
987 | 1051 | struct virtio_gpu_vbuffer *vbuf;
988 | 1052 |
989 | 1053 | cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
990 | 1054 | memset(cmd_p, 0, sizeof(*cmd_p));
991 | 1055 |
| 1056 | + vbuf->objs = objs;
| 1057 | +
992 | 1058 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
993 | 1059 | cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
994 | | - cmd_p->resource_id = cpu_to_le32(resource_id);
995 | | - cmd_p->box = *box;
| 1060 | + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
| 1061 | + convert_to_hw_box(&cmd_p->box, box);
996 | 1062 | cmd_p->offset = cpu_to_le64(offset);
997 | 1063 | cmd_p->level = cpu_to_le32(level);
998 | 1064 |
999 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 1065 | + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1000 | 1066 | }
1001 | 1067 |
1002 | 1068 | void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
1003 | 1069 | void *data, uint32_t data_size,
1004 | | - uint32_t ctx_id, struct virtio_gpu_fence *fence)
| 1070 | + uint32_t ctx_id,
| 1071 | + struct virtio_gpu_object_array *objs,
| 1072 | + struct virtio_gpu_fence *fence)
1005 | 1073 | {
1006 | 1074 | struct virtio_gpu_cmd_submit *cmd_p;
1007 | 1075 | struct virtio_gpu_vbuffer *vbuf;
.. | ..
1011 | 1079 |
1012 | 1080 | vbuf->data_buf = data;
1013 | 1081 | vbuf->data_size = data_size;
| 1082 | + vbuf->objs = objs;
1014 | 1083 |
1015 | 1084 | cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
1016 | 1085 | cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
1017 | 1086 | cmd_p->size = cpu_to_le32(data_size);
1018 | 1087 |
1019 | | - virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
| 1088 | + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
1020 | 1089 | }
1021 | 1090 |
1022 | | -int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
1023 | | - struct virtio_gpu_object *obj,
1024 | | - struct virtio_gpu_fence *fence)
| 1091 | +void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
| 1092 | + struct virtio_gpu_object *obj,
| 1093 | + struct virtio_gpu_mem_entry *ents,
| 1094 | + unsigned int nents)
1025 | 1095 | {
1026 | | - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1027 | | - struct virtio_gpu_mem_entry *ents;
1028 | | - struct scatterlist *sg;
1029 | | - int si, nents;
1030 | | -
1031 | | - if (WARN_ON_ONCE(!obj->created))
1032 | | - return -EINVAL;
1033 | | -
1034 | | - if (!obj->pages) {
1035 | | - int ret;
1036 | | -
1037 | | - ret = virtio_gpu_object_get_sg_table(vgdev, obj);
1038 | | - if (ret)
1039 | | - return ret;
1040 | | - }
1041 | | -
1042 | | - if (use_dma_api) {
1043 | | - obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
1044 | | - obj->pages->sgl, obj->pages->nents,
1045 | | - DMA_TO_DEVICE);
1046 | | - nents = obj->mapped;
1047 | | - } else {
1048 | | - nents = obj->pages->nents;
1049 | | - }
1050 | | -
1051 | | - /* gets freed when the ring has consumed it */
1052 | | - ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
1053 | | - GFP_KERNEL);
1054 | | - if (!ents) {
1055 | | - DRM_ERROR("failed to allocate ent list\n");
1056 | | - return -ENOMEM;
1057 | | - }
1058 | | -
1059 | | - for_each_sg(obj->pages->sgl, sg, nents, si) {
1060 | | - ents[si].addr = cpu_to_le64(use_dma_api
1061 | | - ? sg_dma_address(sg)
1062 | | - : sg_phys(sg));
1063 | | - ents[si].length = cpu_to_le32(sg->length);
1064 | | - ents[si].padding = 0;
1065 | | - }
1066 | | -
1067 | 1096 | virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
1068 | | - ents, nents,
1069 | | - fence);
1070 | | - return 0;
1071 | | -}
1072 | | -
1073 | | -void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
1074 | | - struct virtio_gpu_object *obj)
1075 | | -{
1076 | | - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
1077 | | -
1078 | | - if (use_dma_api && obj->mapped) {
1079 | | - struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
1080 | | - /* detach backing and wait for the host process it ... */
1081 | | - virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
1082 | | - dma_fence_wait(&fence->f, true);
1083 | | - dma_fence_put(&fence->f);
1084 | | -
1085 | | - /* ... then tear down iommu mappings */
1086 | | - dma_unmap_sg(vgdev->vdev->dev.parent,
1087 | | - obj->pages->sgl, obj->mapped,
1088 | | - DMA_TO_DEVICE);
1089 | | - obj->mapped = 0;
1090 | | - } else {
1091 | | - virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
1092 | | - }
| 1097 | + ents, nents, NULL);
1093 | 1098 | }
1094 | 1099 |
1095 | 1100 | void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
.. | ..
1103 | 1108 | memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1104 | 1109 | virtio_gpu_queue_cursor(vgdev, vbuf);
1105 | 1110 | }
| 1111 | +
| 1112 | +static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
| 1113 | + struct virtio_gpu_vbuffer *vbuf)
| 1114 | +{
| 1115 | + struct virtio_gpu_object *obj =
| 1116 | + gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
| 1117 | + struct virtio_gpu_resp_resource_uuid *resp =
| 1118 | + (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
| 1119 | + uint32_t resp_type = le32_to_cpu(resp->hdr.type);
| 1120 | +
| 1121 | + spin_lock(&vgdev->resource_export_lock);
| 1122 | + WARN_ON(obj->uuid_state != UUID_INITIALIZING);
| 1123 | +
| 1124 | + if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
| 1125 | + obj->uuid_state == UUID_INITIALIZING) {
| 1126 | + memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
| 1127 | + obj->uuid_state = UUID_INITIALIZED;
| 1128 | + } else {
| 1129 | + obj->uuid_state = UUID_INITIALIZATION_FAILED;
| 1130 | + }
| 1131 | + spin_unlock(&vgdev->resource_export_lock);
| 1132 | +
| 1133 | + wake_up_all(&vgdev->resp_wq);
| 1134 | +}
| 1135 | +
| 1136 | +int
| 1137 | +virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
| 1138 | + struct virtio_gpu_object_array *objs)
| 1139 | +{
| 1140 | + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
| 1141 | + struct virtio_gpu_resource_assign_uuid *cmd_p;
| 1142 | + struct virtio_gpu_vbuffer *vbuf;
| 1143 | + struct virtio_gpu_resp_resource_uuid *resp_buf;
| 1144 | +
| 1145 | + resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
| 1146 | + if (!resp_buf) {
| 1147 | + spin_lock(&vgdev->resource_export_lock);
| 1148 | + bo->uuid_state = UUID_INITIALIZATION_FAILED;
| 1149 | + spin_unlock(&vgdev->resource_export_lock);
| 1150 | + virtio_gpu_array_put_free(objs);
| 1151 | + return -ENOMEM;
| 1152 | + }
| 1153 | +
| 1154 | + cmd_p = virtio_gpu_alloc_cmd_resp
| 1155 | + (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
| 1156 | + sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
| 1157 | + memset(cmd_p, 0, sizeof(*cmd_p));
| 1158 | +
| 1159 | + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
| 1160 | + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
| 1161 | +
| 1162 | + vbuf->objs = objs;
| 1163 | + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
| 1164 | + return 0;
| 1165 | +}
---|