@@ -22,53 +22,107 @@
  * Authors: Andreas Pokorny
  */

+#include <drm/drm_prime.h>
+#include <linux/virtio_dma_buf.h>
+
 #include "virtgpu_drv.h"

-/* Empty Implementations as there should not be any other driver for a virtual
- * device that might share buffers with virtgpu
- */
-
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
+static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
+                                   uuid_t *uuid)
 {
+        struct drm_gem_object *obj = buf->priv;
         struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+        struct virtio_gpu_device *vgdev = obj->dev->dev_private;

-        if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
-                /* should not happen */
-                return ERR_PTR(-EINVAL);
+        wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
+        if (bo->uuid_state != UUID_INITIALIZED)
+                return -ENODEV;

-        return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
-                                     bo->tbo.ttm->num_pages);
+        uuid_copy(uuid, &bo->uuid);
+
+        return 0;
+}
+
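The new get_uuid callback sleeps on vgdev->resp_wq until the host's reply to the assign-uuid command has settled bo->uuid_state, and returns -ENODEV if no UUID ever materialized. On the consuming side, an importing virtio driver reaches this callback through virtio_dma_buf_get_uuid() from <linux/virtio_dma_buf.h>. A minimal importer-side sketch (the example function and the host message are illustrative; only virtio_dma_buf_get_uuid() is the real entry point):

/*
 * Importer-side sketch, not part of this patch. virtio_dma_buf_get_uuid()
 * dispatches to the exporter's get_uuid hook, i.e. to
 * virtgpu_virtio_get_uuid() above.
 */
#include <linux/virtio_dma_buf.h>

static int example_import_exported_resource(struct dma_buf *buf)
{
        uuid_t uuid;
        int ret;

        ret = virtio_dma_buf_get_uuid(buf, &uuid);
        if (ret)
                return ret;     /* -ENODEV: no UUID was assigned */

        /* ...hand the UUID to the host in a device-specific message... */
        return 0;
}

Back in the patch, the callback is wired into a virtio_dma_buf_ops table: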
+const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
+        .ops = {
+                .cache_sgt_mapping = true,
+                .attach = virtio_dma_buf_attach,
+                .detach = drm_gem_map_detach,
+                .map_dma_buf = drm_gem_map_dma_buf,
+                .unmap_dma_buf = drm_gem_unmap_dma_buf,
+                .release = drm_gem_dmabuf_release,
+                .mmap = drm_gem_dmabuf_mmap,
+                .vmap = drm_gem_dmabuf_vmap,
+                .vunmap = drm_gem_dmabuf_vunmap,
+        },
+        .device_attach = drm_gem_map_attach,
+        .get_uuid = virtgpu_virtio_get_uuid,
+};
+
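struct virtio_dma_buf_ops wraps an ordinary dma_buf_ops, and the wrapper is what lets importers recognize a virtio-exported buffer: .attach is pinned to virtio_dma_buf_attach(), while the driver's own attach logic moves to .device_attach (here the stock DRM helper). The core helper's dispatch, paraphrased rather than quoted from drivers/virtio/virtio_dma_buf.c, is roughly:

/*
 * Rough shape of the core dispatch (paraphrased): recover the wrapper
 * from the embedded ops and forward to the driver's device_attach.
 */
int virtio_dma_buf_attach(struct dma_buf *dma_buf,
                          struct dma_buf_attachment *attach)
{
        const struct virtio_dma_buf_ops *ops =
                container_of(dma_buf->ops,
                             const struct virtio_dma_buf_ops, ops);

        if (ops->device_attach)
                return ops->device_attach(dma_buf, attach);

        return 0;
}

The export hook then ties UUID assignment into dma-buf creation: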
+struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
+                                         int flags)
+{
+        struct dma_buf *buf;
+        struct drm_device *dev = obj->dev;
+        struct virtio_gpu_device *vgdev = dev->dev_private;
+        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+        struct virtio_gpu_object_array *objs;
+        int ret = 0;
+        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+        if (vgdev->has_resource_assign_uuid) {
+                objs = virtio_gpu_array_alloc(1);
+                if (!objs)
+                        return ERR_PTR(-ENOMEM);
+                virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+                ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+                if (ret)
+                        return ERR_PTR(ret);
+                virtio_gpu_notify(vgdev);
+        } else {
+                bo->uuid_state = UUID_INITIALIZATION_FAILED;
+        }
+
+        exp_info.ops = &virtgpu_dmabuf_ops.ops;
+        exp_info.size = obj->size;
+        exp_info.flags = flags;
+        exp_info.priv = obj;
+        exp_info.resv = obj->resv;
+
+        buf = virtio_dma_buf_export(&exp_info);
+        if (IS_ERR(buf))
+                return buf;
+
+        drm_dev_get(dev);
+        drm_gem_object_get(obj);
+
+        return buf;
+}
+
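The export path is deliberately asynchronous: when the host advertises resource-UUID assignment, virtio_gpu_cmd_resource_assign_uuid() only queues the command and virtio_gpu_notify() kicks the virtqueue; nothing here waits for the reply. The handshake is completed by a response handler elsewhere in this series (presumably alongside the other response callbacks in virtgpu_vq.c), which is what virtgpu_virtio_get_uuid() sleeps on. A sketch of that completion, with illustrative names:

/*
 * Sketch of the response-side completion (illustrative; the actual
 * handler parses the virtio response and lives outside this file).
 */
#include "virtgpu_drv.h"

static void example_resource_uuid_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_object *bo,
                                     const uuid_t *resp_uuid, bool ok)
{
        if (ok) {
                uuid_copy(&bo->uuid, resp_uuid);
                bo->uuid_state = UUID_INITIALIZED;
        } else {
                bo->uuid_state = UUID_INITIALIZATION_FAILED;
        }
        wake_up_all(&vgdev->resp_wq);   /* wakes the wait_event() above */
}

The import side follows: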
+struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
+                                                struct dma_buf *buf)
+{
+        struct drm_gem_object *obj;
+
+        if (buf->ops == &virtgpu_dmabuf_ops.ops) {
+                obj = buf->priv;
+                if (obj->dev == dev) {
+                        /*
+                         * Importing a dma-buf exported from our own GEM
+                         * object takes a reference on the GEM object itself
+                         * rather than on the f_count of the dma-buf.
+                         */
+                        drm_gem_object_get(obj);
+                        return obj;
+                }
+        }
+
+        return drm_gem_prime_import(dev, buf);
 }

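Import gains a fast path for virtgpu's own buffers: if the dma-buf's ops match virtgpu_dmabuf_ops, the underlying GEM object is reused directly and only its reference count is bumped; anything else falls through to drm_gem_prime_import(). From userspace, a same-device round trip through PRIME exercises exactly this path (illustrative libdrm usage; drmPrimeHandleToFD()/drmPrimeFDToHandle() are the real libdrm calls):

/* Illustrative userspace round trip with libdrm. */
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

int example_prime_roundtrip(int drm_fd, uint32_t handle)
{
        uint32_t handle2;
        int buf_fd, ret;

        ret = drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &buf_fd);
        if (ret)
                return ret;

        ret = drmPrimeFDToHandle(drm_fd, buf_fd, &handle2);
        close(buf_fd);
        /* handle2 == handle: the kernel returned the same GEM object. */
        return ret;
}

Finally, the remaining stub keeps refusing foreign sg-tables, and the TTM-era vmap/vunmap/mmap wrappers are dropped in favor of the generic drm_gem_dmabuf_* helpers wired into the ops table above: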
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
         struct drm_device *dev, struct dma_buf_attachment *attach,
         struct sg_table *table)
 {
-        WARN_ONCE(1, "not implemented");
         return ERR_PTR(-ENODEV);
-}
-
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
-{
-        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-        int ret;
-
-        ret = virtio_gpu_object_kmap(bo);
-        if (ret)
-                return NULL;
-        return bo->vmap;
-}
-
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
-        virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj));
-}
-
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-                           struct vm_area_struct *vma)
-{
-        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
-        bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
-        return drm_gem_prime_mmap(obj, vma);
 }