```diff
@@ -23,57 +23,36 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+
 #include "virtgpu_drv.h"
 
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
-
-	if (obj)
-		virtio_gpu_object_unref(&obj);
-}
-
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
-			struct virtio_gpu_object_params *params,
-			struct virtio_gpu_fence *fence)
+static int virtio_gpu_gem_create(struct drm_file *file,
+				 struct drm_device *dev,
+				 struct virtio_gpu_object_params *params,
+				 struct drm_gem_object **obj_p,
+				 uint32_t *handle_p)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_object *obj;
 	int ret;
-
-	ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
-	if (ret)
-		return ERR_PTR(ret);
-
-	return obj;
-}
-
-int virtio_gpu_gem_create(struct drm_file *file,
-			  struct drm_device *dev,
-			  struct virtio_gpu_object_params *params,
-			  struct drm_gem_object **obj_p,
-			  uint32_t *handle_p)
-{
-	struct virtio_gpu_object *obj;
-	int ret;
 	u32 handle;
 
-	obj = virtio_gpu_alloc_object(dev, params, NULL);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+	if (ret < 0)
+		return ret;
 
-	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
 	if (ret) {
-		drm_gem_object_release(&obj->gem_base);
+		drm_gem_object_release(&obj->base.base);
 		return ret;
 	}
 
-	*obj_p = &obj->gem_base;
+	*obj_p = &obj->base.base;
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(&obj->gem_base);
+	drm_gem_object_put(&obj->base.base);
 
 	*handle_p = handle;
 	return 0;
```
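The repeated `&obj->gem_base` → `&obj->base.base` substitutions track the driver's conversion to the GEM shmem helpers: the DRM GEM object is no longer embedded directly in `struct virtio_gpu_object`, but sits one level down inside a `struct drm_gem_shmem_object`. A minimal sketch of the layering, with the field lists trimmed to what this hunk touches (see `virtgpu_drv.h` and `include/drm/drm_gem_shmem_helper.h` for the full definitions):

```c
/* Abridged sketch of the embedding; most fields omitted. */
struct drm_gem_shmem_object {
	struct drm_gem_object base;       /* reached as obj->base.base */
	/* ... */
};

struct virtio_gpu_object {
	struct drm_gem_shmem_object base; /* was: struct drm_gem_object gem_base */
	uint32_t hw_res_handle;
	/* ... */
};
```

The `drm_gem_object_put_unlocked()` → `drm_gem_object_put()` change is just the core rename; the reference being dropped is still the allocation reference, which the freshly created handle now holds.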
```diff
@@ -88,7 +67,10 @@ virtio_gpu_mode_dumb_create
 	int ret;
 	uint32_t pitch;
 
-	pitch = args->width * ((args->bpp + 1) / 8);
+	if (args->bpp != 32)
+		return -EINVAL;
+
+	pitch = args->width * 4;
 	args->size = pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
 
```
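With the new check, dumb-buffer creation accepts only 32 bits per pixel, so the pitch computation collapses to `width * 4` and the old rounding expression (which did not round up correctly for all depths) goes away. A userspace sketch of the resulting contract via the standard libdrm dumb-buffer ioctl; `create_dumb_fb()` and the 1024x768 numbers are illustrative only:

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>	/* drmIoctl(), DRM_IOCTL_MODE_CREATE_DUMB */

/* Hypothetical helper: fd is an open DRM node backed by virtio-gpu. */
static int create_dumb_fb(int fd, uint32_t *handle,
			  uint32_t *pitch, uint64_t *size)
{
	struct drm_mode_create_dumb req;

	memset(&req, 0, sizeof(req));
	req.width = 1024;
	req.height = 768;
	req.bpp = 32;		/* anything else now fails with -EINVAL */

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &req) < 0)
		return -1;

	*handle = req.handle;	/* GEM handle for the new buffer */
	*pitch = req.pitch;	/* 1024 * 4 = 4096 bytes per row */
	*size = req.size;	/* ALIGN(4096 * 768, PAGE_SIZE) = 3145728 */
	return 0;
}
```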
```diff
@@ -114,15 +96,13 @@ virtio_gpu_mode_dumb_mmap
 			      uint32_t handle, uint64_t *offset_p)
 {
 	struct drm_gem_object *gobj;
-	struct virtio_gpu_object *obj;
 
 	BUG_ON(!offset_p);
 	gobj = drm_gem_object_lookup(file_priv, handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	obj = gem_to_virtio_gpu_obj(gobj);
-	*offset_p = virtio_gpu_object_mmap_offset(obj);
-	drm_gem_object_put_unlocked(gobj);
+	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
+	drm_gem_object_put(gobj);
 	return 0;
 }
 
```
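`virtio_gpu_mode_dumb_mmap()` now reads the fake mmap offset straight from the GEM object's `vma_node` rather than through a driver-private wrapper (`drm_vma_node_offset_addr()` is effectively the primitive the old helper wrapped). Nothing changes for userspace, which still obtains the offset via `DRM_IOCTL_MODE_MAP_DUMB` and hands it to `mmap()` on the DRM fd. A standard usage sketch; `map_dumb()` is a hypothetical helper:

```c
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>	/* drmIoctl(), DRM_IOCTL_MODE_MAP_DUMB */

/* Hypothetical helper: maps a dumb buffer created on fd. */
static void *map_dumb(int fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) < 0)
		return MAP_FAILED;

	/* mreq.offset is the fake offset from the object's vma_node */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, mreq.offset);
}
```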
```diff
@@ -131,19 +111,25 @@ virtio_gpu_gem_object_open
 {
 	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
-	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
-	int r;
+	struct virtio_gpu_object_array *objs;
 
 	if (!vgdev->has_virgl_3d)
-		return 0;
+		goto out_notify;
 
-	r = virtio_gpu_object_reserve(qobj, false);
-	if (r)
-		return r;
+	/* the context might still be missing when the first ioctl is
+	 * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE
+	 */
+	virtio_gpu_create_context(obj->dev, file);
+
+	objs = virtio_gpu_array_alloc(1);
+	if (!objs)
+		return -ENOMEM;
+	virtio_gpu_array_add_obj(objs, obj);
 
 	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
-					       qobj->hw_res_handle);
-	virtio_gpu_object_unreserve(qobj);
+					       objs);
+out_notify:
+	virtio_gpu_notify(vgdev);
 	return 0;
 }
 
```
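`virtio_gpu_gem_object_open()` now creates the per-file context on demand (the first-ioctl corner cases are called out in the comment), wraps the object in a one-entry `virtio_gpu_object_array`, and kicks the host exactly once via `virtio_gpu_notify()`, reaching `out_notify` even on the non-3D path. A sketch of the same one-object pattern for an arbitrary command follows; `send_one_obj_cmd()` is a hypothetical stand-in, and it assumes (as the open/close paths here rely on) that the `virtio_gpu_cmd_*` helper takes ownership of the array and drops the references once the host is done:

```c
/* Hypothetical one-object command built on the new array helpers. */
static int send_one_obj_cmd(struct virtio_gpu_device *vgdev,
			    struct drm_gem_object *obj, u32 ctx_id)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);	/* room for one entry */
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);	/* takes a GEM reference */

	/* ownership of objs passes to the command; the references are
	 * dropped when the command completes (assumption, see above) */
	virtio_gpu_cmd_context_attach_resource(vgdev, ctx_id, objs);
	virtio_gpu_notify(vgdev);		/* one virtqueue kick */
	return 0;
}
```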
```diff
@@ -152,17 +138,136 @@ virtio_gpu_gem_object_close
 {
 	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
-	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
-	int r;
+	struct virtio_gpu_object_array *objs;
 
 	if (!vgdev->has_virgl_3d)
 		return;
 
-	r = virtio_gpu_object_reserve(qobj, false);
-	if (r)
+	objs = virtio_gpu_array_alloc(1);
+	if (!objs)
 		return;
+	virtio_gpu_array_add_obj(objs, obj);
 
 	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
-					       qobj->hw_res_handle);
-	virtio_gpu_object_unreserve(qobj);
+					       objs);
+	virtio_gpu_notify(vgdev);
+}
+
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
+{
+	struct virtio_gpu_object_array *objs;
+
+	objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
+	if (!objs)
+		return NULL;
+
+	objs->nents = 0;
+	objs->total = nents;
+	return objs;
+}
+
+static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
+{
+	kfree(objs);
+}
+
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
+{
+	struct virtio_gpu_object_array *objs;
+	u32 i;
+
+	objs = virtio_gpu_array_alloc(nents);
+	if (!objs)
+		return NULL;
+
+	for (i = 0; i < nents; i++) {
+		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
+		if (!objs->objs[i]) {
+			objs->nents = i;
+			virtio_gpu_array_put_free(objs);
+			return NULL;
+		}
+	}
+	objs->nents = i;
+	return objs;
+}
+
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+			      struct drm_gem_object *obj)
+{
+	if (WARN_ON_ONCE(objs->nents == objs->total))
+		return;
+
+	drm_gem_object_get(obj);
+	objs->objs[objs->nents] = obj;
+	objs->nents++;
+}
+
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
+{
+	int ret;
+
+	if (objs->nents == 1) {
+		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
+	} else {
+		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
+						&objs->ticket);
+	}
+	return ret;
+}
+
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
+{
+	if (objs->nents == 1) {
+		dma_resv_unlock(objs->objs[0]->resv);
+	} else {
+		drm_gem_unlock_reservations(objs->objs, objs->nents,
+					    &objs->ticket);
+	}
+}
+
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+				struct dma_fence *fence)
+{
+	int i;
+
+	for (i = 0; i < objs->nents; i++)
+		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+}
+
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+{
+	u32 i;
+
+	for (i = 0; i < objs->nents; i++)
+		drm_gem_object_put(objs->objs[i]);
+	virtio_gpu_array_free(objs);
+}
+
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+				       struct virtio_gpu_object_array *objs)
+{
+	spin_lock(&vgdev->obj_free_lock);
+	list_add_tail(&objs->next, &vgdev->obj_free_list);
+	spin_unlock(&vgdev->obj_free_lock);
+	schedule_work(&vgdev->obj_free_work);
+}
+
+void virtio_gpu_array_put_free_work(struct work_struct *work)
+{
+	struct virtio_gpu_device *vgdev =
+		container_of(work, struct virtio_gpu_device, obj_free_work);
+	struct virtio_gpu_object_array *objs;
+
+	spin_lock(&vgdev->obj_free_lock);
+	while (!list_empty(&vgdev->obj_free_list)) {
+		objs = list_first_entry(&vgdev->obj_free_list,
+					struct virtio_gpu_object_array, next);
+		list_del(&objs->next);
+		spin_unlock(&vgdev->obj_free_lock);
+		virtio_gpu_array_put_free(objs);
+		spin_lock(&vgdev->obj_free_lock);
+	}
+	spin_unlock(&vgdev->obj_free_lock);
 }
```
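The new helpers give every submission path one lifecycle for a set of buffers: resolve handles (taking a reference each), lock all reservation objects, publish a fence, unlock, and finally drop the references, with `virtio_gpu_array_put_free_delayed()` available when the final put happens in a context that must not sleep, such as virtqueue completion handling. A hypothetical caller wiring the pieces together; `submit_objs()` is a stand-in, and a real path would queue a command between fencing and the final put:

```c
/* Hypothetical submission path exercising the array lifecycle. */
static int submit_objs(struct virtio_gpu_device *vgdev,
		       struct drm_file *file, u32 *handles, u32 nents,
		       struct dma_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	/* one reference per handle; NULL if any lookup fails */
	objs = virtio_gpu_array_from_handles(file, handles, nents);
	if (!objs)
		return -ENOENT;

	/* single-object fast path skips the ww-mutex acquire context */
	ret = virtio_gpu_array_lock_resv(objs);
	if (ret) {
		virtio_gpu_array_put_free(objs);
		return ret;
	}

	virtio_gpu_array_add_fence(objs, fence); /* exclusive fence on each resv */
	virtio_gpu_array_unlock_resv(objs);

	/* hand the puts to the obj_free_work worker */
	virtio_gpu_array_put_free_delayed(vgdev, objs);
	return 0;
}
```

This assumes the device-side plumbing the delayed-free helpers depend on, i.e. that driver initialization has run `INIT_WORK(&vgdev->obj_free_work, virtio_gpu_array_put_free_work)`, `INIT_LIST_HEAD(&vgdev->obj_free_list)` and `spin_lock_init(&vgdev->obj_free_lock)`, and that `struct virtio_gpu_object_array` carries the `next` list head used for queuing.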