```diff
@@ -23,9 +23,8 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
-
-#include <drm/ttm/ttm_execbuf_util.h>
 
 #include "virtgpu_drv.h"
 
@@ -63,37 +62,139 @@
         }
 }
 
-static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
-        struct virtio_gpu_object *bo;
-        struct virtio_gpu_device *vgdev;
+        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-        bo = container_of(tbo, struct virtio_gpu_object, tbo);
-        vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
-        if (bo->created)
-                virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
-        if (bo->pages)
-                virtio_gpu_object_free_sg_table(bo);
-        drm_gem_object_release(&bo->gem_base);
         virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-        kfree(bo);
+        if (virtio_gpu_is_shmem(bo)) {
+                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+                if (shmem->pages) {
+                        if (shmem->mapped) {
+                                dma_unmap_sgtable(vgdev->vdev->dev.parent,
+                                                  shmem->pages, DMA_TO_DEVICE, 0);
+                                shmem->mapped = 0;
+                        }
+
+                        sg_free_table(shmem->pages);
+                        kfree(shmem->pages);
+                        shmem->pages = NULL;
+                        drm_gem_shmem_unpin(&bo->base.base);
+                }
+
+                drm_gem_shmem_free_object(&bo->base.base);
+        }
 }
```
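The pointer chain `bo->base.base` and the `to_virtio_gpu_shmem()` downcast only make sense against the new object layout this patch introduces in `virtgpu_drv.h`, which this hunk does not show. A minimal sketch, reconstructed from the usage above; any field beyond the ones actually dereferenced here is an assumption:

```c
/*
 * Assumed object nesting, reconstructed from usage: a shmem BO wraps a
 * virtio_gpu_object, which wraps drm_gem_shmem_object, which wraps
 * drm_gem_object -- hence bo->base.base.dev above.
 */
struct virtio_gpu_object {
        struct drm_gem_shmem_object base; /* bo->base.base is the drm_gem_object */
        uint32_t hw_res_handle;           /* host-side resource id */
        bool dumb;
        bool created;                     /* host resource was created */
};

struct virtio_gpu_object_shmem {
        struct virtio_gpu_object base;
        struct sg_table *pages;           /* torn down in virtio_gpu_cleanup_object() */
        uint32_t mapped;                  /* number of DMA-mapped entries */
};

/* to_virtio_gpu_shmem() is presumably the usual container_of() wrapper: */
#define to_virtio_gpu_shmem(bo) \
        container_of((bo), struct virtio_gpu_object_shmem, base)
```

The diff continues below with the new free callback that replaces the TTM placement setup.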
```diff
 
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
 {
-        u32 c = 1;
+        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-        vgbo->placement.placement = &vgbo->placement_code;
-        vgbo->placement.busy_placement = &vgbo->placement_code;
-        vgbo->placement_code.fpfn = 0;
-        vgbo->placement_code.lpfn = 0;
-        vgbo->placement_code.flags =
-                TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
-                TTM_PL_FLAG_NO_EVICT;
-        vgbo->placement.num_placement = c;
-        vgbo->placement.num_busy_placement = c;
+        if (bo->created) {
+                virtio_gpu_cmd_unref_resource(vgdev, bo);
+                virtio_gpu_notify(vgdev);
+                /* completion handler calls virtio_gpu_cleanup_object() */
+                return;
+        }
+        virtio_gpu_cleanup_object(bo);
+}
```
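Note the early `return` for `created` resources: the BO must stay alive until the host has processed the unref command, so the actual teardown is deferred to that command's completion handler, per the comment. A hypothetical sketch of such a handler; only the final call into `virtio_gpu_cleanup_object()` is implied by this patch, the vbuffer plumbing and names here are assumptions:

```c
/*
 * Hypothetical completion handler for the unref command queued by
 * virtio_gpu_cmd_unref_resource() above.
 */
static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_object *bo = vbuf->resp_cb_data;

        /* Host no longer references the resource; guest side may go away. */
        virtio_gpu_cleanup_object(bo);
}
```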
```diff
 
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
+        .free = virtio_gpu_free_object,
+        .open = virtio_gpu_gem_object_open,
+        .close = virtio_gpu_gem_object_close,
+
+        .print_info = drm_gem_shmem_print_info,
+        .pin = drm_gem_shmem_pin,
+        .unpin = drm_gem_shmem_unpin,
+        .get_sg_table = drm_gem_shmem_get_sg_table,
+        .vmap = drm_gem_shmem_vmap,
+        .vunmap = drm_gem_shmem_vunmap,
+        .mmap = drm_gem_shmem_mmap,
+};
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+        return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+                                                size_t size)
+{
+        struct virtio_gpu_object_shmem *shmem;
+        struct drm_gem_shmem_object *dshmem;
+
+        shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+        if (!shmem)
+                return NULL;
+
+        dshmem = &shmem->base.base;
+        dshmem->base.funcs = &virtio_gpu_shmem_funcs;
+        dshmem->map_cached = true;
+        return &dshmem->base;
+}
```
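`virtio_gpu_create_object()` is the allocation hook that lets `drm_gem_shmem_create()` hand back the driver's larger wrapper struct instead of a bare `drm_gem_shmem_object`; note that it also installs `virtio_gpu_shmem_funcs`, which is what makes the funcs-pointer comparison in `virtio_gpu_is_shmem()` work as a type tag. A sketch of the expected wiring, assuming the standard `gem_create_object` callback; the driver struct shown is illustrative, not part of this hunk:

```c
/* Illustrative wiring only: drm_gem_shmem_create() consults this hook
 * to allocate the object, which is how the code above gets called. */
static struct drm_driver virtio_gpu_driver_sketch = {
        /* ... other driver callbacks elided ... */
        .gem_create_object = virtio_gpu_create_object,
};
```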
```diff
+
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+                                        struct virtio_gpu_object *bo,
+                                        struct virtio_gpu_mem_entry **ents,
+                                        unsigned int *nents)
+{
+        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+        struct scatterlist *sg;
+        int si, ret;
+
+        ret = drm_gem_shmem_pin(&bo->base.base);
+        if (ret < 0)
+                return -EINVAL;
+
+        /*
+         * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
+         * drm_gem_shmem_get_pages_sgt because virtio has its own set of
+         * dma-ops. This is discouraged for other drivers, but should be fine
+         * since virtio_gpu doesn't support dma-buf import from other devices.
+         */
+        shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+        if (!shmem->pages) {
+                drm_gem_shmem_unpin(&bo->base.base);
+                return -EINVAL;
+        }
+
+        if (use_dma_api) {
+                ret = dma_map_sgtable(vgdev->vdev->dev.parent,
+                                      shmem->pages, DMA_TO_DEVICE, 0);
+                if (ret)
+                        return ret;
+                *nents = shmem->mapped = shmem->pages->nents;
+        } else {
+                *nents = shmem->pages->orig_nents;
+        }
+
+        *ents = kvmalloc_array(*nents,
+                               sizeof(struct virtio_gpu_mem_entry),
+                               GFP_KERNEL);
+        if (!(*ents)) {
+                DRM_ERROR("failed to allocate ent list\n");
+                return -ENOMEM;
+        }
+
+        if (use_dma_api) {
+                for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+                        (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
+                        (*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
+                        (*ents)[si].padding = 0;
+                }
+        } else {
+                for_each_sgtable_sg(shmem->pages, sg, si) {
+                        (*ents)[si].addr = cpu_to_le64(sg_phys(sg));
+                        (*ents)[si].length = cpu_to_le32(sg->length);
+                        (*ents)[si].padding = 0;
+                }
+        }
+
+        return 0;
 }
```
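The entry array assembled above is the backing-store list handed to the host. The `cpu_to_le64`/`cpu_to_le32` conversions and the explicit `padding` field match the virtio-gpu wire structure:

```c
/* Wire format filled in by virtio_gpu_object_shmem_init(); fields are
 * little-endian on the virtqueue, per the virtio-gpu specification. */
struct virtio_gpu_mem_entry {
        __le64 addr;    /* DMA or physical address of one chunk */
        __le32 length;  /* chunk length in bytes */
        __le32 padding;
};
```

With the DMA API in use, `*nents` counts the mapped (possibly coalesced) segments; otherwise it counts the original scatterlist entries, which is exactly the distinction the two fill loops encode.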
```diff
 
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
@@ -101,151 +202,63 @@
                              struct virtio_gpu_object **bo_ptr,
                              struct virtio_gpu_fence *fence)
 {
+        struct virtio_gpu_object_array *objs = NULL;
+        struct drm_gem_shmem_object *shmem_obj;
         struct virtio_gpu_object *bo;
-        size_t acc_size;
+        struct virtio_gpu_mem_entry *ents;
+        unsigned int nents;
         int ret;
 
         *bo_ptr = NULL;
 
-        acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
-                                       sizeof(struct virtio_gpu_object));
-
-        bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
-        if (bo == NULL)
-                return -ENOMEM;
-        ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
-        if (ret < 0) {
-                kfree(bo);
-                return ret;
-        }
         params->size = roundup(params->size, PAGE_SIZE);
-        ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
-        if (ret != 0) {
-                virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-                kfree(bo);
-                return ret;
-        }
+        shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
+        if (IS_ERR(shmem_obj))
+                return PTR_ERR(shmem_obj);
+        bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
+
+        ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+        if (ret < 0)
+                goto err_free_gem;
+
         bo->dumb = params->dumb;
 
-        if (params->virgl) {
-                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
-        } else {
-                virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
-        }
-
-        virtio_gpu_init_ttm_placement(bo);
-        ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
-                          ttm_bo_type_device, &bo->placement, 0,
-                          true, acc_size, NULL, NULL,
-                          &virtio_gpu_ttm_bo_destroy);
-        /* ttm_bo_init failure will call the destroy */
-        if (ret != 0)
-                return ret;
-
         if (fence) {
-                struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
-                struct list_head validate_list;
-                struct ttm_validate_buffer mainbuf;
-                struct ww_acquire_ctx ticket;
-                unsigned long irq_flags;
-                bool signaled;
+                ret = -ENOMEM;
+                objs = virtio_gpu_array_alloc(1);
+                if (!objs)
+                        goto err_put_id;
+                virtio_gpu_array_add_obj(objs, &bo->base.base);
 
-                INIT_LIST_HEAD(&validate_list);
-                memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
-                /* use a gem reference since unref list undoes them */
-                drm_gem_object_get(&bo->gem_base);
-                mainbuf.bo = &bo->tbo;
-                list_add(&mainbuf.head, &validate_list);
-
-                ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-                if (ret == 0) {
-                        spin_lock_irqsave(&drv->lock, irq_flags);
-                        signaled = virtio_fence_signaled(&fence->f);
-                        if (!signaled)
-                                /* virtio create command still in flight */
-                                ttm_eu_fence_buffer_objects(&ticket, &validate_list,
-                                                            &fence->f);
-                        spin_unlock_irqrestore(&drv->lock, irq_flags);
-                        if (signaled)
-                                /* virtio create command finished */
-                                ttm_eu_backoff_reservation(&ticket, &validate_list);
-                }
-                virtio_gpu_unref_list(&validate_list);
+                ret = virtio_gpu_array_lock_resv(objs);
+                if (ret != 0)
+                        goto err_put_objs;
         }
+
+        if (params->virgl) {
+                virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+                                                  objs, fence);
+        } else {
+                virtio_gpu_cmd_create_resource(vgdev, bo, params,
+                                               objs, fence);
+        }
+
+        ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+        if (ret != 0) {
+                virtio_gpu_free_object(&shmem_obj->base);
+                return ret;
+        }
+
+        virtio_gpu_object_attach(vgdev, bo, ents, nents);
 
         *bo_ptr = bo;
         return 0;
+
+err_put_objs:
+        virtio_gpu_array_put_free(objs);
+err_put_id:
+        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+err_free_gem:
+        drm_gem_shmem_free_object(&shmem_obj->base);
+        return ret;
 }
```
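For orientation, a hypothetical caller of the rewritten create path; only `size` and `dumb` on `virtio_gpu_object_params` are evidenced by this hunk, the rest of the sketch is assumption:

```c
/* Hypothetical dumb-buffer creation path, as a usage sketch. */
static int example_create_dumb(struct virtio_gpu_device *vgdev,
                               size_t size,
                               struct virtio_gpu_object **bo)
{
        struct virtio_gpu_object_params params = {
                .size = size,   /* rounded up to PAGE_SIZE internally */
                .dumb = true,   /* 2D resource; params->virgl left false */
        };

        /* No fence: creation is not ordered against other commands here. */
        return virtio_gpu_object_create(vgdev, &params, bo, NULL);
}
```

The remainder of the patch deletes the TTM-era helpers, which the shmem helpers now subsume.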
```diff
-
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
-{
-        bo->vmap = NULL;
-        ttm_bo_kunmap(&bo->kmap);
-}
-
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
-{
-        bool is_iomem;
-        int r;
-
-        WARN_ON(bo->vmap);
-
-        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
-        if (r)
-                return r;
-        bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
-        return 0;
-}
```
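No direct replacement is needed for the kmap helpers: the `.vmap`/`.vunmap` entries in `virtio_gpu_shmem_funcs` route mapping through the generic shmem helpers. A sketch of what a former `virtio_gpu_object_kmap()` caller would do instead, assuming the shmem helper signatures of this kernel generation (treat the exact return/error contract as an assumption):

```c
/* Sketch only: maps/unmaps a BO via the generic shmem helpers that the
 * funcs table above installs. */
static void *example_map_bo(struct virtio_gpu_object *bo)
{
        return drm_gem_shmem_vmap(&bo->base.base);
}

static void example_unmap_bo(struct virtio_gpu_object *bo, void *vaddr)
{
        drm_gem_shmem_vunmap(&bo->base.base, vaddr);
}
```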
```diff
-
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
-                                   struct virtio_gpu_object *bo)
-{
-        int ret;
-        struct page **pages = bo->tbo.ttm->pages;
-        int nr_pages = bo->tbo.num_pages;
-        struct ttm_operation_ctx ctx = {
-                .interruptible = false,
-                .no_wait_gpu = false
-        };
-
-        /* wtf swapping */
-        if (bo->pages)
-                return 0;
-
-        if (bo->tbo.ttm->state == tt_unpopulated)
-                bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
-        bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-        if (!bo->pages)
-                goto out;
-
-        ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
-                                        nr_pages << PAGE_SHIFT, GFP_KERNEL);
-        if (ret)
-                goto out;
-        return 0;
-out:
-        kfree(bo->pages);
-        bo->pages = NULL;
-        return -ENOMEM;
-}
-
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
-{
-        sg_free_table(bo->pages);
-        kfree(bo->pages);
-        bo->pages = NULL;
-}
-
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
-{
-        int r;
-
-        r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-        if (unlikely(r != 0))
-                return r;
-        r = ttm_bo_wait(&bo->tbo, true, no_wait);
-        ttm_bo_unreserve(&bo->tbo);
-        return r;
-}
-
```