2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,9 +23,8 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
-
-#include <drm/ttm/ttm_execbuf_util.h>
 
 #include "virtgpu_drv.h"
 
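[annotation] The include swap above mirrors the conversion that follows: the TTM execbuf/reservation helpers are dropped and the driver calls the DMA mapping API directly. As a refresher on the sgtable idiom the rest of this patch leans on, a minimal sketch using only generic kernel APIs (example_map() is an illustrative name, not code from this patch):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Minimal sketch of the dma_map_sgtable() idiom used by this patch.
 * The IOMMU may coalesce segments, so after mapping you must walk the
 * DMA side with for_each_sgtable_dma_sg() (sgt->nents entries), not
 * the CPU side (sgt->orig_nents). */
static int example_map(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	dma_addr_t addr;
	int i, ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;		/* nothing is mapped on failure */

	for_each_sgtable_dma_sg(sgt, sg, i) {
		addr = sg_dma_address(sg);
		pr_debug("seg %d: %pad + %u\n", i, &addr, sg_dma_len(sg));
	}

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}

The distinction between nents and orig_nents is exactly what virtio_gpu_object_shmem_init() below exploits when it sizes the mem-entry array.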
@@ -63,37 +62,141 @@
 	}
 }
 
-static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
-	struct virtio_gpu_object *bo;
-	struct virtio_gpu_device *vgdev;
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	bo = container_of(tbo, struct virtio_gpu_object, tbo);
-	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
-	if (bo->created)
-		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
-	if (bo->pages)
-		virtio_gpu_object_free_sg_table(bo);
-	drm_gem_object_release(&bo->gem_base);
 	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-	kfree(bo);
+	if (virtio_gpu_is_shmem(bo)) {
+		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+		if (shmem->pages) {
+			if (shmem->mapped) {
+				dma_unmap_sgtable(vgdev->vdev->dev.parent,
+						  shmem->pages, DMA_TO_DEVICE, 0);
+				shmem->mapped = 0;
+			}
+
+			sg_free_table(shmem->pages);
+			kfree(shmem->pages);
+			shmem->pages = NULL;
+			drm_gem_shmem_unpin(&bo->base.base);
+		}
+
+		drm_gem_shmem_free_object(&bo->base.base);
+	}
 }
 
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
 {
-	u32 c = 1;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	vgbo->placement.placement = &vgbo->placement_code;
-	vgbo->placement.busy_placement = &vgbo->placement_code;
-	vgbo->placement_code.fpfn = 0;
-	vgbo->placement_code.lpfn = 0;
-	vgbo->placement_code.flags =
-		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
-		TTM_PL_FLAG_NO_EVICT;
-	vgbo->placement.num_placement = c;
-	vgbo->placement.num_busy_placement = c;
+	if (bo->created) {
+		virtio_gpu_cmd_unref_resource(vgdev, bo);
+		virtio_gpu_notify(vgdev);
+		/* completion handler calls virtio_gpu_cleanup_object() */
+		return;
+	}
+	virtio_gpu_cleanup_object(bo);
+}
 
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
+	.free = virtio_gpu_free_object,
+	.open = virtio_gpu_gem_object_open,
+	.close = virtio_gpu_gem_object_close,
+
+	.print_info = drm_gem_shmem_print_info,
+	.pin = drm_gem_shmem_pin,
+	.unpin = drm_gem_shmem_unpin,
+	.get_sg_table = drm_gem_shmem_get_sg_table,
+	.vmap = drm_gem_shmem_vmap,
+	.vunmap = drm_gem_shmem_vunmap,
+	.mmap = drm_gem_shmem_mmap,
+};
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+						size_t size)
+{
+	struct virtio_gpu_object_shmem *shmem;
+	struct drm_gem_shmem_object *dshmem;
+
+	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!shmem)
+		return NULL;
+
+	dshmem = &shmem->base.base;
+	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
+	dshmem->map_cached = true;
+	return &dshmem->base;
+}
+
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+					struct virtio_gpu_object *bo,
+					struct virtio_gpu_mem_entry **ents,
+					unsigned int *nents)
+{
+	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+	struct scatterlist *sg;
+	int si, ret;
+
+	ret = drm_gem_shmem_pin(&bo->base.base);
+	if (ret < 0)
+		return -EINVAL;
+
+	/*
+	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
+	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
+	 * dma-ops. This is discouraged for other drivers, but should be fine
+	 * since virtio_gpu doesn't support dma-buf import from other devices.
+	 */
+	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+	if (IS_ERR(shmem->pages)) {
+		drm_gem_shmem_unpin(&bo->base.base);
+		ret = PTR_ERR(shmem->pages);
+		shmem->pages = NULL;
+		return ret;
+	}
+
+	if (use_dma_api) {
+		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
+				      shmem->pages, DMA_TO_DEVICE, 0);
+		if (ret)
+			return ret;
+		*nents = shmem->mapped = shmem->pages->nents;
+	} else {
+		*nents = shmem->pages->orig_nents;
+	}
+
+	*ents = kvmalloc_array(*nents,
+			       sizeof(struct virtio_gpu_mem_entry),
+			       GFP_KERNEL);
+	if (!(*ents)) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	if (use_dma_api) {
+		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
+			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
+			(*ents)[si].padding = 0;
+		}
+	} else {
+		for_each_sgtable_sg(shmem->pages, sg, si) {
+			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
+			(*ents)[si].length = cpu_to_le32(sg->length);
+			(*ents)[si].padding = 0;
+		}
+	}
+
+	return 0;
 }
 
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
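[annotation] The bo->created branch above is the subtle part of this hunk: once the host has seen the resource, the guest pages may still be in use (e.g. for scanout), so teardown is deferred until the RESOURCE_UNREF response arrives and the completion handler runs virtio_gpu_cleanup_object(). A sketch of that completion side, modeled on virtgpu_vq.c of the same era (abbreviated, not part of this hunk):

/* Sketch of the completion side of the deferred free; see the
 * "completion handler calls virtio_gpu_cleanup_object()" comment above.
 * Modeled on virtgpu_vq.c of this era, not code from this file. */
static void example_unref_resource_cb(struct virtio_gpu_device *vgdev,
				      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo = vbuf->resp_cb_data;

	vbuf->resp_cb_data = NULL;

	/* The host acknowledged RESOURCE_UNREF and no longer touches the
	 * backing pages, so unmapping, unpinning and freeing the shmem
	 * object is now safe. */
	virtio_gpu_cleanup_object(bo);
}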
@@ -101,151 +204,63 @@
 				     struct virtio_gpu_object **bo_ptr,
 				     struct virtio_gpu_fence *fence)
 {
+	struct virtio_gpu_object_array *objs = NULL;
+	struct drm_gem_shmem_object *shmem_obj;
 	struct virtio_gpu_object *bo;
-	size_t acc_size;
+	struct virtio_gpu_mem_entry *ents;
+	unsigned int nents;
 	int ret;
 
 	*bo_ptr = NULL;
 
-	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
-				       sizeof(struct virtio_gpu_object));
-
-	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
-	if (bo == NULL)
-		return -ENOMEM;
-	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
-	if (ret < 0) {
-		kfree(bo);
-		return ret;
-	}
 	params->size = roundup(params->size, PAGE_SIZE);
-	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
-	if (ret != 0) {
-		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-		kfree(bo);
-		return ret;
-	}
+	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
+	if (IS_ERR(shmem_obj))
+		return PTR_ERR(shmem_obj);
+	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
+
+	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+	if (ret < 0)
+		goto err_free_gem;
+
 	bo->dumb = params->dumb;
 
-	if (params->virgl) {
-		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
-	} else {
-		virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
-	}
-
-	virtio_gpu_init_ttm_placement(bo);
-	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
-			  ttm_bo_type_device, &bo->placement, 0,
-			  true, acc_size, NULL, NULL,
-			  &virtio_gpu_ttm_bo_destroy);
-	/* ttm_bo_init failure will call the destroy */
-	if (ret != 0)
-		return ret;
-
 	if (fence) {
-		struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
-		struct list_head validate_list;
-		struct ttm_validate_buffer mainbuf;
-		struct ww_acquire_ctx ticket;
-		unsigned long irq_flags;
-		bool signaled;
+		ret = -ENOMEM;
+		objs = virtio_gpu_array_alloc(1);
+		if (!objs)
+			goto err_put_id;
+		virtio_gpu_array_add_obj(objs, &bo->base.base);
 
-		INIT_LIST_HEAD(&validate_list);
-		memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
-		/* use a gem reference since unref list undoes them */
-		drm_gem_object_get(&bo->gem_base);
-		mainbuf.bo = &bo->tbo;
-		list_add(&mainbuf.head, &validate_list);
-
-		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
-		if (ret == 0) {
-			spin_lock_irqsave(&drv->lock, irq_flags);
-			signaled = virtio_fence_signaled(&fence->f);
-			if (!signaled)
-				/* virtio create command still in flight */
-				ttm_eu_fence_buffer_objects(&ticket, &validate_list,
-							    &fence->f);
-			spin_unlock_irqrestore(&drv->lock, irq_flags);
-			if (signaled)
-				/* virtio create command finished */
-				ttm_eu_backoff_reservation(&ticket, &validate_list);
-		}
-		virtio_gpu_unref_list(&validate_list);
+		ret = virtio_gpu_array_lock_resv(objs);
+		if (ret != 0)
+			goto err_put_objs;
 	}
+
+	if (params->virgl) {
+		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+						  objs, fence);
+	} else {
+		virtio_gpu_cmd_create_resource(vgdev, bo, params,
+					       objs, fence);
+	}
+
+	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+	if (ret != 0) {
+		virtio_gpu_free_object(&shmem_obj->base);
+		return ret;
+	}
+
+	virtio_gpu_object_attach(vgdev, bo, ents, nents);
 
 	*bo_ptr = bo;
 	return 0;
+
+err_put_objs:
+	virtio_gpu_array_put_free(objs);
+err_put_id:
+	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+err_free_gem:
+	drm_gem_shmem_free_object(&shmem_obj->base);
+	return ret;
 }
-
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
-{
-	bo->vmap = NULL;
-	ttm_bo_kunmap(&bo->kmap);
-}
-
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
-{
-	bool is_iomem;
-	int r;
-
-	WARN_ON(bo->vmap);
-
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
-	if (r)
-		return r;
-	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
-	return 0;
-}
-
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
-				   struct virtio_gpu_object *bo)
-{
-	int ret;
-	struct page **pages = bo->tbo.ttm->pages;
-	int nr_pages = bo->tbo.num_pages;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
-
-	/* wtf swapping */
-	if (bo->pages)
-		return 0;
-
-	if (bo->tbo.ttm->state == tt_unpopulated)
-		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
-	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!bo->pages)
-		goto out;
-
-	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
-					nr_pages << PAGE_SHIFT, GFP_KERNEL);
-	if (ret)
-		goto out;
-	return 0;
-out:
-	kfree(bo->pages);
-	bo->pages = NULL;
-	return -ENOMEM;
-}
-
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
-{
-	sg_free_table(bo->pages);
-	kfree(bo->pages);
-	bo->pages = NULL;
-}
-
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
-{
-	int r;
-
-	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
-	if (unlikely(r != 0))
-		return r;
-	r = ttm_bo_wait(&bo->tbo, true, no_wait);
-	ttm_bo_unreserve(&bo->tbo);
-	return r;
-}
-
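[annotation] End-to-end, the rewritten create path is: drm_gem_shmem_create() -> resource-id allocation -> optional object-array plus reservation lock when a fence is given -> create-resource command -> virtio_gpu_object_shmem_init() pins the pages and builds the mem-entry table -> virtio_gpu_object_attach() hands the backing store to the host, with the error labels unwinding in reverse order. A hedged usage sketch of how a dumb-buffer style caller might drive it (the helper name and dimensions are illustrative, not from the patch):

/* Hedged usage sketch (not from this patch): driving the new create
 * path.  Field names follow struct virtio_gpu_object_params from
 * virtgpu_drv.h of this era; example_create_dumb() and the 1024x768
 * values are illustrative. */
static int example_create_dumb(struct virtio_gpu_device *vgdev,
			       struct virtio_gpu_object **bo)
{
	struct virtio_gpu_object_params params = { 0 };

	params.format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
	params.width  = 1024;
	params.height = 768;
	params.size   = PAGE_ALIGN(params.width * params.height * 4);
	params.dumb   = true;

	/* No fence: virtio_gpu_object_create() then skips the object-array
	 * and reservation-lock step and simply queues the create command. */
	return virtio_gpu_object_create(vgdev, &params, bo, NULL);
}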