forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/v3d/v3d_gem.c
@@ -1,16 +1,19 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

-#include <drm/drmP.h>
-#include <drm/drm_syncobj.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/device.h>
-#include <linux/io.h>
+#include <linux/reset.h>
#include <linux/sched/signal.h>
+#include <linux/uaccess.h>

-#include "uapi/drm/v3d_drm.h"
+#include <drm/drm_syncobj.h>
+#include <uapi/drm/v3d_drm.h>
+
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
@@ -24,7 +27,8 @@
	 * type. If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
-	V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
+	if (v3d->ver < 40)
+		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
@@ -69,7 +73,7 @@
}

static void
-v3d_reset_v3d(struct v3d_dev *v3d)
+v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

@@ -89,6 +93,15 @@
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
+}
+
+static void
+v3d_reset_v3d(struct v3d_dev *v3d)
+{
+	if (v3d->reset)
+		reset_control_reset(v3d->reset);
+	else
+		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}
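
The new v3d_reset_v3d() prefers a reset controller when one was provided and only falls back to the bridge register poke otherwise. The probe-side wiring that fills in v3d->reset lives in v3d_drv.c and is not part of this file; the following is only a minimal sketch of that pattern, assuming a devm reset-control lookup (the helper name v3d_get_reset is hypothetical).

/* Sketch only (probe side, not in this diff): acquire the optional reset
 * line that v3d_reset_v3d() tests above.  devm_reset_control_get_optional_exclusive()
 * returns NULL when no reset is described for the device, which makes the
 * driver fall back to v3d_reset_by_bridge().
 */
static int v3d_get_reset(struct platform_device *pdev, struct v3d_dev *v3d)
{
	v3d->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(v3d->reset))
		return PTR_ERR(v3d->reset);	/* e.g. -EPROBE_DEFER */

	return 0;
}
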
@@ -98,7 +111,9 @@
{
	struct drm_device *dev = &v3d->drm;

-	DRM_ERROR("Resetting GPU.\n");
+	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
+	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
+		      V3D_CORE_READ(0, V3D_ERR_STAT));
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
@@ -130,38 +145,73 @@
	}
}

-/* Invalidates the (read-only) L2 cache. */
+/* Invalidates the (read-only) L2C cache. This was the L2 cache for
+ * uniforms and instructions on V3D 3.2.
+ */
static void
-v3d_invalidate_l2(struct v3d_dev *v3d, int core)
+v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
+	if (v3d->ver > 32)
+		return;
+
	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
-}
-
-static void
-v3d_invalidate_l1td(struct v3d_dev *v3d, int core)
-{
-	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
-	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
-		       V3D_L2TCACTL_L2TFLS), 100)) {
-		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
-	}
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
-	v3d_invalidate_l1td(v3d, core);
-
+	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
+	 * need to wait for completion before dispatching the job --
+	 * L2T accesses will be stalled until the flush has completed.
+	 * However, we do need to make sure we don't try to trigger a
+	 * new flush while the L2_CLEAN queue is trying to
+	 * synchronously clean after a job.
+	 */
+	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
+	mutex_unlock(&v3d->cache_clean_lock);
+}
+
+/* Cleans texture L1 and L2 cachelines (writing back dirty data).
+ *
+ * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
+ * executed, we need to make sure that the clean is done before
+ * signaling job completion. So, we synchronously wait before
+ * returning, and we make sure that L2 invalidates don't happen in the
+ * meantime to confuse our are-we-done checks.
+ */
+void
+v3d_clean_caches(struct v3d_dev *v3d)
+{
+	struct drm_device *dev = &v3d->drm;
+	int core = 0;
+
+	trace_v3d_cache_clean_begin(dev);
+
+	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
+	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
+		       V3D_L2TCACTL_TMUWCF), 100)) {
+		DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
+	}
+
+	mutex_lock(&v3d->cache_clean_lock);
+	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
+		       V3D_L2TCACTL_L2TFLS |
+		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));
+
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
-		DRM_ERROR("Timeout waiting for L2T flush\n");
+		DRM_ERROR("Timeout waiting for L2T clean\n");
	}
+
+	mutex_unlock(&v3d->cache_clean_lock);
+
+	trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches. These are read-only caches. */
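
v3d_clean_caches() is meant to be driven from the new V3D_CACHE_CLEAN scheduler queue; the scheduler hook itself lives in v3d_sched.c and is not part of this diff. A minimal sketch of what that run-job callback would look like, assuming the usual to_v3d_job() container helper:

/* Sketch only (v3d_sched.c side, not in this diff): the CACHE_CLEAN queue
 * has no hardware to kick; it performs the synchronous clean and returns
 * no hardware fence, so the job completes as soon as the clean is done.
 */
static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_clean_caches(job->v3d);

	return NULL;
}
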
@@ -175,66 +225,18 @@
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

-/* Invalidates texture L2 cachelines */
-static void
-v3d_invalidate_l2t(struct v3d_dev *v3d, int core)
-{
-	V3D_CORE_WRITE(core,
-		       V3D_CTL_L2TCACTL,
-		       V3D_L2TCACTL_L2TFLS |
-		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM));
-	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
-		       V3D_L2TCACTL_L2TFLS), 100)) {
-		DRM_ERROR("Timeout waiting for L2T invalidate\n");
-	}
-}
-
void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
+	/* Invalidate the caches from the outside in. That way if
+	 * another CL's concurrent use of nearby memory were to pull
+	 * an invalidated cacheline back in, we wouldn't leave stale
+	 * data in the inner cache.
+	 */
	v3d_flush_l3(v3d);
-
-	v3d_invalidate_l2(v3d, 0);
-	v3d_invalidate_slices(v3d, 0);
+	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
-}
-
-void
-v3d_flush_caches(struct v3d_dev *v3d)
-{
-	v3d_invalidate_l1td(v3d, 0);
-	v3d_invalidate_l2t(v3d, 0);
-}
-
-static void
-v3d_attach_object_fences(struct v3d_exec_info *exec)
-{
-	struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
-	struct v3d_bo *bo;
-	int i;
-
-	for (i = 0; i < exec->bo_count; i++) {
-		bo = to_v3d_bo(&exec->bo[i]->base);
-
-		/* XXX: Use shared fences for read-only objects. */
-		reservation_object_add_excl_fence(bo->resv, out_fence);
-	}
-}
-
-static void
-v3d_unlock_bo_reservations(struct drm_device *dev,
-			   struct v3d_exec_info *exec,
-			   struct ww_acquire_ctx *acquire_ctx)
-{
-	int i;
-
-	for (i = 0; i < exec->bo_count; i++) {
-		struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);
-
-		ww_mutex_unlock(&bo->resv->lock);
-	}
-
-	ww_acquire_fini(acquire_ctx);
+	v3d_invalidate_slices(v3d, 0);
}

/* Takes the reservation lock on all the BOs being referenced, so that
@@ -245,69 +247,21 @@
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
-v3d_lock_bo_reservations(struct drm_device *dev,
-			 struct v3d_exec_info *exec,
+v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
-	int contended_lock = -1;
	int i, ret;
-	struct v3d_bo *bo;

-	ww_acquire_init(acquire_ctx, &reservation_ww_class);
+	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
+	if (ret)
+		return ret;

-retry:
-	if (contended_lock != -1) {
-		bo = to_v3d_bo(&exec->bo[contended_lock]->base);
-		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-						       acquire_ctx);
+	for (i = 0; i < job->bo_count; i++) {
+		ret = drm_gem_fence_array_add_implicit(&job->deps,
+						       job->bo[i], true);
		if (ret) {
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
-	}
-
-	for (i = 0; i < exec->bo_count; i++) {
-		if (i == contended_lock)
-			continue;
-
-		bo = to_v3d_bo(&exec->bo[i]->base);
-
-		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
-		if (ret) {
-			int j;
-
-			for (j = 0; j < i; j++) {
-				bo = to_v3d_bo(&exec->bo[j]->base);
-				ww_mutex_unlock(&bo->resv->lock);
-			}
-
-			if (contended_lock != -1 && contended_lock >= i) {
-				bo = to_v3d_bo(&exec->bo[contended_lock]->base);
-
-				ww_mutex_unlock(&bo->resv->lock);
-			}
-
-			if (ret == -EDEADLK) {
-				contended_lock = i;
-				goto retry;
-			}
-
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
-	}
-
-	ww_acquire_done(acquire_ctx);
-
-	/* Reserve space for our shared (read-only) fence references,
-	 * before we commit the CL to the hardware.
-	 */
-	for (i = 0; i < exec->bo_count; i++) {
-		bo = to_v3d_bo(&exec->bo[i]->base);
-
-		ret = reservation_object_reserve_shared(bo->resv);
-		if (ret) {
-			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
+			drm_gem_unlock_reservations(job->bo, job->bo_count,
+						    acquire_ctx);
			return ret;
		}
	}
316270 }
317271
318272 /**
319
- * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
273
+ * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
320274 * referenced by the job.
321275 * @dev: DRM device
322276 * @file_priv: DRM file for this fd
323
- * @exec: V3D job being set up
277
+ * @job: V3D job being set up
324278 *
325279 * The command validator needs to reference BOs by their index within
326280 * the submitted job's BO list. This does the validation of the job's
@@ -330,18 +284,19 @@
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
-v3d_cl_lookup_bos(struct drm_device *dev,
-		  struct drm_file *file_priv,
-		  struct drm_v3d_submit_cl *args,
-		  struct v3d_exec_info *exec)
+v3d_lookup_bos(struct drm_device *dev,
+	       struct drm_file *file_priv,
+	       struct v3d_job *job,
+	       u64 bo_handles,
+	       u32 bo_count)
{
	u32 *handles;
	int ret = 0;
	int i;

-	exec->bo_count = args->bo_handle_count;
+	job->bo_count = bo_count;

-	if (!exec->bo_count) {
+	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
@@ -349,15 +304,15 @@
		return -EINVAL;
	}

-	exec->bo = kvmalloc_array(exec->bo_count,
-				  sizeof(struct drm_gem_cma_object *),
-				  GFP_KERNEL | __GFP_ZERO);
-	if (!exec->bo) {
+	job->bo = kvmalloc_array(job->bo_count,
+				 sizeof(struct drm_gem_cma_object *),
+				 GFP_KERNEL | __GFP_ZERO);
+	if (!job->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

-	handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
+	handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
@@ -365,15 +320,15 @@
	}

	if (copy_from_user(handles,
-			   (void __user *)(uintptr_t)args->bo_handles,
-			   exec->bo_count * sizeof(u32))) {
+			   (void __user *)(uintptr_t)bo_handles,
+			   job->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
-	for (i = 0; i < exec->bo_count; i++) {
+	for (i = 0; i < job->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
@@ -384,7 +339,7 @@
			goto fail;
		}
		drm_gem_object_get(bo);
-		exec->bo[i] = to_v3d_bo(bo);
+		job->bo[i] = bo;
	}
	spin_unlock(&file_priv->table_lock);

@@ -394,39 +349,50 @@
}

static void
-v3d_exec_cleanup(struct kref *ref)
+v3d_job_free(struct kref *ref)
{
-	struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
-						  refcount);
-	struct v3d_dev *v3d = exec->v3d;
-	unsigned int i;
-	struct v3d_bo *bo, *save;
+	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
+	unsigned long index;
+	struct dma_fence *fence;
+	int i;

-	dma_fence_put(exec->bin.in_fence);
-	dma_fence_put(exec->render.in_fence);
-
-	dma_fence_put(exec->bin.done_fence);
-	dma_fence_put(exec->render.done_fence);
-
-	dma_fence_put(exec->bin_done_fence);
-
-	for (i = 0; i < exec->bo_count; i++)
-		drm_gem_object_put_unlocked(&exec->bo[i]->base);
-	kvfree(exec->bo);
-
-	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
-		drm_gem_object_put_unlocked(&bo->base);
+	for (i = 0; i < job->bo_count; i++) {
+		if (job->bo[i])
+			drm_gem_object_put(job->bo[i]);
	}
+	kvfree(job->bo);

-	pm_runtime_mark_last_busy(v3d->dev);
-	pm_runtime_put_autosuspend(v3d->dev);
+	xa_for_each(&job->deps, index, fence) {
+		dma_fence_put(fence);
+	}
+	xa_destroy(&job->deps);

-	kfree(exec);
+	dma_fence_put(job->irq_fence);
+	dma_fence_put(job->done_fence);
+
+	pm_runtime_mark_last_busy(job->v3d->drm.dev);
+	pm_runtime_put_autosuspend(job->v3d->drm.dev);
+
+	kfree(job);
}

-void v3d_exec_put(struct v3d_exec_info *exec)
+static void
+v3d_render_job_free(struct kref *ref)
{
-	kref_put(&exec->refcount, v3d_exec_cleanup);
+	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
+						  base.refcount);
+	struct v3d_bo *bo, *save;
+
+	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
+		drm_gem_object_put(&bo->base.base);
+	}
+
+	v3d_job_free(ref);
+}
+
+void v3d_job_put(struct v3d_job *job)
+{
+	kref_put(&job->refcount, job->free);
}

int
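
The deps xarray that v3d_job_free() drains is filled by v3d_job_init() (the in_sync fence) and by v3d_lock_bo_reservations() (the implicit BO fences); only entries the scheduler never consumed are left to be put here. The consumer side is in v3d_sched.c and is not part of this diff; in the kernels this series is based on it is roughly the sketch below, assuming a last_dep cursor field in struct v3d_job.

/* Sketch only (v3d_sched.c side, not in this diff): the scheduler asks for
 * one dependency fence at a time; xa_erase() returns and removes the next
 * entry, so whatever was not consumed stays for v3d_job_free() to put.
 */
static struct dma_fence *
v3d_job_dependency(struct drm_sched_job *sched_job,
		   struct drm_sched_entity *s_entity)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	return xa_erase(&job->deps, job->last_dep++);
}
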
432398 int
....@@ -435,8 +401,6 @@
435401 {
436402 int ret;
437403 struct drm_v3d_wait_bo *args = data;
438
- struct drm_gem_object *gem_obj;
439
- struct v3d_bo *bo;
440404 ktime_t start = ktime_get();
441405 u64 delta_ns;
442406 unsigned long timeout_jiffies =
....@@ -445,21 +409,8 @@
445409 if (args->pad != 0)
446410 return -EINVAL;
447411
448
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
449
- if (!gem_obj) {
450
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
451
- return -EINVAL;
452
- }
453
- bo = to_v3d_bo(gem_obj);
454
-
455
- ret = reservation_object_wait_timeout_rcu(bo->resv,
456
- true, true,
457
- timeout_jiffies);
458
-
459
- if (ret == 0)
460
- ret = -ETIME;
461
- else if (ret > 0)
462
- ret = 0;
412
+ ret = drm_gem_dma_resv_wait(file_priv, args->handle,
413
+ true, timeout_jiffies);
463414
464415 /* Decrement the user's timeout, in case we got interrupted
465416 * such that the ioctl will be restarted.
....@@ -474,9 +425,88 @@
474425 if (ret == -ETIME && args->timeout_ns)
475426 ret = -EAGAIN;
476427
477
- drm_gem_object_put_unlocked(gem_obj);
478
-
479428 return ret;
429
+}
430
+
431
+static int
432
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
433
+ struct v3d_job *job, void (*free)(struct kref *ref),
434
+ u32 in_sync)
435
+{
436
+ struct dma_fence *in_fence = NULL;
437
+ int ret;
438
+
439
+ job->v3d = v3d;
440
+ job->free = free;
441
+
442
+ ret = pm_runtime_get_sync(v3d->drm.dev);
443
+ if (ret < 0)
444
+ return ret;
445
+
446
+ xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
447
+
448
+ ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
449
+ if (ret == -EINVAL)
450
+ goto fail;
451
+
452
+ ret = drm_gem_fence_array_add(&job->deps, in_fence);
453
+ if (ret)
454
+ goto fail;
455
+
456
+ kref_init(&job->refcount);
457
+
458
+ return 0;
459
+fail:
460
+ xa_destroy(&job->deps);
461
+ pm_runtime_put_autosuspend(v3d->drm.dev);
462
+ return ret;
463
+}
464
+
465
+static int
466
+v3d_push_job(struct v3d_file_priv *v3d_priv,
467
+ struct v3d_job *job, enum v3d_queue queue)
468
+{
469
+ int ret;
470
+
471
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
472
+ v3d_priv);
473
+ if (ret)
474
+ return ret;
475
+
476
+ job->done_fence = dma_fence_get(&job->base.s_fence->finished);
477
+
478
+ /* put by scheduler job completion */
479
+ kref_get(&job->refcount);
480
+
481
+ drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
482
+
483
+ return 0;
484
+}
485
+
486
+static void
487
+v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
488
+ struct v3d_job *job,
489
+ struct ww_acquire_ctx *acquire_ctx,
490
+ u32 out_sync,
491
+ struct dma_fence *done_fence)
492
+{
493
+ struct drm_syncobj *sync_out;
494
+ int i;
495
+
496
+ for (i = 0; i < job->bo_count; i++) {
497
+ /* XXX: Use shared fences for read-only objects. */
498
+ dma_resv_add_excl_fence(job->bo[i]->resv,
499
+ job->done_fence);
500
+ }
501
+
502
+ drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
503
+
504
+ /* Update the return sync object for the job */
505
+ sync_out = drm_syncobj_find(file_priv, out_sync);
506
+ if (sync_out) {
507
+ drm_syncobj_replace_fence(sync_out, done_fence);
508
+ drm_syncobj_put(sync_out);
509
+ }
480510 }
481511
482512 /**
@@ -498,106 +528,336 @@
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
-	struct v3d_exec_info *exec;
+	struct v3d_bin_job *bin = NULL;
+	struct v3d_render_job *render;
+	struct v3d_job *clean_job = NULL;
+	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
-	struct drm_syncobj *sync_out;
	int ret = 0;

-	if (args->pad != 0) {
-		DRM_INFO("pad must be zero: %d\n", args->pad);
+	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
+	if (args->flags != 0 &&
+	    args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

-	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
-	if (!exec)
+	render = kcalloc(1, sizeof(*render), GFP_KERNEL);
+	if (!render)
		return -ENOMEM;

-	ret = pm_runtime_get_sync(v3d->dev);
-	if (ret < 0) {
-		kfree(exec);
+	render->start = args->rcl_start;
+	render->end = args->rcl_end;
+	INIT_LIST_HEAD(&render->unref_list);
+
+	ret = v3d_job_init(v3d, file_priv, &render->base,
+			   v3d_render_job_free, args->in_sync_rcl);
+	if (ret) {
+		kfree(render);
		return ret;
	}

-	kref_init(&exec->refcount);
+	if (args->bcl_start != args->bcl_end) {
+		bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
+		if (!bin) {
+			v3d_job_put(&render->base);
+			return -ENOMEM;
+		}

-	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
-				     &exec->bin.in_fence);
-	if (ret == -EINVAL)
-		goto fail;
+		ret = v3d_job_init(v3d, file_priv, &bin->base,
+				   v3d_job_free, args->in_sync_bcl);
+		if (ret) {
+			v3d_job_put(&render->base);
+			kfree(bin);
+			return ret;
+		}

-	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
-				     &exec->render.in_fence);
-	if (ret == -EINVAL)
-		goto fail;
+		bin->start = args->bcl_start;
+		bin->end = args->bcl_end;
+		bin->qma = args->qma;
+		bin->qms = args->qms;
+		bin->qts = args->qts;
+		bin->render = render;
+	}

-	exec->qma = args->qma;
-	exec->qms = args->qms;
-	exec->qts = args->qts;
-	exec->bin.exec = exec;
-	exec->bin.start = args->bcl_start;
-	exec->bin.end = args->bcl_end;
-	exec->render.exec = exec;
-	exec->render.start = args->rcl_start;
-	exec->render.end = args->rcl_end;
-	exec->v3d = v3d;
-	INIT_LIST_HEAD(&exec->unref_list);
+	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+		clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
+		if (!clean_job) {
+			ret = -ENOMEM;
+			goto fail;
+		}

-	ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
+		ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+		if (ret) {
+			kfree(clean_job);
+			clean_job = NULL;
+			goto fail;
+		}
+
+		last_job = clean_job;
+	} else {
+		last_job = &render->base;
+	}
+
+	ret = v3d_lookup_bos(dev, file_priv, last_job,
+			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

-	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
+	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
-	if (exec->bin.start != exec->bin.end) {
-		ret = drm_sched_job_init(&exec->bin.base,
-					 &v3d_priv->sched_entity[V3D_BIN],
-					 v3d_priv);
+	if (bin) {
+		ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
		if (ret)
			goto fail_unreserve;

-		exec->bin_done_fence =
-			dma_fence_get(&exec->bin.base.s_fence->finished);
-
-		kref_get(&exec->refcount); /* put by scheduler job completion */
-		drm_sched_entity_push_job(&exec->bin.base,
-					  &v3d_priv->sched_entity[V3D_BIN]);
+		ret = drm_gem_fence_array_add(&render->base.deps,
+					      dma_fence_get(bin->base.done_fence));
+		if (ret)
+			goto fail_unreserve;
	}

-	ret = drm_sched_job_init(&exec->render.base,
-				 &v3d_priv->sched_entity[V3D_RENDER],
-				 v3d_priv);
+	ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
	if (ret)
		goto fail_unreserve;

-	kref_get(&exec->refcount); /* put by scheduler job completion */
-	drm_sched_entity_push_job(&exec->render.base,
-				  &v3d_priv->sched_entity[V3D_RENDER]);
-	mutex_unlock(&v3d->sched_lock);
-
-	v3d_attach_object_fences(exec);
-
-	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
-
-	/* Update the return sync object for the */
-	sync_out = drm_syncobj_find(file_priv, args->out_sync);
-	if (sync_out) {
-		drm_syncobj_replace_fence(sync_out,
-					  &exec->render.base.s_fence->finished);
-		drm_syncobj_put(sync_out);
+	if (clean_job) {
+		struct dma_fence *render_fence =
+			dma_fence_get(render->base.done_fence);
+		ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+		if (ret)
+			goto fail_unreserve;
+		ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
+		if (ret)
+			goto fail_unreserve;
	}

-	v3d_exec_put(exec);
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 last_job,
+						 &acquire_ctx,
+						 args->out_sync,
+						 last_job->done_fence);
+
+	if (bin)
+		v3d_job_put(&bin->base);
+	v3d_job_put(&render->base);
+	if (clean_job)
+		v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
-	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+	drm_gem_unlock_reservations(last_job->bo,
+				    last_job->bo_count, &acquire_ctx);
fail:
-	v3d_exec_put(exec);
+	if (bin)
+		v3d_job_put(&bin->base);
+	v3d_job_put(&render->base);
+	if (clean_job)
+		v3d_job_put(clean_job);
+
+	return ret;
+}
+
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_tfu *args = data;
+	struct v3d_tfu_job *job;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret = 0;
+
+	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+
+	ret = v3d_job_init(v3d, file_priv, &job->base,
+			   v3d_job_free, args->in_sync);
+	if (ret) {
+		kfree(job);
+		return ret;
+	}
+
+	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
+			       sizeof(*job->base.bo), GFP_KERNEL);
+	if (!job->base.bo) {
+		v3d_job_put(&job->base);
+		return -ENOMEM;
+	}
+
+	job->args = *args;
+
+	spin_lock(&file_priv->table_lock);
+	for (job->base.bo_count = 0;
+	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
+	     job->base.bo_count++) {
+		struct drm_gem_object *bo;
+
+		if (!args->bo_handles[job->base.bo_count])
+			break;
+
+		bo = idr_find(&file_priv->object_idr,
+			      args->bo_handles[job->base.bo_count]);
+		if (!bo) {
+			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+				  job->base.bo_count,
+				  args->bo_handles[job->base.bo_count]);
+			ret = -ENOENT;
+			spin_unlock(&file_priv->table_lock);
+			goto fail;
+		}
+		drm_gem_object_get(bo);
+		job->base.bo[job->base.bo_count] = bo;
+	}
+	spin_unlock(&file_priv->table_lock);
+
+	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	mutex_lock(&v3d->sched_lock);
+	ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
+	if (ret)
+		goto fail_unreserve;
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 &job->base, &acquire_ctx,
+						 args->out_sync,
+						 job->base.done_fence);
+
+	v3d_job_put(&job->base);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+	drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
+				    &acquire_ctx);
+fail:
+	v3d_job_put(&job->base);
+
+	return ret;
+}
+
+/**
+ * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the CSD, which we don't
+ * need to validate since the CSD is behind the MMU.
+ */
+int
+v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_csd *args = data;
+	struct v3d_csd_job *job;
+	struct v3d_job *clean_job;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret;
+
+	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+
+	if (!v3d_has_csd(v3d)) {
+		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
+		return -EINVAL;
+	}
+
+	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+
+	ret = v3d_job_init(v3d, file_priv, &job->base,
+			   v3d_job_free, args->in_sync);
+	if (ret) {
+		kfree(job);
+		return ret;
+	}
+
+	clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
+	if (!clean_job) {
+		v3d_job_put(&job->base);
+		kfree(job);
+		return -ENOMEM;
+	}
+
+	ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+	if (ret) {
+		v3d_job_put(&job->base);
+		kfree(clean_job);
+		return ret;
+	}
+
+	job->args = *args;
+
+	ret = v3d_lookup_bos(dev, file_priv, clean_job,
+			     args->bo_handles, args->bo_handle_count);
+	if (ret)
+		goto fail;
+
+	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	mutex_lock(&v3d->sched_lock);
+	ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
+	if (ret)
+		goto fail_unreserve;
+
+	ret = drm_gem_fence_array_add(&clean_job->deps,
+				      dma_fence_get(job->base.done_fence));
+	if (ret)
+		goto fail_unreserve;
+
+	ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
+	if (ret)
+		goto fail_unreserve;
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 clean_job,
+						 &acquire_ctx,
+						 args->out_sync,
+						 clean_job->done_fence);
+
+	v3d_job_put(&job->base);
+	v3d_job_put(clean_job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+				    &acquire_ctx);
+fail:
+	v3d_job_put(&job->base);
+	v3d_job_put(clean_job);

	return ret;
}
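
With the DRM_V3D_SUBMIT_CL_FLUSH_CACHE flag set, the out_sync syncobj receives the clean job's done fence rather than the render job's, so waiting on it guarantees the L2T clean has completed. A hypothetical userspace sketch of using that flag follows; it is not part of this commit, the field names are taken from the args-> usage in v3d_submit_cl_ioctl() above, and the exact uapi layout lives in include/uapi/drm/v3d_drm.h (not shown here).

/* Userspace sketch (assumption, not from this commit): submit a CL job and
 * ask the kernel to queue the extra CACHE_CLEAN job after the render job.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "v3d_drm.h"	/* uapi header from the kernel tree */

int submit_cl_with_flush(int fd, uint32_t bcl_start, uint32_t bcl_end,
			 uint32_t rcl_start, uint32_t rcl_end,
			 const uint32_t *bo_handles, uint32_t bo_count,
			 uint32_t out_sync)
{
	struct drm_v3d_submit_cl args;

	memset(&args, 0, sizeof(args));
	args.bcl_start = bcl_start;	/* equal bcl_start/bcl_end skips the bin job */
	args.bcl_end = bcl_end;
	args.rcl_start = rcl_start;
	args.rcl_end = rcl_end;
	args.bo_handles = (uintptr_t)bo_handles;
	args.bo_handle_count = bo_count;
	args.out_sync = out_sync;	/* gets the last job's done fence */
	args.flags = DRM_V3D_SUBMIT_CL_FLUSH_CACHE;
	/* qma/qms/qts (tile alloc/state buffers) and the in_sync_bcl/in_sync_rcl
	 * syncobjs are left zero here for brevity.
	 */

	return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &args);
}
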
@@ -617,6 +877,7 @@
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);
+	mutex_init(&v3d->cache_clean_lock);

	/* Note: We don't allocate address 0. Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
@@ -624,12 +885,12 @@
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

-	v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
+	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
-		dev_err(v3d->dev,
+		dev_err(v3d->drm.dev,
			"Failed to allocate page tables. "
			"Please ensure you have CMA enabled.\n");
		return -ENOMEM;
@@ -641,7 +902,7 @@
	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
-		dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
+		dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
				  v3d->pt_paddr);
	}

@@ -655,7 +916,7 @@

	v3d_sched_fini(v3d);

-	/* Waiting for exec to finish would need to be done before
+	/* Waiting for jobs to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
@@ -663,5 +924,6 @@

	drm_mm_takedown(&v3d->mm);

-	dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
+	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
+			  v3d->pt_paddr);
}