@@ -29,6 +29,8 @@
 #include <linux/sched/signal.h>
 #include <linux/dma-fence-array.h>
 
+#include <drm/drm_syncobj.h>
+
 #include "uapi/drm/vc4_drm.h"
 #include "vc4_drv.h"
 #include "vc4_regs.h"
@@ -56,7 +58,7 @@
 	unsigned int i;
 
 	for (i = 0; i < state->user_state.bo_count; i++)
-		drm_gem_object_put_unlocked(state->bo[i]);
+		drm_gem_object_put(state->bo[i]);
 
 	kfree(state);
 }
@@ -73,6 +75,11 @@
 	unsigned long irqflags;
 	u32 i;
 	int ret = 0;
+
+	if (!vc4->v3d) {
+		DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
+		return -ENODEV;
+	}
 
 	spin_lock_irqsave(&vc4->job_lock, irqflags);
 	kernel_state = vc4->hang_state;
@@ -307,16 +314,16 @@
 	struct vc4_dev *vc4 =
 		container_of(work, struct vc4_dev, hangcheck.reset_work);
 
-	vc4_save_hang_state(vc4->dev);
+	vc4_save_hang_state(&vc4->base);
 
-	vc4_reset(vc4->dev);
+	vc4_reset(&vc4->base);
 }
 
 static void
 vc4_hangcheck_elapsed(struct timer_list *t)
 {
 	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
-	struct drm_device *dev = vc4->dev;
+	struct drm_device *dev = &vc4->base;
 	uint32_t ct0ca, ct1ca;
 	unsigned long irqflags;
 	struct vc4_exec_info *bin_exec, *render_exec;
@@ -536,7 +543,7 @@
 		bo = to_vc4_bo(&exec->bo[i]->base);
 		bo->seqno = seqno;
 
-		reservation_object_add_shared_fence(bo->resv, exec->fence);
+		dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
 	}
 
 	list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -547,7 +554,7 @@
 		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
 		bo->write_seqno = seqno;
 
-		reservation_object_add_excl_fence(bo->resv, exec->fence);
+		dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
 	}
 }
 
@@ -559,9 +566,9 @@
 	int i;
 
 	for (i = 0; i < exec->bo_count; i++) {
-		struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+		struct drm_gem_object *bo = &exec->bo[i]->base;
 
-		ww_mutex_unlock(&bo->resv->lock);
+		dma_resv_unlock(bo->resv);
 	}
 
 	ww_acquire_fini(acquire_ctx);
@@ -581,15 +588,14 @@
 {
 	int contended_lock = -1;
 	int i, ret;
-	struct vc4_bo *bo;
+	struct drm_gem_object *bo;
 
 	ww_acquire_init(acquire_ctx, &reservation_ww_class);
 
 retry:
 	if (contended_lock != -1) {
-		bo = to_vc4_bo(&exec->bo[contended_lock]->base);
-		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-						       acquire_ctx);
+		bo = &exec->bo[contended_lock]->base;
+		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
 		if (ret) {
 			ww_acquire_done(acquire_ctx);
 			return ret;
@@ -600,21 +606,21 @@
 		if (i == contended_lock)
 			continue;
 
-		bo = to_vc4_bo(&exec->bo[i]->base);
+		bo = &exec->bo[i]->base;
 
-		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
 		if (ret) {
 			int j;
 
 			for (j = 0; j < i; j++) {
-				bo = to_vc4_bo(&exec->bo[j]->base);
-				ww_mutex_unlock(&bo->resv->lock);
+				bo = &exec->bo[j]->base;
+				dma_resv_unlock(bo->resv);
 			}
 
 			if (contended_lock != -1 && contended_lock >= i) {
-				bo = to_vc4_bo(&exec->bo[contended_lock]->base);
+				bo = &exec->bo[contended_lock]->base;
 
-				ww_mutex_unlock(&bo->resv->lock);
+				dma_resv_unlock(bo->resv);
 			}
 
 			if (ret == -EDEADLK) {
@@ -633,9 +639,9 @@
 	 * before we commit the CL to the hardware.
 	 */
 	for (i = 0; i < exec->bo_count; i++) {
-		bo = to_vc4_bo(&exec->bo[i]->base);
+		bo = &exec->bo[i]->base;
 
-		ret = reservation_object_reserve_shared(bo->resv);
+		ret = dma_resv_reserve_shared(bo->resv, 1);
 		if (ret) {
 			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
 			return ret;
@@ -802,7 +808,7 @@
 fail_put_bo:
 	/* Release any reference to acquired objects. */
 	for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
-		drm_gem_object_put_unlocked(&exec->bo[i]->base);
+		drm_gem_object_put(&exec->bo[i]->base);
 
 fail:
 	kvfree(handles);
@@ -815,6 +821,7 @@
 vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 {
 	struct drm_vc4_submit_cl *args = exec->args;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	void *temp = NULL;
 	void *bin;
 	int ret = 0;
@@ -913,6 +920,12 @@
 	if (ret)
 		goto fail;
 
+	if (exec->found_tile_binning_mode_config_packet) {
+		ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
+		if (ret)
+			goto fail;
+	}
+
 	/* Block waiting on any previous rendering into the CS's VBO,
 	 * IB, or textures, so that pixels are actually written by the
 	 * time we try to read them.
@@ -944,7 +957,7 @@
 			struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
 
 			vc4_bo_dec_usecnt(bo);
-			drm_gem_object_put_unlocked(&exec->bo[i]->base);
+			drm_gem_object_put(&exec->bo[i]->base);
 		}
 		kvfree(exec->bo);
 	}
@@ -953,7 +966,7 @@
 		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
 						     struct vc4_bo, unref_head);
 		list_del(&bo->unref_head);
-		drm_gem_object_put_unlocked(&bo->base.base);
+		drm_gem_object_put(&bo->base.base);
 	}
 
 	/* Free up the allocation of any bin slots we used. */
@@ -961,15 +974,14 @@
 	vc4->bin_alloc_used &= ~exec->bin_slots;
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 
+	/* Release the reference on the binner BO if needed. */
+	if (exec->bin_bo_used)
+		vc4_v3d_bin_bo_put(vc4);
+
 	/* Release the reference we had on the perf monitor. */
 	vc4_perfmon_put(exec->perfmon);
 
-	mutex_lock(&vc4->power_lock);
-	if (--vc4->power_refcount == 0) {
-		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
-		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
-	}
-	mutex_unlock(&vc4->power_lock);
+	vc4_v3d_pm_put(vc4);
 
 	kfree(exec);
 }
@@ -988,7 +1000,7 @@
 		list_del(&exec->head);
 
 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-		vc4_complete_exec(vc4->dev, exec);
+		vc4_complete_exec(&vc4->base, exec);
 		spin_lock_irqsave(&vc4->job_lock, irqflags);
 	}
 
@@ -1095,7 +1107,7 @@
 	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
 					      &args->timeout_ns);
 
-	drm_gem_object_put_unlocked(gem_obj);
+	drm_gem_object_put(gem_obj);
 	return ret;
 }
 
@@ -1124,6 +1136,11 @@
 	struct dma_fence *in_fence;
 	int ret = 0;
 
+	if (!vc4->v3d) {
+		DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
+		return -ENODEV;
+	}
+
 	if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
 			     VC4_SUBMIT_CL_FIXED_RCL_ORDER |
 			     VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
@@ -1143,17 +1160,11 @@
 		return -ENOMEM;
 	}
 
-	mutex_lock(&vc4->power_lock);
-	if (vc4->power_refcount++ == 0) {
-		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
-		if (ret < 0) {
-			mutex_unlock(&vc4->power_lock);
-			vc4->power_refcount--;
-			kfree(exec);
-			return ret;
-		}
+	ret = vc4_v3d_pm_get(vc4);
+	if (ret) {
+		kfree(exec);
+		return ret;
 	}
-	mutex_unlock(&vc4->power_lock);
 
 	exec->args = args;
 	INIT_LIST_HEAD(&exec->unref_list);
@@ -1173,7 +1184,7 @@
 
 	if (args->in_sync) {
 		ret = drm_syncobj_find_fence(file_priv, args->in_sync,
-					     &in_fence);
+					     0, 0, &in_fence);
 		if (ret)
 			goto fail;
 
@@ -1247,13 +1258,13 @@
 	return 0;
 
 fail:
-	vc4_complete_exec(vc4->dev, exec);
+	vc4_complete_exec(&vc4->base, exec);
 
 	return ret;
 }
 
-void
-vc4_gem_init(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused);
+int vc4_gem_init(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
@@ -1274,10 +1285,11 @@
 
 	INIT_LIST_HEAD(&vc4->purgeable.list);
 	mutex_init(&vc4->purgeable.lock);
+
+	return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
 }
 
-void
-vc4_gem_destroy(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
@@ -1290,7 +1302,7 @@
 	 * the overflow allocation registers. Now free the object.
 	 */
 	if (vc4->bin_bo) {
-		drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
+		drm_gem_object_put(&vc4->bin_bo->base.base);
 		vc4->bin_bo = NULL;
 	}
 
@@ -1371,7 +1383,7 @@
 	ret = 0;
 
 out_put_gem:
-	drm_gem_object_put_unlocked(gem_obj);
+	drm_gem_object_put(gem_obj);
 
 	return ret;
 }
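
For context on the dma_resv conversion in the hunks above: the per-BO locking in vc4_lock_bo_reservations() keeps the same ww-mutex acquire/back-off pattern, only expressed through the dma_resv wrappers instead of touching bo->resv->lock directly. The sketch below is illustrative only and not part of the patch; lock_all_resvs(), objs and count are hypothetical names, and error handling beyond the -EDEADLK back-off is omitted.

/* Illustrative sketch only, not part of the patch: the generic
 * lock-everything-with-EDEADLK-back-off pattern that the converted
 * vc4_lock_bo_reservations() follows.  lock_all_resvs(), objs and
 * count are hypothetical; a real caller would also reserve fence
 * slots (dma_resv_reserve_shared()) before adding fences.
 */
#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

static int lock_all_resvs(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		/* Sleep on the object we lost to, then retry the rest. */
		ret = dma_resv_lock_slow_interruptible(objs[contended]->resv,
						       ctx);
		if (ret) {
			ww_acquire_done(ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv, ctx);
		if (ret) {
			int j;

			/* Drop every lock taken so far... */
			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);
			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			/* ...and on deadlock, back off and start over. */
			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(ctx);
			return ret;
		}
	}

	ww_acquire_done(ctx);
	return 0;
}

A caller would pair this with dma_resv_unlock() on each object plus ww_acquire_fini(), as vc4_unlock_bo_reservations() does in the hunks above.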