@@ -28,9 +28,8 @@
 
 #include <drm/ttm/ttm_placement.h>
 
-#include <drm/drmP.h>
 #include "vmwgfx_drv.h"
-#include "drm/ttm/ttm_object.h"
+#include "ttm_object.h"
 
 
 /**
@@ -259,7 +258,7 @@
 	ret = ttm_bo_validate(bo, &placement, &ctx);
 
 	/* For some reason we didn't end up at the start of vram */
-	WARN_ON(ret == 0 && bo->offset != 0);
+	WARN_ON(ret == 0 && bo->mem.start != 0);
 	if (!ret)
 		vmw_bo_pin_reserved(buf, true);
 
@@ -318,7 +317,7 @@
 {
 	if (bo->mem.mem_type == TTM_PL_VRAM) {
 		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
-		ptr->offset = bo->offset;
+		ptr->offset = bo->mem.start << PAGE_SHIFT;
 	} else {
 		ptr->gmrId = bo->mem.start;
 		ptr->offset = 0;
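
Note: with the cached ttm_buffer_object::offset field gone, both hunks above derive the device offset from the page-granular resource start instead. A minimal sketch of the conversion (the helper name is hypothetical, not part of this patch):

static u64 vmw_bo_mem_offset(struct ttm_buffer_object *bo)
{
	/* mem.start is in pages; shift up to get the byte offset in VRAM. */
	return (u64)bo->mem.start << PAGE_SHIFT;
}
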
@@ -342,7 +341,7 @@
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
-	lockdep_assert_held(&bo->resv->lock.base);
+	dma_resv_assert_held(bo->base.resv);
 
 	if (pin) {
 		if (vbo->pin_count++ > 0)
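
Note: the reservation object was renamed from reservation_object to dma_resv and now lives in the embedded GEM object, so the assertion no longer reaches into the ww_mutex internals by hand. A sketch of the resulting pattern, assuming a caller that has already reserved the BO (the function name is hypothetical):

static void vmw_bo_do_pinned_work(struct ttm_buffer_object *bo)
{
	/* Lockdep check on the dma_resv lock; a no-op without CONFIG_LOCKDEP. */
	dma_resv_assert_held(bo->base.resv);

	/* ... work that requires the buffer to stay put ... */
}
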
@@ -355,10 +354,12 @@
 
 	pl.fpfn = 0;
 	pl.lpfn = 0;
-	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
-		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+	pl.mem_type = bo->mem.mem_type;
+	pl.flags = bo->mem.placement;
 	if (pin)
 		pl.flags |= TTM_PL_FLAG_NO_EVICT;
+	else
+		pl.flags &= ~TTM_PL_FLAG_NO_EVICT;
 
 	memset(&placement, 0, sizeof(placement));
 	placement.num_placement = 1;
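
Note: instead of offering a broad mask of every acceptable memory type, the pin path now builds a single placement that pins the buffer exactly where it currently sits, toggling only the no-evict bit. A condensed sketch of the same placement using designated initializers in place of the memset (same effect, not the patch's literal code):

struct ttm_place pl = {
	.fpfn = 0,
	.lpfn = 0,				/* no address-range restriction */
	.mem_type = bo->mem.mem_type,		/* keep the current domain */
	.flags = pin ? bo->mem.placement | TTM_PL_FLAG_NO_EVICT
		     : bo->mem.placement & ~TTM_PL_FLAG_NO_EVICT,
};
struct ttm_placement placement = {
	.num_placement = 1,
	.placement = &pl,
};
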
@@ -441,7 +442,8 @@
 	struct_size = backend_size +
 		ttm_round_pot(sizeof(struct vmw_buffer_object));
 	user_struct_size = backend_size +
-		ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+		ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+		TTM_OBJ_EXTRA_SIZE;
 	}
 
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -462,6 +464,8 @@
 {
 	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
 
+	WARN_ON(vmw_bo->dirty);
+	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
 	vmw_bo_unmap(vmw_bo);
 	kfree(vmw_bo);
 }
@@ -475,8 +479,11 @@
 static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 {
 	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
 
-	vmw_bo_unmap(&vmw_user_bo->vbo);
+	WARN_ON(vbo->dirty);
+	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+	vmw_bo_unmap(vbo);
 	ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
@@ -509,8 +516,9 @@
 
 	acc_size = vmw_bo_acc_size(dev_priv, size, user);
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
-
-	INIT_LIST_HEAD(&vmw_bo->res_list);
+	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+	vmw_bo->base.priority = 3;
+	vmw_bo->res_tree = RB_ROOT;
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 			  ttm_bo_type_device, placement,
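
Note: resources bound to a buffer are now tracked in an RB tree (res_tree) rather than the old res_list, and the BO is placed on TTM's highest-priority LRU; the BUILD_BUG_ON verifies at compile time that priority 3 is a valid index, since TTM keeps one LRU per priority from 0 to TTM_MAX_BO_PRIORITY - 1. A hypothetical sketch of walking the new tree, where the mob_node field name is an assumption about the resource struct:

struct rb_node *node;

for (node = rb_first(&vbo->res_tree); node; node = rb_next(node)) {
	struct vmw_resource *res =
		container_of(node, struct vmw_resource, mob_node);

	/* ... per-resource processing, e.g. unbinding on teardown ... */
}
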
@@ -533,7 +541,6 @@
 {
 	struct vmw_user_buffer_object *vmw_user_bo;
 	struct ttm_base_object *base = *p_base;
-	struct ttm_buffer_object *bo;
 
 	*p_base = NULL;
 
@@ -542,8 +549,7 @@
 
 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 				   prime.base);
-	bo = &vmw_user_bo->vbo.base;
-	ttm_bo_unref(&bo);
+	ttm_bo_put(&vmw_user_bo->vbo.base);
 }
 
 
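
Note: ttm_bo_unref() took a struct ttm_buffer_object ** and cleared the caller's pointer, which is why the temporary bo variable existed; ttm_bo_put() takes the pointer directly, matching the usual kref get/put convention. Side-by-side sketch of the two patterns:

/* Old pattern: a temporary is needed so the callee can NULL it. */
struct ttm_buffer_object *bo = &vmw_user_bo->vbo.base;
ttm_bo_unref(&bo);

/* New pattern: a plain put, no temporary. */
ttm_bo_put(&vmw_user_bo->vbo.base);
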
@@ -566,7 +572,7 @@
 
 	switch (ref_type) {
 	case TTM_REF_SYNCCPU_WRITE:
-		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+		atomic_dec(&user_bo->vbo.cpu_writers);
 	break;
 	default:
 		WARN_ONCE(true, "Undefined buffer object reference release.\n");
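
Note: TTM's built-in synccpu write grab/release helpers, and the cpu_writers counter they maintained in struct ttm_buffer_object, were removed, so the driver now keeps its own counter. This presumes a field added to the driver's BO struct elsewhere in this series, roughly:

struct vmw_buffer_object {
	struct ttm_buffer_object base;
	/* ... */
	atomic_t cpu_writers;	/* outstanding synccpu write grabs */
	/* ... */
};

The matching grab side is open-coded in the synccpu hunk further down; see the sketch there.
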
@@ -596,7 +602,6 @@
 			    struct ttm_base_object **p_base)
 {
 	struct vmw_user_buffer_object *user_bo;
-	struct ttm_buffer_object *tmp;
 	int ret;
 
 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
@@ -613,7 +618,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	tmp = ttm_bo_reference(&user_bo->vbo.base);
+	ttm_bo_get(&user_bo->vbo.base);
 	ret = ttm_prime_object_init(tfile,
 				    size,
 				    &user_bo->prime,
@@ -622,6 +627,6 @@
 				    &vmw_user_bo_release,
 				    &vmw_user_bo_ref_obj_release);
 	if (unlikely(ret != 0)) {
-		ttm_bo_unref(&tmp);
+		ttm_bo_put(&user_bo->vbo.base);
 		goto out_no_base_object;
 	}
628 | 633 | |
---|
.. | .. |
---|
631 | 636 | *p_base = &user_bo->prime.base; |
---|
632 | 637 | kref_get(&(*p_base)->refcount); |
---|
633 | 638 | } |
---|
634 | | - *handle = user_bo->prime.base.hash.key; |
---|
| 639 | + *handle = user_bo->prime.base.handle; |
---|
635 | 640 | |
---|
636 | 641 | out_no_base_object: |
---|
637 | 642 | return ret; |
---|
@@ -683,16 +688,16 @@
 			      struct ttm_object_file *tfile,
 			      uint32_t flags)
 {
+	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 	struct ttm_buffer_object *bo = &user_bo->vbo.base;
 	bool existed;
 	int ret;
 
 	if (flags & drm_vmw_synccpu_allow_cs) {
-		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 		long lret;
 
-		lret = reservation_object_wait_timeout_rcu
-			(bo->resv, true, true,
+		lret = dma_resv_wait_timeout_rcu
+			(bo->base.resv, true, true,
 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 		if (!lret)
 			return -EBUSY;
@@ -701,15 +706,22 @@
 		return 0;
 	}
 
-	ret = ttm_bo_synccpu_write_grab
-		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_wait(bo, true, nonblock);
+	if (likely(ret == 0))
+		atomic_inc(&user_bo->vbo.cpu_writers);
+
+	ttm_bo_unreserve(bo);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 	if (ret != 0 || existed)
-		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+		atomic_dec(&user_bo->vbo.cpu_writers);
 
 	return ret;
 }
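
Note: this open-codes the removed ttm_bo_synccpu_write_grab(): reserve the BO, wait for pending GPU work, bump the driver-side cpu_writers counter (see the struct sketch above), then drop the reservation. Isolated as a helper, the grab side looks roughly like this (the function name is hypothetical):

static int vmw_bo_synccpu_grab_write(struct vmw_user_buffer_object *user_bo,
				     bool nonblock)
{
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	int ret;

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);	/* lock the reservation */
	if (ret)
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);	/* idle pending GPU work */
	if (!ret)
		atomic_inc(&user_bo->vbo.cpu_writers);	/* paired with atomic_dec on release */

	ttm_bo_unreserve(bo);
	return ret;
}
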
@@ -837,7 +849,7 @@
 		goto out_no_bo;
 
 	rep->handle = handle;
-	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
 
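
Note: the extra .base hop appears because struct ttm_buffer_object now embeds a struct drm_gem_object, which owns the mmap offset node. The chain of containment, as a sketch:

struct vmw_buffer_object *vbo = /* driver wrapper */;
struct ttm_buffer_object *tbo = &vbo->base;	/* TTM object */
struct drm_gem_object *gobj = &tbo->base;	/* embedded GEM object */

u64 map_handle = drm_vma_node_offset_addr(&gobj->vma_node);
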
@@ -910,7 +922,7 @@
 
 	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 				   prime.base);
-	(void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+	ttm_bo_get(&vmw_user_bo->vbo.base);
 	if (p_base)
 		*p_base = base;
 	else
@@ -920,6 +932,47 @@
 	return 0;
 }
 
+/**
+ * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle.
+ *
+ * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * struct vmw_buffer_object it derives from without refcounting the pointer.
+ * The returned pointer is only valid until vmw_user_bo_noref_release() is
+ * called, and the object pointed to by the returned pointer may be doomed.
+ * Any persistent usage of the object requires a refcount to be taken using
+ * ttm_bo_reference_unless_doomed(). If this function returns successfully, it
+ * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
+ * scheduling functions may be called in between these function calls.
+ *
+ * Return: A struct vmw_buffer_object pointer if successful or a negative
+ * error pointer on failure.
+ */
+struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+	struct vmw_user_buffer_object *vmw_user_bo;
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_noref_lookup(tfile, handle);
+	if (!base) {
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return ERR_PTR(-ESRCH);
+	}
+
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+		ttm_base_object_noref_release();
+		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+			  (unsigned long)handle);
+		return ERR_PTR(-EINVAL);
+	}
+
+	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+				   prime.base);
+	return &vmw_user_bo->vbo;
+}
 
 /**
  * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
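
Note: a sketch of the intended calling pattern under the constraints in the kernel-doc above. It assumes vmw_user_bo_noref_release() is the thin wrapper around ttm_base_object_noref_release() used elsewhere in the driver, and arg->handle stands in for whatever handle the ioctl supplies:

struct vmw_buffer_object *vbo;

vbo = vmw_user_bo_noref_lookup(tfile, arg->handle);
if (IS_ERR(vbo))
	return PTR_ERR(vbo);

/* Non-sleeping use of vbo only; take a real reference for anything
 * that must outlive the release below. */

vmw_user_bo_noref_release();
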
@@ -940,7 +993,7 @@
 
 	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 
-	*handle = user_bo->prime.base.hash.key;
+	*handle = user_bo->prime.base.handle;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 				  TTM_REF_USAGE, NULL, false);
 }
@@ -968,10 +1021,10 @@
 
 	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
 		dma_fence_put(&fence->base);
 	} else
-		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
 }
 
 
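
Note: after the rename, both branches end in the same dma_resv_add_excl_fence() call, so a possible follow-up cleanup (a sketch, not part of this patch) would hoist it and keep only the fence-ownership difference:

bool local_fence = (fence == NULL);

if (local_fence)
	vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);

dma_resv_add_excl_fence(bo->base.resv, &fence->base);

if (local_fence)
	dma_fence_put(&fence->base);	/* drop the reference we created */
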
@@ -1038,7 +1091,7 @@
 	if (ret != 0)
 		return -EINVAL;
 
-	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
 	vmw_bo_unreference(&out_buf);
 	return 0;
 }
@@ -1084,14 +1137,14 @@
  * vmw_bo_move_notify - TTM move_notify_callback
  *
  * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
+ * @mem: The struct ttm_resource indicating to what memory
  * region the move is taking place.
  *
  * Detaches cached maps and device bindings that require that the
  * buffer doesn't move.
  */
 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
-			struct ttm_mem_reg *mem)
+			struct ttm_resource *mem)
 {
 	struct vmw_buffer_object *vbo;
 
---|