@@ -25,7 +25,7 @@
  * Alex Deucher
  * Jerome Glisse
  */
-#include <drm/drmP.h>
+
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 #include "radeon_trace.h"
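The removed `<drm/drmP.h>` was the old catch-all DRM header; its tree-wide removal left each file including only what it actually uses, and radeon_vm.c needed no replacement here (hence the bare added blank line). Purely as an illustrative sketch of the conversion pattern in a file that does need substitutes, with hypothetical includes not taken from this commit:

```c
/* Hypothetical example of the drmP.h conversion pattern: drop the
 * monolithic header and include only the specific DRM headers the
 * file actually uses. Not code from this commit. */
#include <drm/drm_device.h>	/* struct drm_device */
#include <drm/drm_file.h>	/* struct drm_file */
```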
@@ -142,7 +142,7 @@
 	list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].tv.bo = &vm->page_directory->tbo;
-	list[0].tv.shared = true;
+	list[0].tv.num_shared = 1;
 	list[0].tiling_flags = 0;
 	list_add(&list[0].tv.head, head);
 
@@ -154,7 +154,7 @@
 		list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].tv.bo = &list[idx].robj->tbo;
-		list[idx].tv.shared = true;
+		list[idx].tv.num_shared = 1;
 		list[idx].tiling_flags = 0;
 		list_add(&list[idx++].tv.head, head);
 	}
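The two hunks above track a TTM execbuf-util API change: `struct ttm_validate_buffer` replaced its `bool shared` with an `unsigned int num_shared`, the number of shared dma_resv fence slots to pre-reserve while the buffer list is validated, so `shared = true` maps one-for-one to `num_shared = 1`. A minimal sketch of filling such an entry, assuming that era's TTM API (`example_add_bo` is a made-up helper name):

```c
#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical helper: queue a BO for ttm_eu_reserve_buffers(),
 * pre-reserving one shared fence slot (the old shared = true). */
static void example_add_bo(struct ttm_validate_buffer *entry,
			   struct ttm_buffer_object *bo,
			   struct list_head *head)
{
	entry->bo = bo;
	entry->num_shared = 1;
	list_add(&entry->head, head);
}
```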
@@ -188,7 +188,7 @@
 	    vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
 		return NULL;
 
-	/* we definately need to flush */
+	/* we definitely need to flush */
 	vm_id->pd_gpu_addr = ~0ll;
 
 	/* skip over VMID 0, since it is the system VM */
@@ -296,9 +296,9 @@
 	struct radeon_bo_va *bo_va;
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->vm == vm) {
+		if (bo_va->vm == vm)
 			return bo_va;
-		}
+
 	}
 	return NULL;
 }
@@ -323,9 +323,9 @@
 	struct radeon_bo_va *bo_va;
 
 	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-	if (bo_va == NULL) {
+	if (bo_va == NULL)
 		return NULL;
-	}
+
 	bo_va->vm = vm;
 	bo_va->bo = bo;
 	bo_va->it.start = 0;
@@ -702,7 +702,7 @@
 	if (ib.length_dw != 0) {
 		radeon_asic_vm_pad_ib(rdev, &ib);
 
-		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
+		radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true);
 		WARN_ON(ib.length_dw > ndw);
 		r = radeon_ib_schedule(rdev, &ib, NULL, false);
 		if (r) {
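`pd->tbo.resv` becoming `pd->tbo.base.resv` follows the TTM-on-GEM rework: `struct ttm_buffer_object` gained an embedded `struct drm_gem_object base`, and the per-BO reservation object now lives on the GEM side. A one-line sketch of the new layering (the accessor name is made up):

```c
#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

/* Hypothetical accessor: the dma_resv hangs off the embedded GEM
 * object (bo->base), no longer directly off the TTM BO. */
static struct dma_resv *example_bo_resv(struct ttm_buffer_object *bo)
{
	return bo->base.resv;
}
```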
@@ -830,8 +830,8 @@
 		unsigned nptes;
 		uint64_t pte;
 		int r;
 
-		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
-		r = reservation_object_reserve_shared(pt->tbo.resv);
+		radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
+		r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
 		if (r)
 			return r;
 
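Two renames meet in this hunk: `struct reservation_object` became `struct dma_resv` (now in `<linux/dma-resv.h>`), and the reserve call gained an explicit slot count, so `reservation_object_reserve_shared(resv)` maps to `dma_resv_reserve_shared(resv, 1)`. Reserving up front means the later fence publication cannot fail. A minimal sketch assuming that era's API (the helper name is made up; the resv lock must be held around both calls):

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical pattern: reserve a shared-fence slot first (this can
 * fail with -ENOMEM), then add the fence once the GPU work is
 * queued; the add itself cannot fail because the slot already
 * exists. Caller holds the dma_resv lock. */
static int example_fence_bo(struct dma_resv *resv, struct dma_fence *fence)
{
	int r;

	r = dma_resv_reserve_shared(resv, 1);
	if (r)
		return r;

	dma_resv_add_shared_fence(resv, fence);
	return 0;
}
```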
@@ -911,7 +911,7 @@
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
 			struct radeon_bo_va *bo_va,
-			struct ttm_mem_reg *mem)
+			struct ttm_resource *mem)
 {
 	struct radeon_vm *vm = bo_va->vm;
 	struct radeon_ib ib;
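`struct ttm_mem_reg` was renamed `struct ttm_resource` in TTM's resource-manager rework; the fields this function touches (`start`, `mem_type`) kept their names, so only the signature changes. A tiny sketch of querying the renamed type (the helper name is made up):

```c
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical check: same placement logic, new type name. */
static bool example_is_vram(struct ttm_resource *mem)
{
	return mem && mem->mem_type == TTM_PL_VRAM;
}
```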
@@ -942,14 +942,14 @@
 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
 	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
-	if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+	if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm))
 		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
 
 	if (mem) {
-		addr = mem->start << PAGE_SHIFT;
-		if (mem->mem_type != TTM_PL_SYSTEM) {
+		addr = (u64)mem->start << PAGE_SHIFT;
+		if (mem->mem_type != TTM_PL_SYSTEM)
 			bo_va->flags |= RADEON_VM_PAGE_VALID;
-		}
+
 		if (mem->mem_type == TTM_PL_TT) {
 			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
 			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
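Two independent fixes above: `radeon_ttm_tt_is_readonly()` grew a `struct radeon_device *` parameter in radeon's TTM backend rework, and the `(u64)` cast matters on 32-bit kernels, where `mem->start` is a page offset held in an `unsigned long`, so the shift by `PAGE_SHIFT` can wrap before the assignment to the 64-bit `addr` widens it. A standalone userspace demonstration of that overflow (values chosen for illustration):

```c
/* Standalone demo of the overflow the (u64) cast prevents: with a
 * 32-bit start, the shift wraps before the 64-bit assignment can
 * widen the result. Build with: cc -o shift shift.c */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t start = 0x100000;			/* page 1M = 4 GiB offset */
	uint64_t bad   = start << PAGE_SHIFT;		/* 32-bit shift wraps to 0 */
	uint64_t good  = (uint64_t)start << PAGE_SHIFT;	/* 0x100000000 */

	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}
```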
@@ -1233,9 +1233,9 @@
 	struct radeon_bo_va *bo_va, *tmp;
 	int i, r;
 
-	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
 		dev_err(rdev->dev, "still active bo inside vm\n");
-	}
+
 	rbtree_postorder_for_each_entry_safe(bo_va, tmp,
 					     &vm->va.rb_root, it.rb) {
 		interval_tree_remove(&bo_va->it, &vm->va);
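The final hunk is another brace-style cleanup around the teardown walk. For reference, `rbtree_postorder_for_each_entry_safe()` visits nodes in post-order so each one can be freed while iterating, which is why this teardown path can remove and free every `bo_va` in a single pass. A minimal sketch of the same pattern on a made-up struct:

```c
#include <linux/rbtree.h>
#include <linux/slab.h>

/* Hypothetical node type; 'node' is the rb_node member the macro
 * walks, mirroring bo_va->it.rb above. */
struct example_item {
	struct rb_node node;
};

/* Post-order traversal is safe against kfree()ing the current node. */
static void example_free_all(struct rb_root *root)
{
	struct example_item *item, *tmp;

	rbtree_postorder_for_each_entry_safe(item, tmp, root, node)
		kfree(item);
}
```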