@@ -31,7 +31,8 @@
  */
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
+#include <linux/dma-buf.h>
+
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_cache.h>
 #include "amdgpu.h"

@@ -50,18 +51,6 @@
  * uvd, etc. for kernel managed allocations used by the GPU.
  *
  */
-
-static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- if (amdgpu_gpu_recovery == 0 ||
- (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
- return false;
-
- return true;
-}
 
 /**
  * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting

@@ -92,20 +81,19 @@
  if (bo->pin_count > 0)
  amdgpu_bo_subtract_pin_size(bo);
 
- if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_system_memory_limit(bo);
-
  amdgpu_bo_kunmap(bo);
 
- if (bo->gem_base.import_attach)
- drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
- drm_gem_object_release(&bo->gem_base);
- amdgpu_bo_unref(&bo->parent);
+ if (bo->tbo.base.import_attach)
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
+ /* in case amdgpu_device_recover_vram got NULL of bo->parent */
  if (!list_empty(&bo->shadow_list)) {
  mutex_lock(&adev->shadow_list_lock);
  list_del_init(&bo->shadow_list);
  mutex_unlock(&adev->shadow_list_lock);
  }
+ amdgpu_bo_unref(&bo->parent);
+
  kfree(bo->metadata);
  kfree(bo);
 }

@@ -148,8 +136,8 @@
 
  places[c].fpfn = 0;
  places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ places[c].mem_type = TTM_PL_VRAM;
+ places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 
  if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
  places[c].lpfn = visible_pfn;

@@ -163,11 +151,9 @@
 
  if (domain & AMDGPU_GEM_DOMAIN_GTT) {
  places[c].fpfn = 0;
- if (flags & AMDGPU_GEM_CREATE_SHADOW)
- places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- else
- places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_TT;
+ places[c].lpfn = 0;
+ places[c].mem_type = TTM_PL_TT;
+ places[c].flags = 0;
  if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
  places[c].flags |= TTM_PL_FLAG_WC |
  TTM_PL_FLAG_UNCACHED;

@@ -179,7 +165,8 @@
  if (domain & AMDGPU_GEM_DOMAIN_CPU) {
  places[c].fpfn = 0;
  places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_SYSTEM;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = 0;
  if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
  places[c].flags |= TTM_PL_FLAG_WC |
  TTM_PL_FLAG_UNCACHED;

@@ -191,28 +178,32 @@
  if (domain & AMDGPU_GEM_DOMAIN_GDS) {
  places[c].fpfn = 0;
  places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+ places[c].mem_type = AMDGPU_PL_GDS;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
  c++;
  }
 
  if (domain & AMDGPU_GEM_DOMAIN_GWS) {
  places[c].fpfn = 0;
  places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+ places[c].mem_type = AMDGPU_PL_GWS;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
  c++;
  }
 
  if (domain & AMDGPU_GEM_DOMAIN_OA) {
  places[c].fpfn = 0;
  places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+ places[c].mem_type = AMDGPU_PL_OA;
+ places[c].flags = TTM_PL_FLAG_UNCACHED;
  c++;
  }
 
  if (!c) {
  places[c].fpfn = 0;
  places[c].lpfn = 0;
- places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = TTM_PL_MASK_CACHING;
  c++;
  }
 
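Note: every placement hunk above makes the same mechanical conversion for the new TTM placement API: the memory type moves out of the flags word into the dedicated mem_type field of struct ttm_place, and flags keeps only the caching bits. A minimal before/after sketch (illustrative, not part of the patch):

    /* old style: domain selected by a TTM_PL_FLAG_* bit in .flags */
    place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;

    /* new style: domain in .mem_type, caching bits stay in .flags */
    place.mem_type = TTM_PL_VRAM;
    place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;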

@@ -253,12 +244,18 @@
  bool free = false;
  int r;
 
+ if (!size) {
+ amdgpu_bo_unref(bo_ptr);
+ return 0;
+ }
+
  memset(&bp, 0, sizeof(bp));
  bp.size = size;
  bp.byte_align = align;
  bp.domain = domain;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
+ : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
  bp.type = ttm_bo_type_kernel;
  bp.resv = NULL;
 
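Note: the hunk above changes the reserved kernel-BO creation path: a zero-byte request now simply drops the caller's reference and returns success, and CPU visibility is only requested when the caller actually asked for a CPU mapping. A hedged usage sketch (variable names are illustrative; the call signature follows the in-tree call shown in the next hunk):

    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;

    /* cpu_addr == NULL now yields an AMDGPU_GEM_CREATE_NO_CPU_ACCESS BO */
    r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM, &bo,
                                  &gpu_addr, NULL);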

@@ -346,9 +343,77 @@
  if (r)
  return r;
 
- amdgpu_bo_unreserve(*bo_ptr);
+ if (*bo_ptr)
+ amdgpu_bo_unreserve(*bo_ptr);
 
  return 0;
+}
+
+/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @domain: where to place it
+ * @bo_ptr: used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in the address space of the domain.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int i;
+ int r;
+
+ offset &= PAGE_MASK;
+ size = ALIGN(size, PAGE_SIZE);
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+ NULL, cpu_addr);
+ if (r)
+ return r;
+
+ if ((*bo_ptr) == NULL)
+ return 0;
+
+ /*
+ * Remove the original mem node and create a new one at the request
+ * position.
+ */
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo_ptr);
+
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+
+ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+ &(*bo_ptr)->tbo.mem, &ctx);
+ if (r)
+ goto error;
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r)
+ goto error;
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+ return 0;
+
+error:
+ amdgpu_bo_unreserve(*bo_ptr);
+ amdgpu_bo_unref(bo_ptr);
+ return r;
 }
 
 /**
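Note: amdgpu_bo_create_kernel_at(), added above, first creates an ordinary reserved kernel BO and then re-places it so it covers exactly [offset, offset + size) within the requested domain. A hedged usage sketch (the offset/size values and variable names are made up for illustration):

    struct amdgpu_bo *bo = NULL;

    /* claim a specific VRAM range, e.g. an area reserved by firmware */
    r = amdgpu_bo_create_kernel_at(adev, reserved_offset, reserved_size,
                                   AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL);
    if (r)
        return r;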

@@ -386,14 +451,14 @@
 static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 unsigned long size, u32 domain)
 {
- struct ttm_mem_type_manager *man = NULL;
+ struct ttm_resource_manager *man = NULL;
 
  /*
  * If GTT is part of requested domains the check must succeed to
  * allow fall back to GTT
  */
  if (domain & AMDGPU_GEM_DOMAIN_GTT) {
- man = &adev->mman.bdev.man[TTM_PL_TT];
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
  if (size < (man->size << PAGE_SHIFT))
  return true;

@@ -402,7 +467,7 @@
  }
 
  if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
- man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
  if (size < (man->size << PAGE_SHIFT))
  return true;

@@ -420,13 +485,47 @@
  return false;
 }
 
+bool amdgpu_bo_support_uswc(u64 bo_flags)
+{
+
+#ifdef CONFIG_X86_32
+ /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ */
+ return false;
+#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+ /* Don't try to enable write-combining when it can't work, or things
+ * may be slow
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+ */
+
+#ifndef CONFIG_COMPILE_TEST
+#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+ thanks to write-combining
+#endif
+
+ if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ "better performance thanks to write-combining\n");
+ return false;
+#else
+ /* For architectures that don't support WC memory,
+ * mask out the WC flag from the BO
+ */
+ if (!drm_arch_can_wc_memory())
+ return false;
+
+ return true;
+#endif
+}
+
 static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 struct amdgpu_bo_param *bp,
 struct amdgpu_bo **bo_ptr)
 {
 struct ttm_operation_ctx ctx = {
 .interruptible = (bp->type != ttm_bo_type_kernel),
- .no_wait_gpu = false,
+ .no_wait_gpu = bp->no_wait_gpu,
  .resv = bp->resv,
  .flags = bp->type != ttm_bo_type_kernel ?
  TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
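Note: amdgpu_bo_support_uswc(), added above, folds the old per-BO #ifdef maze into a single predicate: can this build use write-combined (USWC) CPU mappings at all? Call sites then reduce to masking the flag, exactly as a later hunk in amdgpu_bo_do_create() does:

    if (!amdgpu_bo_support_uswc(bo->flags))
        bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;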

@@ -436,8 +535,20 @@
  size_t acc_size;
  int r;
 
- page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
- size = ALIGN(size, PAGE_SIZE);
+ /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
+ if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
+ /* GWS and OA don't need any alignment. */
+ page_align = bp->byte_align;
+ size <<= PAGE_SHIFT;
+ } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
+ /* Both size and alignment must be a multiple of 4. */
+ page_align = ALIGN(bp->byte_align, 4);
+ size = ALIGN(size, 4) << PAGE_SHIFT;
+ } else {
+ /* Memory should be aligned at least to a page size. */
+ page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ size = ALIGN(size, PAGE_SIZE);
+ }
 
  if (!amdgpu_bo_validate_size(adev, size, bp->domain))
  return -ENOMEM;
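Note: the sizing hunk above special-cases the on-chip domains, where the incoming size is counted in resources rather than bytes and each resource is billed as one page. Worked numbers, assuming 4 KiB pages (illustrative only):

    /* GWS request: size 16,   byte_align 1   -> page_align 1, size 16 << 12
     * GDS request: size 6,    byte_align 1   -> page_align 4, size 8 << 12
     * GTT request: size 6000, byte_align 256 -> page_align 1, size 8192 bytes
     */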

@@ -450,9 +561,9 @@
  bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
  if (bo == NULL)
  return -ENOMEM;
- drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+ drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
  INIT_LIST_HEAD(&bo->shadow_list);
- INIT_LIST_HEAD(&bo->va);
+ bo->vm_bo = NULL;
  bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
  bp->domain;
  bo->allowed_domains = bo->preferred_domains;

@@ -462,36 +573,15 @@
 
  bo->flags = bp->flags;
 
-#ifdef CONFIG_X86_32
- /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
- * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
- */
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
- /* Don't try to enable write-combining when it can't work, or things
- * may be slow
- * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
- */
-
-#ifndef CONFIG_COMPILE_TEST
-#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
- thanks to write-combining
-#endif
-
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
- "better performance thanks to write-combining\n");
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-#else
- /* For architectures that don't support WC memory,
- * mask out the WC flag from the BO
- */
- if (!drm_arch_can_wc_memory())
+ if (!amdgpu_bo_support_uswc(bo->flags))
  bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-#endif
 
  bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_bo_placement_from_domain(bo, bp->domain);
+ if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
+ AMDGPU_GEM_DOMAIN_GDS))
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ else
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
  if (bp->type == ttm_bo_type_kernel)
  bo->tbo.priority = 1;
 

@@ -510,10 +600,10 @@
  amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
  if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ bo->tbo.mem.mem_type == TTM_PL_VRAM) {
  struct dma_fence *fence;
 
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
  if (unlikely(r))
  goto fail_unreserve;
 

@@ -536,13 +626,13 @@
 
 fail_unreserve:
  if (!bp->resv)
- ww_mutex_unlock(&bo->tbo.resv->lock);
+ dma_resv_unlock(bo->tbo.base.resv);
  amdgpu_bo_unref(&bo);
  return r;
 }
 
 static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
+ unsigned long size,
 struct amdgpu_bo *bo)
 {
 struct amdgpu_bo_param bp;

@@ -553,18 +643,17 @@
 
  memset(&bp, 0, sizeof(bp));
  bp.size = size;
- bp.byte_align = byte_align;
  bp.domain = AMDGPU_GEM_DOMAIN_GTT;
  bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
  AMDGPU_GEM_CREATE_SHADOW;
  bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.resv;
+ bp.resv = bo->tbo.base.resv;
 
  r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
  if (!r) {
  bo->shadow->parent = amdgpu_bo_ref(bo);
  mutex_lock(&adev->shadow_list_lock);
- list_add_tail(&bo->shadow_list, &adev->shadow_list);
+ list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
  mutex_unlock(&adev->shadow_list_lock);
  }
 

@@ -597,67 +686,20 @@
  if (r)
  return r;
 
- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
  if (!bp->resv)
- WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
  NULL));
 
- r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
+ r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
  if (!bp->resv)
- reservation_object_unlock((*bo_ptr)->tbo.resv);
+ dma_resv_unlock((*bo_ptr)->tbo.base.resv);
 
  if (r)
  amdgpu_bo_unref(bo_ptr);
  }
 
- return r;
-}
-
-/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
  return r;
 }
 

@@ -696,13 +738,10 @@
 }
 
 /**
- * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be restored
- * @resv: reservation object with embedded fence
+ * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
+ *
+ * @shadow: &amdgpu_bo shadow to be restored
  * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
  *
  * Copies a buffer object's shadow content back to the object.
  * This is used for recovering a buffer from its shadow in case of a gpu

@@ -711,35 +750,18 @@
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
+int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 
 {
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ uint64_t shadow_addr, parent_addr;
 
- if (!shadow)
- return -EINVAL;
+ shadow_addr = amdgpu_bo_gpu_offset(shadow);
+ parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
 
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
+ return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
+ amdgpu_bo_size(shadow), NULL, fence,
+ true, false, false);
 }
 
 /**
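Note: amdgpu_bo_restore_shadow() now derives the copy ring and both GPU addresses from the shadow BO itself, so GPU-reset recovery only needs the shadow and a fence pointer. A hedged sketch of a recovery step (error handling trimmed, names illustrative):

    struct dma_fence *fence = NULL;

    /* copy the GTT shadow back over its VRAM parent after a GPU reset */
    r = amdgpu_bo_restore_shadow(shadow, &fence);
    if (!r && fence)
        dma_fence_wait(fence, false);
    dma_fence_put(fence);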

@@ -769,7 +791,7 @@
  return 0;
 }
 
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
  MAX_SCHEDULE_TIMEOUT);
  if (r < 0)
  return r;

@@ -883,6 +905,10 @@
  if (WARN_ON_ONCE(min_offset > max_offset))
  return -EINVAL;
 
+ /* Check domain to be pinned to against preferred domains */
+ if (bo->preferred_domains & domain)
+ domain = bo->preferred_domains & domain;
+
  /* A shared bo cannot be migrated to VRAM */
  if (bo->prime_shared_count) {
  if (domain & AMDGPU_GEM_DOMAIN_GTT)

@@ -905,13 +931,17 @@
  bo->pin_count++;
 
  if (max_offset != 0) {
- u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
+ u64 domain_start = amdgpu_ttm_domain_start(adev,
+ mem_type);
  WARN_ON_ONCE(max_offset <
  (amdgpu_bo_gpu_offset(bo) - domain_start));
  }
 
  return 0;
  }
+
+ if (bo->tbo.base.import_attach)
+ dma_buf_pin(bo->tbo.base.import_attach);
 
  bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
  /* force to pin into visible video ram */
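Note: this hunk and the matching one in the unpin path below keep an imported dma-buf attachment pinned for as long as the amdgpu BO itself is pinned, so the exporter cannot migrate the backing pages; that is also why <linux/dma-buf.h> is now included at the top of the file. The pairing, in sketch form (taken from the two hunks, not new code):

    /* pin path */
    if (bo->tbo.base.import_attach)
        dma_buf_pin(bo->tbo.base.import_attach);

    /* unpin path */
    if (bo->tbo.base.import_attach)
        dma_buf_unpin(bo->tbo.base.import_attach);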

@@ -986,7 +1016,7 @@
  struct ttm_operation_ctx ctx = { false, false };
  int r, i;
 
- if (!bo->pin_count) {
+ if (WARN_ON_ONCE(!bo->pin_count)) {
  dev_warn(adev->dev, "%p unpin not necessary\n", bo);
  return 0;
  }

@@ -995,6 +1025,9 @@
  return 0;
 
  amdgpu_bo_subtract_pin_size(bo);
+
+ if (bo->tbo.base.import_attach)
+ dma_buf_unpin(bo->tbo.base.import_attach);
 
  for (i = 0; i < bo->placement.num_placement; i++) {
  bo->placements[i].lpfn = 0;

@@ -1020,10 +1053,12 @@
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
  /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
- if (0 && (adev->flags & AMD_IS_APU)) {
+#ifndef CONFIG_HIBERNATION
+ if (adev->flags & AMD_IS_APU) {
  /* Useless to evict on IGP chips */
  return 0;
  }
+#endif
  return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
 }
 

@@ -1037,6 +1072,7 @@
  "HBM",
  "DDR3",
  "DDR4",
+ "GDDR6",
 };
 
 /**

@@ -1108,7 +1144,10 @@
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 struct vm_area_struct *vma)
 {
- return ttm_fbdev_mmap(vma, &bo->tbo);
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ return ttm_bo_mmap_obj(vma, &bo->tbo);
 }
 
 /**

@@ -1144,7 +1183,7 @@
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ dma_resv_assert_held(bo->tbo.base.resv);
 
  if (tiling_flags)
  *tiling_flags = bo->tiling_flags;

@@ -1242,11 +1281,11 @@
  */
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 struct amdgpu_bo *abo;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = &bo->mem;
 
  if (!amdgpu_bo_is_amdgpu_bo(bo))
  return;

@@ -1255,6 +1294,10 @@
  amdgpu_vm_bo_invalidate(adev, abo, evict);
 
  amdgpu_bo_kunmap(abo);
+
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ bo->mem.mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
 
  /* remember the eviction */
  if (evict)

@@ -1266,5 +1309,48 @@
 
  /* move_notify is called before move happens */
  trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+}
+
+/**
+ * amdgpu_bo_release_notify - notification about a BO being released
+ * @bo: pointer to a buffer object
+ *
+ * Wipes VRAM buffers whose contents should not be leaked before the
+ * memory is released.
+ */
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+{
+ struct dma_fence *fence = NULL;
+ struct amdgpu_bo *abo;
+ int r;
+
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return;
+
+ abo = ttm_to_amdgpu_bo(bo);
+
+ if (abo->kfd_bo)
+ amdgpu_amdkfd_unreserve_memory_limit(abo);
+
+ /* We only remove the fence if the resv has individualized. */
+ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
+ && bo->base.resv != &bo->base._resv);
+ if (bo->base.resv == &bo->base._resv)
+ amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+
+ if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+ !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+ return;
+
+ if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+ return;
+
+ r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
+ if (!WARN_ON(r)) {
+ amdgpu_bo_fence(abo, fence, false);
+ dma_fence_put(fence);
+ }
+
+ dma_resv_unlock(bo->base.resv);
 }
 
 /**
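Note: amdgpu_bo_release_notify(), added above, also takes over the KFD memory-limit bookkeeping that used to sit in the destroy path (first hunks of this file), and it only wipes buffers that opted in via the new flag. A hedged sketch of opting a sensitive VRAM allocation into the wipe (the amdgpu_bo_param usage mirrors earlier hunks; values and the create call are illustrative):

    memset(&bp, 0, sizeof(bp));
    bp.size = size;
    bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
    bp.type = ttm_bo_type_kernel;
    /* ask for the contents to be overwritten with AMDGPU_POISON on release */
    bp.flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
    r = amdgpu_bo_create(adev, &bp, &bo);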

@@ -1340,12 +1426,58 @@
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 bool shared)
 {
- struct reservation_object *resv = bo->tbo.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
 
  if (shared)
- reservation_object_add_shared_fence(resv, fence);
+ dma_resv_add_shared_fence(resv, fence);
  else
- reservation_object_add_excl_fence(resv, fence);
+ dma_resv_add_excl_fence(resv, fence);
+}
+
+/**
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
+ *
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Extract the fences from the reservation object and waits for them to finish.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr)
+{
+ struct amdgpu_sync sync;
+ int r;
+
+ amdgpu_sync_create(&sync);
+ amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
+ r = amdgpu_sync_wait(&sync, intr);
+ amdgpu_sync_free(&sync);
+ return r;
+}
+
+/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER, owner, intr);
 }
 
 /**
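Note: amdgpu_bo_sync_wait_resv()/amdgpu_bo_sync_wait(), added above, wrap the usual amdgpu_sync create/resv/wait/free sequence. A hedged usage sketch (the owner value is one plausible choice, not mandated by this patch):

    /* wait for fences on the BO that do not belong to the given owner */
    r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
    if (r)
        return r;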

@@ -1361,15 +1493,31 @@
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
  WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
- WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
- !bo->pin_count);
+ WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
+ !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
  WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
  WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
  !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
- return bo->tbo.offset;
+ return amdgpu_bo_gpu_offset_no_check(bo);
+}
+
+/**
+ * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns:
+ * current GPU offset of the object without raising warnings.
+ */
+u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset;
+
+ offset = (bo->tbo.mem.start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+
+ return amdgpu_gmc_sign_extend(offset);
 }
 
 /**
---|