| .. | .. |
| 23 | 23 | */ |
| 24 | 24 | |
| 25 | 25 | #include <linux/firmware.h> |
| 26 | | -#include <drm/drmP.h> |
| | 26 | + |
| 27 | 27 | #include "amdgpu.h" |
| 28 | 28 | #include "amdgpu_uvd.h" |
| 29 | 29 | #include "cikd.h" |
| .. | .. |
| 108 | 108 | int r; |
| 109 | 109 | |
| 110 | 110 | /* UVD TRAP */ |
| 111 | | - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); |
| | 111 | + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); |
| 112 | 112 | if (r) |
| 113 | 113 | return r; |
| 114 | 114 | |
| .. | .. |
| 118 | 118 | |
| 119 | 119 | ring = &adev->uvd.inst->ring; |
| 120 | 120 | sprintf(ring->name, "uvd"); |
| 121 | | - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); |
| | 121 | + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0, |
| | 122 | + AMDGPU_RING_PRIO_DEFAULT); |
| 122 | 123 | if (r) |
| 123 | 124 | return r; |
| 124 | 125 | |
| .. | .. |
| 162 | 163 | uvd_v4_2_enable_mgcg(adev, true); |
| 163 | 164 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
| 164 | 165 | |
| 165 | | - ring->ready = true; |
| 166 | | - r = amdgpu_ring_test_ring(ring); |
| 167 | | - if (r) { |
| 168 | | - ring->ready = false; |
| | 166 | + r = amdgpu_ring_test_helper(ring); |
| | 167 | + if (r) |
| 169 | 168 | goto done; |
| 170 | | - } |
| 171 | 169 | |
| 172 | 170 | r = amdgpu_ring_alloc(ring, 10); |
| 173 | 171 | if (r) { |
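The hunk above replaces the open-coded ready-flag/ring-test sequence with a single call to amdgpu_ring_test_helper(). The helper's body is not part of this diff; the sketch below only illustrates, under the assumption that the ready state now lives in ring->sched.ready, what such a helper consolidates. It also explains why the hw_fini() hunk further down can drop its manual `ring->ready = false`.

```c
/*
 * Illustrative sketch only -- not the actual amdgpu implementation.
 * It shows the pattern the hunk above stops open-coding: run the ring
 * test once and record whether the ring is usable, so hw_init() and
 * hw_fini() no longer toggle a ready flag by hand.
 */
static int ring_test_helper_sketch(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);	/* submit a trivial packet and poll for completion */

	ring->sched.ready = !r;			/* assumed: ready state is tracked by the GPU scheduler */
	return r;
}
```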
| .. | .. |
| 213 | 211 | static int uvd_v4_2_hw_fini(void *handle) |
| 214 | 212 | { |
| 215 | 213 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 216 | | - struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
| 217 | 214 | |
| 218 | 215 | if (RREG32(mmUVD_STATUS) != 0) |
| 219 | 216 | uvd_v4_2_stop(adev); |
| 220 | | - |
| 221 | | - ring->ready = false; |
| 222 | 217 | |
| 223 | 218 | return 0; |
| 224 | 219 | } |
| .. | .. |
| 353 | 348 | /* Set the write pointer delay */ |
| 354 | 349 | WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); |
| 355 | 350 | |
| 356 | | - /* programm the 4GB memory segment for rptr and ring buffer */ |
| | 351 | + /* program the 4GB memory segment for rptr and ring buffer */ |
| 357 | 352 | WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) | |
| 358 | 353 | (0x7 << 16) | (0x1 << 31)); |
| 359 | 354 | |
| .. | .. |
| 484 | 479 | |
| 485 | 480 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
| 486 | 481 | r = amdgpu_ring_alloc(ring, 3); |
| 487 | | - if (r) { |
| 488 | | - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", |
| 489 | | - ring->idx, r); |
| | 482 | + if (r) |
| 490 | 483 | return r; |
| 491 | | - } |
| | 484 | + |
| 492 | 485 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
| 493 | 486 | amdgpu_ring_write(ring, 0xDEADBEEF); |
| 494 | 487 | amdgpu_ring_commit(ring); |
| .. | .. |
| 496 | 489 | tmp = RREG32(mmUVD_CONTEXT_ID); |
| 497 | 490 | if (tmp == 0xDEADBEEF) |
| 498 | 491 | break; |
| 499 | | - DRM_UDELAY(1); |
| | 492 | + udelay(1); |
| 500 | 493 | } |
| 501 | 494 | |
| 502 | | - if (i < adev->usec_timeout) { |
| 503 | | - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", |
| 504 | | - ring->idx, i); |
| 505 | | - } else { |
| 506 | | - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", |
| 507 | | - ring->idx, tmp); |
| 508 | | - r = -EINVAL; |
| 509 | | - } |
| | 495 | + if (i >= adev->usec_timeout) |
| | 496 | + r = -ETIMEDOUT; |
| | 497 | + |
| 510 | 498 | return r; |
| 511 | 499 | } |
| 512 | 500 | |
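Reassembled for readability, uvd_v4_2_ring_test_ring() reads as follows after the two hunks above. The local declarations and the polling-loop header sit outside the shown context and are assumptions; the visible behavioural change is that a timeout now yields -ETIMEDOUT and the per-driver DRM_DEBUG/DRM_ERROR reporting is left to the shared ring-test helper.

```c
/* Post-patch view, pieced together from the hunks above; the declarations
 * and the for-loop header are not in the diff and are assumed. */
static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
```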
| .. | .. |
| 519 | 507 | * Write ring commands to execute the indirect buffer |
| 520 | 508 | */ |
| 521 | 509 | static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, |
| | 510 | + struct amdgpu_job *job, |
| 522 | 511 | struct amdgpu_ib *ib, |
| 523 | | - unsigned vmid, bool ctx_switch) |
| | 512 | + uint32_t flags) |
| 524 | 513 | { |
| 525 | 514 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); |
| 526 | 515 | amdgpu_ring_write(ring, ib->gpu_addr); |
| .. | .. |
| 552 | 541 | uint64_t addr; |
| 553 | 542 | uint32_t size; |
| 554 | 543 | |
| 555 | | - /* programm the VCPU memory controller bits 0-27 */ |
| | 544 | + /* program the VCPU memory controller bits 0-27 */ |
| 556 | 545 | addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; |
| 557 | 546 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; |
| 558 | 547 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); |
| .. | .. |
| 750 | 739 | .type = AMDGPU_RING_TYPE_UVD, |
| 751 | 740 | .align_mask = 0xf, |
| 752 | 741 | .support_64bit_ptrs = false, |
| | 742 | + .no_user_fence = true, |
| 753 | 743 | .get_rptr = uvd_v4_2_ring_get_rptr, |
| 754 | 744 | .get_wptr = uvd_v4_2_ring_get_wptr, |
| 755 | 745 | .set_wptr = uvd_v4_2_ring_set_wptr, |
|---|