2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -22,7 +22,7 @@
  */
 
 #include <linux/firmware.h>
-#include <drm/drmP.h>
+
 #include "amdgpu.h"
 #include "amdgpu_uvd.h"
 #include "soc15.h"
@@ -183,11 +183,8 @@
 		return 0;
 
 	r = amdgpu_ring_alloc(ring, 16);
-	if (r) {
-		DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
-			  ring->me, ring->idx, r);
+	if (r)
 		return r;
-	}
 
 	rptr = amdgpu_ring_get_rptr(ring);
 
@@ -197,17 +194,11 @@
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (amdgpu_ring_get_rptr(ring) != rptr)
 			break;
-		DRM_UDELAY(1);
+		udelay(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-			  ring->me, ring->idx, i);
-	} else {
-		DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
-			  ring->me, ring->idx);
+	if (i >= adev->usec_timeout)
 		r = -ETIMEDOUT;
-	}
 
 	return r;
 }
@@ -223,29 +214,31 @@
  *
  * Open up a stream for HW test
  */
 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+				       struct amdgpu_bo *bo,
 				       struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
 
 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);
 
 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 	ib->ptr[ib->length_dw++] = handle;
 	ib->ptr[ib->length_dw++] = 0x00000000;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;
 
 	ib->ptr[ib->length_dw++] = 0x00000014;
 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -283,30 +276,32 @@
  *
  * Close up a stream for HW test or if userspace failed to do so
  */
-int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-				 bool direct, struct dma_fence **fence)
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+					struct amdgpu_bo *bo,
+					struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
-	uint64_t dummy;
+	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
 
 	ib = &job->ibs[0];
-	dummy = ib->gpu_addr + 1024;
+	addr = amdgpu_bo_gpu_offset(bo);
 
 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001;
 	ib->ptr[ib->length_dw++] = handle;
 	ib->ptr[ib->length_dw++] = 0x00000000;
-	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = addr;
 
 	ib->ptr[ib->length_dw++] = 0x00000014;
 	ib->ptr[ib->length_dw++] = 0x00000002;
@@ -320,11 +315,7 @@
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (direct)
-		r = amdgpu_job_submit_direct(job, ring, &f);
-	else
-		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
 
@@ -347,32 +338,33 @@
 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
+	struct amdgpu_bo *bo = NULL;
 	long r;
 
-	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
-		goto error;
-	}
+	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, NULL);
+	if (r)
+		return r;
 
-	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+	r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
+	if (r)
 		goto error;
-	}
+
+	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
+	if (r)
+		goto error;
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
-	if (r == 0) {
-		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+	if (r == 0)
 		r = -ETIMEDOUT;
-	} else if (r < 0) {
-		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
-	} else {
-		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+	else if (r > 0)
 		r = 0;
-	}
+
 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
 
@@ -444,6 +436,13 @@
 		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
 		adev->firmware.fw_size +=
 			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+
+		if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
+			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
+			adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
+			adev->firmware.fw_size +=
+				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+		}
 		DRM_INFO("PSP loading UVD firmware\n");
 	}
 
@@ -452,15 +451,17 @@
 			continue;
 		if (!amdgpu_sriov_vf(adev)) {
 			ring = &adev->uvd.inst[j].ring;
-			sprintf(ring->name, "uvd<%d>", j);
-			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+			sprintf(ring->name, "uvd_%d", ring->me);
+			r = amdgpu_ring_init(adev, ring, 512,
+					     &adev->uvd.inst[j].irq, 0,
+					     AMDGPU_RING_PRIO_DEFAULT);
 			if (r)
 				return r;
 		}
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst[j].ring_enc[i];
-			sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+			sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
 			if (amdgpu_sriov_vf(adev)) {
 				ring->use_doorbell = true;
 
@@ -468,11 +469,13 @@
 				 * sriov, so set unused location for other unused rings.
 				 */
 				if (i == 0)
-					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
 				else
-					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+					ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 			}
-			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+			r = amdgpu_ring_init(adev, ring, 512,
+					     &adev->uvd.inst[j].irq, 0,
+					     AMDGPU_RING_PRIO_DEFAULT);
 			if (r)
 				return r;
 		}
@@ -540,12 +543,9 @@
 		ring = &adev->uvd.inst[j].ring;
 
 		if (!amdgpu_sriov_vf(adev)) {
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 
 			r = amdgpu_ring_alloc(ring, 10);
 			if (r) {
@@ -582,12 +582,9 @@
 
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst[j].ring_enc[i];
-			ring->ready = true;
-			r = amdgpu_ring_test_ring(ring);
-			if (r) {
-				ring->ready = false;
+			r = amdgpu_ring_test_helper(ring);
+			if (r)
 				goto done;
-			}
 		}
 	}
 done:
@@ -607,19 +604,12 @@
 static int uvd_v7_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int i;
 
 	if (!amdgpu_sriov_vf(adev))
 		uvd_v7_0_stop(adev);
 	else {
 		/* full access mode, so don't touch any UVD register */
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
-	}
-
-	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
-		if (adev->uvd.harvest_config & (1 << i))
-			continue;
-		adev->uvd.inst[i].ring.ready = false;
 	}
 
 	return 0;
@@ -667,9 +657,14 @@
 			continue;
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+				i == 0 ?
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo:
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+				i == 0 ?
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi:
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
+			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
 			offset = 0;
 		} else {
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -677,10 +672,10 @@
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 				upper_32_bits(adev->uvd.inst[i].gpu_addr));
 			offset = size;
+			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 		}
 
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
-			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
 
 		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
@@ -805,10 +800,13 @@
 					    0xFFFFFFFF, 0x00000004);
 		/* mc resume*/
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
 			offset = 0;
 		} else {
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -816,10 +814,11 @@
 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				upper_32_bits(adev->uvd.inst[i].gpu_addr));
 			offset = size;
+			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
 		}
 
-		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
-			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 
 		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
@@ -1074,7 +1073,7 @@
 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
 				(upper_32_bits(ring->gpu_addr) >> 2));
 
-		/* programm the RB_BASE for ring buffer */
+		/* program the RB_BASE for ring buffer */
 		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
 				lower_32_bits(ring->gpu_addr));
 		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1230,11 +1229,9 @@
 
 	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
 	r = amdgpu_ring_alloc(ring, 3);
-	if (r) {
-		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
-			  ring->me, ring->idx, r);
+	if (r)
 		return r;
-	}
+
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
 	amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1243,17 +1240,12 @@
 		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
 		if (tmp == 0xDEADBEEF)
 			break;
-		DRM_UDELAY(1);
+		udelay(1);
 	}
 
-	if (i < adev->usec_timeout) {
-		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
-			  ring->me, ring->idx, i);
-	} else {
-		DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
-			  ring->me, ring->idx, tmp);
-		r = -EINVAL;
-	}
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
 	return r;
 }
 
@@ -1267,11 +1259,12 @@
 static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 					   uint32_t ib_idx)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
 	unsigned i;
 
 	/* No patching necessary for the first instance */
-	if (!p->ring->me)
+	if (!ring->me)
 		return 0;
 
 	for (i = 0; i < ib->length_dw; i += 2) {
@@ -1294,10 +1287,12 @@
  * Write ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job,
 				  struct amdgpu_ib *ib,
-				  unsigned vmid, bool ctx_switch)
+				  uint32_t flags)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1323,8 +1318,12 @@
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-				      struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+				      struct amdgpu_job *job,
+				      struct amdgpu_ib *ib,
+				      uint32_t flags)
 {
+	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
 	amdgpu_ring_write(ring, vmid);
 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1376,7 +1375,7 @@
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 	/* wait for reg writes */
-	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
 	data1 = lower_32_bits(pd_addr);
 	mask = 0xffffffff;
 	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
@@ -1418,7 +1417,8 @@
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 	/* wait for reg writes */
-	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
+	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
+					vmid * hub->ctx_addr_distance,
 					lower_32_bits(pd_addr), 0xffffffff);
 }
 
@@ -1694,7 +1694,7 @@
 					  enum amd_clockgating_state state)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+	bool enable = (state == AMD_CG_STATE_GATE);
 
 	uvd_v7_0_set_bypass_mode(adev, enable);
 
@@ -1773,7 +1773,8 @@
 	.type = AMDGPU_RING_TYPE_UVD,
 	.align_mask = 0xf,
 	.support_64bit_ptrs = false,
-	.vmhub = AMDGPU_MMHUB,
+	.no_user_fence = true,
+	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = uvd_v7_0_ring_get_rptr,
 	.get_wptr = uvd_v7_0_ring_get_wptr,
 	.set_wptr = uvd_v7_0_ring_set_wptr,
@@ -1805,7 +1806,8 @@
 	.align_mask = 0x3f,
 	.nop = HEVC_ENC_CMD_NO_OP,
 	.support_64bit_ptrs = false,
-	.vmhub = AMDGPU_MMHUB,
+	.no_user_fence = true,
+	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
 	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
 	.set_wptr = uvd_v7_0_enc_ring_set_wptr,