2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -21,6 +21,8 @@
  *
  */
 #include <linux/firmware.h>
+#include <linux/module.h>
+
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_gfx.h"
@@ -782,6 +784,25 @@
                 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+        tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                PIPE_CONFIG(ADDR_SURF_P4_8x16);
+        tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                NUM_BANKS(ADDR_SURF_16_BANK) |
+                TILE_SPLIT(split_equal_to_row_size);
+        tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+                ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+                BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+                MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+                NUM_BANKS(ADDR_SURF_16_BANK) |
+                TILE_SPLIT(split_equal_to_row_size);
 	tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
                 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
@@ -1552,10 +1573,10 @@
         adev->gfx.config.double_offchip_lds_buf = 0;
 }
 
-static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
+static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
 {
         u32 gb_addr_config = 0;
-        u32 mc_shared_chmap, mc_arb_ramcfg;
+        u32 mc_arb_ramcfg;
         u32 sx_debug_1;
         u32 hdp_host_path_cntl;
         u32 tmp;
@@ -1657,7 +1678,6 @@
 
         WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
 
-        mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
         adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
         mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
 
@@ -1775,18 +1795,15 @@
         int r;
 
         r = amdgpu_gfx_scratch_get(adev, &scratch);
-        if (r) {
-                DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+        if (r)
                 return r;
-        }
+
         WREG32(scratch, 0xCAFEDEAD);
 
         r = amdgpu_ring_alloc(ring, 3);
-        if (r) {
-                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
-                amdgpu_gfx_scratch_free(adev, scratch);
-                return r;
-        }
+        if (r)
+                goto error_free_scratch;
+
         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
         amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
         amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1796,15 +1813,13 @@
                 tmp = RREG32(scratch);
                 if (tmp == 0xDEADBEEF)
                         break;
-                DRM_UDELAY(1);
+                udelay(1);
         }
-        if (i < adev->usec_timeout) {
-                DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
-        } else {
-                DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
-                          ring->idx, scratch, tmp);
-                r = -EINVAL;
-        }
+
+        if (i >= adev->usec_timeout)
+                r = -ETIMEDOUT;
+
+error_free_scratch:
         amdgpu_gfx_scratch_free(adev, scratch);
         return r;
 }
@@ -1845,13 +1860,15 @@
 }
 
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+                                  struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
-                                  unsigned vmid, bool ctx_switch)
+                                  uint32_t flags)
 {
+        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
         u32 header, control = 0;
 
         /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-        if (ctx_switch) {
+        if (flags & AMDGPU_HAVE_CTX_SWITCH) {
                 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                 amdgpu_ring_write(ring, 0);
         }
@@ -1892,17 +1909,16 @@
         long r;
 
         r = amdgpu_gfx_scratch_get(adev, &scratch);
-        if (r) {
-                DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+        if (r)
                 return r;
-        }
+
         WREG32(scratch, 0xCAFEDEAD);
         memset(&ib, 0, sizeof(ib));
-        r = amdgpu_ib_get(adev, NULL, 256, &ib);
-        if (r) {
-                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+        r = amdgpu_ib_get(adev, NULL, 256,
+                          AMDGPU_IB_POOL_DIRECT, &ib);
+        if (r)
                 goto err1;
-        }
+
         ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
         ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
         ib.ptr[2] = 0xDEADBEEF;
@@ -1914,22 +1930,16 @@
 
         r = dma_fence_wait_timeout(f, false, timeout);
         if (r == 0) {
-                DRM_ERROR("amdgpu: IB test timed out\n");
                 r = -ETIMEDOUT;
                 goto err2;
         } else if (r < 0) {
-                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                 goto err2;
         }
         tmp = RREG32(scratch);
-        if (tmp == 0xDEADBEEF) {
-                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+        if (tmp == 0xDEADBEEF)
                 r = 0;
-        } else {
-                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-                          scratch, tmp);
+        else
                 r = -EINVAL;
-        }
 
 err2:
         amdgpu_ib_free(adev, &ib, NULL);
@@ -1941,7 +1951,6 @@
 
 static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
-        int i;
         if (enable) {
                 WREG32(mmCP_ME_CNTL, 0);
         } else {
@@ -1949,10 +1958,6 @@
                                       CP_ME_CNTL__PFP_HALT_MASK |
                                       CP_ME_CNTL__CE_HALT_MASK));
                 WREG32(mmSCRATCH_UMSK, 0);
-                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                        adev->gfx.gfx_ring[i].ready = false;
-                for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                        adev->gfx.compute_ring[i].ready = false;
         }
         udelay(50);
 }
@@ -2124,12 +2129,9 @@
 
         /* start the rings */
         gfx_v6_0_cp_gfx_start(adev);
-        ring->ready = true;
-        r = amdgpu_ring_test_ring(ring);
-        if (r) {
-                ring->ready = false;
+        r = amdgpu_ring_test_helper(ring);
+        if (r)
                 return r;
-        }
 
         return 0;
 }
@@ -2227,14 +2229,11 @@
         WREG32(mmCP_RB2_CNTL, tmp);
         WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
 
-        adev->gfx.compute_ring[0].ready = false;
-        adev->gfx.compute_ring[1].ready = false;
 
         for (i = 0; i < 2; i++) {
-                r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+                r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
                 if (r)
                         return r;
-                adev->gfx.compute_ring[i].ready = true;
         }
 
         return 0;
@@ -2368,18 +2367,11 @@
         amdgpu_ring_write(ring, val);
 }
 
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
-        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 {
         const u32 *src_ptr;
         volatile u32 *dst_ptr;
-        u32 dws, i;
+        u32 dws;
         u64 reg_list_mc_addr;
         const struct cs_section_def *cs_data;
         int r;
@@ -2394,26 +2386,10 @@
         cs_data = adev->gfx.rlc.cs_data;
 
         if (src_ptr) {
-                /* save restore block */
-                r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                              AMDGPU_GEM_DOMAIN_VRAM,
-                                              &adev->gfx.rlc.save_restore_obj,
-                                              &adev->gfx.rlc.save_restore_gpu_addr,
-                                              (void **)&adev->gfx.rlc.sr_ptr);
-                if (r) {
-                        dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
-                                 r);
-                        gfx_v6_0_rlc_fini(adev);
+                /* init save restore block */
+                r = amdgpu_gfx_rlc_init_sr(adev, dws);
+                if (r)
                         return r;
-                }
-
-                /* write the sr buffer */
-                dst_ptr = adev->gfx.rlc.sr_ptr;
-                for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-                        dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
-                amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-                amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
         }
 
         if (cs_data) {
@@ -2428,7 +2404,7 @@
                                               (void **)&adev->gfx.rlc.cs_ptr);
                 if (r) {
                         dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-                        gfx_v6_0_rlc_fini(adev);
+                        amdgpu_gfx_rlc_fini(adev);
                         return r;
                 }
 
@@ -2549,8 +2525,8 @@
         if (!adev->gfx.rlc_fw)
                 return -EINVAL;
 
-        gfx_v6_0_rlc_stop(adev);
-        gfx_v6_0_rlc_reset(adev);
+        adev->gfx.rlc.funcs->stop(adev);
+        adev->gfx.rlc.funcs->reset(adev);
         gfx_v6_0_init_pg(adev);
         gfx_v6_0_init_cg(adev);
 
@@ -2578,7 +2554,7 @@
         WREG32(mmRLC_UCODE_ADDR, 0);
 
         gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
-        gfx_v6_0_rlc_start(adev);
+        adev->gfx.rlc.funcs->start(adev);
 
         return 0;
 }
@@ -3062,7 +3038,7 @@
 }
 
 static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
-                                      u32 me, u32 pipe, u32 q)
+                                      u32 me, u32 pipe, u32 q, u32 vm)
 {
         DRM_INFO("Not implemented\n");
 }
@@ -3075,6 +3051,14 @@
         .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
 };
 
+static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
+        .init = gfx_v6_0_rlc_init,
+        .resume = gfx_v6_0_rlc_resume,
+        .stop = gfx_v6_0_rlc_stop,
+        .reset = gfx_v6_0_rlc_reset,
+        .start = gfx_v6_0_rlc_start
+};
+
 static int gfx_v6_0_early_init(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3082,6 +3066,7 @@
         adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
         adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
         adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+        adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
         gfx_v6_0_set_ring_funcs(adev);
         gfx_v6_0_set_irq_funcs(adev);
 
@@ -3094,15 +3079,15 @@
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
         int i, r;
 
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
         if (r)
                 return r;
 
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
         if (r)
                 return r;
 
-        r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
+        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
         if (r)
                 return r;
 
@@ -3114,7 +3099,7 @@
                 return r;
         }
 
-        r = gfx_v6_0_rlc_init(adev);
+        r = adev->gfx.rlc.funcs->init(adev);
         if (r) {
                 DRM_ERROR("Failed to init rlc BOs!\n");
                 return r;
@@ -3125,7 +3110,9 @@
                 ring->ring_obj = NULL;
                 sprintf(ring->name, "gfx");
                 r = amdgpu_ring_init(adev, ring, 1024,
-                                     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
+                                     &adev->gfx.eop_irq,
+                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+                                     AMDGPU_RING_PRIO_DEFAULT);
                 if (r)
                         return r;
         }
@@ -3147,7 +3134,8 @@
                 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
                 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                 r = amdgpu_ring_init(adev, ring, 1024,
-                                     &adev->gfx.eop_irq, irq_type);
+                                     &adev->gfx.eop_irq, irq_type,
+                                     AMDGPU_RING_PRIO_DEFAULT);
                 if (r)
                         return r;
         }
@@ -3165,7 +3153,7 @@
         for (i = 0; i < adev->gfx.num_compute_rings; i++)
                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-        gfx_v6_0_rlc_fini(adev);
+        amdgpu_gfx_rlc_fini(adev);
 
         return 0;
 }
@@ -3175,9 +3163,9 @@
         int r;
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-        gfx_v6_0_gpu_init(adev);
+        gfx_v6_0_constants_init(adev);
 
-        r = gfx_v6_0_rlc_resume(adev);
+        r = adev->gfx.rlc.funcs->resume(adev);
         if (r)
                 return r;
 
@@ -3195,7 +3183,7 @@
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
         gfx_v6_0_cp_enable(adev, false);
-        gfx_v6_0_rlc_stop(adev);
+        adev->gfx.rlc.funcs->stop(adev);
         gfx_v6_0_fini_pg(adev);
 
         return 0;
@@ -3360,7 +3348,7 @@
                                               enum amdgpu_interrupt_state state)
 {
         switch (type) {
-        case AMDGPU_CP_IRQ_GFX_EOP:
+        case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
                 gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
                 break;
         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
@@ -3393,12 +3381,31 @@
         return 0;
 }
 
+static void gfx_v6_0_fault(struct amdgpu_device *adev,
+                           struct amdgpu_iv_entry *entry)
+{
+        struct amdgpu_ring *ring;
+
+        switch (entry->ring_id) {
+        case 0:
+                ring = &adev->gfx.gfx_ring[0];
+                break;
+        case 1:
+        case 2:
+                ring = &adev->gfx.compute_ring[entry->ring_id - 1];
+                break;
+        default:
+                return;
+        }
+        drm_sched_fault(&ring->sched);
+}
+
 static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
 {
         DRM_ERROR("Illegal register access in command stream\n");
-        schedule_work(&adev->reset_work);
+        gfx_v6_0_fault(adev, entry);
         return 0;
 }
 
@@ -3407,7 +3414,7 @@
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
 {
         DRM_ERROR("Illegal instruction in command stream\n");
-        schedule_work(&adev->reset_work);
+        gfx_v6_0_fault(adev, entry);
         return 0;
 }
 
@@ -3458,6 +3465,18 @@
         return 0;
 }
 
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+        amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+        amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+                          PACKET3_TC_ACTION_ENA |
+                          PACKET3_SH_KCACHE_ACTION_ENA |
+                          PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+        amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+        amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+        amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
         .name = "gfx_v6_0",
         .early_init = gfx_v6_0_early_init,
@@ -3488,7 +3507,8 @@
                 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
                 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
                 SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-                3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+                3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+                5, /* SURFACE_SYNC */
         .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
         .emit_ib = gfx_v6_0_ring_emit_ib,
         .emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3499,6 +3519,7 @@
         .insert_nop = amdgpu_ring_insert_nop,
         .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
         .emit_wreg = gfx_v6_0_ring_emit_wreg,
+        .emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3512,7 +3533,8 @@
                 5 + 5 + /* hdp flush / invalidate */
                 7 + /* gfx_v6_0_ring_emit_pipeline_sync */
                 SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
-                14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+                14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+                5, /* SURFACE_SYNC */
         .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
         .emit_ib = gfx_v6_0_ring_emit_ib,
         .emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3522,6 +3544,7 @@
         .test_ib = gfx_v6_0_ring_test_ib,
         .insert_nop = amdgpu_ring_insert_nop,
         .emit_wreg = gfx_v6_0_ring_emit_wreg,
+        .emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)