2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -24,7 +24,8 @@
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
 #include "amdgpu.h"
 #include "amdgpu_atombios.h"
 #include "amdgpu_ih.h"
@@ -965,6 +966,25 @@
 
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
 	{mmGRBM_STATUS},
+	{mmGRBM_STATUS2},
+	{mmGRBM_STATUS_SE0},
+	{mmGRBM_STATUS_SE1},
+	{mmGRBM_STATUS_SE2},
+	{mmGRBM_STATUS_SE3},
+	{mmSRBM_STATUS},
+	{mmSRBM_STATUS2},
+	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
+	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
+	{mmCP_STAT},
+	{mmCP_STALLED_STAT1},
+	{mmCP_STALLED_STAT2},
+	{mmCP_STALLED_STAT3},
+	{mmCP_CPF_BUSY_STAT},
+	{mmCP_CPF_STALLED_STAT1},
+	{mmCP_CPF_STATUS},
+	{mmCP_CPC_BUSY_STAT},
+	{mmCP_CPC_STALLED_STAT1},
+	{mmCP_CPC_STATUS},
 	{mmGB_ADDR_CONFIG},
 	{mmMC_ARB_RAMCFG},
 	{mmGB_TILE_MODE0},
@@ -1269,6 +1289,72 @@
 }
 
 /**
+ * cik_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
+{
+	int r;
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+	r = cik_gpu_pci_config_reset(adev);
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+	return r;
+}
+
+static bool cik_asic_supports_baco(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+		return amdgpu_dpm_is_baco_supported(adev);
+	default:
+		return false;
+	}
+}
+
+static enum amd_reset_method
+cik_asic_reset_method(struct amdgpu_device *adev)
+{
+	bool baco_reset;
+
+	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
+	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+		return amdgpu_reset_method;
+
+	if (amdgpu_reset_method != -1)
+		dev_warn(adev->dev, "Specified reset:%d isn't supported, using AUTO instead.\n",
+			 amdgpu_reset_method);
+
+	switch (adev->asic_type) {
+	case CHIP_BONAIRE:
+		/* disable baco reset until it works */
+		/* smu7_asic_get_baco_capability(adev, &baco_reset); */
+		baco_reset = false;
+		break;
+	case CHIP_HAWAII:
+		baco_reset = cik_asic_supports_baco(adev);
+		break;
+	default:
+		baco_reset = false;
+		break;
+	}
+
+	if (baco_reset)
+		return AMD_RESET_METHOD_BACO;
+	else
+		return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
  * cik_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
@@ -1281,11 +1367,13 @@
 {
 	int r;
 
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = cik_gpu_pci_config_reset(adev);
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+	if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+		dev_info(adev->dev, "BACO reset\n");
+		r = amdgpu_dpm_baco_reset(adev);
+	} else {
+		dev_info(adev->dev, "PCI CONFIG reset\n");
+		r = cik_asic_pci_config_reset(adev);
+	}
 
 	return r;
 }
@@ -1377,7 +1465,6 @@
 static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
-	int bridge_pos, gpu_pos;
 	u32 speed_cntl, current_data_rate;
 	int i;
 	u16 tmp16;
@@ -1412,12 +1499,7 @@
 		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
 	}
 
-	bridge_pos = pci_pcie_cap(root);
-	if (!bridge_pos)
-		return;
-
-	gpu_pos = pci_pcie_cap(adev->pdev);
-	if (!gpu_pos)
+	if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
 		return;
 
 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1427,14 +1509,17 @@
 			u16 bridge_cfg2, gpu_cfg2;
 			u32 max_lw, current_lw, tmp;
 
-			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-			pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+			pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+						  &bridge_cfg);
+			pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+						  &gpu_cfg);
 
 			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
-			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+			pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
 
 			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
-			pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+			pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+						   tmp16);
 
 			tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
 			max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1458,15 +1543,23 @@
 
 			for (i = 0; i < 10; i++) {
 				/* check status */
-				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+				pcie_capability_read_word(adev->pdev,
+							  PCI_EXP_DEVSTA,
+							  &tmp16);
 				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
 					break;
 
-				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+							  &bridge_cfg);
+				pcie_capability_read_word(adev->pdev,
+							  PCI_EXP_LNKCTL,
+							  &gpu_cfg);
 
-				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
-				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+							  &bridge_cfg2);
+				pcie_capability_read_word(adev->pdev,
+							  PCI_EXP_LNKCTL2,
+							  &gpu_cfg2);
 
 				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
 				tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1479,26 +1572,45 @@
 				msleep(100);
 
 				/* linkctl */
-				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+							  &tmp16);
 				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
 				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
-				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+				pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+							   tmp16);
 
-				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+				pcie_capability_read_word(adev->pdev,
+							  PCI_EXP_LNKCTL,
+							  &tmp16);
 				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
 				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
-				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+				pcie_capability_write_word(adev->pdev,
+							   PCI_EXP_LNKCTL,
+							   tmp16);
 
 				/* linkctl2 */
-				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
-				tmp16 &= ~((1 << 4) | (7 << 9));
-				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
-				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+							  &tmp16);
+				tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+					   PCI_EXP_LNKCTL2_TX_MARGIN);
+				tmp16 |= (bridge_cfg2 &
+					  (PCI_EXP_LNKCTL2_ENTER_COMP |
+					   PCI_EXP_LNKCTL2_TX_MARGIN));
+				pcie_capability_write_word(root,
+							   PCI_EXP_LNKCTL2,
+							   tmp16);
 
-				pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-				tmp16 &= ~((1 << 4) | (7 << 9));
-				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
-				pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+				pcie_capability_read_word(adev->pdev,
+							  PCI_EXP_LNKCTL2,
+							  &tmp16);
+				tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+					   PCI_EXP_LNKCTL2_TX_MARGIN);
+				tmp16 |= (gpu_cfg2 &
+					  (PCI_EXP_LNKCTL2_ENTER_COMP |
+					   PCI_EXP_LNKCTL2_TX_MARGIN));
+				pcie_capability_write_word(adev->pdev,
+							   PCI_EXP_LNKCTL2,
+							   tmp16);
 
 				tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
 				tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1513,15 +1625,16 @@
 	speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
 	WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
 
-	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-	tmp16 &= ~0xf;
+	pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+	tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-		tmp16 |= 3; /* gen3 */
+		tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
-		tmp16 |= 2; /* gen2 */
+		tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
 	else
-		tmp16 |= 1; /* gen1 */
-	pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+		tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+	pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
 
 	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
 	speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
@@ -1708,12 +1821,6 @@
 		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-	if (is_virtual_machine()) /* passthrough mode */
-		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
 	if (!ring || !ring->funcs->emit_wreg) {
@@ -1741,12 +1848,92 @@
 	return true;
 }
 
+static void cik_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+			       uint64_t *count1)
+{
+	uint32_t perfctr = 0;
+	uint64_t cnt0_of, cnt1_of;
+	int tmp;
+
+	/* This reports 0 on APUs, so return to avoid writing/reading registers
+	 * that may or may not be different from their GPU counterparts
+	 */
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	/* Set the 2 events that we wish to watch, defined above */
+	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+	/* Write to enable desired perf counters */
+	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+	/* Zero out and enable the perf counters
+	 * Write 0x5:
+	 * Bit 0 = Start all counters(1)
+	 * Bit 2 = Global counter reset enable(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+	msleep(1000);
+
+	/* Load the shadow and disable the perf counters
+	 * Write 0x2:
+	 * Bit 0 = Stop counters(0)
+	 * Bit 1 = Load the shadow counters(1)
+	 */
+	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+	/* Read register values to get any >32bit overflow */
+	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+	/* Get the values and add the overflow */
+	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool cik_need_reset_on_init(struct amdgpu_device *adev)
+{
+	u32 clock_cntl, pc;
+
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	/* check if the SMC is already running */
+	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	pc = RREG32_SMC(ixSMC_PC_C);
+	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
+	    (0x20100 <= pc))
+		return true;
+
+	return false;
+}
+
+static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+	uint64_t nak_r, nak_g;
+
+	/* Get the number of NAKs received and generated */
+	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
+	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
+
+	/* Add the total number of NAKs, i.e the number of replays */
+	return (nak_r + nak_g);
+}
+
+static void cik_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
 	.read_bios_from_rom = &cik_read_bios_from_rom,
 	.read_register = &cik_read_register,
 	.reset = &cik_asic_reset,
+	.reset_method = &cik_asic_reset_method,
 	.set_vga_state = &cik_vga_set_state,
 	.get_xclk = &cik_get_xclk,
 	.set_uvd_clocks = &cik_set_uvd_clocks,
@@ -1755,6 +1942,12 @@
 	.flush_hdp = &cik_flush_hdp,
 	.invalidate_hdp = &cik_invalidate_hdp,
 	.need_full_reset = &cik_need_full_reset,
+	.init_doorbell_index = &legacy_doorbell_index_init,
+	.get_pcie_usage = &cik_get_pcie_usage,
+	.need_reset_on_init = &cik_need_reset_on_init,
+	.get_pcie_replay_count = &cik_get_pcie_replay_count,
+	.supports_baco = &cik_asic_supports_baco,
+	.pre_asic_init = &cik_pre_asic_init,
 };
 
 static int cik_common_early_init(void *handle)
@@ -1995,17 +2188,14 @@
 
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-	cik_detect_hw_virtualization(adev);
-
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
-		if (amdgpu_dpm == -1)
-			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-		else
-			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2014,8 +2204,6 @@
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
@@ -2023,10 +2211,9 @@
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
-		if (amdgpu_dpm == -1)
-			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
-		else
-			amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2035,8 +2222,6 @@
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
@@ -2044,6 +2229,8 @@
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -2053,8 +2240,7 @@
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
@@ -2063,6 +2249,8 @@
 		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
 		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -2072,8 +2260,6 @@
 #endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
-		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
-		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
 		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;