forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/gpu/drm/amd/amdgpu/soc15.c
....@@ -23,7 +23,8 @@
2323 #include <linux/firmware.h>
2424 #include <linux/slab.h>
2525 #include <linux/module.h>
26
-#include <drm/drmP.h>
26
+#include <linux/pci.h>
27
+
2728 #include "amdgpu.h"
2829 #include "amdgpu_atombios.h"
2930 #include "amdgpu_ih.h"
....@@ -43,6 +44,11 @@
4344 #include "hdp/hdp_4_0_sh_mask.h"
4445 #include "smuio/smuio_9_0_offset.h"
4546 #include "smuio/smuio_9_0_sh_mask.h"
47
+#include "nbio/nbio_7_0_default.h"
48
+#include "nbio/nbio_7_0_offset.h"
49
+#include "nbio/nbio_7_0_sh_mask.h"
50
+#include "nbio/nbio_7_0_smn.h"
51
+#include "mp/mp_9_0_offset.h"
4652
4753 #include "soc15.h"
4854 #include "soc15_common.h"
....@@ -52,50 +58,83 @@
5258 #include "mmhub_v1_0.h"
5359 #include "df_v1_7.h"
5460 #include "df_v3_6.h"
61
+#include "nbio_v6_1.h"
62
+#include "nbio_v7_0.h"
63
+#include "nbio_v7_4.h"
5564 #include "vega10_ih.h"
5665 #include "sdma_v4_0.h"
5766 #include "uvd_v7_0.h"
5867 #include "vce_v4_0.h"
5968 #include "vcn_v1_0.h"
69
+#include "vcn_v2_0.h"
70
+#include "jpeg_v2_0.h"
71
+#include "vcn_v2_5.h"
72
+#include "jpeg_v2_5.h"
6073 #include "dce_virtual.h"
6174 #include "mxgpu_ai.h"
75
+#include "amdgpu_smu.h"
76
+#include "amdgpu_ras.h"
77
+#include "amdgpu_xgmi.h"
78
+#include <uapi/linux/kfd_ioctl.h>
6279
6380 #define mmMP0_MISC_CGTT_CTRL0 0x01b9
6481 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
6582 #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
6683 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
6784
85
+/* for Vega20 register name change */
86
+#define mmHDP_MEM_POWER_CTRL 0x00d4
87
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
88
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
89
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
90
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
91
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
92
+
93
+/* for Vega20/arcturus register offset change */
94
+#define mmROM_INDEX_VG20 0x00e4
95
+#define mmROM_INDEX_VG20_BASE_IDX 0
96
+#define mmROM_DATA_VG20 0x00e5
97
+#define mmROM_DATA_VG20_BASE_IDX 0
98
+
6899 /*
69100 * Indirect registers accessor
70101 */
71102 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
72103 {
73
- unsigned long flags, address, data;
74
- u32 r;
75
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
76
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
104
+ unsigned long address, data;
105
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
106
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
77107
78
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
79
- WREG32(address, reg);
80
- (void)RREG32(address);
81
- r = RREG32(data);
82
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
83
- return r;
108
+ return amdgpu_device_indirect_rreg(adev, address, data, reg);
84109 }
85110
86111 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
87112 {
88
- unsigned long flags, address, data;
113
+ unsigned long address, data;
89114
90
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
91
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
115
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
116
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
92117
93
- spin_lock_irqsave(&adev->pcie_idx_lock, flags);
94
- WREG32(address, reg);
95
- (void)RREG32(address);
96
- WREG32(data, v);
97
- (void)RREG32(data);
98
- spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
118
+ amdgpu_device_indirect_wreg(adev, address, data, reg, v);
119
+}
120
+
121
+static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
122
+{
123
+ unsigned long address, data;
124
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
125
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
126
+
127
+ return amdgpu_device_indirect_rreg64(adev, address, data, reg);
128
+}
129
+
130
+static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
131
+{
132
+ unsigned long address, data;
133
+
134
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
135
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
136
+
137
+ amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
99138 }
100139
101140 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
....@@ -200,13 +239,15 @@
200239
201240 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
202241 {
203
- return adev->nbio_funcs->get_memsize(adev);
242
+ return adev->nbio.funcs->get_memsize(adev);
204243 }
205244
206245 static u32 soc15_get_xclk(struct amdgpu_device *adev)
207246 {
208247 u32 reference_clock = adev->clock.spll.reference_freq;
209248
249
+ if (adev->asic_type == CHIP_RENOIR)
250
+ return 10000;
210251 if (adev->asic_type == CHIP_RAVEN)
211252 return reference_clock / 4;
212253
....@@ -223,7 +264,7 @@
223264 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
224265 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
225266
226
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
267
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
227268 }
228269
229270 static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
....@@ -242,6 +283,8 @@
242283 {
243284 u32 *dw_ptr;
244285 u32 i, length_dw;
286
+ uint32_t rom_index_offset;
287
+ uint32_t rom_data_offset;
245288
246289 if (bios == NULL)
247290 return false;
....@@ -254,23 +297,26 @@
254297 dw_ptr = (u32 *)bios;
255298 length_dw = ALIGN(length_bytes, 4) / 4;
256299
300
+ switch (adev->asic_type) {
301
+ case CHIP_VEGA20:
302
+ case CHIP_ARCTURUS:
303
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
304
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
305
+ break;
306
+ default:
307
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
308
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
309
+ break;
310
+ }
311
+
257312 /* set rom index to 0 */
258
- WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
313
+ WREG32(rom_index_offset, 0);
259314 /* read out the rom data */
260315 for (i = 0; i < length_dw; i++)
261
- dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
316
+ dw_ptr[i] = RREG32(rom_data_offset);
262317
263318 return true;
264319 }
265
-
266
-struct soc15_allowed_register_entry {
267
- uint32_t hwip;
268
- uint32_t inst;
269
- uint32_t seg;
270
- uint32_t reg_offset;
271
- bool grbm_indexed;
272
-};
273
-
274320
275321 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
276322 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
....@@ -288,6 +334,7 @@
288334 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
289335 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
290336 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
337
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
291338 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
292339 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
293340 { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
....@@ -335,7 +382,9 @@
335382 *value = 0;
336383 for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
337384 en = &soc15_allowed_read_registers[i];
338
- if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
385
+ if (!adev->reg_offset[en->hwip][en->inst])
386
+ continue;
387
+ else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
339388 + en->reg_offset))
340389 continue;
341390
....@@ -376,34 +425,44 @@
376425 } else {
377426 tmp = RREG32(reg);
378427 tmp &= ~(entry->and_mask);
379
- tmp |= entry->or_mask;
428
+ tmp |= (entry->or_mask & entry->and_mask);
380429 }
381
- WREG32(reg, tmp);
430
+
431
+ if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
432
+ reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
433
+ reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
434
+ reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
435
+ WREG32_RLC(reg, tmp);
436
+ else
437
+ WREG32(reg, tmp);
438
+
382439 }
383440
384441 }
385442
386
-
387
-static int soc15_asic_reset(struct amdgpu_device *adev)
443
+static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
388444 {
389445 u32 i;
446
+ int ret = 0;
390447
391448 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
392449
393
- dev_info(adev->dev, "GPU reset\n");
450
+ dev_info(adev->dev, "GPU mode1 reset\n");
394451
395452 /* disable BM */
396453 pci_clear_master(adev->pdev);
397454
398
- pci_save_state(adev->pdev);
455
+ amdgpu_device_cache_pci_state(adev->pdev);
399456
400
- psp_gpu_reset(adev);
457
+ ret = psp_gpu_reset(adev);
458
+ if (ret)
459
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
401460
402
- pci_restore_state(adev->pdev);
461
+ amdgpu_device_load_pci_state(adev->pdev);
403462
404463 /* wait for asic to come out of reset */
405464 for (i = 0; i < adev->usec_timeout; i++) {
406
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
465
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
407466
408467 if (memsize != 0xffffffff)
409468 break;
....@@ -412,7 +471,108 @@
412471
413472 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
414473
474
+ return ret;
475
+}
476
+
477
+static int soc15_asic_baco_reset(struct amdgpu_device *adev)
478
+{
479
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
480
+ int ret = 0;
481
+
482
+	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
483
+ if (ras && ras->supported)
484
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
485
+
486
+ ret = amdgpu_dpm_baco_reset(adev);
487
+ if (ret)
488
+ return ret;
489
+
490
+ /* re-enable doorbell interrupt after BACO exit */
491
+ if (ras && ras->supported)
492
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
493
+
415494 return 0;
495
+}
496
+
497
+static enum amd_reset_method
498
+soc15_asic_reset_method(struct amdgpu_device *adev)
499
+{
500
+ bool baco_reset = false;
501
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
502
+
503
+ if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
504
+ amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
505
+ amdgpu_reset_method == AMD_RESET_METHOD_BACO)
506
+ return amdgpu_reset_method;
507
+
508
+ if (amdgpu_reset_method != -1)
509
+ dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
510
+ amdgpu_reset_method);
511
+
512
+ switch (adev->asic_type) {
513
+ case CHIP_RAVEN:
514
+ case CHIP_RENOIR:
515
+ return AMD_RESET_METHOD_MODE2;
516
+ case CHIP_VEGA10:
517
+ case CHIP_VEGA12:
518
+ case CHIP_ARCTURUS:
519
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
520
+ break;
521
+ case CHIP_VEGA20:
522
+ if (adev->psp.sos_fw_version >= 0x80067)
523
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
524
+
525
+ /*
526
+ * 1. PMFW version > 0x284300: all cases use baco
527
+ * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
528
+ */
529
+ if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
530
+ baco_reset = false;
531
+ break;
532
+ default:
533
+ break;
534
+ }
535
+
536
+ if (baco_reset)
537
+ return AMD_RESET_METHOD_BACO;
538
+ else
539
+ return AMD_RESET_METHOD_MODE1;
540
+}
541
+
542
+static int soc15_asic_reset(struct amdgpu_device *adev)
543
+{
544
+ /* original raven doesn't have full asic reset */
545
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
546
+ !(adev->apu_flags & AMD_APU_IS_RAVEN2))
547
+ return 0;
548
+
549
+ switch (soc15_asic_reset_method(adev)) {
550
+ case AMD_RESET_METHOD_BACO:
551
+ dev_info(adev->dev, "BACO reset\n");
552
+ return soc15_asic_baco_reset(adev);
553
+ case AMD_RESET_METHOD_MODE2:
554
+ dev_info(adev->dev, "MODE2 reset\n");
555
+ return amdgpu_dpm_mode2_reset(adev);
556
+ default:
557
+ dev_info(adev->dev, "MODE1 reset\n");
558
+ return soc15_asic_mode1_reset(adev);
559
+ }
560
+}
561
+
562
+static bool soc15_supports_baco(struct amdgpu_device *adev)
563
+{
564
+ switch (adev->asic_type) {
565
+ case CHIP_VEGA10:
566
+ case CHIP_VEGA12:
567
+ case CHIP_ARCTURUS:
568
+ return amdgpu_dpm_is_baco_supported(adev);
569
+ case CHIP_VEGA20:
570
+ if (adev->psp.sos_fw_version >= 0x80067)
571
+ return amdgpu_dpm_is_baco_supported(adev);
572
+ return false;
573
+ default:
574
+ return false;
575
+ }
416576 }
417577
418578 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
....@@ -471,8 +631,8 @@
471631 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
472632 bool enable)
473633 {
474
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
475
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
634
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
635
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
476636 }
477637
478638 static const struct amdgpu_ip_block_version vega10_common_ip_block =
....@@ -484,8 +644,15 @@
484644 .funcs = &soc15_common_ip_funcs,
485645 };
486646
487
-int soc15_set_ip_blocks(struct amdgpu_device *adev)
647
+static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
488648 {
649
+ return adev->nbio.funcs->get_rev_id(adev);
650
+}
651
+
652
+static void soc15_reg_base_init(struct amdgpu_device *adev)
653
+{
654
+ int r;
655
+
489656 /* Set IP register base before any HW register access */
490657 switch (adev->asic_type) {
491658 case CHIP_VEGA10:
....@@ -493,28 +660,67 @@
493660 case CHIP_RAVEN:
494661 vega10_reg_base_init(adev);
495662 break;
663
+ case CHIP_RENOIR:
664
+		/* It's safe to do ip discovery here for Renoir,
665
+ * it doesn't support SRIOV. */
666
+ if (amdgpu_discovery) {
667
+ r = amdgpu_discovery_reg_base_init(adev);
668
+ if (r == 0)
669
+ break;
670
+ DRM_WARN("failed to init reg base from ip discovery table, "
671
+ "fallback to legacy init method\n");
672
+ }
673
+ vega10_reg_base_init(adev);
674
+ break;
496675 case CHIP_VEGA20:
497676 vega20_reg_base_init(adev);
498677 break;
678
+ case CHIP_ARCTURUS:
679
+ arct_reg_base_init(adev);
680
+ break;
499681 default:
500
- return -EINVAL;
682
+ DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
683
+ break;
684
+ }
685
+}
686
+
687
+void soc15_set_virt_ops(struct amdgpu_device *adev)
688
+{
689
+ adev->virt.ops = &xgpu_ai_virt_ops;
690
+
691
+ /* init soc15 reg base early enough so we can
692
+	 * request full access for sriov before
693
+ * set_ip_blocks. */
694
+ soc15_reg_base_init(adev);
695
+}
696
+
697
+int soc15_set_ip_blocks(struct amdgpu_device *adev)
698
+{
699
+ /* for bare metal case */
700
+ if (!amdgpu_sriov_vf(adev))
701
+ soc15_reg_base_init(adev);
702
+
703
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
704
+ adev->gmc.xgmi.supported = true;
705
+
706
+ if (adev->flags & AMD_IS_APU) {
707
+ adev->nbio.funcs = &nbio_v7_0_funcs;
708
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
709
+ } else if (adev->asic_type == CHIP_VEGA20 ||
710
+ adev->asic_type == CHIP_ARCTURUS) {
711
+ adev->nbio.funcs = &nbio_v7_4_funcs;
712
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
713
+ } else {
714
+ adev->nbio.funcs = &nbio_v6_1_funcs;
715
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
501716 }
502717
503
- if (adev->flags & AMD_IS_APU)
504
- adev->nbio_funcs = &nbio_v7_0_funcs;
505
- else if (adev->asic_type == CHIP_VEGA20)
506
- adev->nbio_funcs = &nbio_v7_0_funcs;
718
+ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
719
+ adev->df.funcs = &df_v3_6_funcs;
507720 else
508
- adev->nbio_funcs = &nbio_v6_1_funcs;
721
+ adev->df.funcs = &df_v1_7_funcs;
509722
510
- if (adev->asic_type == CHIP_VEGA20)
511
- adev->df_funcs = &df_v3_6_funcs;
512
- else
513
- adev->df_funcs = &df_v1_7_funcs;
514
- adev->nbio_funcs->detect_hw_virt(adev);
515
-
516
- if (amdgpu_sriov_vf(adev))
517
- adev->virt.ops = &xgpu_ai_virt_ops;
723
+ adev->rev_id = soc15_get_rev_id(adev);
518724
519725 switch (adev->asic_type) {
520726 case CHIP_VEGA10:
....@@ -522,42 +728,107 @@
522728 case CHIP_VEGA20:
523729 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
524730 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
525
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
526
- if (adev->asic_type != CHIP_VEGA20) {
527
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
731
+
732
+ /* For Vega10 SR-IOV, PSP need to be initialized before IH */
733
+ if (amdgpu_sriov_vf(adev)) {
734
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
735
+ if (adev->asic_type == CHIP_VEGA20)
736
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
737
+ else
738
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
739
+ }
740
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
741
+ } else {
742
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
743
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
744
+ if (adev->asic_type == CHIP_VEGA20)
745
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
746
+ else
747
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
748
+ }
749
+ }
750
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
751
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
752
+ if (is_support_sw_smu(adev)) {
528753 if (!amdgpu_sriov_vf(adev))
529
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
754
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
755
+ } else {
756
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
530757 }
531758 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
532759 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
533760 #if defined(CONFIG_DRM_AMD_DC)
534761 else if (amdgpu_device_has_dc_support(adev))
535762 amdgpu_device_ip_block_add(adev, &dm_ip_block);
536
-#else
537
-# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
538763 #endif
539
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
540
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
541
- amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
542
- amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
764
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
765
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
766
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
767
+ }
543768 break;
544769 case CHIP_RAVEN:
545770 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
546771 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
547772 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
548
- amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
773
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
774
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
775
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
776
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
549777 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
550778 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
551779 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
552780 #if defined(CONFIG_DRM_AMD_DC)
553781 else if (amdgpu_device_has_dc_support(adev))
554782 amdgpu_device_ip_block_add(adev, &dm_ip_block);
555
-#else
556
-# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
557783 #endif
784
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
785
+ break;
786
+ case CHIP_ARCTURUS:
787
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
788
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
789
+
790
+ if (amdgpu_sriov_vf(adev)) {
791
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
792
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
793
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
794
+ } else {
795
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
796
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
797
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
798
+ }
799
+
800
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
801
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
558802 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
559803 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
560
- amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
804
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
805
+
806
+ if (amdgpu_sriov_vf(adev)) {
807
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
808
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
809
+ } else {
810
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
811
+ }
812
+ if (!amdgpu_sriov_vf(adev))
813
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
814
+ break;
815
+ case CHIP_RENOIR:
816
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
817
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
818
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
819
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
820
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
821
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
822
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
823
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
824
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
825
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
826
+#if defined(CONFIG_DRM_AMD_DC)
827
+ else if (amdgpu_device_has_dc_support(adev))
828
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
829
+#endif
830
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
831
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
561832 break;
562833 default:
563834 return -EINVAL;
....@@ -566,21 +837,16 @@
566837 return 0;
567838 }
568839
569
-static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
570
-{
571
- return adev->nbio_funcs->get_rev_id(adev);
572
-}
573
-
574840 static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
575841 {
576
- adev->nbio_funcs->hdp_flush(adev, ring);
842
+ adev->nbio.funcs->hdp_flush(adev, ring);
577843 }
578844
579845 static void soc15_invalidate_hdp(struct amdgpu_device *adev,
580846 struct amdgpu_ring *ring)
581847 {
582848 if (!ring || !ring->funcs->emit_wreg)
583
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
849
+ WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
584850 else
585851 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
586852 HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
....@@ -592,12 +858,157 @@
592858 return true;
593859 }
594860
861
+static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
862
+{
863
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
864
+ return;
865
+	/* read back hdp ras counter to reset it to 0 */
866
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
867
+}
868
+
869
+static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
870
+ uint64_t *count1)
871
+{
872
+ uint32_t perfctr = 0;
873
+ uint64_t cnt0_of, cnt1_of;
874
+ int tmp;
875
+
876
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
877
+ * that may or may not be different from their GPU counterparts
878
+ */
879
+ if (adev->flags & AMD_IS_APU)
880
+ return;
881
+
882
+ /* Set the 2 events that we wish to watch, defined above */
883
+ /* Reg 40 is # received msgs */
884
+ /* Reg 104 is # of posted requests sent */
885
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
886
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
887
+
888
+ /* Write to enable desired perf counters */
889
+ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
890
+ /* Zero out and enable the perf counters
891
+ * Write 0x5:
892
+ * Bit 0 = Start all counters(1)
893
+ * Bit 2 = Global counter reset enable(1)
894
+ */
895
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
896
+
897
+ msleep(1000);
898
+
899
+ /* Load the shadow and disable the perf counters
900
+ * Write 0x2:
901
+ * Bit 0 = Stop counters(0)
902
+ * Bit 1 = Load the shadow counters(1)
903
+ */
904
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
905
+
906
+ /* Read register values to get any >32bit overflow */
907
+ tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
908
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
909
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
910
+
911
+ /* Get the values and add the overflow */
912
+ *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
913
+ *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
914
+}
915
+
916
+static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
917
+ uint64_t *count1)
918
+{
919
+ uint32_t perfctr = 0;
920
+ uint64_t cnt0_of, cnt1_of;
921
+ int tmp;
922
+
923
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
924
+ * that may or may not be different from their GPU counterparts
925
+ */
926
+ if (adev->flags & AMD_IS_APU)
927
+ return;
928
+
929
+ /* Set the 2 events that we wish to watch, defined above */
930
+ /* Reg 40 is # received msgs */
931
+ /* Reg 108 is # of posted requests sent on VG20 */
932
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
933
+ EVENT0_SEL, 40);
934
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
935
+ EVENT1_SEL, 108);
936
+
937
+ /* Write to enable desired perf counters */
938
+ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
939
+ /* Zero out and enable the perf counters
940
+ * Write 0x5:
941
+ * Bit 0 = Start all counters(1)
942
+ * Bit 2 = Global counter reset enable(1)
943
+ */
944
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
945
+
946
+ msleep(1000);
947
+
948
+ /* Load the shadow and disable the perf counters
949
+ * Write 0x2:
950
+ * Bit 0 = Stop counters(0)
951
+ * Bit 1 = Load the shadow counters(1)
952
+ */
953
+ WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
954
+
955
+ /* Read register values to get any >32bit overflow */
956
+ tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
957
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
958
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
959
+
960
+ /* Get the values and add the overflow */
961
+ *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
962
+ *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
963
+}
964
+
965
+static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
966
+{
967
+ u32 sol_reg;
968
+
969
+ /* Just return false for soc15 GPUs. Reset does not seem to
970
+ * be necessary.
971
+ */
972
+ if (!amdgpu_passthrough(adev))
973
+ return false;
974
+
975
+ if (adev->flags & AMD_IS_APU)
976
+ return false;
977
+
978
+ /* Check sOS sign of life register to confirm sys driver and sOS
979
+ * are already been loaded.
980
+ */
981
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
982
+ if (sol_reg)
983
+ return true;
984
+
985
+ return false;
986
+}
987
+
988
+static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
989
+{
990
+ uint64_t nak_r, nak_g;
991
+
992
+ /* Get the number of NAKs received and generated */
993
+ nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
994
+ nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
995
+
996
+ /* Add the total number of NAKs, i.e the number of replays */
997
+ return (nak_r + nak_g);
998
+}
999
+
1000
+static void soc15_pre_asic_init(struct amdgpu_device *adev)
1001
+{
1002
+ gmc_v9_0_restore_registers(adev);
1003
+}
1004
+
5951005 static const struct amdgpu_asic_funcs soc15_asic_funcs =
5961006 {
5971007 .read_disabled_bios = &soc15_read_disabled_bios,
5981008 .read_bios_from_rom = &soc15_read_bios_from_rom,
5991009 .read_register = &soc15_read_register,
6001010 .reset = &soc15_asic_reset,
1011
+ .reset_method = &soc15_asic_reset_method,
6011012 .set_vga_state = &soc15_vga_set_state,
6021013 .get_xclk = &soc15_get_xclk,
6031014 .set_uvd_clocks = &soc15_set_uvd_clocks,
....@@ -606,16 +1017,51 @@
6061017 .flush_hdp = &soc15_flush_hdp,
6071018 .invalidate_hdp = &soc15_invalidate_hdp,
6081019 .need_full_reset = &soc15_need_full_reset,
1020
+ .init_doorbell_index = &vega10_doorbell_index_init,
1021
+ .get_pcie_usage = &soc15_get_pcie_usage,
1022
+ .need_reset_on_init = &soc15_need_reset_on_init,
1023
+ .get_pcie_replay_count = &soc15_get_pcie_replay_count,
1024
+ .supports_baco = &soc15_supports_baco,
1025
+ .pre_asic_init = &soc15_pre_asic_init,
1026
+};
1027
+
1028
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
1029
+{
1030
+ .read_disabled_bios = &soc15_read_disabled_bios,
1031
+ .read_bios_from_rom = &soc15_read_bios_from_rom,
1032
+ .read_register = &soc15_read_register,
1033
+ .reset = &soc15_asic_reset,
1034
+ .reset_method = &soc15_asic_reset_method,
1035
+ .set_vga_state = &soc15_vga_set_state,
1036
+ .get_xclk = &soc15_get_xclk,
1037
+ .set_uvd_clocks = &soc15_set_uvd_clocks,
1038
+ .set_vce_clocks = &soc15_set_vce_clocks,
1039
+ .get_config_memsize = &soc15_get_config_memsize,
1040
+ .flush_hdp = &soc15_flush_hdp,
1041
+ .invalidate_hdp = &soc15_invalidate_hdp,
1042
+ .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
1043
+ .need_full_reset = &soc15_need_full_reset,
1044
+ .init_doorbell_index = &vega20_doorbell_index_init,
1045
+ .get_pcie_usage = &vega20_get_pcie_usage,
1046
+ .need_reset_on_init = &soc15_need_reset_on_init,
1047
+ .get_pcie_replay_count = &soc15_get_pcie_replay_count,
1048
+ .supports_baco = &soc15_supports_baco,
1049
+ .pre_asic_init = &soc15_pre_asic_init,
6091050 };
6101051
6111052 static int soc15_common_early_init(void *handle)
6121053 {
1054
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
6131055 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6141056
1057
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
1058
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
6151059 adev->smc_rreg = NULL;
6161060 adev->smc_wreg = NULL;
6171061 adev->pcie_rreg = &soc15_pcie_rreg;
6181062 adev->pcie_wreg = &soc15_pcie_wreg;
1063
+ adev->pcie_rreg64 = &soc15_pcie_rreg64;
1064
+ adev->pcie_wreg64 = &soc15_pcie_wreg64;
6191065 adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
6201066 adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
6211067 adev->didt_rreg = &soc15_didt_rreg;
....@@ -625,12 +1071,11 @@
6251071 adev->se_cac_rreg = &soc15_se_cac_rreg;
6261072 adev->se_cac_wreg = &soc15_se_cac_wreg;
6271073
628
- adev->asic_funcs = &soc15_asic_funcs;
6291074
630
- adev->rev_id = soc15_get_rev_id(adev);
6311075 adev->external_rev_id = 0xFF;
6321076 switch (adev->asic_type) {
6331077 case CHIP_VEGA10:
1078
+ adev->asic_funcs = &soc15_asic_funcs;
6341079 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
6351080 AMD_CG_SUPPORT_GFX_MGLS |
6361081 AMD_CG_SUPPORT_GFX_RLC_LS |
....@@ -654,6 +1099,7 @@
6541099 adev->external_rev_id = 0x1;
6551100 break;
6561101 case CHIP_VEGA12:
1102
+ adev->asic_funcs = &soc15_asic_funcs;
6571103 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
6581104 AMD_CG_SUPPORT_GFX_MGLS |
6591105 AMD_CG_SUPPORT_GFX_CGCG |
....@@ -676,6 +1122,7 @@
6761122 adev->external_rev_id = adev->rev_id + 0x14;
6771123 break;
6781124 case CHIP_VEGA20:
1125
+ adev->asic_funcs = &vega20_asic_funcs;
6791126 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
6801127 AMD_CG_SUPPORT_GFX_MGLS |
6811128 AMD_CG_SUPPORT_GFX_CGCG |
....@@ -698,35 +1145,141 @@
6981145 adev->external_rev_id = adev->rev_id + 0x28;
6991146 break;
7001147 case CHIP_RAVEN:
1148
+ adev->asic_funcs = &soc15_asic_funcs;
1149
+ if (adev->pdev->device == 0x15dd)
1150
+ adev->apu_flags |= AMD_APU_IS_RAVEN;
1151
+ if (adev->pdev->device == 0x15d8)
1152
+ adev->apu_flags |= AMD_APU_IS_PICASSO;
1153
+ if (adev->rev_id >= 0x8)
1154
+ adev->apu_flags |= AMD_APU_IS_RAVEN2;
1155
+
1156
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1157
+ adev->external_rev_id = adev->rev_id + 0x79;
1158
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1159
+ adev->external_rev_id = adev->rev_id + 0x41;
1160
+ else if (adev->rev_id == 1)
1161
+ adev->external_rev_id = adev->rev_id + 0x20;
1162
+ else
1163
+ adev->external_rev_id = adev->rev_id + 0x01;
1164
+
1165
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1166
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1167
+ AMD_CG_SUPPORT_GFX_MGLS |
1168
+ AMD_CG_SUPPORT_GFX_CP_LS |
1169
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
1170
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
1171
+ AMD_CG_SUPPORT_GFX_CGCG |
1172
+ AMD_CG_SUPPORT_GFX_CGLS |
1173
+ AMD_CG_SUPPORT_BIF_LS |
1174
+ AMD_CG_SUPPORT_HDP_LS |
1175
+ AMD_CG_SUPPORT_ROM_MGCG |
1176
+ AMD_CG_SUPPORT_MC_MGCG |
1177
+ AMD_CG_SUPPORT_MC_LS |
1178
+ AMD_CG_SUPPORT_SDMA_MGCG |
1179
+ AMD_CG_SUPPORT_SDMA_LS |
1180
+ AMD_CG_SUPPORT_VCN_MGCG;
1181
+
1182
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1183
+ } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1184
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1185
+ AMD_CG_SUPPORT_GFX_MGLS |
1186
+ AMD_CG_SUPPORT_GFX_CP_LS |
1187
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
1188
+ AMD_CG_SUPPORT_GFX_CGCG |
1189
+ AMD_CG_SUPPORT_GFX_CGLS |
1190
+ AMD_CG_SUPPORT_BIF_LS |
1191
+ AMD_CG_SUPPORT_HDP_LS |
1192
+ AMD_CG_SUPPORT_ROM_MGCG |
1193
+ AMD_CG_SUPPORT_MC_MGCG |
1194
+ AMD_CG_SUPPORT_MC_LS |
1195
+ AMD_CG_SUPPORT_SDMA_MGCG |
1196
+ AMD_CG_SUPPORT_SDMA_LS;
1197
+
1198
+ /*
1199
+ * MMHUB PG needs to be disabled for Picasso for
1200
+ * stability reasons.
1201
+ */
1202
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1203
+ AMD_PG_SUPPORT_VCN;
1204
+ } else {
1205
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1206
+ AMD_CG_SUPPORT_GFX_MGLS |
1207
+ AMD_CG_SUPPORT_GFX_RLC_LS |
1208
+ AMD_CG_SUPPORT_GFX_CP_LS |
1209
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
1210
+ AMD_CG_SUPPORT_GFX_CGCG |
1211
+ AMD_CG_SUPPORT_GFX_CGLS |
1212
+ AMD_CG_SUPPORT_BIF_MGCG |
1213
+ AMD_CG_SUPPORT_BIF_LS |
1214
+ AMD_CG_SUPPORT_HDP_MGCG |
1215
+ AMD_CG_SUPPORT_HDP_LS |
1216
+ AMD_CG_SUPPORT_DRM_MGCG |
1217
+ AMD_CG_SUPPORT_DRM_LS |
1218
+ AMD_CG_SUPPORT_ROM_MGCG |
1219
+ AMD_CG_SUPPORT_MC_MGCG |
1220
+ AMD_CG_SUPPORT_MC_LS |
1221
+ AMD_CG_SUPPORT_SDMA_MGCG |
1222
+ AMD_CG_SUPPORT_SDMA_LS |
1223
+ AMD_CG_SUPPORT_VCN_MGCG;
1224
+
1225
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1226
+ }
1227
+ break;
1228
+ case CHIP_ARCTURUS:
1229
+ adev->asic_funcs = &vega20_asic_funcs;
7011230 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
7021231 AMD_CG_SUPPORT_GFX_MGLS |
703
- AMD_CG_SUPPORT_GFX_RLC_LS |
704
- AMD_CG_SUPPORT_GFX_CP_LS |
705
- AMD_CG_SUPPORT_GFX_3D_CGCG |
706
- AMD_CG_SUPPORT_GFX_3D_CGLS |
7071232 AMD_CG_SUPPORT_GFX_CGCG |
7081233 AMD_CG_SUPPORT_GFX_CGLS |
709
- AMD_CG_SUPPORT_BIF_MGCG |
710
- AMD_CG_SUPPORT_BIF_LS |
1234
+ AMD_CG_SUPPORT_GFX_CP_LS |
7111235 AMD_CG_SUPPORT_HDP_MGCG |
7121236 AMD_CG_SUPPORT_HDP_LS |
713
- AMD_CG_SUPPORT_DRM_MGCG |
714
- AMD_CG_SUPPORT_DRM_LS |
715
- AMD_CG_SUPPORT_ROM_MGCG |
716
- AMD_CG_SUPPORT_MC_MGCG |
717
- AMD_CG_SUPPORT_MC_LS |
7181237 AMD_CG_SUPPORT_SDMA_MGCG |
7191238 AMD_CG_SUPPORT_SDMA_LS |
720
- AMD_CG_SUPPORT_VCN_MGCG;
1239
+ AMD_CG_SUPPORT_MC_MGCG |
1240
+ AMD_CG_SUPPORT_MC_LS |
1241
+ AMD_CG_SUPPORT_IH_CG |
1242
+ AMD_CG_SUPPORT_VCN_MGCG |
1243
+ AMD_CG_SUPPORT_JPEG_MGCG;
1244
+ adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1245
+ adev->external_rev_id = adev->rev_id + 0x32;
1246
+ break;
1247
+ case CHIP_RENOIR:
1248
+ adev->asic_funcs = &soc15_asic_funcs;
1249
+ if ((adev->pdev->device == 0x1636) ||
1250
+ (adev->pdev->device == 0x164c))
1251
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
1252
+ else
1253
+ adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
7211254
722
- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
723
-
724
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
725
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
726
- AMD_PG_SUPPORT_CP |
727
- AMD_PG_SUPPORT_RLC_SMU_HS;
728
-
729
- adev->external_rev_id = 0x1;
1255
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
1256
+ adev->external_rev_id = adev->rev_id + 0x91;
1257
+ else
1258
+ adev->external_rev_id = adev->rev_id + 0xa1;
1259
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1260
+ AMD_CG_SUPPORT_GFX_MGLS |
1261
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
1262
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
1263
+ AMD_CG_SUPPORT_GFX_CGCG |
1264
+ AMD_CG_SUPPORT_GFX_CGLS |
1265
+ AMD_CG_SUPPORT_GFX_CP_LS |
1266
+ AMD_CG_SUPPORT_MC_MGCG |
1267
+ AMD_CG_SUPPORT_MC_LS |
1268
+ AMD_CG_SUPPORT_SDMA_MGCG |
1269
+ AMD_CG_SUPPORT_SDMA_LS |
1270
+ AMD_CG_SUPPORT_BIF_LS |
1271
+ AMD_CG_SUPPORT_HDP_LS |
1272
+ AMD_CG_SUPPORT_ROM_MGCG |
1273
+ AMD_CG_SUPPORT_VCN_MGCG |
1274
+ AMD_CG_SUPPORT_JPEG_MGCG |
1275
+ AMD_CG_SUPPORT_IH_CG |
1276
+ AMD_CG_SUPPORT_ATHUB_LS |
1277
+ AMD_CG_SUPPORT_ATHUB_MGCG |
1278
+ AMD_CG_SUPPORT_DF_MGCG;
1279
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1280
+ AMD_PG_SUPPORT_VCN |
1281
+ AMD_PG_SUPPORT_JPEG |
1282
+ AMD_PG_SUPPORT_VCN_DPG;
7301283 break;
7311284 default:
7321285 /* FIXME: not supported yet */
....@@ -744,11 +1297,19 @@
7441297 static int soc15_common_late_init(void *handle)
7451298 {
7461299 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1300
+ int r = 0;
7471301
7481302 if (amdgpu_sriov_vf(adev))
7491303 xgpu_ai_mailbox_get_irq(adev);
7501304
751
- return 0;
1305
+ if (adev->asic_funcs &&
1306
+ adev->asic_funcs->reset_hdp_ras_error_count)
1307
+ adev->asic_funcs->reset_hdp_ras_error_count(adev);
1308
+
1309
+ if (adev->nbio.funcs->ras_late_init)
1310
+ r = adev->nbio.funcs->ras_late_init(adev);
1311
+
1312
+ return r;
7521313 }
7531314
7541315 static int soc15_common_sw_init(void *handle)
....@@ -758,12 +1319,37 @@
7581319 if (amdgpu_sriov_vf(adev))
7591320 xgpu_ai_mailbox_add_irq_id(adev);
7601321
1322
+ adev->df.funcs->sw_init(adev);
1323
+
7611324 return 0;
7621325 }
7631326
7641327 static int soc15_common_sw_fini(void *handle)
7651328 {
1329
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1330
+
1331
+ amdgpu_nbio_ras_fini(adev);
1332
+ adev->df.funcs->sw_fini(adev);
7661333 return 0;
1334
+}
1335
+
1336
+static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1337
+{
1338
+ int i;
1339
+ struct amdgpu_ring *ring;
1340
+
1341
+ /* sdma/ih doorbell range are programed by hypervisor */
1342
+ if (!amdgpu_sriov_vf(adev)) {
1343
+ for (i = 0; i < adev->sdma.num_instances; i++) {
1344
+ ring = &adev->sdma.instance[i].ring;
1345
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
1346
+ ring->use_doorbell, ring->doorbell_index,
1347
+ adev->doorbell_index.sdma_doorbell_range);
1348
+ }
1349
+
1350
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1351
+ adev->irq.ih.doorbell_index);
1352
+ }
7671353 }
7681354
7691355 static int soc15_common_hw_init(void *handle)
....@@ -775,9 +1361,22 @@
7751361 /* enable aspm */
7761362 soc15_program_aspm(adev);
7771363 /* setup nbio registers */
778
- adev->nbio_funcs->init_registers(adev);
1364
+ adev->nbio.funcs->init_registers(adev);
1365
+ /* remap HDP registers to a hole in mmio space,
1366
+ * for the purpose of expose those registers
1367
+ * to process space
1368
+ */
1369
+ if (adev->nbio.funcs->remap_hdp_registers)
1370
+ adev->nbio.funcs->remap_hdp_registers(adev);
1371
+
7791372 /* enable the doorbell aperture */
7801373 soc15_enable_doorbell_aperture(adev, true);
1374
+ /* HW doorbell routing policy: doorbell writing not
1375
+ * in SDMA/IH/MM/ACV range will be routed to CP. So
1376
+ * we need to init SDMA/IH/MM/ACV doorbell range prior
1377
+ * to CP ip block init and ring test.
1378
+ */
1379
+ soc15_doorbell_range_init(adev);
7811380
7821381 return 0;
7831382 }
....@@ -790,6 +1389,14 @@
7901389 soc15_enable_doorbell_aperture(adev, false);
7911390 if (amdgpu_sriov_vf(adev))
7921391 xgpu_ai_mailbox_put_irq(adev);
1392
+
1393
+ if (adev->nbio.ras_if &&
1394
+ amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1395
+ if (adev->nbio.funcs->init_ras_controller_interrupt)
1396
+ amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1397
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
1398
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1399
+ }
7931400
7941401 return 0;
7951402 }
....@@ -827,15 +1434,35 @@
8271434 {
8281435 uint32_t def, data;
8291436
830
- def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1437
+ if (adev->asic_type == CHIP_VEGA20 ||
1438
+ adev->asic_type == CHIP_ARCTURUS ||
1439
+ adev->asic_type == CHIP_RENOIR) {
1440
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
8311441
832
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
833
- data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
834
- else
835
- data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1442
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1443
+ data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1444
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1445
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1446
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
1447
+ else
1448
+ data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1449
+ HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1450
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1451
+ HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
8361452
837
- if (def != data)
838
- WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
1453
+ if (def != data)
1454
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
1455
+ } else {
1456
+ def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1457
+
1458
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1459
+ data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1460
+ else
1461
+ data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1462
+
1463
+ if (def != data)
1464
+ WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
1465
+ }
8391466 }
8401467
8411468 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
....@@ -912,34 +1539,39 @@
9121539 case CHIP_VEGA10:
9131540 case CHIP_VEGA12:
9141541 case CHIP_VEGA20:
915
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
916
- state == AMD_CG_STATE_GATE ? true : false);
917
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
918
- state == AMD_CG_STATE_GATE ? true : false);
1542
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1543
+ state == AMD_CG_STATE_GATE);
1544
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1545
+ state == AMD_CG_STATE_GATE);
9191546 soc15_update_hdp_light_sleep(adev,
920
- state == AMD_CG_STATE_GATE ? true : false);
1547
+ state == AMD_CG_STATE_GATE);
9211548 soc15_update_drm_clock_gating(adev,
922
- state == AMD_CG_STATE_GATE ? true : false);
1549
+ state == AMD_CG_STATE_GATE);
9231550 soc15_update_drm_light_sleep(adev,
924
- state == AMD_CG_STATE_GATE ? true : false);
1551
+ state == AMD_CG_STATE_GATE);
9251552 soc15_update_rom_medium_grain_clock_gating(adev,
926
- state == AMD_CG_STATE_GATE ? true : false);
927
- adev->df_funcs->update_medium_grain_clock_gating(adev,
928
- state == AMD_CG_STATE_GATE ? true : false);
1553
+ state == AMD_CG_STATE_GATE);
1554
+ adev->df.funcs->update_medium_grain_clock_gating(adev,
1555
+ state == AMD_CG_STATE_GATE);
9291556 break;
9301557 case CHIP_RAVEN:
931
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
932
- state == AMD_CG_STATE_GATE ? true : false);
933
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
934
- state == AMD_CG_STATE_GATE ? true : false);
1558
+ case CHIP_RENOIR:
1559
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1560
+ state == AMD_CG_STATE_GATE);
1561
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1562
+ state == AMD_CG_STATE_GATE);
9351563 soc15_update_hdp_light_sleep(adev,
936
- state == AMD_CG_STATE_GATE ? true : false);
1564
+ state == AMD_CG_STATE_GATE);
9371565 soc15_update_drm_clock_gating(adev,
938
- state == AMD_CG_STATE_GATE ? true : false);
1566
+ state == AMD_CG_STATE_GATE);
9391567 soc15_update_drm_light_sleep(adev,
940
- state == AMD_CG_STATE_GATE ? true : false);
1568
+ state == AMD_CG_STATE_GATE);
9411569 soc15_update_rom_medium_grain_clock_gating(adev,
942
- state == AMD_CG_STATE_GATE ? true : false);
1570
+ state == AMD_CG_STATE_GATE);
1571
+ break;
1572
+ case CHIP_ARCTURUS:
1573
+ soc15_update_hdp_light_sleep(adev,
1574
+ state == AMD_CG_STATE_GATE);
9431575 break;
9441576 default:
9451577 break;
....@@ -955,7 +1587,7 @@
9551587 if (amdgpu_sriov_vf(adev))
9561588 *flags = 0;
9571589
958
- adev->nbio_funcs->get_clockgating_state(adev, flags);
1590
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
9591591
9601592 /* AMD_CG_SUPPORT_HDP_LS */
9611593 data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
....@@ -977,7 +1609,7 @@
9771609 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
9781610 *flags |= AMD_CG_SUPPORT_ROM_MGCG;
9791611
980
- adev->df_funcs->get_clockgating_state(adev, flags);
1612
+ adev->df.funcs->get_clockgating_state(adev, flags);
9811613 }
9821614
9831615 static int soc15_common_set_powergating_state(void *handle,