.. | .. |
---|
23 | 23 | #include <linux/firmware.h> |
---|
24 | 24 | #include <linux/slab.h> |
---|
25 | 25 | #include <linux/module.h> |
---|
26 | | -#include <drm/drmP.h> |
---|
| 26 | +#include <linux/pci.h> |
---|
| 27 | + |
---|
27 | 28 | #include "amdgpu.h" |
---|
28 | 29 | #include "amdgpu_atombios.h" |
---|
29 | 30 | #include "amdgpu_ih.h" |
---|
.. | .. |
---|
43 | 44 | #include "hdp/hdp_4_0_sh_mask.h" |
---|
44 | 45 | #include "smuio/smuio_9_0_offset.h" |
---|
45 | 46 | #include "smuio/smuio_9_0_sh_mask.h" |
---|
| 47 | +#include "nbio/nbio_7_0_default.h" |
---|
| 48 | +#include "nbio/nbio_7_0_offset.h" |
---|
| 49 | +#include "nbio/nbio_7_0_sh_mask.h" |
---|
| 50 | +#include "nbio/nbio_7_0_smn.h" |
---|
| 51 | +#include "mp/mp_9_0_offset.h" |
---|
46 | 52 | |
---|
47 | 53 | #include "soc15.h" |
---|
48 | 54 | #include "soc15_common.h" |
---|
.. | .. |
---|
52 | 58 | #include "mmhub_v1_0.h" |
---|
53 | 59 | #include "df_v1_7.h" |
---|
54 | 60 | #include "df_v3_6.h" |
---|
| 61 | +#include "nbio_v6_1.h" |
---|
| 62 | +#include "nbio_v7_0.h" |
---|
| 63 | +#include "nbio_v7_4.h" |
---|
55 | 64 | #include "vega10_ih.h" |
---|
56 | 65 | #include "sdma_v4_0.h" |
---|
57 | 66 | #include "uvd_v7_0.h" |
---|
58 | 67 | #include "vce_v4_0.h" |
---|
59 | 68 | #include "vcn_v1_0.h" |
---|
| 69 | +#include "vcn_v2_0.h" |
---|
| 70 | +#include "jpeg_v2_0.h" |
---|
| 71 | +#include "vcn_v2_5.h" |
---|
| 72 | +#include "jpeg_v2_5.h" |
---|
60 | 73 | #include "dce_virtual.h" |
---|
61 | 74 | #include "mxgpu_ai.h" |
---|
| 75 | +#include "amdgpu_smu.h" |
---|
| 76 | +#include "amdgpu_ras.h" |
---|
| 77 | +#include "amdgpu_xgmi.h" |
---|
| 78 | +#include <uapi/linux/kfd_ioctl.h> |
---|
62 | 79 | |
---|
63 | 80 | #define mmMP0_MISC_CGTT_CTRL0 0x01b9 |
---|
64 | 81 | #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0 |
---|
65 | 82 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba |
---|
66 | 83 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 |
---|
67 | 84 | |
---|
| 85 | +/* for Vega20 register name change */ |
---|
| 86 | +#define mmHDP_MEM_POWER_CTRL 0x00d4 |
---|
| 87 | +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L |
---|
| 88 | +#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L |
---|
| 89 | +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L |
---|
| 90 | +#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L |
---|
| 91 | +#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 |
---|
| 92 | + |
---|
| 93 | +/* for Vega20/arcturus register offset change */ |
---|
| 94 | +#define mmROM_INDEX_VG20 0x00e4 |
---|
| 95 | +#define mmROM_INDEX_VG20_BASE_IDX 0 |
---|
| 96 | +#define mmROM_DATA_VG20 0x00e5 |
---|
| 97 | +#define mmROM_DATA_VG20_BASE_IDX 0 |
---|
| 98 | + |
---|
68 | 99 | /* |
---|
69 | 100 | * Indirect registers accessor |
---|
70 | 101 | */ |
---|
71 | 102 | static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) |
---|
72 | 103 | { |
---|
73 | | - unsigned long flags, address, data; |
---|
74 | | - u32 r; |
---|
75 | | - address = adev->nbio_funcs->get_pcie_index_offset(adev); |
---|
76 | | - data = adev->nbio_funcs->get_pcie_data_offset(adev); |
---|
| 104 | + unsigned long address, data; |
---|
| 105 | + address = adev->nbio.funcs->get_pcie_index_offset(adev); |
---|
| 106 | + data = adev->nbio.funcs->get_pcie_data_offset(adev); |
---|
77 | 107 | |
---|
78 | | - spin_lock_irqsave(&adev->pcie_idx_lock, flags); |
---|
79 | | - WREG32(address, reg); |
---|
80 | | - (void)RREG32(address); |
---|
81 | | - r = RREG32(data); |
---|
82 | | - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); |
---|
83 | | - return r; |
---|
| 108 | + return amdgpu_device_indirect_rreg(adev, address, data, reg); |
---|
84 | 109 | } |
---|
85 | 110 | |
---|
86 | 111 | static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
---|
87 | 112 | { |
---|
88 | | - unsigned long flags, address, data; |
---|
| 113 | + unsigned long address, data; |
---|
89 | 114 | |
---|
90 | | - address = adev->nbio_funcs->get_pcie_index_offset(adev); |
---|
91 | | - data = adev->nbio_funcs->get_pcie_data_offset(adev); |
---|
| 115 | + address = adev->nbio.funcs->get_pcie_index_offset(adev); |
---|
| 116 | + data = adev->nbio.funcs->get_pcie_data_offset(adev); |
---|
92 | 117 | |
---|
93 | | - spin_lock_irqsave(&adev->pcie_idx_lock, flags); |
---|
94 | | - WREG32(address, reg); |
---|
95 | | - (void)RREG32(address); |
---|
96 | | - WREG32(data, v); |
---|
97 | | - (void)RREG32(data); |
---|
98 | | - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); |
---|
| 118 | + amdgpu_device_indirect_wreg(adev, address, data, reg, v); |
---|
| 119 | +} |
---|
| 120 | + |
---|
| 121 | +static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) |
---|
| 122 | +{ |
---|
| 123 | + unsigned long address, data; |
---|
| 124 | + address = adev->nbio.funcs->get_pcie_index_offset(adev); |
---|
| 125 | + data = adev->nbio.funcs->get_pcie_data_offset(adev); |
---|
| 126 | + |
---|
| 127 | + return amdgpu_device_indirect_rreg64(adev, address, data, reg); |
---|
| 128 | +} |
---|
| 129 | + |
---|
| 130 | +static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) |
---|
| 131 | +{ |
---|
| 132 | + unsigned long address, data; |
---|
| 133 | + |
---|
| 134 | + address = adev->nbio.funcs->get_pcie_index_offset(adev); |
---|
| 135 | + data = adev->nbio.funcs->get_pcie_data_offset(adev); |
---|
| 136 | + |
---|
| 137 | + amdgpu_device_indirect_wreg64(adev, address, data, reg, v); |
---|
99 | 138 | } |
---|
100 | 139 | |
---|
101 | 140 | static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) |
---|
.. | .. |
---|
200 | 239 | |
---|
201 | 240 | static u32 soc15_get_config_memsize(struct amdgpu_device *adev) |
---|
202 | 241 | { |
---|
203 | | - return adev->nbio_funcs->get_memsize(adev); |
---|
| 242 | + return adev->nbio.funcs->get_memsize(adev); |
---|
204 | 243 | } |
---|
205 | 244 | |
---|
206 | 245 | static u32 soc15_get_xclk(struct amdgpu_device *adev) |
---|
207 | 246 | { |
---|
208 | 247 | u32 reference_clock = adev->clock.spll.reference_freq; |
---|
209 | 248 | |
---|
| 249 | + if (adev->asic_type == CHIP_RENOIR) |
---|
| 250 | + return 10000; |
---|
210 | 251 | if (adev->asic_type == CHIP_RAVEN) |
---|
211 | 252 | return reference_clock / 4; |
---|
212 | 253 | |
---|
.. | .. |
---|
223 | 264 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid); |
---|
224 | 265 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue); |
---|
225 | 266 | |
---|
226 | | - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl); |
---|
| 267 | + WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl); |
---|
227 | 268 | } |
---|
228 | 269 | |
---|
229 | 270 | static void soc15_vga_set_state(struct amdgpu_device *adev, bool state) |
---|
.. | .. |
---|
242 | 283 | { |
---|
243 | 284 | u32 *dw_ptr; |
---|
244 | 285 | u32 i, length_dw; |
---|
| 286 | + uint32_t rom_index_offset; |
---|
| 287 | + uint32_t rom_data_offset; |
---|
245 | 288 | |
---|
246 | 289 | if (bios == NULL) |
---|
247 | 290 | return false; |
---|
.. | .. |
---|
254 | 297 | dw_ptr = (u32 *)bios; |
---|
255 | 298 | length_dw = ALIGN(length_bytes, 4) / 4; |
---|
256 | 299 | |
---|
| 300 | + switch (adev->asic_type) { |
---|
| 301 | + case CHIP_VEGA20: |
---|
| 302 | + case CHIP_ARCTURUS: |
---|
| 303 | + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20); |
---|
| 304 | + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20); |
---|
| 305 | + break; |
---|
| 306 | + default: |
---|
| 307 | + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); |
---|
| 308 | + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); |
---|
| 309 | + break; |
---|
| 310 | + } |
---|
| 311 | + |
---|
257 | 312 | /* set rom index to 0 */ |
---|
258 | | - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); |
---|
| 313 | + WREG32(rom_index_offset, 0); |
---|
259 | 314 | /* read out the rom data */ |
---|
260 | 315 | for (i = 0; i < length_dw; i++) |
---|
261 | | - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); |
---|
| 316 | + dw_ptr[i] = RREG32(rom_data_offset); |
---|
262 | 317 | |
---|
263 | 318 | return true; |
---|
264 | 319 | } |
---|
265 | | - |
---|
266 | | -struct soc15_allowed_register_entry { |
---|
267 | | - uint32_t hwip; |
---|
268 | | - uint32_t inst; |
---|
269 | | - uint32_t seg; |
---|
270 | | - uint32_t reg_offset; |
---|
271 | | - bool grbm_indexed; |
---|
272 | | -}; |
---|
273 | | - |
---|
274 | 320 | |
---|
275 | 321 | static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { |
---|
276 | 322 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)}, |
---|
.. | .. |
---|
288 | 334 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, |
---|
289 | 335 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, |
---|
290 | 336 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, |
---|
| 337 | + { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, |
---|
291 | 338 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, |
---|
292 | 339 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, |
---|
293 | 340 | { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, |
---|
.. | .. |
---|
335 | 382 | *value = 0; |
---|
336 | 383 | for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { |
---|
337 | 384 | en = &soc15_allowed_read_registers[i]; |
---|
338 | | - if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] |
---|
| 385 | + if (adev->reg_offset[en->hwip][en->inst] && |
---|
| 386 | + reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] |
---|
339 | 387 | + en->reg_offset)) |
---|
340 | 388 | continue; |
---|
341 | 389 | |
---|
.. | .. |
---|
376 | 424 | } else { |
---|
377 | 425 | tmp = RREG32(reg); |
---|
378 | 426 | tmp &= ~(entry->and_mask); |
---|
379 | | - tmp |= entry->or_mask; |
---|
| 427 | + tmp |= (entry->or_mask & entry->and_mask); |
---|
380 | 428 | } |
---|
381 | | - WREG32(reg, tmp); |
---|
| 429 | + |
---|
| 430 | + if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) || |
---|
| 431 | + reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) || |
---|
| 432 | + reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) || |
---|
| 433 | + reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)) |
---|
| 434 | + WREG32_RLC(reg, tmp); |
---|
| 435 | + else |
---|
| 436 | + WREG32(reg, tmp); |
---|
| 437 | + |
---|
382 | 438 | } |
---|
383 | 439 | |
---|
384 | 440 | } |
---|
385 | 441 | |
---|
386 | | - |
---|
387 | | -static int soc15_asic_reset(struct amdgpu_device *adev) |
---|
| 442 | +static int soc15_asic_mode1_reset(struct amdgpu_device *adev) |
---|
388 | 443 | { |
---|
389 | 444 | u32 i; |
---|
| 445 | + int ret = 0; |
---|
390 | 446 | |
---|
391 | 447 | amdgpu_atombios_scratch_regs_engine_hung(adev, true); |
---|
392 | 448 | |
---|
393 | | - dev_info(adev->dev, "GPU reset\n"); |
---|
| 449 | + dev_info(adev->dev, "GPU mode1 reset\n"); |
---|
394 | 450 | |
---|
395 | 451 | /* disable BM */ |
---|
396 | 452 | pci_clear_master(adev->pdev); |
---|
397 | 453 | |
---|
398 | | - pci_save_state(adev->pdev); |
---|
| 454 | + amdgpu_device_cache_pci_state(adev->pdev); |
---|
399 | 455 | |
---|
400 | | - psp_gpu_reset(adev); |
---|
| 456 | + ret = psp_gpu_reset(adev); |
---|
| 457 | + if (ret) |
---|
| 458 | + dev_err(adev->dev, "GPU mode1 reset failed\n"); |
---|
401 | 459 | |
---|
402 | | - pci_restore_state(adev->pdev); |
---|
| 460 | + amdgpu_device_load_pci_state(adev->pdev); |
---|
403 | 461 | |
---|
404 | 462 | /* wait for asic to come out of reset */ |
---|
405 | 463 | for (i = 0; i < adev->usec_timeout; i++) { |
---|
406 | | - u32 memsize = adev->nbio_funcs->get_memsize(adev); |
---|
| 464 | + u32 memsize = adev->nbio.funcs->get_memsize(adev); |
---|
407 | 465 | |
---|
408 | 466 | if (memsize != 0xffffffff) |
---|
409 | 467 | break; |
---|
.. | .. |
---|
412 | 470 | |
---|
413 | 471 | amdgpu_atombios_scratch_regs_engine_hung(adev, false); |
---|
414 | 472 | |
---|
| 473 | + return ret; |
---|
| 474 | +} |
---|
| 475 | + |
---|
| 476 | +static int soc15_asic_baco_reset(struct amdgpu_device *adev) |
---|
| 477 | +{ |
---|
| 478 | + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
---|
| 479 | + int ret = 0; |
---|
| 480 | + |
---|
| 481 | + /* avoid NBIF got stuck when do RAS recovery in BACO reset */ |
---|
| 482 | + if (ras && ras->supported) |
---|
| 483 | + adev->nbio.funcs->enable_doorbell_interrupt(adev, false); |
---|
| 484 | + |
---|
| 485 | + ret = amdgpu_dpm_baco_reset(adev); |
---|
| 486 | + if (ret) |
---|
| 487 | + return ret; |
---|
| 488 | + |
---|
| 489 | + /* re-enable doorbell interrupt after BACO exit */ |
---|
| 490 | + if (ras && ras->supported) |
---|
| 491 | + adev->nbio.funcs->enable_doorbell_interrupt(adev, true); |
---|
| 492 | + |
---|
415 | 493 | return 0; |
---|
| 494 | +} |
---|
| 495 | + |
---|
| 496 | +static enum amd_reset_method |
---|
| 497 | +soc15_asic_reset_method(struct amdgpu_device *adev) |
---|
| 498 | +{ |
---|
| 499 | + bool baco_reset = false; |
---|
| 500 | + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
---|
| 501 | + |
---|
| 502 | + if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 || |
---|
| 503 | + amdgpu_reset_method == AMD_RESET_METHOD_MODE2 || |
---|
| 504 | + amdgpu_reset_method == AMD_RESET_METHOD_BACO) |
---|
| 505 | + return amdgpu_reset_method; |
---|
| 506 | + |
---|
| 507 | + if (amdgpu_reset_method != -1) |
---|
| 508 | + dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", |
---|
| 509 | + amdgpu_reset_method); |
---|
| 510 | + |
---|
| 511 | + switch (adev->asic_type) { |
---|
| 512 | + case CHIP_RAVEN: |
---|
| 513 | + case CHIP_RENOIR: |
---|
| 514 | + return AMD_RESET_METHOD_MODE2; |
---|
| 515 | + case CHIP_VEGA10: |
---|
| 516 | + case CHIP_VEGA12: |
---|
| 517 | + case CHIP_ARCTURUS: |
---|
| 518 | + baco_reset = amdgpu_dpm_is_baco_supported(adev); |
---|
| 519 | + break; |
---|
| 520 | + case CHIP_VEGA20: |
---|
| 521 | + if (adev->psp.sos_fw_version >= 0x80067) |
---|
| 522 | + baco_reset = amdgpu_dpm_is_baco_supported(adev); |
---|
| 523 | + |
---|
| 524 | + /* |
---|
| 525 | + * 1. PMFW version > 0x284300: all cases use baco |
---|
| 526 | + * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco |
---|
| 527 | + */ |
---|
| 528 | + if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400) |
---|
| 529 | + baco_reset = false; |
---|
| 530 | + break; |
---|
| 531 | + default: |
---|
| 532 | + break; |
---|
| 533 | + } |
---|
| 534 | + |
---|
| 535 | + if (baco_reset) |
---|
| 536 | + return AMD_RESET_METHOD_BACO; |
---|
| 537 | + else |
---|
| 538 | + return AMD_RESET_METHOD_MODE1; |
---|
| 539 | +} |
---|
| 540 | + |
---|
| 541 | +static int soc15_asic_reset(struct amdgpu_device *adev) |
---|
| 542 | +{ |
---|
| 543 | + /* original raven doesn't have full asic reset */ |
---|
| 544 | + if ((adev->apu_flags & AMD_APU_IS_RAVEN) && |
---|
| 545 | + !(adev->apu_flags & AMD_APU_IS_RAVEN2)) |
---|
| 546 | + return 0; |
---|
| 547 | + |
---|
| 548 | + switch (soc15_asic_reset_method(adev)) { |
---|
| 549 | + case AMD_RESET_METHOD_BACO: |
---|
| 550 | + dev_info(adev->dev, "BACO reset\n"); |
---|
| 551 | + return soc15_asic_baco_reset(adev); |
---|
| 552 | + case AMD_RESET_METHOD_MODE2: |
---|
| 553 | + dev_info(adev->dev, "MODE2 reset\n"); |
---|
| 554 | + return amdgpu_dpm_mode2_reset(adev); |
---|
| 555 | + default: |
---|
| 556 | + dev_info(adev->dev, "MODE1 reset\n"); |
---|
| 557 | + return soc15_asic_mode1_reset(adev); |
---|
| 558 | + } |
---|
| 559 | +} |
---|
| 560 | + |
---|
| 561 | +static bool soc15_supports_baco(struct amdgpu_device *adev) |
---|
| 562 | +{ |
---|
| 563 | + switch (adev->asic_type) { |
---|
| 564 | + case CHIP_VEGA10: |
---|
| 565 | + case CHIP_VEGA12: |
---|
| 566 | + case CHIP_ARCTURUS: |
---|
| 567 | + return amdgpu_dpm_is_baco_supported(adev); |
---|
| 568 | + case CHIP_VEGA20: |
---|
| 569 | + if (adev->psp.sos_fw_version >= 0x80067) |
---|
| 570 | + return amdgpu_dpm_is_baco_supported(adev); |
---|
| 571 | + return false; |
---|
| 572 | + default: |
---|
| 573 | + return false; |
---|
| 574 | + } |
---|
416 | 575 | } |
---|
417 | 576 | |
---|
418 | 577 | /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, |
---|
.. | .. |
---|
471 | 630 | static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, |
---|
472 | 631 | bool enable) |
---|
473 | 632 | { |
---|
474 | | - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); |
---|
475 | | - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); |
---|
| 633 | + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); |
---|
| 634 | + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); |
---|
476 | 635 | } |
---|
477 | 636 | |
---|
478 | 637 | static const struct amdgpu_ip_block_version vega10_common_ip_block = |
---|
.. | .. |
---|
484 | 643 | .funcs = &soc15_common_ip_funcs, |
---|
485 | 644 | }; |
---|
486 | 645 | |
---|
487 | | -int soc15_set_ip_blocks(struct amdgpu_device *adev) |
---|
| 646 | +static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) |
---|
488 | 647 | { |
---|
| 648 | + return adev->nbio.funcs->get_rev_id(adev); |
---|
| 649 | +} |
---|
| 650 | + |
---|
| 651 | +static void soc15_reg_base_init(struct amdgpu_device *adev) |
---|
| 652 | +{ |
---|
| 653 | + int r; |
---|
| 654 | + |
---|
489 | 655 | /* Set IP register base before any HW register access */ |
---|
490 | 656 | switch (adev->asic_type) { |
---|
491 | 657 | case CHIP_VEGA10: |
---|
.. | .. |
---|
493 | 659 | case CHIP_RAVEN: |
---|
494 | 660 | vega10_reg_base_init(adev); |
---|
495 | 661 | break; |
---|
| 662 | + case CHIP_RENOIR: |
---|
| 663 | + /* It's safe to do ip discovery here for Renoir, |
---|
| 664 | + * it doesn't support SRIOV. */ |
---|
| 665 | + if (amdgpu_discovery) { |
---|
| 666 | + r = amdgpu_discovery_reg_base_init(adev); |
---|
| 667 | + if (r == 0) |
---|
| 668 | + break; |
---|
| 669 | + DRM_WARN("failed to init reg base from ip discovery table, " |
---|
| 670 | + "fallback to legacy init method\n"); |
---|
| 671 | + } |
---|
| 672 | + vega10_reg_base_init(adev); |
---|
| 673 | + break; |
---|
496 | 674 | case CHIP_VEGA20: |
---|
497 | 675 | vega20_reg_base_init(adev); |
---|
498 | 676 | break; |
---|
| 677 | + case CHIP_ARCTURUS: |
---|
| 678 | + arct_reg_base_init(adev); |
---|
| 679 | + break; |
---|
499 | 680 | default: |
---|
500 | | - return -EINVAL; |
---|
| 681 | + DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type); |
---|
| 682 | + break; |
---|
| 683 | + } |
---|
| 684 | +} |
---|
| 685 | + |
---|
| 686 | +void soc15_set_virt_ops(struct amdgpu_device *adev) |
---|
| 687 | +{ |
---|
| 688 | + adev->virt.ops = &xgpu_ai_virt_ops; |
---|
| 689 | + |
---|
| 690 | + /* init soc15 reg base early enough so we can |
---|
| 691 | + * request full access for sriov before |
---|
| 692 | + * set_ip_blocks. */ |
---|
| 693 | + soc15_reg_base_init(adev); |
---|
| 694 | +} |
---|
| 695 | + |
---|
| 696 | +int soc15_set_ip_blocks(struct amdgpu_device *adev) |
---|
| 697 | +{ |
---|
| 698 | + /* for bare metal case */ |
---|
| 699 | + if (!amdgpu_sriov_vf(adev)) |
---|
| 700 | + soc15_reg_base_init(adev); |
---|
| 701 | + |
---|
| 702 | + if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) |
---|
| 703 | + adev->gmc.xgmi.supported = true; |
---|
| 704 | + |
---|
| 705 | + if (adev->flags & AMD_IS_APU) { |
---|
| 706 | + adev->nbio.funcs = &nbio_v7_0_funcs; |
---|
| 707 | + adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; |
---|
| 708 | + } else if (adev->asic_type == CHIP_VEGA20 || |
---|
| 709 | + adev->asic_type == CHIP_ARCTURUS) { |
---|
| 710 | + adev->nbio.funcs = &nbio_v7_4_funcs; |
---|
| 711 | + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; |
---|
| 712 | + } else { |
---|
| 713 | + adev->nbio.funcs = &nbio_v6_1_funcs; |
---|
| 714 | + adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; |
---|
501 | 715 | } |
---|
502 | 716 | |
---|
503 | | - if (adev->flags & AMD_IS_APU) |
---|
504 | | - adev->nbio_funcs = &nbio_v7_0_funcs; |
---|
505 | | - else if (adev->asic_type == CHIP_VEGA20) |
---|
506 | | - adev->nbio_funcs = &nbio_v7_0_funcs; |
---|
| 717 | + if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) |
---|
| 718 | + adev->df.funcs = &df_v3_6_funcs; |
---|
507 | 719 | else |
---|
508 | | - adev->nbio_funcs = &nbio_v6_1_funcs; |
---|
| 720 | + adev->df.funcs = &df_v1_7_funcs; |
---|
509 | 721 | |
---|
510 | | - if (adev->asic_type == CHIP_VEGA20) |
---|
511 | | - adev->df_funcs = &df_v3_6_funcs; |
---|
512 | | - else |
---|
513 | | - adev->df_funcs = &df_v1_7_funcs; |
---|
514 | | - adev->nbio_funcs->detect_hw_virt(adev); |
---|
515 | | - |
---|
516 | | - if (amdgpu_sriov_vf(adev)) |
---|
517 | | - adev->virt.ops = &xgpu_ai_virt_ops; |
---|
| 722 | + adev->rev_id = soc15_get_rev_id(adev); |
---|
518 | 723 | |
---|
519 | 724 | switch (adev->asic_type) { |
---|
520 | 725 | case CHIP_VEGA10: |
---|
.. | .. |
---|
522 | 727 | case CHIP_VEGA20: |
---|
523 | 728 | amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); |
---|
524 | 729 | amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); |
---|
525 | | - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
---|
526 | | - if (adev->asic_type != CHIP_VEGA20) { |
---|
527 | | - amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); |
---|
| 730 | + |
---|
| 731 | + /* For Vega10 SR-IOV, PSP need to be initialized before IH */ |
---|
| 732 | + if (amdgpu_sriov_vf(adev)) { |
---|
| 733 | + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { |
---|
| 734 | + if (adev->asic_type == CHIP_VEGA20) |
---|
| 735 | + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); |
---|
| 736 | + else |
---|
| 737 | + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); |
---|
| 738 | + } |
---|
| 739 | + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
---|
| 740 | + } else { |
---|
| 741 | + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
---|
| 742 | + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { |
---|
| 743 | + if (adev->asic_type == CHIP_VEGA20) |
---|
| 744 | + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); |
---|
| 745 | + else |
---|
| 746 | + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); |
---|
| 747 | + } |
---|
| 748 | + } |
---|
| 749 | + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); |
---|
| 750 | + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); |
---|
| 751 | + if (is_support_sw_smu(adev)) { |
---|
528 | 752 | if (!amdgpu_sriov_vf(adev)) |
---|
529 | | - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); |
---|
| 753 | + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); |
---|
| 754 | + } else { |
---|
| 755 | + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); |
---|
530 | 756 | } |
---|
531 | 757 | if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) |
---|
532 | 758 | amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); |
---|
533 | 759 | #if defined(CONFIG_DRM_AMD_DC) |
---|
534 | 760 | else if (amdgpu_device_has_dc_support(adev)) |
---|
535 | 761 | amdgpu_device_ip_block_add(adev, &dm_ip_block); |
---|
536 | | -#else |
---|
537 | | -# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." |
---|
538 | 762 | #endif |
---|
539 | | - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); |
---|
540 | | - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); |
---|
541 | | - amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); |
---|
542 | | - amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); |
---|
| 763 | + if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) { |
---|
| 764 | + amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); |
---|
| 765 | + amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); |
---|
| 766 | + } |
---|
543 | 767 | break; |
---|
544 | 768 | case CHIP_RAVEN: |
---|
545 | 769 | amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); |
---|
546 | 770 | amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); |
---|
547 | 771 | amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
---|
548 | | - amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); |
---|
| 772 | + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) |
---|
| 773 | + amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); |
---|
| 774 | + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); |
---|
| 775 | + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); |
---|
549 | 776 | amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); |
---|
550 | 777 | if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) |
---|
551 | 778 | amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); |
---|
552 | 779 | #if defined(CONFIG_DRM_AMD_DC) |
---|
553 | 780 | else if (amdgpu_device_has_dc_support(adev)) |
---|
554 | 781 | amdgpu_device_ip_block_add(adev, &dm_ip_block); |
---|
555 | | -#else |
---|
556 | | -# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." |
---|
557 | 782 | #endif |
---|
| 783 | + amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); |
---|
| 784 | + break; |
---|
| 785 | + case CHIP_ARCTURUS: |
---|
| 786 | + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); |
---|
| 787 | + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); |
---|
| 788 | + |
---|
| 789 | + if (amdgpu_sriov_vf(adev)) { |
---|
| 790 | + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) |
---|
| 791 | + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); |
---|
| 792 | + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
---|
| 793 | + } else { |
---|
| 794 | + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
---|
| 795 | + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) |
---|
| 796 | + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); |
---|
| 797 | + } |
---|
| 798 | + |
---|
| 799 | + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) |
---|
| 800 | + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); |
---|
558 | 801 | amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); |
---|
559 | 802 | amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); |
---|
560 | | - amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); |
---|
| 803 | + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); |
---|
| 804 | + |
---|
| 805 | + if (amdgpu_sriov_vf(adev)) { |
---|
| 806 | + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) |
---|
| 807 | + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); |
---|
| 808 | + } else { |
---|
| 809 | + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); |
---|
| 810 | + } |
---|
| 811 | + if (!amdgpu_sriov_vf(adev)) |
---|
| 812 | + amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); |
---|
| 813 | + break; |
---|
| 814 | + case CHIP_RENOIR: |
---|
| 815 | + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); |
---|
| 816 | + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); |
---|
| 817 | + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); |
---|
| 818 | + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) |
---|
| 819 | + amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); |
---|
| 820 | + amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); |
---|
| 821 | + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); |
---|
| 822 | + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); |
---|
| 823 | + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) |
---|
| 824 | + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); |
---|
| 825 | +#if defined(CONFIG_DRM_AMD_DC) |
---|
| 826 | + else if (amdgpu_device_has_dc_support(adev)) |
---|
| 827 | + amdgpu_device_ip_block_add(adev, &dm_ip_block); |
---|
| 828 | +#endif |
---|
| 829 | + amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); |
---|
| 830 | + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); |
---|
561 | 831 | break; |
---|
562 | 832 | default: |
---|
563 | 833 | return -EINVAL; |
---|
.. | .. |
---|
566 | 836 | return 0; |
---|
567 | 837 | } |
---|
568 | 838 | |
---|
569 | | -static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) |
---|
570 | | -{ |
---|
571 | | - return adev->nbio_funcs->get_rev_id(adev); |
---|
572 | | -} |
---|
573 | | - |
---|
574 | 839 | static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) |
---|
575 | 840 | { |
---|
576 | | - adev->nbio_funcs->hdp_flush(adev, ring); |
---|
| 841 | + adev->nbio.funcs->hdp_flush(adev, ring); |
---|
577 | 842 | } |
---|
578 | 843 | |
---|
579 | 844 | static void soc15_invalidate_hdp(struct amdgpu_device *adev, |
---|
580 | 845 | struct amdgpu_ring *ring) |
---|
581 | 846 | { |
---|
582 | 847 | if (!ring || !ring->funcs->emit_wreg) |
---|
583 | | - WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1); |
---|
| 848 | + WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); |
---|
584 | 849 | else |
---|
585 | 850 | amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( |
---|
586 | 851 | HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); |
---|
.. | .. |
---|
592 | 857 | return true; |
---|
593 | 858 | } |
---|
594 | 859 | |
---|
| 860 | +static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev) |
---|
| 861 | +{ |
---|
| 862 | + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) |
---|
| 863 | + return; |
---|
| 865 | + /* read back hdp ras counter to reset it to 0 */ |
---|
| 865 | + RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); |
---|
| 866 | +} |
---|
| 867 | + |
---|
| 868 | +static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, |
---|
| 869 | + uint64_t *count1) |
---|
| 870 | +{ |
---|
| 871 | + uint32_t perfctr = 0; |
---|
| 872 | + uint64_t cnt0_of, cnt1_of; |
---|
| 873 | + int tmp; |
---|
| 874 | + |
---|
| 875 | + /* This reports 0 on APUs, so return to avoid writing/reading registers |
---|
| 876 | + * that may or may not be different from their GPU counterparts |
---|
| 877 | + */ |
---|
| 878 | + if (adev->flags & AMD_IS_APU) |
---|
| 879 | + return; |
---|
| 880 | + |
---|
| 881 | + /* Set the 2 events that we wish to watch, defined above */ |
---|
| 882 | + /* Reg 40 is # received msgs */ |
---|
| 883 | + /* Reg 104 is # of posted requests sent */ |
---|
| 884 | + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); |
---|
| 885 | + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); |
---|
| 886 | + |
---|
| 887 | + /* Write to enable desired perf counters */ |
---|
| 888 | + WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr); |
---|
| 889 | + /* Zero out and enable the perf counters |
---|
| 890 | + * Write 0x5: |
---|
| 891 | + * Bit 0 = Start all counters(1) |
---|
| 892 | + * Bit 2 = Global counter reset enable(1) |
---|
| 893 | + */ |
---|
| 894 | + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005); |
---|
| 895 | + |
---|
| 896 | + msleep(1000); |
---|
| 897 | + |
---|
| 898 | + /* Load the shadow and disable the perf counters |
---|
| 899 | + * Write 0x2: |
---|
| 900 | + * Bit 0 = Stop counters(0) |
---|
| 901 | + * Bit 1 = Load the shadow counters(1) |
---|
| 902 | + */ |
---|
| 903 | + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002); |
---|
| 904 | + |
---|
| 905 | + /* Read register values to get any >32bit overflow */ |
---|
| 906 | + tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK); |
---|
| 907 | + cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER); |
---|
| 908 | + cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER); |
---|
| 909 | + |
---|
| 910 | + /* Get the values and add the overflow */ |
---|
| 911 | + *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32); |
---|
| 912 | + *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); |
---|
| 913 | +} |
---|
| 914 | + |
---|
| 915 | +static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, |
---|
| 916 | + uint64_t *count1) |
---|
| 917 | +{ |
---|
| 918 | + uint32_t perfctr = 0; |
---|
| 919 | + uint64_t cnt0_of, cnt1_of; |
---|
| 920 | + int tmp; |
---|
| 921 | + |
---|
| 922 | + /* This reports 0 on APUs, so return to avoid writing/reading registers |
---|
| 923 | + * that may or may not be different from their GPU counterparts |
---|
| 924 | + */ |
---|
| 925 | + if (adev->flags & AMD_IS_APU) |
---|
| 926 | + return; |
---|
| 927 | + |
---|
| 928 | + /* Set the 2 events that we wish to watch, defined above */ |
---|
| 929 | + /* Reg 40 is # received msgs */ |
---|
| 930 | + /* Reg 108 is # of posted requests sent on VG20 */ |
---|
| 931 | + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3, |
---|
| 932 | + EVENT0_SEL, 40); |
---|
| 933 | + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3, |
---|
| 934 | + EVENT1_SEL, 108); |
---|
| 935 | + |
---|
| 936 | + /* Write to enable desired perf counters */ |
---|
| 937 | + WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr); |
---|
| 938 | + /* Zero out and enable the perf counters |
---|
| 939 | + * Write 0x5: |
---|
| 940 | + * Bit 0 = Start all counters(1) |
---|
| 941 | + * Bit 2 = Global counter reset enable(1) |
---|
| 942 | + */ |
---|
| 943 | + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005); |
---|
| 944 | + |
---|
| 945 | + msleep(1000); |
---|
| 946 | + |
---|
| 947 | + /* Load the shadow and disable the perf counters |
---|
| 948 | + * Write 0x2: |
---|
| 949 | + * Bit 0 = Stop counters(0) |
---|
| 950 | + * Bit 1 = Load the shadow counters(1) |
---|
| 951 | + */ |
---|
| 952 | + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002); |
---|
| 953 | + |
---|
| 954 | + /* Read register values to get any >32bit overflow */ |
---|
| 955 | + tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3); |
---|
| 956 | + cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER); |
---|
| 957 | + cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER); |
---|
| 958 | + |
---|
| 959 | + /* Get the values and add the overflow */ |
---|
| 960 | + *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32); |
---|
| 961 | + *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32); |
---|
| 962 | +} |
---|
| 963 | + |
---|
| 964 | +static bool soc15_need_reset_on_init(struct amdgpu_device *adev) |
---|
| 965 | +{ |
---|
| 966 | + u32 sol_reg; |
---|
| 967 | + |
---|
| 968 | + /* Just return false for soc15 GPUs. Reset does not seem to |
---|
| 969 | + * be necessary. |
---|
| 970 | + */ |
---|
| 971 | + if (!amdgpu_passthrough(adev)) |
---|
| 972 | + return false; |
---|
| 973 | + |
---|
| 974 | + if (adev->flags & AMD_IS_APU) |
---|
| 975 | + return false; |
---|
| 976 | + |
---|
| 977 | + /* Check sOS sign of life register to confirm sys driver and sOS |
---|
| 978 | + * are already been loaded. |
---|
| 979 | + */ |
---|
| 980 | + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); |
---|
| 981 | + if (sol_reg) |
---|
| 982 | + return true; |
---|
| 983 | + |
---|
| 984 | + return false; |
---|
| 985 | +} |
---|
| 986 | + |
---|
| 987 | +static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev) |
---|
| 988 | +{ |
---|
| 989 | + uint64_t nak_r, nak_g; |
---|
| 990 | + |
---|
| 991 | + /* Get the number of NAKs received and generated */ |
---|
| 992 | + nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK); |
---|
| 993 | + nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED); |
---|
| 994 | + |
---|
| 995 | + /* Add the total number of NAKs, i.e the number of replays */ |
---|
| 996 | + return (nak_r + nak_g); |
---|
| 997 | +} |
---|
| 998 | + |
---|
| 999 | +static void soc15_pre_asic_init(struct amdgpu_device *adev) |
---|
| 1000 | +{ |
---|
| 1001 | + gmc_v9_0_restore_registers(adev); |
---|
| 1002 | +} |
---|
| 1003 | + |
---|
595 | 1004 | static const struct amdgpu_asic_funcs soc15_asic_funcs = |
---|
596 | 1005 | { |
---|
597 | 1006 | .read_disabled_bios = &soc15_read_disabled_bios, |
---|
598 | 1007 | .read_bios_from_rom = &soc15_read_bios_from_rom, |
---|
599 | 1008 | .read_register = &soc15_read_register, |
---|
600 | 1009 | .reset = &soc15_asic_reset, |
---|
| 1010 | + .reset_method = &soc15_asic_reset_method, |
---|
601 | 1011 | .set_vga_state = &soc15_vga_set_state, |
---|
602 | 1012 | .get_xclk = &soc15_get_xclk, |
---|
603 | 1013 | .set_uvd_clocks = &soc15_set_uvd_clocks, |
---|
.. | .. |
---|
606 | 1016 | .flush_hdp = &soc15_flush_hdp, |
---|
607 | 1017 | .invalidate_hdp = &soc15_invalidate_hdp, |
---|
608 | 1018 | .need_full_reset = &soc15_need_full_reset, |
---|
| 1019 | + .init_doorbell_index = &vega10_doorbell_index_init, |
---|
| 1020 | + .get_pcie_usage = &soc15_get_pcie_usage, |
---|
| 1021 | + .need_reset_on_init = &soc15_need_reset_on_init, |
---|
| 1022 | + .get_pcie_replay_count = &soc15_get_pcie_replay_count, |
---|
| 1023 | + .supports_baco = &soc15_supports_baco, |
---|
| 1024 | + .pre_asic_init = &soc15_pre_asic_init, |
---|
| 1025 | +}; |
---|
| 1026 | + |
---|
| 1027 | +static const struct amdgpu_asic_funcs vega20_asic_funcs = |
---|
| 1028 | +{ |
---|
| 1029 | + .read_disabled_bios = &soc15_read_disabled_bios, |
---|
| 1030 | + .read_bios_from_rom = &soc15_read_bios_from_rom, |
---|
| 1031 | + .read_register = &soc15_read_register, |
---|
| 1032 | + .reset = &soc15_asic_reset, |
---|
| 1033 | + .reset_method = &soc15_asic_reset_method, |
---|
| 1034 | + .set_vga_state = &soc15_vga_set_state, |
---|
| 1035 | + .get_xclk = &soc15_get_xclk, |
---|
| 1036 | + .set_uvd_clocks = &soc15_set_uvd_clocks, |
---|
| 1037 | + .set_vce_clocks = &soc15_set_vce_clocks, |
---|
| 1038 | + .get_config_memsize = &soc15_get_config_memsize, |
---|
| 1039 | + .flush_hdp = &soc15_flush_hdp, |
---|
| 1040 | + .invalidate_hdp = &soc15_invalidate_hdp, |
---|
| 1041 | + .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count, |
---|
| 1042 | + .need_full_reset = &soc15_need_full_reset, |
---|
| 1043 | + .init_doorbell_index = &vega20_doorbell_index_init, |
---|
| 1044 | + .get_pcie_usage = &vega20_get_pcie_usage, |
---|
| 1045 | + .need_reset_on_init = &soc15_need_reset_on_init, |
---|
| 1046 | + .get_pcie_replay_count = &soc15_get_pcie_replay_count, |
---|
| 1047 | + .supports_baco = &soc15_supports_baco, |
---|
| 1048 | + .pre_asic_init = &soc15_pre_asic_init, |
---|
609 | 1049 | }; |
---|
610 | 1050 | |
---|
611 | 1051 | static int soc15_common_early_init(void *handle) |
---|
612 | 1052 | { |
---|
| 1053 | +#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) |
---|
613 | 1054 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
---|
614 | 1055 | |
---|
| 1056 | + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; |
---|
| 1057 | + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; |
---|
615 | 1058 | adev->smc_rreg = NULL; |
---|
616 | 1059 | adev->smc_wreg = NULL; |
---|
617 | 1060 | adev->pcie_rreg = &soc15_pcie_rreg; |
---|
618 | 1061 | adev->pcie_wreg = &soc15_pcie_wreg; |
---|
| 1062 | + adev->pcie_rreg64 = &soc15_pcie_rreg64; |
---|
| 1063 | + adev->pcie_wreg64 = &soc15_pcie_wreg64; |
---|
619 | 1064 | adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; |
---|
620 | 1065 | adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; |
---|
621 | 1066 | adev->didt_rreg = &soc15_didt_rreg; |
---|
.. | .. |
---|
625 | 1070 | adev->se_cac_rreg = &soc15_se_cac_rreg; |
---|
626 | 1071 | adev->se_cac_wreg = &soc15_se_cac_wreg; |
---|
627 | 1072 | |
---|
628 | | - adev->asic_funcs = &soc15_asic_funcs; |
---|
629 | 1073 | |
---|
630 | | - adev->rev_id = soc15_get_rev_id(adev); |
---|
631 | 1074 | adev->external_rev_id = 0xFF; |
---|
632 | 1075 | switch (adev->asic_type) { |
---|
633 | 1076 | case CHIP_VEGA10: |
---|
| 1077 | + adev->asic_funcs = &soc15_asic_funcs; |
---|
634 | 1078 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
635 | 1079 | AMD_CG_SUPPORT_GFX_MGLS | |
---|
636 | 1080 | AMD_CG_SUPPORT_GFX_RLC_LS | |
---|
.. | .. |
---|
654 | 1098 | adev->external_rev_id = 0x1; |
---|
655 | 1099 | break; |
---|
656 | 1100 | case CHIP_VEGA12: |
---|
| 1101 | + adev->asic_funcs = &soc15_asic_funcs; |
---|
657 | 1102 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
658 | 1103 | AMD_CG_SUPPORT_GFX_MGLS | |
---|
659 | 1104 | AMD_CG_SUPPORT_GFX_CGCG | |
---|
.. | .. |
---|
676 | 1121 | adev->external_rev_id = adev->rev_id + 0x14; |
---|
677 | 1122 | break; |
---|
678 | 1123 | case CHIP_VEGA20: |
---|
| 1124 | + adev->asic_funcs = &vega20_asic_funcs; |
---|
679 | 1125 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
680 | 1126 | AMD_CG_SUPPORT_GFX_MGLS | |
---|
681 | 1127 | AMD_CG_SUPPORT_GFX_CGCG | |
---|
.. | .. |
---|
698 | 1144 | adev->external_rev_id = adev->rev_id + 0x28; |
---|
699 | 1145 | break; |
---|
700 | 1146 | case CHIP_RAVEN: |
---|
| 1147 | + adev->asic_funcs = &soc15_asic_funcs; |
---|
| 1148 | + if (adev->pdev->device == 0x15dd) |
---|
| 1149 | + adev->apu_flags |= AMD_APU_IS_RAVEN; |
---|
| 1150 | + if (adev->pdev->device == 0x15d8) |
---|
| 1151 | + adev->apu_flags |= AMD_APU_IS_PICASSO; |
---|
| 1152 | + if (adev->rev_id >= 0x8) |
---|
| 1153 | + adev->apu_flags |= AMD_APU_IS_RAVEN2; |
---|
| 1154 | + |
---|
| 1155 | + if (adev->apu_flags & AMD_APU_IS_RAVEN2) |
---|
| 1156 | + adev->external_rev_id = adev->rev_id + 0x79; |
---|
| 1157 | + else if (adev->apu_flags & AMD_APU_IS_PICASSO) |
---|
| 1158 | + adev->external_rev_id = adev->rev_id + 0x41; |
---|
| 1159 | + else if (adev->rev_id == 1) |
---|
| 1160 | + adev->external_rev_id = adev->rev_id + 0x20; |
---|
| 1161 | + else |
---|
| 1162 | + adev->external_rev_id = adev->rev_id + 0x01; |
---|
| 1163 | + |
---|
| 1164 | + if (adev->apu_flags & AMD_APU_IS_RAVEN2) { |
---|
| 1165 | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
| 1166 | + AMD_CG_SUPPORT_GFX_MGLS | |
---|
| 1167 | + AMD_CG_SUPPORT_GFX_CP_LS | |
---|
| 1168 | + AMD_CG_SUPPORT_GFX_3D_CGCG | |
---|
| 1169 | + AMD_CG_SUPPORT_GFX_3D_CGLS | |
---|
| 1170 | + AMD_CG_SUPPORT_GFX_CGCG | |
---|
| 1171 | + AMD_CG_SUPPORT_GFX_CGLS | |
---|
| 1172 | + AMD_CG_SUPPORT_BIF_LS | |
---|
| 1173 | + AMD_CG_SUPPORT_HDP_LS | |
---|
| 1174 | + AMD_CG_SUPPORT_ROM_MGCG | |
---|
| 1175 | + AMD_CG_SUPPORT_MC_MGCG | |
---|
| 1176 | + AMD_CG_SUPPORT_MC_LS | |
---|
| 1177 | + AMD_CG_SUPPORT_SDMA_MGCG | |
---|
| 1178 | + AMD_CG_SUPPORT_SDMA_LS | |
---|
| 1179 | + AMD_CG_SUPPORT_VCN_MGCG; |
---|
| 1180 | + |
---|
| 1181 | + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; |
---|
| 1182 | + } else if (adev->apu_flags & AMD_APU_IS_PICASSO) { |
---|
| 1183 | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
| 1184 | + AMD_CG_SUPPORT_GFX_MGLS | |
---|
| 1185 | + AMD_CG_SUPPORT_GFX_CP_LS | |
---|
| 1186 | + AMD_CG_SUPPORT_GFX_3D_CGLS | |
---|
| 1187 | + AMD_CG_SUPPORT_GFX_CGCG | |
---|
| 1188 | + AMD_CG_SUPPORT_GFX_CGLS | |
---|
| 1189 | + AMD_CG_SUPPORT_BIF_LS | |
---|
| 1190 | + AMD_CG_SUPPORT_HDP_LS | |
---|
| 1191 | + AMD_CG_SUPPORT_ROM_MGCG | |
---|
| 1192 | + AMD_CG_SUPPORT_MC_MGCG | |
---|
| 1193 | + AMD_CG_SUPPORT_MC_LS | |
---|
| 1194 | + AMD_CG_SUPPORT_SDMA_MGCG | |
---|
| 1195 | + AMD_CG_SUPPORT_SDMA_LS; |
---|
| 1196 | + |
---|
| 1197 | + /* |
---|
| 1198 | + * MMHUB PG needs to be disabled for Picasso for |
---|
| 1199 | + * stability reasons. |
---|
| 1200 | + */ |
---|
| 1201 | + adev->pg_flags = AMD_PG_SUPPORT_SDMA | |
---|
| 1202 | + AMD_PG_SUPPORT_VCN; |
---|
| 1203 | + } else { |
---|
| 1204 | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
| 1205 | + AMD_CG_SUPPORT_GFX_MGLS | |
---|
| 1206 | + AMD_CG_SUPPORT_GFX_RLC_LS | |
---|
| 1207 | + AMD_CG_SUPPORT_GFX_CP_LS | |
---|
| 1208 | + AMD_CG_SUPPORT_GFX_3D_CGLS | |
---|
| 1209 | + AMD_CG_SUPPORT_GFX_CGCG | |
---|
| 1210 | + AMD_CG_SUPPORT_GFX_CGLS | |
---|
| 1211 | + AMD_CG_SUPPORT_BIF_MGCG | |
---|
| 1212 | + AMD_CG_SUPPORT_BIF_LS | |
---|
| 1213 | + AMD_CG_SUPPORT_HDP_MGCG | |
---|
| 1214 | + AMD_CG_SUPPORT_HDP_LS | |
---|
| 1215 | + AMD_CG_SUPPORT_DRM_MGCG | |
---|
| 1216 | + AMD_CG_SUPPORT_DRM_LS | |
---|
| 1217 | + AMD_CG_SUPPORT_ROM_MGCG | |
---|
| 1218 | + AMD_CG_SUPPORT_MC_MGCG | |
---|
| 1219 | + AMD_CG_SUPPORT_MC_LS | |
---|
| 1220 | + AMD_CG_SUPPORT_SDMA_MGCG | |
---|
| 1221 | + AMD_CG_SUPPORT_SDMA_LS | |
---|
| 1222 | + AMD_CG_SUPPORT_VCN_MGCG; |
---|
| 1223 | + |
---|
| 1224 | + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; |
---|
| 1225 | + } |
---|
| 1226 | + break; |
---|
| 1227 | + case CHIP_ARCTURUS: |
---|
| 1228 | + adev->asic_funcs = &vega20_asic_funcs; |
---|
701 | 1229 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
702 | 1230 | AMD_CG_SUPPORT_GFX_MGLS | |
---|
703 | | - AMD_CG_SUPPORT_GFX_RLC_LS | |
---|
704 | | - AMD_CG_SUPPORT_GFX_CP_LS | |
---|
705 | | - AMD_CG_SUPPORT_GFX_3D_CGCG | |
---|
706 | | - AMD_CG_SUPPORT_GFX_3D_CGLS | |
---|
707 | 1231 | AMD_CG_SUPPORT_GFX_CGCG | |
---|
708 | 1232 | AMD_CG_SUPPORT_GFX_CGLS | |
---|
709 | | - AMD_CG_SUPPORT_BIF_MGCG | |
---|
710 | | - AMD_CG_SUPPORT_BIF_LS | |
---|
| 1233 | + AMD_CG_SUPPORT_GFX_CP_LS | |
---|
711 | 1234 | AMD_CG_SUPPORT_HDP_MGCG | |
---|
712 | 1235 | AMD_CG_SUPPORT_HDP_LS | |
---|
713 | | - AMD_CG_SUPPORT_DRM_MGCG | |
---|
714 | | - AMD_CG_SUPPORT_DRM_LS | |
---|
715 | | - AMD_CG_SUPPORT_ROM_MGCG | |
---|
716 | | - AMD_CG_SUPPORT_MC_MGCG | |
---|
717 | | - AMD_CG_SUPPORT_MC_LS | |
---|
718 | 1236 | AMD_CG_SUPPORT_SDMA_MGCG | |
---|
719 | 1237 | AMD_CG_SUPPORT_SDMA_LS | |
---|
720 | | - AMD_CG_SUPPORT_VCN_MGCG; |
---|
| 1238 | + AMD_CG_SUPPORT_MC_MGCG | |
---|
| 1239 | + AMD_CG_SUPPORT_MC_LS | |
---|
| 1240 | + AMD_CG_SUPPORT_IH_CG | |
---|
| 1241 | + AMD_CG_SUPPORT_VCN_MGCG | |
---|
| 1242 | + AMD_CG_SUPPORT_JPEG_MGCG; |
---|
| 1243 | + adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG; |
---|
| 1244 | + adev->external_rev_id = adev->rev_id + 0x32; |
---|
| 1245 | + break; |
---|
| 1246 | + case CHIP_RENOIR: |
---|
| 1247 | + adev->asic_funcs = &soc15_asic_funcs; |
---|
| 1248 | + if ((adev->pdev->device == 0x1636) || |
---|
| 1249 | + (adev->pdev->device == 0x164c)) |
---|
| 1250 | + adev->apu_flags |= AMD_APU_IS_RENOIR; |
---|
| 1251 | + else |
---|
| 1252 | + adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; |
---|
721 | 1253 | |
---|
722 | | - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; |
---|
723 | | - |
---|
724 | | - if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) |
---|
725 | | - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | |
---|
726 | | - AMD_PG_SUPPORT_CP | |
---|
727 | | - AMD_PG_SUPPORT_RLC_SMU_HS; |
---|
728 | | - |
---|
729 | | - adev->external_rev_id = 0x1; |
---|
| 1254 | + if (adev->apu_flags & AMD_APU_IS_RENOIR) |
---|
| 1255 | + adev->external_rev_id = adev->rev_id + 0x91; |
---|
| 1256 | + else |
---|
| 1257 | + adev->external_rev_id = adev->rev_id + 0xa1; |
---|
| 1258 | + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
---|
| 1259 | + AMD_CG_SUPPORT_GFX_MGLS | |
---|
| 1260 | + AMD_CG_SUPPORT_GFX_3D_CGCG | |
---|
| 1261 | + AMD_CG_SUPPORT_GFX_3D_CGLS | |
---|
| 1262 | + AMD_CG_SUPPORT_GFX_CGCG | |
---|
| 1263 | + AMD_CG_SUPPORT_GFX_CGLS | |
---|
| 1264 | + AMD_CG_SUPPORT_GFX_CP_LS | |
---|
| 1265 | + AMD_CG_SUPPORT_MC_MGCG | |
---|
| 1266 | + AMD_CG_SUPPORT_MC_LS | |
---|
| 1267 | + AMD_CG_SUPPORT_SDMA_MGCG | |
---|
| 1268 | + AMD_CG_SUPPORT_SDMA_LS | |
---|
| 1269 | + AMD_CG_SUPPORT_BIF_LS | |
---|
| 1270 | + AMD_CG_SUPPORT_HDP_LS | |
---|
| 1271 | + AMD_CG_SUPPORT_ROM_MGCG | |
---|
| 1272 | + AMD_CG_SUPPORT_VCN_MGCG | |
---|
| 1273 | + AMD_CG_SUPPORT_JPEG_MGCG | |
---|
| 1274 | + AMD_CG_SUPPORT_IH_CG | |
---|
| 1275 | + AMD_CG_SUPPORT_ATHUB_LS | |
---|
| 1276 | + AMD_CG_SUPPORT_ATHUB_MGCG | |
---|
| 1277 | + AMD_CG_SUPPORT_DF_MGCG; |
---|
| 1278 | + adev->pg_flags = AMD_PG_SUPPORT_SDMA | |
---|
| 1279 | + AMD_PG_SUPPORT_VCN | |
---|
| 1280 | + AMD_PG_SUPPORT_JPEG | |
---|
| 1281 | + AMD_PG_SUPPORT_VCN_DPG; |
---|
730 | 1282 | break; |
---|
731 | 1283 | default: |
---|
732 | 1284 | /* FIXME: not supported yet */ |
---|
.. | .. |
---|
744 | 1296 | static int soc15_common_late_init(void *handle) |
---|
745 | 1297 | { |
---|
746 | 1298 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
---|
| 1299 | + int r = 0; |
---|
747 | 1300 | |
---|
748 | 1301 | if (amdgpu_sriov_vf(adev)) |
---|
749 | 1302 | xgpu_ai_mailbox_get_irq(adev); |
---|
750 | 1303 | |
---|
751 | | - return 0; |
---|
| 1304 | + if (adev->asic_funcs && |
---|
| 1305 | + adev->asic_funcs->reset_hdp_ras_error_count) |
---|
| 1306 | + adev->asic_funcs->reset_hdp_ras_error_count(adev); |
---|
| 1307 | + |
---|
| 1308 | + if (adev->nbio.funcs->ras_late_init) |
---|
| 1309 | + r = adev->nbio.funcs->ras_late_init(adev); |
---|
| 1310 | + |
---|
| 1311 | + return r; |
---|
752 | 1312 | } |
---|
753 | 1313 | |
---|
754 | 1314 | static int soc15_common_sw_init(void *handle) |
---|
.. | .. |
---|
758 | 1318 | if (amdgpu_sriov_vf(adev)) |
---|
759 | 1319 | xgpu_ai_mailbox_add_irq_id(adev); |
---|
760 | 1320 | |
---|
| 1321 | + adev->df.funcs->sw_init(adev); |
---|
| 1322 | + |
---|
761 | 1323 | return 0; |
---|
762 | 1324 | } |
---|
763 | 1325 | |
---|
764 | 1326 | static int soc15_common_sw_fini(void *handle) |
---|
765 | 1327 | { |
---|
| 1328 | + struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
---|
| 1329 | + |
---|
| 1330 | + amdgpu_nbio_ras_fini(adev); |
---|
| 1331 | + adev->df.funcs->sw_fini(adev); |
---|
766 | 1332 | return 0; |
---|
| 1333 | +} |
---|
| 1334 | + |
---|
| 1335 | +static void soc15_doorbell_range_init(struct amdgpu_device *adev) |
---|
| 1336 | +{ |
---|
| 1337 | + int i; |
---|
| 1338 | + struct amdgpu_ring *ring; |
---|
| 1339 | + |
---|
| 1340 | + /* sdma/ih doorbell range are programed by hypervisor */ |
---|
| 1341 | + if (!amdgpu_sriov_vf(adev)) { |
---|
| 1342 | + for (i = 0; i < adev->sdma.num_instances; i++) { |
---|
| 1343 | + ring = &adev->sdma.instance[i].ring; |
---|
| 1344 | + adev->nbio.funcs->sdma_doorbell_range(adev, i, |
---|
| 1345 | + ring->use_doorbell, ring->doorbell_index, |
---|
| 1346 | + adev->doorbell_index.sdma_doorbell_range); |
---|
| 1347 | + } |
---|
| 1348 | + |
---|
| 1349 | + adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, |
---|
| 1350 | + adev->irq.ih.doorbell_index); |
---|
| 1351 | + } |
---|
767 | 1352 | } |
---|
768 | 1353 | |
---|
769 | 1354 | static int soc15_common_hw_init(void *handle) |
---|
.. | .. |
---|
775 | 1360 | /* enable aspm */ |
---|
776 | 1361 | soc15_program_aspm(adev); |
---|
777 | 1362 | /* setup nbio registers */ |
---|
778 | | - adev->nbio_funcs->init_registers(adev); |
---|
| 1363 | + adev->nbio.funcs->init_registers(adev); |
---|
| 1364 | + /* remap HDP registers to a hole in mmio space, |
---|
| 1365 | + * for the purpose of expose those registers |
---|
| 1366 | + * to process space |
---|
| 1367 | + */ |
---|
| 1368 | + if (adev->nbio.funcs->remap_hdp_registers) |
---|
| 1369 | + adev->nbio.funcs->remap_hdp_registers(adev); |
---|
| 1370 | + |
---|
779 | 1371 | /* enable the doorbell aperture */ |
---|
780 | 1372 | soc15_enable_doorbell_aperture(adev, true); |
---|
| 1373 | + /* HW doorbell routing policy: doorbell writing not |
---|
| 1374 | + * in SDMA/IH/MM/ACV range will be routed to CP. So |
---|
| 1375 | + * we need to init SDMA/IH/MM/ACV doorbell range prior |
---|
| 1376 | + * to CP ip block init and ring test. |
---|
| 1377 | + */ |
---|
| 1378 | + soc15_doorbell_range_init(adev); |
---|
781 | 1379 | |
---|
782 | 1380 | return 0; |
---|
783 | 1381 | } |
---|
.. | .. |
---|
790 | 1388 | soc15_enable_doorbell_aperture(adev, false); |
---|
791 | 1389 | if (amdgpu_sriov_vf(adev)) |
---|
792 | 1390 | xgpu_ai_mailbox_put_irq(adev); |
---|
| 1391 | + |
---|
| 1392 | + if (adev->nbio.ras_if && |
---|
| 1393 | + amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { |
---|
| 1394 | + if (adev->nbio.funcs->init_ras_controller_interrupt) |
---|
| 1395 | + amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); |
---|
| 1396 | + if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) |
---|
| 1397 | + amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); |
---|
| 1398 | + } |
---|
793 | 1399 | |
---|
794 | 1400 | return 0; |
---|
795 | 1401 | } |
---|
.. | .. |
---|
827 | 1433 | { |
---|
828 | 1434 | uint32_t def, data; |
---|
829 | 1435 | |
---|
830 | | - def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); |
---|
| 1436 | + if (adev->asic_type == CHIP_VEGA20 || |
---|
| 1437 | + adev->asic_type == CHIP_ARCTURUS || |
---|
| 1438 | + adev->asic_type == CHIP_RENOIR) { |
---|
| 1439 | + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL)); |
---|
831 | 1440 | |
---|
832 | | - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
---|
833 | | - data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; |
---|
834 | | - else |
---|
835 | | - data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; |
---|
| 1441 | + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
---|
| 1442 | + data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | |
---|
| 1443 | + HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | |
---|
| 1444 | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | |
---|
| 1445 | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK; |
---|
| 1446 | + else |
---|
| 1447 | + data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | |
---|
| 1448 | + HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | |
---|
| 1449 | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | |
---|
| 1450 | + HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK); |
---|
836 | 1451 | |
---|
837 | | - if (def != data) |
---|
838 | | - WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); |
---|
| 1452 | + if (def != data) |
---|
| 1453 | + WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data); |
---|
| 1454 | + } else { |
---|
| 1455 | + def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); |
---|
| 1456 | + |
---|
| 1457 | + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
---|
| 1458 | + data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; |
---|
| 1459 | + else |
---|
| 1460 | + data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; |
---|
| 1461 | + |
---|
| 1462 | + if (def != data) |
---|
| 1463 | + WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); |
---|
| 1464 | + } |
---|
839 | 1465 | } |
---|
840 | 1466 | |
---|
841 | 1467 | static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) |
---|
.. | .. |
---|
912 | 1538 | case CHIP_VEGA10: |
---|
913 | 1539 | case CHIP_VEGA12: |
---|
914 | 1540 | case CHIP_VEGA20: |
---|
915 | | - adev->nbio_funcs->update_medium_grain_clock_gating(adev, |
---|
916 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
917 | | - adev->nbio_funcs->update_medium_grain_light_sleep(adev, |
---|
918 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1541 | + adev->nbio.funcs->update_medium_grain_clock_gating(adev, |
---|
| 1542 | + state == AMD_CG_STATE_GATE); |
---|
| 1543 | + adev->nbio.funcs->update_medium_grain_light_sleep(adev, |
---|
| 1544 | + state == AMD_CG_STATE_GATE); |
---|
919 | 1545 | soc15_update_hdp_light_sleep(adev, |
---|
920 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1546 | + state == AMD_CG_STATE_GATE); |
---|
921 | 1547 | soc15_update_drm_clock_gating(adev, |
---|
922 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1548 | + state == AMD_CG_STATE_GATE); |
---|
923 | 1549 | soc15_update_drm_light_sleep(adev, |
---|
924 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1550 | + state == AMD_CG_STATE_GATE); |
---|
925 | 1551 | soc15_update_rom_medium_grain_clock_gating(adev, |
---|
926 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
927 | | - adev->df_funcs->update_medium_grain_clock_gating(adev, |
---|
928 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1552 | + state == AMD_CG_STATE_GATE); |
---|
| 1553 | + adev->df.funcs->update_medium_grain_clock_gating(adev, |
---|
| 1554 | + state == AMD_CG_STATE_GATE); |
---|
929 | 1555 | break; |
---|
930 | 1556 | case CHIP_RAVEN: |
---|
931 | | - adev->nbio_funcs->update_medium_grain_clock_gating(adev, |
---|
932 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
933 | | - adev->nbio_funcs->update_medium_grain_light_sleep(adev, |
---|
934 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1557 | + case CHIP_RENOIR: |
---|
| 1558 | + adev->nbio.funcs->update_medium_grain_clock_gating(adev, |
---|
| 1559 | + state == AMD_CG_STATE_GATE); |
---|
| 1560 | + adev->nbio.funcs->update_medium_grain_light_sleep(adev, |
---|
| 1561 | + state == AMD_CG_STATE_GATE); |
---|
935 | 1562 | soc15_update_hdp_light_sleep(adev, |
---|
936 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1563 | + state == AMD_CG_STATE_GATE); |
---|
937 | 1564 | soc15_update_drm_clock_gating(adev, |
---|
938 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1565 | + state == AMD_CG_STATE_GATE); |
---|
939 | 1566 | soc15_update_drm_light_sleep(adev, |
---|
940 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1567 | + state == AMD_CG_STATE_GATE); |
---|
941 | 1568 | soc15_update_rom_medium_grain_clock_gating(adev, |
---|
942 | | - state == AMD_CG_STATE_GATE ? true : false); |
---|
| 1569 | + state == AMD_CG_STATE_GATE); |
---|
| 1570 | + break; |
---|
| 1571 | + case CHIP_ARCTURUS: |
---|
| 1572 | + soc15_update_hdp_light_sleep(adev, |
---|
| 1573 | + state == AMD_CG_STATE_GATE); |
---|
943 | 1574 | break; |
---|
944 | 1575 | default: |
---|
945 | 1576 | break; |
---|
.. | .. |
---|
955 | 1586 | if (amdgpu_sriov_vf(adev)) |
---|
956 | 1587 | *flags = 0; |
---|
957 | 1588 | |
---|
958 | | - adev->nbio_funcs->get_clockgating_state(adev, flags); |
---|
| 1589 | + adev->nbio.funcs->get_clockgating_state(adev, flags); |
---|
959 | 1590 | |
---|
960 | 1591 | /* AMD_CG_SUPPORT_HDP_LS */ |
---|
961 | 1592 | data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); |
---|
.. | .. |
---|
977 | 1608 | if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) |
---|
978 | 1609 | *flags |= AMD_CG_SUPPORT_ROM_MGCG; |
---|
979 | 1610 | |
---|
980 | | - adev->df_funcs->get_clockgating_state(adev, flags); |
---|
| 1611 | + adev->df.funcs->get_clockgating_state(adev, flags); |
---|
981 | 1612 | } |
---|
982 | 1613 | |
---|
983 | 1614 | static int soc15_common_set_powergating_state(void *handle, |
---|