2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -21,50 +21,55 @@
  *
  */
 #include "amdgpu.h"
+#include "amdgpu_ras.h"
 #include "mmhub_v1_0.h"
 
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "mmhub/mmhub_1_0_default.h"
-#include "athub/athub_1_0_offset.h"
-#include "athub/athub_1_0_sh_mask.h"
 #include "vega10_enum.h"
-
+#include "soc15.h"
 #include "soc15_common.h"
 
 #define mmDAGB0_CNTL_MISC2_RV 0x008f
 #define mmDAGB0_CNTL_MISC2_RV_BASE_IDX 0
 
-u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
+static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
 {
 	u64 base = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE);
+	u64 top = RREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP);
 
 	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
 	base <<= 24;
 
+	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
+	top <<= 24;
+
+	adev->gmc.fb_start = base;
+	adev->gmc.fb_end = top;
+
 	return base;
 }
 
-static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+				uint64_t page_table_base)
 {
-	uint64_t value;
+	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 
-	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-	value = adev->gart.table_addr - adev->gmc.vram_start +
-		adev->vm_manager.vram_base_offset;
-	value &= 0x0000FFFFFFFFF000ULL;
-	value |= 0x1; /* valid bit */
+	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+			    hub->ctx_addr_distance * vmid,
+			    lower_32_bits(page_table_base));
 
-	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
-		     lower_32_bits(value));
-
-	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
-		     upper_32_bits(value));
+	WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+			    hub->ctx_addr_distance * vmid,
+			    upper_32_bits(page_table_base));
 }
 
 static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 {
-	mmhub_v1_0_init_gart_pt_regs(adev);
+	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+	mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
@@ -82,16 +87,31 @@
 	uint64_t value;
 	uint32_t tmp;
 
-	/* Disable AGP. */
+	/* Program the AGP BAR */
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BASE, 0);
-	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, 0);
-	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, 0x00FFFFFF);
+	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+	WREG32_SOC15(MMHUB, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
 
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     adev->gmc.vram_start >> 18);
-	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-		     adev->gmc.vram_end >> 18);
+		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+		/*
+		 * Raven2 has a HW issue that it is unable to use the vram which
+		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+		 * workaround that increase system aperture high address (add 1)
+		 * to get rid of the VM fault and hardware hang.
+		 */
+		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			     max((adev->gmc.fb_end >> 18) + 0x1,
				 adev->gmc.agp_end >> 18));
+	else
+		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+	if (amdgpu_sriov_vf(adev))
+		return;
 
 	/* Set default page address. */
 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
@@ -138,6 +158,9 @@
 {
 	uint32_t tmp;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* Setup L2 cache */
 	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -145,7 +168,7 @@
 	/* XXX for emulation, Refer to closed source code.*/
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
 			    0);
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
@@ -155,6 +178,7 @@
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
 
+	tmp = mmVM_L2_CNTL3_DEFAULT;
 	if (adev->gmc.translate_further) {
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
@@ -179,11 +203,16 @@
 	tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
 }
 
 static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
 {
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
 		     0XFFFFFFFF);
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
@@ -202,6 +231,7 @@
 
 static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 {
+	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 	unsigned num_level, block_size;
 	uint32_t tmp;
 	int i;
@@ -238,263 +268,49 @@
 					    block_size);
 		/* Send no-retry XNACK on fault to suppress VM fault storm. */
 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
-				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
-		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
-		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
-		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
-		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i*2,
-				    lower_32_bits(adev->vm_manager.max_pfn - 1));
-		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i*2,
-				    upper_32_bits(adev->vm_manager.max_pfn - 1));
+				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+				    !adev->gmc.noretry);
+		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
+				    i * hub->ctx_distance, tmp);
+		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+				    i * hub->ctx_addr_distance, 0);
+		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+				    i * hub->ctx_addr_distance, 0);
+		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+				    i * hub->ctx_addr_distance,
+				    lower_32_bits(adev->vm_manager.max_pfn - 1));
+		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+				    i * hub->ctx_addr_distance,
+				    upper_32_bits(adev->vm_manager.max_pfn - 1));
 	}
 }
 
 static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 {
+	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 	unsigned i;
 
 	for (i = 0; i < 18; ++i) {
 		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
-				    2 * i, 0xffffffff);
+				    i * hub->eng_addr_distance, 0xffffffff);
 		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
-				    2 * i, 0x1f);
+				    i * hub->eng_addr_distance, 0x1f);
 	}
 }
 
-struct pctl_data {
-	uint32_t index;
-	uint32_t data;
-};
-
-static const struct pctl_data pctl0_data[] = {
-	{0x0, 0x7a640},
-	{0x9, 0x2a64a},
-	{0xd, 0x2a680},
-	{0x11, 0x6a684},
-	{0x19, 0xea68e},
-	{0x29, 0xa69e},
-	{0x2b, 0x0010a6c0},
-	{0x3d, 0x83a707},
-	{0xc2, 0x8a7a4},
-	{0xcc, 0x1a7b8},
-	{0xcf, 0xfa7cc},
-	{0xe0, 0x17a7dd},
-	{0xf9, 0xa7dc},
-	{0xfb, 0x12a7f5},
-	{0x10f, 0xa808},
-	{0x111, 0x12a810},
-	{0x125, 0x7a82c}
-};
-#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
-
-#define PCTL0_RENG_EXEC_END_PTR 0x12d
-#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
-#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
-
-static const struct pctl_data pctl1_data[] = {
-	{0x0, 0x39a000},
-	{0x3b, 0x44a040},
-	{0x81, 0x2a08d},
-	{0x85, 0x6ba094},
-	{0xf2, 0x18a100},
-	{0x10c, 0x4a132},
-	{0x112, 0xca141},
-	{0x120, 0x2fa158},
-	{0x151, 0x17a1d0},
-	{0x16a, 0x1a1e9},
-	{0x16d, 0x13a1ec},
-	{0x182, 0x7a201},
-	{0x18b, 0x3a20a},
-	{0x190, 0x7a580},
-	{0x199, 0xa590},
-	{0x19b, 0x4a594},
-	{0x1a1, 0x1a59c},
-	{0x1a4, 0x7a82c},
-	{0x1ad, 0xfa7cc},
-	{0x1be, 0x17a7dd},
-	{0x1d7, 0x12a810},
-	{0x1eb, 0x4000a7e1},
-	{0x1ec, 0x5000a7f5},
-	{0x1ed, 0x4000a7e2},
-	{0x1ee, 0x5000a7dc},
-	{0x1ef, 0x4000a7e3},
-	{0x1f0, 0x5000a7f6},
-	{0x1f1, 0x5000a7e4}
-};
-#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
-
-#define PCTL1_RENG_EXEC_END_PTR 0x1f1
-#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
-#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d
-#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580
-#define PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT 0xa59d
-#define PCTL1_STCTRL_REG_SAVE_RANGE2_BASE 0xa82c
-#define PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT 0xa833
-
-static void mmhub_v1_0_power_gating_write_save_ranges(struct amdgpu_device *adev)
-{
-	uint32_t tmp = 0;
-
-	/* PCTL0_STCTRL_REGISTER_SAVE_RANGE0 */
-	tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
-			    STCTRL_REGISTER_SAVE_BASE,
-			    PCTL0_STCTRL_REG_SAVE_RANGE0_BASE);
-	tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0,
-			    STCTRL_REGISTER_SAVE_LIMIT,
-			    PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT);
-	WREG32_SOC15(MMHUB, 0, mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0, tmp);
-
-	/* PCTL1_STCTRL_REGISTER_SAVE_RANGE0 */
-	tmp = 0;
-	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
-			    STCTRL_REGISTER_SAVE_BASE,
-			    PCTL1_STCTRL_REG_SAVE_RANGE0_BASE);
-	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0,
-			    STCTRL_REGISTER_SAVE_LIMIT,
-			    PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT);
-	WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0, tmp);
-
-	/* PCTL1_STCTRL_REGISTER_SAVE_RANGE1 */
-	tmp = 0;
-	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
-			    STCTRL_REGISTER_SAVE_BASE,
-			    PCTL1_STCTRL_REG_SAVE_RANGE1_BASE);
-	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1,
-			    STCTRL_REGISTER_SAVE_LIMIT,
-			    PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT);
-	WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1, tmp);
-
-	/* PCTL1_STCTRL_REGISTER_SAVE_RANGE2 */
-	tmp = 0;
-	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
-			    STCTRL_REGISTER_SAVE_BASE,
-			    PCTL1_STCTRL_REG_SAVE_RANGE2_BASE);
-	tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2,
-			    STCTRL_REGISTER_SAVE_LIMIT,
-			    PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT);
-	WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2, tmp);
-}
-
-void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
-{
-	uint32_t pctl0_misc = 0;
-	uint32_t pctl0_reng_execute = 0;
-	uint32_t pctl1_misc = 0;
-	uint32_t pctl1_reng_execute = 0;
-	int i = 0;
-
-	if (amdgpu_sriov_vf(adev))
-		return;
-
-	/****************** pctl0 **********************/
-	pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC);
-	pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
-
-	/* Light sleep must be disabled before writing to pctl0 registers */
-	pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
-	WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
-
-	/* Write data used to access ram of register engine */
-	for (i = 0; i < PCTL0_DATA_LEN; i++) {
-		WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_INDEX,
-			     pctl0_data[i].index);
-		WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_DATA,
-			     pctl0_data[i].data);
-	}
-
-	/* Re-enable light sleep */
-	pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
-	WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
-
-	/****************** pctl1 **********************/
-	pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
-	pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
-
-	/* Light sleep must be disabled before writing to pctl1 registers */
-	pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
-	WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
-
-	/* Write data used to access ram of register engine */
-	for (i = 0; i < PCTL1_DATA_LEN; i++) {
-		WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_INDEX,
-			     pctl1_data[i].index);
-		WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_DATA,
-			     pctl1_data[i].data);
-	}
-
-	/* Re-enable light sleep */
-	pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
-	WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
-
-	mmhub_v1_0_power_gating_write_save_ranges(adev);
-
-	/* Set the reng execute end ptr for pctl0 */
-	pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
-					   PCTL0_RENG_EXECUTE,
-					   RENG_EXECUTE_END_PTR,
-					   PCTL0_RENG_EXEC_END_PTR);
-	WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
-
-	/* Set the reng execute end ptr for pctl1 */
-	pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
-					   PCTL1_RENG_EXECUTE,
-					   RENG_EXECUTE_END_PTR,
-					   PCTL1_RENG_EXEC_END_PTR);
-	WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
-}
-
-void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
+static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
 				bool enable)
 {
-	uint32_t pctl0_reng_execute = 0;
-	uint32_t pctl1_reng_execute = 0;
-
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
-	pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
-
 	if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
-		pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
-						   PCTL0_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_PWR_UP, 1);
-		pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
-						   PCTL0_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_REG_UPDATE, 1);
-		WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
 
-		pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
-						   PCTL1_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_PWR_UP, 1);
-		pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
-						   PCTL1_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_REG_UPDATE, 1);
-		WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
-
-		if (adev->powerplay.pp_funcs->set_powergating_by_smu)
-			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
-
-	} else {
-		pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
-						   PCTL0_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_PWR_UP, 0);
-		pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
-						   PCTL0_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_REG_UPDATE, 0);
-		WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
-
-		pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
-						   PCTL1_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_PWR_UP, 0);
-		pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
-						   PCTL1_RENG_EXECUTE,
-						   RENG_EXECUTE_ON_REG_UPDATE, 0);
-		WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
 	}
 }
 
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		/*
@@ -522,14 +338,16 @@
 	return 0;
 }
 
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
 {
+	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 	u32 tmp;
 	u32 i;
 
 	/* Disable all tables */
 	for (i = 0; i < 16; i++)
-		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL, i, 0);
+		WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_CNTL,
+				    i * hub->ctx_distance, 0);
 
 	/* Setup TLB control */
 	tmp = RREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL);
@@ -540,11 +358,13 @@
 			    0);
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
 
-	/* Setup L2 cache */
-	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
-	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
-	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
-	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
+	if (!amdgpu_sriov_vf(adev)) {
+		/* Setup L2 cache */
+		tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
+		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
+		WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, 0);
+	}
 }
 
 /**
@@ -553,9 +373,13 @@
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 {
 	u32 tmp;
+
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
 			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
@@ -591,9 +415,9 @@
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
-void mmhub_v1_0_init(struct amdgpu_device *adev)
+static void mmhub_v1_0_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB];
+	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 
 	hub->ctx0_ptb_addr_lo32 =
 		SOC15_REG_OFFSET(MMHUB, 0,
@@ -601,6 +425,8 @@
 	hub->ctx0_ptb_addr_hi32 =
 		SOC15_REG_OFFSET(MMHUB, 0,
 				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+	hub->vm_inv_eng0_sem =
+		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
 	hub->vm_inv_eng0_req =
 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
 	hub->vm_inv_eng0_ack =
@@ -612,6 +438,12 @@
 	hub->vm_l2_pro_fault_cntl =
 		SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
 
+	hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
+	hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+		mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+	hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
+	hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+		mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
 }
 
 static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -677,22 +509,6 @@
 		WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2);
 }
 
-static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
-						   bool enable)
-{
-	uint32_t def, data;
-
-	def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
-		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
-	else
-		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
-
-	if (def != data)
-		WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
 static void mmhub_v1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
 						       bool enable)
 {
@@ -709,24 +525,7 @@
 		WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data);
 }
 
-static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
-						  bool enable)
-{
-	uint32_t def, data;
-
-	def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-
-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
-	    (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
-	else
-		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
-
-	if(def != data)
-		WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
-}
-
-int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
+static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
 			       enum amd_clockgating_state state)
 {
 	if (amdgpu_sriov_vf(adev))
@@ -737,14 +536,11 @@
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	case CHIP_RAVEN:
+	case CHIP_RENOIR:
 		mmhub_v1_0_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
-		athub_update_medium_grain_clock_gating(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		mmhub_v1_0_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
-		athub_update_medium_grain_light_sleep(adev,
-				state == AMD_CG_STATE_GATE ? true : false);
+				state == AMD_CG_STATE_GATE);
 		break;
 	default:
 		break;
@@ -753,20 +549,245 @@
 	return 0;
 }
 
-void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
-	int data;
+	int data, data1;
 
 	if (amdgpu_sriov_vf(adev))
 		*flags = 0;
 
+	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
+
+	data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+
 	/* AMD_CG_SUPPORT_MC_MGCG */
-	data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
-	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+	if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
+	    !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+		       DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
 		*flags |= AMD_CG_SUPPORT_MC_MGCG;
 
 	/* AMD_CG_SUPPORT_MC_LS */
-	data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG);
 	if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
 		*flags |= AMD_CG_SUPPORT_MC_LS;
 }
+
+static const struct soc15_ras_field_entry mmhub_v1_0_ras_fields[] = {
+	{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA0_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, RRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, WRET_TAGMEM_DED_COUNT),
+	},
+	{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, DRAMWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IORD_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_CMDMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT_VG20, IOWR_DATAMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_CMDMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_SEC_COUNT),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_DATAMEM_DED_COUNT),
+	},
+	{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIRD_PAGEMEM_SED_COUNT),
+	0, 0,
+	},
+	{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20),
+	SOC15_REG_FIELD(MMEA1_EDC_CNT2_VG20, GMIWR_PAGEMEM_SED_COUNT),
+	0, 0,
+	}
+};
+
+static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT_VG20), 0, 0, 0},
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2_VG20), 0, 0, 0},
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT_VG20), 0, 0, 0},
+	{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
+};
+
+static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
+	const struct soc15_reg_entry *reg,
+	uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+{
+	uint32_t i;
+	uint32_t sec_cnt, ded_cnt;
+
+	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_ras_fields); i++) {
+		if(mmhub_v1_0_ras_fields[i].reg_offset != reg->reg_offset)
+			continue;
+
+		sec_cnt = (value &
+				mmhub_v1_0_ras_fields[i].sec_count_mask) >>
+				mmhub_v1_0_ras_fields[i].sec_count_shift;
+		if (sec_cnt) {
+			dev_info(adev->dev,
+				"MMHUB SubBlock %s, SEC %d\n",
+				mmhub_v1_0_ras_fields[i].name,
+				sec_cnt);
+			*sec_count += sec_cnt;
+		}
+
+		ded_cnt = (value &
+				mmhub_v1_0_ras_fields[i].ded_count_mask) >>
+				mmhub_v1_0_ras_fields[i].ded_count_shift;
+		if (ded_cnt) {
+			dev_info(adev->dev,
+				"MMHUB SubBlock %s, DED %d\n",
+				mmhub_v1_0_ras_fields[i].name,
+				ded_cnt);
+			*ded_count += ded_cnt;
+		}
+	}
+
+	return 0;
+}
+
+static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
+					   void *ras_error_status)
+{
+	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+	uint32_t sec_count = 0, ded_count = 0;
+	uint32_t i;
+	uint32_t reg_value;
+
+	err_data->ue_count = 0;
+	err_data->ce_count = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) {
+		reg_value =
+			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+		if (reg_value)
+			mmhub_v1_0_get_ras_error_count(adev,
+				&mmhub_v1_0_edc_cnt_regs[i],
+				reg_value, &sec_count, &ded_count);
+	}
+
+	err_data->ce_count += sec_count;
+	err_data->ue_count += ded_count;
+}
+
+static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+	uint32_t i;
+
+	/* read back edc counter registers to reset the counters to 0 */
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+		for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
+			RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+	}
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+	.ras_late_init = amdgpu_mmhub_ras_late_init,
+	.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+	.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+	.get_fb_location = mmhub_v1_0_get_fb_location,
+	.init = mmhub_v1_0_init,
+	.gart_enable = mmhub_v1_0_gart_enable,
+	.set_fault_enable_default = mmhub_v1_0_set_fault_enable_default,
+	.gart_disable = mmhub_v1_0_gart_disable,
+	.set_clockgating = mmhub_v1_0_set_clockgating,
+	.get_clockgating = mmhub_v1_0_get_clockgating,
+	.setup_vm_pt_regs = mmhub_v1_0_setup_vm_pt_regs,
+	.update_power_gating = mmhub_v1_0_update_power_gating,
+};
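
For orientation: the previously exported mmhub_v1_0_* entry points become static in this patch because callers are expected to reach them through the new mmhub_v1_0_funcs table. A minimal sketch of that calling pattern follows, assuming the usual amdgpu convention of a funcs pointer hung off the device; the field adev->mmhub.funcs and the helper names below are illustrative assumptions, not part of this patch.

/*
 * Illustrative sketch only -- not part of the patch above. It assumes the
 * common amdgpu pattern where the GMC IP block selects an MMHUB backend by
 * storing a pointer to its funcs table and then calls through that pointer.
 */
#include "amdgpu.h"
#include "mmhub_v1_0.h"

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	/* per-ASIC selection of the MMHUB backend (assumed field name) */
	adev->mmhub.funcs = &mmhub_v1_0_funcs;
}

static void gmc_v9_0_example_caller(struct amdgpu_device *adev,
				    uint32_t vmid, uint64_t pt_base)
{
	/* callers no longer reference mmhub_v1_0_* symbols directly */
	adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, pt_base);
}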