2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -20,15 +20,14 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include <drm/drmP.h>
+
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
 #include "atom.h"
 #include "atombios.h"
-
-#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
+#include "soc15_hw_ip.h"
 
 bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
 {
@@ -90,9 +89,9 @@
                         (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
                         ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
                         /* Firmware request VRAM reservation for SR-IOV */
-                        adev->fw_vram_usage.start_offset = (start_addr &
+                        adev->mman.fw_vram_usage_start_offset = (start_addr &
                                 (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
-                        adev->fw_vram_usage.size = size << 10;
+                        adev->mman.fw_vram_usage_size = size << 10;
                         /* Use the default scratch size */
                         usage_bytes = 0;
                 } else {
@@ -112,42 +111,27 @@
 
 union igp_info {
         struct atom_integrated_system_info_v1_11 v11;
+        struct atom_integrated_system_info_v1_12 v12;
 };
 
 union umc_info {
         struct atom_umc_info_v3_1 v31;
 };
-/*
- * Return vram width from integrated system info table, if available,
- * or 0 if not.
- */
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
-{
-        struct amdgpu_mode_info *mode_info = &adev->mode_info;
-        int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-                        integratedsysteminfo);
-        u16 data_offset, size;
-        union igp_info *igp_info;
-        u8 frev, crev;
 
-        /* get any igp specific overrides */
-        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
-                        &frev, &crev, &data_offset)) {
-                igp_info = (union igp_info *)
-                        (mode_info->atom_context->bios + data_offset);
-                switch (crev) {
-                case 11:
-                        return igp_info->v11.umachannelnumber * 64;
-                default:
-                        return 0;
-                }
-        }
+union vram_info {
+        struct atom_vram_info_header_v2_3 v23;
+        struct atom_vram_info_header_v2_4 v24;
+        struct atom_vram_info_header_v2_5 v25;
+};
 
-        return 0;
-}
+union vram_module {
+        struct atom_vram_module_v9 v9;
+        struct atom_vram_module_v10 v10;
+        struct atom_vram_module_v11 v11;
+};
 
-static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
-                        int atom_mem_type)
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+                        int atom_mem_type)
 {
         int vram_type;
 
@@ -174,8 +158,11 @@
                 case ATOM_DGPU_VRAM_TYPE_GDDR5:
                         vram_type = AMDGPU_VRAM_TYPE_GDDR5;
                         break;
-                case ATOM_DGPU_VRAM_TYPE_HBM:
+                case ATOM_DGPU_VRAM_TYPE_HBM2:
                         vram_type = AMDGPU_VRAM_TYPE_HBM;
+                        break;
+                case ATOM_DGPU_VRAM_TYPE_GDDR6:
+                        vram_type = AMDGPU_VRAM_TYPE_GDDR6;
                         break;
                 default:
                         vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
@@ -185,26 +172,33 @@
 
         return vram_type;
 }
-/*
- * Return vram type from either integrated system info table
- * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
- */
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+
+
+int
+amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+                                  int *vram_width, int *vram_type,
+                                  int *vram_vendor)
 {
         struct amdgpu_mode_info *mode_info = &adev->mode_info;
-        int index;
+        int index, i = 0;
         u16 data_offset, size;
         union igp_info *igp_info;
-        union umc_info *umc_info;
+        union vram_info *vram_info;
+        union vram_module *vram_module;
         u8 frev, crev;
         u8 mem_type;
+        u8 mem_vendor;
+        u32 mem_channel_number;
+        u32 mem_channel_width;
+        u32 module_id;
 
         if (adev->flags & AMD_IS_APU)
                 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
                                 integratedsysteminfo);
         else
                 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-                                umc_info);
+                                vram_info);
+
         if (amdgpu_atom_parse_data_header(mode_info->atom_context,
                         index, &size,
                         &frev, &crev, &data_offset)) {
@@ -213,30 +207,168 @@
                                 (mode_info->atom_context->bios + data_offset);
                         switch (crev) {
                         case 11:
+                                mem_channel_number = igp_info->v11.umachannelnumber;
+                                /* channel width is 64 */
+                                if (vram_width)
+                                        *vram_width = mem_channel_number * 64;
                                 mem_type = igp_info->v11.memorytype;
-                                return convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                if (vram_type)
+                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                break;
+                        case 12:
+                                mem_channel_number = igp_info->v12.umachannelnumber;
+                                /* channel width is 64 */
+                                if (vram_width)
+                                        *vram_width = mem_channel_number * 64;
+                                mem_type = igp_info->v12.memorytype;
+                                if (vram_type)
+                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                break;
                         default:
-                                return 0;
+                                return -EINVAL;
                         }
                 } else {
-                        umc_info = (union umc_info *)
+                        vram_info = (union vram_info *)
                                 (mode_info->atom_context->bios + data_offset);
+                        module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
                         switch (crev) {
-                        case 1:
-                                mem_type = umc_info->v31.vram_type;
-                                return convert_atom_mem_type_to_vram_type(adev, mem_type);
+                        case 3:
+                                if (module_id > vram_info->v23.vram_module_num)
+                                        module_id = 0;
+                                vram_module = (union vram_module *)vram_info->v23.vram_module;
+                                while (i < module_id) {
+                                        vram_module = (union vram_module *)
+                                                ((u8 *)vram_module + vram_module->v9.vram_module_size);
+                                        i++;
+                                }
+                                mem_type = vram_module->v9.memory_type;
+                                if (vram_type)
+                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                mem_channel_number = vram_module->v9.channel_num;
+                                mem_channel_width = vram_module->v9.channel_width;
+                                if (vram_width)
+                                        *vram_width = mem_channel_number * (1 << mem_channel_width);
+                                mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+                                if (vram_vendor)
+                                        *vram_vendor = mem_vendor;
+                                break;
+                        case 4:
+                                if (module_id > vram_info->v24.vram_module_num)
+                                        module_id = 0;
+                                vram_module = (union vram_module *)vram_info->v24.vram_module;
+                                while (i < module_id) {
+                                        vram_module = (union vram_module *)
+                                                ((u8 *)vram_module + vram_module->v10.vram_module_size);
+                                        i++;
+                                }
+                                mem_type = vram_module->v10.memory_type;
+                                if (vram_type)
+                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                mem_channel_number = vram_module->v10.channel_num;
+                                mem_channel_width = vram_module->v10.channel_width;
+                                if (vram_width)
+                                        *vram_width = mem_channel_number * (1 << mem_channel_width);
+                                mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+                                if (vram_vendor)
+                                        *vram_vendor = mem_vendor;
+                                break;
+                        case 5:
+                                if (module_id > vram_info->v25.vram_module_num)
+                                        module_id = 0;
+                                vram_module = (union vram_module *)vram_info->v25.vram_module;
+                                while (i < module_id) {
+                                        vram_module = (union vram_module *)
+                                                ((u8 *)vram_module + vram_module->v11.vram_module_size);
+                                        i++;
+                                }
+                                mem_type = vram_module->v11.memory_type;
+                                if (vram_type)
+                                        *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                mem_channel_number = vram_module->v11.channel_num;
+                                mem_channel_width = vram_module->v11.channel_width;
+                                if (vram_width)
+                                        *vram_width = mem_channel_number * (1 << mem_channel_width);
+                                mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
+                                if (vram_vendor)
+                                        *vram_vendor = mem_vendor;
+                                break;
                         default:
-                                return 0;
+                                return -EINVAL;
                         }
                 }
+
         }
 
         return 0;
 }
 
+/*
+ * Return true if vbios enabled ecc by default, if umc info table is available
+ * or false if ecc is not enabled or umc info table is not available
+ */
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
+{
+        struct amdgpu_mode_info *mode_info = &adev->mode_info;
+        int index;
+        u16 data_offset, size;
+        union umc_info *umc_info;
+        u8 frev, crev;
+        bool ecc_default_enabled = false;
+
+        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                        umc_info);
+
+        if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+                        index, &size, &frev, &crev, &data_offset)) {
+                /* support umc_info 3.1+ */
+                if ((frev == 3 && crev >= 1) || (frev > 3)) {
+                        umc_info = (union umc_info *)
+                                (mode_info->atom_context->bios + data_offset);
+                        ecc_default_enabled =
+                                (le32_to_cpu(umc_info->v31.umc_config) &
+                                 UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+                }
+        }
+
+        return ecc_default_enabled;
+}
+
 union firmware_info {
         struct atom_firmware_info_v3_1 v31;
+        struct atom_firmware_info_v3_2 v32;
+        struct atom_firmware_info_v3_3 v33;
+        struct atom_firmware_info_v3_4 v34;
 };
+
+/*
+ * Return true if vbios supports sram ecc or false if not
+ */
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
+{
+        struct amdgpu_mode_info *mode_info = &adev->mode_info;
+        int index;
+        u16 data_offset, size;
+        union firmware_info *firmware_info;
+        u8 frev, crev;
+        bool sram_ecc_supported = false;
+
+        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                        firmwareinfo);
+
+        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+                        index, &size, &frev, &crev, &data_offset)) {
+                /* support firmware_info 3.1 + */
+                if ((frev == 3 && crev >=1) || (frev > 3)) {
+                        firmware_info = (union firmware_info *)
+                                (mode_info->atom_context->bios + data_offset);
+                        sram_ecc_supported =
+                                (le32_to_cpu(firmware_info->v31.firmware_capability) &
+                                 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
+                }
+        }
+
+        return sram_ecc_supported;
+}
 
 union smu_info {
         struct atom_smu_info_v3_1 v31;
@@ -342,11 +474,11 @@
                         (mode_info->atom_context->bios + data_offset);
                 switch (crev) {
                 case 4:
-                        adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
-                        adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
-                        adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
-                        adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
-                        adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
+                        adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
+                        adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
+                        adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
+                        adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
+                        adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
                         adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
                         adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
                         adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
@@ -366,3 +498,101 @@
         }
         return -EINVAL;
 }
+
+/*
+ * Check if VBIOS supports GDDR6 training data save/restore
+ */
+static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
+{
+        uint16_t data_offset;
+        int index;
+
+        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                        firmwareinfo);
+        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+                        NULL, NULL, &data_offset)) {
+                struct atom_firmware_info_v3_1 *firmware_info =
+                        (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+                                data_offset);
+
+                DRM_DEBUG("atom firmware capability:0x%08x.\n",
+                          le32_to_cpu(firmware_info->firmware_capability));
+
+                if (le32_to_cpu(firmware_info->firmware_capability) &
+                    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
+                        return true;
+        }
+
+        return false;
+}
+
+int amdgpu_mem_train_support(struct amdgpu_device *adev)
+{
+        int ret;
+        uint32_t major, minor, revision, hw_v;
+
+        if (gddr6_mem_train_vbios_support(adev)) {
+                amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
+                hw_v = HW_REV(major, minor, revision);
+                /*
+                 * treat 0 revision as a special case since register for MP0 and MMHUB is missing
+                 * for some Navi10 A0, preventing driver from discovering the hwip information since
+                 * none of the functions will be initialized, it should not cause any problems
+                 */
+                switch (hw_v) {
+                case HW_REV(11, 0, 0):
+                case HW_REV(11, 0, 5):
+                case HW_REV(11, 0, 7):
+                case HW_REV(11, 0, 11):
+                        ret = 1;
+                        break;
+                default:
+                        DRM_ERROR("memory training vbios supports but psp hw(%08x)"
+                                  " doesn't support!\n", hw_v);
+                        ret = -1;
+                        break;
+                }
+        } else {
+                ret = 0;
+                hw_v = -1;
+        }
+
+
+        DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
+        return ret;
+}
+
+int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
+{
+        struct atom_context *ctx = adev->mode_info.atom_context;
+        union firmware_info *firmware_info;
+        int index;
+        u16 data_offset, size;
+        u8 frev, crev;
+        int fw_reserved_fb_size;
+
+        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                        firmwareinfo);
+
+        if (!amdgpu_atom_parse_data_header(ctx, index, &size,
+                        &frev, &crev, &data_offset))
+                /* fail to parse data_header */
+                return 0;
+
+        firmware_info = (union firmware_info *)(ctx->bios + data_offset);
+
+        if (frev !=3)
+                return -EINVAL;
+
+        switch (crev) {
+        case 4:
+                fw_reserved_fb_size =
+                        (firmware_info->v34.fw_reserved_size_in_kb << 10);
+                break;
+        default:
+                fw_reserved_fb_size = 0;
+                break;
+        }
+
+        return fw_reserved_fb_size;
+}
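
For context, a minimal caller sketch of the reworked interface introduced by this patch: amdgpu_atomfirmware_get_vram_info() fills any non-NULL out-parameters and returns -EINVAL for table revisions it does not handle. This sketch is not part of the patch; the wrapper function name and the adev->gmc field names are illustrative assumptions, only the amdgpu_atomfirmware_get_vram_info() call itself comes from the diff above.

/* Illustrative sketch only -- wrapper name and gmc field names are assumed. */
static int example_fetch_vram_info(struct amdgpu_device *adev)
{
        int vram_width = 0, vram_type = 0, vram_vendor = 0;
        int r;

        /* Any out-pointer may be NULL if the caller does not need that value. */
        r = amdgpu_atomfirmware_get_vram_info(adev, &vram_width,
                                              &vram_type, &vram_vendor);
        if (r)
                return r; /* -EINVAL: unsupported integratedsysteminfo/vram_info revision */

        adev->gmc.vram_width = vram_width;   /* channel count * per-channel width, in bits */
        adev->gmc.vram_type = vram_type;     /* one of the AMDGPU_VRAM_TYPE_* values */
        adev->gmc.vram_vendor = vram_vendor; /* low nibble of vender_rev_id */
        return 0;
}

On APUs the width is derived as umachannelnumber * 64, while on dGPUs it comes from the vram_module entry selected by module_id (channel_num * (1 << channel_width)), which is why the old single-purpose helpers removed by this patch are no longer needed.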