2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -25,8 +25,9 @@
  * Alex Deucher
  * Jerome Glisse
  */
-#include <drm/drmP.h>
+
 #include "amdgpu.h"
+#include <drm/drm_debugfs.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu_sched.h"
 #include "amdgpu_uvd.h"
@@ -35,8 +36,37 @@
 
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_display.h"
+#include "amdgpu_ras.h"
+
+void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
+{
+	struct amdgpu_gpu_instance *gpu_instance;
+	int i;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	for (i = 0; i < mgpu_info.num_gpu; i++) {
+		gpu_instance = &(mgpu_info.gpu_ins[i]);
+		if (gpu_instance->adev == adev) {
+			mgpu_info.gpu_ins[i] =
+				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
+			mgpu_info.num_gpu--;
+			if (adev->flags & AMD_IS_APU)
+				mgpu_info.num_apu--;
+			else
+				mgpu_info.num_dgpu--;
+			break;
+		}
+	}
+
+	mutex_unlock(&mgpu_info.mutex);
+}
 
 /**
  * amdgpu_driver_unload_kms - Main unload function for KMS.
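A side note on amdgpu_unregister_gpu_instance() in the hunk above: it removes an entry from mgpu_info.gpu_ins[] by overwriting it with the last element instead of shifting the tail, which keeps removal O(1) and the array dense, at the cost of ordering. A minimal standalone sketch of that idiom; the function name here is made up, not a kernel API:

/* Illustrative sketch of the swap-with-last removal used above;
 * array_remove_unordered() is a hypothetical helper, not driver code. */
static int array_remove_unordered(int *arr, int len, int victim)
{
	int i;

	for (i = 0; i < len; i++) {
		if (arr[i] == victim) {
			arr[i] = arr[len - 1];	/* overwrite with the last entry */
			return len - 1;		/* new length; order is not preserved */
		}
	}
	return len;	/* victim not found, length unchanged */
}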
@@ -48,53 +78,67 @@
  */
 void amdgpu_driver_unload_kms(struct drm_device *dev)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 
 	if (adev == NULL)
 		return;
 
+	amdgpu_unregister_gpu_instance(adev);
+
 	if (adev->rmmio == NULL)
-		goto done_free;
+		return;
 
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_request_full_gpu(adev, false);
-
-	if (amdgpu_device_is_px(dev)) {
+	if (adev->runpm) {
 		pm_runtime_get_sync(dev->dev);
 		pm_runtime_forbid(dev->dev);
 	}
 
 	amdgpu_acpi_fini(adev);
-
 	amdgpu_device_fini(adev);
+}
 
-done_free:
-	kfree(adev);
-	dev->dev_private = NULL;
+void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
+{
+	struct amdgpu_gpu_instance *gpu_instance;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
+		DRM_ERROR("Cannot register more gpu instance\n");
+		mutex_unlock(&mgpu_info.mutex);
+		return;
+	}
+
+	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
+	gpu_instance->adev = adev;
+	gpu_instance->mgpu_fan_enabled = 0;
+
+	mgpu_info.num_gpu++;
+	if (adev->flags & AMD_IS_APU)
+		mgpu_info.num_apu++;
+	else
+		mgpu_info.num_dgpu++;
+
+	mutex_unlock(&mgpu_info.mutex);
 }
 
 /**
  * amdgpu_driver_load_kms - Main load function for KMS.
  *
- * @dev: drm dev pointer
+ * @adev: pointer to struct amdgpu_device
  * @flags: device flags
  *
  * This is the main load function for KMS (all asics).
  * Returns 0 on success, error on failure.
  */
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 {
-	struct amdgpu_device *adev;
+	struct drm_device *dev;
 	int r, acpi_status;
 
-	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
-	if (adev == NULL) {
-		return -ENOMEM;
-	}
-	dev->dev_private = (void *)adev;
+	dev = adev_to_drm(adev);
 
-	if ((amdgpu_runtime_pm != 0) &&
-	    amdgpu_has_atpx() &&
+	if (amdgpu_has_atpx() &&
 	    (amdgpu_is_atpx_hybrid() ||
 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0) &&
@@ -107,27 +151,57 @@
 	 * properly initialize the GPU MC controller and permit
 	 * VRAM allocation
 	 */
-	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
+	r = amdgpu_device_init(adev, flags);
 	if (r) {
 		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
 		goto out;
 	}
 
+	if (amdgpu_device_supports_boco(dev) &&
+	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
+		adev->runpm = true;
+	} else if (amdgpu_device_supports_baco(dev) &&
+		   (amdgpu_runtime_pm != 0)) {
+		switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+		case CHIP_BONAIRE:
+		case CHIP_HAWAII:
+#endif
+		case CHIP_VEGA20:
+		case CHIP_ARCTURUS:
+		case CHIP_SIENNA_CICHLID:
+		case CHIP_NAVY_FLOUNDER:
+			/* enable runpm if runpm=1 */
+			if (amdgpu_runtime_pm > 0)
+				adev->runpm = true;
+			break;
+		case CHIP_VEGA10:
+			/* turn runpm on if noretry=0 */
+			if (!adev->gmc.noretry)
+				adev->runpm = true;
+			break;
+		default:
+			/* enable runpm on VI+ */
+			adev->runpm = true;
+			break;
+		}
+	}
+
 	/* Call ACPI methods: require modeset init
 	 * but failure is not fatal
 	 */
-	if (!r) {
-		acpi_status = amdgpu_acpi_init(adev);
-		if (acpi_status)
-			dev_dbg(&dev->pdev->dev,
-				"Error during ACPI methods call\n");
-	}
 
-	if (amdgpu_device_is_px(dev)) {
-		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+	acpi_status = amdgpu_acpi_init(adev);
+	if (acpi_status)
+		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+
+	if (adev->runpm) {
+		/* only need to skip on ATPX */
+		if (amdgpu_device_supports_boco(dev) &&
+		    !amdgpu_is_atpx_hybrid())
+			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
-		pm_runtime_set_active(dev->dev);
 		pm_runtime_allow(dev->dev);
 		pm_runtime_mark_last_busy(dev->dev);
 		pm_runtime_put_autosuspend(dev->dev);
@@ -136,7 +210,7 @@
 out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
-		if (adev->rmmio && amdgpu_device_is_px(dev))
+		if (adev->rmmio && adev->runpm)
 			pm_runtime_put_noidle(dev->dev);
 		amdgpu_driver_unload_kms(dev);
 	}
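With amdgpu_device_is_px() replaced by the adev->runpm flag in the hunks above, hardware-touching entry points are expected to use the standard runtime-PM bracket that pairs with the 5000 ms autosuspend delay configured at load time. A hedged sketch of that bracket; amdgpu_example_entry() and amdgpu_do_work() are hypothetical placeholders, not driver functions:

/* Sketch only: the usual pm_runtime bracket an entry point would use
 * once adev->runpm is set; amdgpu_do_work() is hypothetical. */
static int amdgpu_example_entry(struct drm_device *dev)
{
	int r;

	r = pm_runtime_get_sync(dev->dev);	/* resume the GPU if suspended */
	if (r < 0)
		goto out;

	r = amdgpu_do_work(dev);		/* hypothetical hardware access */

out:
	pm_runtime_mark_last_busy(dev->dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev->dev);	/* drop the ref; device may suspend later */
	return r;
}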
@@ -207,6 +281,28 @@
 		fw_info->ver = adev->pm.fw_version;
 		fw_info->feature = 0;
 		break;
+	case AMDGPU_INFO_FW_TA:
+		switch (query_fw->index) {
+		case 0:
+			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
+			break;
+		case 1:
+			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->feature = adev->psp.ta_ras_ucode_version;
+			break;
+		case 2:
+			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+			break;
+		case 3:
+			fw_info->ver = adev->psp.ta_fw_version;
+			fw_info->feature = adev->psp.ta_dtm_ucode_version;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
 	case AMDGPU_INFO_FW_SDMA:
 		if (query_fw->index >= adev->sdma.num_instances)
 			return -EINVAL;
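The new AMDGPU_INFO_FW_TA query above is reached from userspace through the existing firmware-version ioctl path, which libdrm wraps as amdgpu_query_firmware_version(). A hedged usage sketch; query_ta_versions() is a made-up helper name, and the index meanings follow the switch in the hunk above:

/* Sketch, assuming libdrm's amdgpu wrapper; error handling trimmed. */
#include <stdio.h>
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static void query_ta_versions(amdgpu_device_handle dev)
{
	uint32_t ver, feat;
	unsigned index;

	/* index 0..3 selects the XGMI/RAS/HDCP/DTM feature word */
	for (index = 0; index < 4; index++) {
		if (amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_TA,
						  0 /* ip_instance */, index,
						  &ver, &feat) == 0)
			printf("TA index %u: fw 0x%08x, feature 0x%08x\n",
			       index, ver, feat);
	}
}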
@@ -221,9 +317,151 @@
 		fw_info->ver = adev->psp.asd_fw_version;
 		fw_info->feature = adev->psp.asd_feature_version;
 		break;
+	case AMDGPU_INFO_FW_DMCU:
+		fw_info->ver = adev->dm.dmcu_fw_version;
+		fw_info->feature = 0;
+		break;
+	case AMDGPU_INFO_FW_DMCUB:
+		fw_info->ver = adev->dm.dmcub_fw_version;
+		fw_info->feature = 0;
+		break;
 	default:
 		return -EINVAL;
 	}
+	return 0;
+}
+
+static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+			     struct drm_amdgpu_info *info,
+			     struct drm_amdgpu_info_hw_ip *result)
+{
+	uint32_t ib_start_alignment = 0;
+	uint32_t ib_size_alignment = 0;
+	enum amd_ip_block_type type;
+	unsigned int num_rings = 0;
+	unsigned int i, j;
+
+	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
+		return -EINVAL;
+
+	switch (info->query_hw_ip.type) {
+	case AMDGPU_HW_IP_GFX:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+			if (adev->gfx.gfx_ring[i].sched.ready)
+				++num_rings;
+		ib_start_alignment = 32;
+		ib_size_alignment = 32;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		for (i = 0; i < adev->gfx.num_compute_rings; i++)
+			if (adev->gfx.compute_ring[i].sched.ready)
+				++num_rings;
+		ib_start_alignment = 32;
+		ib_size_alignment = 32;
+		break;
+	case AMDGPU_HW_IP_DMA:
+		type = AMD_IP_BLOCK_TYPE_SDMA;
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			if (adev->sdma.instance[i].ring.sched.ready)
+				++num_rings;
+		ib_start_alignment = 256;
+		ib_size_alignment = 4;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		type = AMD_IP_BLOCK_TYPE_UVD;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+
+			if (adev->uvd.inst[i].ring.sched.ready)
+				++num_rings;
+		}
+		ib_start_alignment = 64;
+		ib_size_alignment = 64;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		type = AMD_IP_BLOCK_TYPE_VCE;
+		for (i = 0; i < adev->vce.num_rings; i++)
+			if (adev->vce.ring[i].sched.ready)
+				++num_rings;
+		ib_start_alignment = 4;
+		ib_size_alignment = 1;
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		type = AMD_IP_BLOCK_TYPE_UVD;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+
+			for (j = 0; j < adev->uvd.num_enc_rings; j++)
+				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
+					++num_rings;
+		}
+		ib_start_alignment = 64;
+		ib_size_alignment = 64;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+		type = AMD_IP_BLOCK_TYPE_VCN;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+
+			if (adev->vcn.inst[i].ring_dec.sched.ready)
+				++num_rings;
+		}
+		ib_start_alignment = 16;
+		ib_size_alignment = 16;
+		break;
+	case AMDGPU_HW_IP_VCN_ENC:
+		type = AMD_IP_BLOCK_TYPE_VCN;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+
+			for (j = 0; j < adev->vcn.num_enc_rings; j++)
+				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
+					++num_rings;
+		}
+		ib_start_alignment = 64;
+		ib_size_alignment = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_JPEG:
+		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+
+		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+			if (adev->jpeg.harvest_config & (1 << i))
+				continue;
+
+			if (adev->jpeg.inst[i].ring_dec.sched.ready)
+				++num_rings;
+		}
+		ib_start_alignment = 16;
+		ib_size_alignment = 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = 0; i < adev->num_ip_blocks; i++)
+		if (adev->ip_blocks[i].version->type == type &&
+		    adev->ip_blocks[i].status.valid)
+			break;
+
+	if (i == adev->num_ip_blocks)
+		return 0;
+
+	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
+			num_rings);
+
+	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
+	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
+	result->capabilities_flags = 0;
+	result->available_rings = (1 << num_rings) - 1;
+	result->ib_start_alignment = ib_start_alignment;
+	result->ib_size_alignment = ib_size_alignment;
 	return 0;
 }
 
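A consumer-side view of the new amdgpu_hw_ip_info() helper: ready rings are now reported as the contiguous mask (1 << num_rings) - 1 rather than the sparse per-index mask the old ioctl code built, so userspace can count rings by scanning from bit 0. A hedged sketch using libdrm's amdgpu_query_hw_ip_info(); count_ready_rings() is a made-up name:

/* Sketch, assuming libdrm; the contiguous mask makes counting trivial. */
#include <amdgpu.h>
#include <amdgpu_drm.h>

static unsigned count_ready_rings(amdgpu_device_handle dev, unsigned ip_type)
{
	struct drm_amdgpu_info_hw_ip info;
	unsigned n = 0;

	if (amdgpu_query_hw_ip_info(dev, ip_type, 0 /* instance */, &info))
		return 0;

	while (info.available_rings & (1u << n))
		n++;	/* mask is (1 << num_rings) - 1, so set bits are contiguous */
	return n;
}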
@@ -244,7 +482,7 @@
  */
 static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_info *info = data;
 	struct amdgpu_mode_info *minfo = &adev->mode_info;
 	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -252,7 +490,7 @@
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, j, found;
+	int i, found;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -279,101 +517,14 @@
 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_HW_IP_INFO: {
 		struct drm_amdgpu_info_hw_ip ip = {};
-		enum amd_ip_block_type type;
-		uint32_t ring_mask = 0;
-		uint32_t ib_start_alignment = 0;
-		uint32_t ib_size_alignment = 0;
+		int ret;
 
-		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
-			return -EINVAL;
+		ret = amdgpu_hw_ip_info(adev, info, &ip);
+		if (ret)
+			return ret;
 
-		switch (info->query_hw_ip.type) {
-		case AMDGPU_HW_IP_GFX:
-			type = AMD_IP_BLOCK_TYPE_GFX;
-			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-				ring_mask |= adev->gfx.gfx_ring[i].ready << i;
-			ib_start_alignment = 32;
-			ib_size_alignment = 32;
-			break;
-		case AMDGPU_HW_IP_COMPUTE:
-			type = AMD_IP_BLOCK_TYPE_GFX;
-			for (i = 0; i < adev->gfx.num_compute_rings; i++)
-				ring_mask |= adev->gfx.compute_ring[i].ready << i;
-			ib_start_alignment = 32;
-			ib_size_alignment = 32;
-			break;
-		case AMDGPU_HW_IP_DMA:
-			type = AMD_IP_BLOCK_TYPE_SDMA;
-			for (i = 0; i < adev->sdma.num_instances; i++)
-				ring_mask |= adev->sdma.instance[i].ring.ready << i;
-			ib_start_alignment = 256;
-			ib_size_alignment = 4;
-			break;
-		case AMDGPU_HW_IP_UVD:
-			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
-				if (adev->uvd.harvest_config & (1 << i))
-					continue;
-				ring_mask |= adev->uvd.inst[i].ring.ready;
-			}
-			ib_start_alignment = 64;
-			ib_size_alignment = 64;
-			break;
-		case AMDGPU_HW_IP_VCE:
-			type = AMD_IP_BLOCK_TYPE_VCE;
-			for (i = 0; i < adev->vce.num_rings; i++)
-				ring_mask |= adev->vce.ring[i].ready << i;
-			ib_start_alignment = 4;
-			ib_size_alignment = 1;
-			break;
-		case AMDGPU_HW_IP_UVD_ENC:
-			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
-				if (adev->uvd.harvest_config & (1 << i))
-					continue;
-				for (j = 0; j < adev->uvd.num_enc_rings; j++)
-					ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
-			}
-			ib_start_alignment = 64;
-			ib_size_alignment = 64;
-			break;
-		case AMDGPU_HW_IP_VCN_DEC:
-			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_dec.ready;
-			ib_start_alignment = 16;
-			ib_size_alignment = 16;
-			break;
-		case AMDGPU_HW_IP_VCN_ENC:
-			type = AMD_IP_BLOCK_TYPE_VCN;
-			for (i = 0; i < adev->vcn.num_enc_rings; i++)
-				ring_mask |= adev->vcn.ring_enc[i].ready << i;
-			ib_start_alignment = 64;
-			ib_size_alignment = 1;
-			break;
-		case AMDGPU_HW_IP_VCN_JPEG:
-			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_jpeg.ready;
-			ib_start_alignment = 16;
-			ib_size_alignment = 16;
-			break;
-		default:
-			return -EINVAL;
-		}
-
-		for (i = 0; i < adev->num_ip_blocks; i++) {
-			if (adev->ip_blocks[i].version->type == type &&
-			    adev->ip_blocks[i].status.valid) {
-				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
-				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
-				ip.capabilities_flags = 0;
-				ip.available_rings = ring_mask;
-				ip.ib_start_alignment = ib_start_alignment;
-				ip.ib_size_alignment = ib_size_alignment;
-				break;
-			}
-		}
-		return copy_to_user(out, &ip,
-				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
+		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
+		return ret ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_HW_IP_COUNT: {
 		enum amd_ip_block_type type;
@@ -400,8 +551,11 @@
 			break;
 		case AMDGPU_HW_IP_VCN_DEC:
 		case AMDGPU_HW_IP_VCN_ENC:
-		case AMDGPU_HW_IP_VCN_JPEG:
 			type = AMD_IP_BLOCK_TYPE_VCN;
+			break;
+		case AMDGPU_HW_IP_VCN_JPEG:
+			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
 			break;
 		default:
 			return -EINVAL;
@@ -443,25 +597,22 @@
 		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VRAM_USAGE:
-		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+		ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VIS_VRAM_USAGE:
-		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_GTT_USAGE:
-		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+		ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_GDS_CONFIG: {
 		struct drm_amdgpu_info_gds gds_info;
 
 		memset(&gds_info, 0, sizeof(gds_info));
-		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
-		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
-		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
-		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
-		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
-		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
-		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
+		gds_info.compute_partition_size = adev->gds.gds_size;
+		gds_info.gds_total_size = adev->gds.gds_size;
+		gds_info.gws_per_compute_partition = adev->gds.gws_size;
+		gds_info.oa_per_compute_partition = adev->gds.oa_size;
 		return copy_to_user(out, &gds_info,
 				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
 	}
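The usage counters converted to ttm_manager_type() above are exported to userspace as plain 64-bit values (AMDGPU_INFO_VRAM_USAGE and friends) and, in aggregate, through AMDGPU_INFO_MEMORY in the hunks that follow. A hedged sketch using libdrm's generic amdgpu_query_info(); print_memory_usage() is a made-up name:

/* Sketch, assuming libdrm's generic info query; output format is
 * illustrative only. */
#include <stdio.h>
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static void print_memory_usage(amdgpu_device_handle dev)
{
	struct drm_amdgpu_memory_info mem;
	uint64_t vram_usage = 0;

	if (amdgpu_query_info(dev, AMDGPU_INFO_VRAM_USAGE,
			      sizeof(vram_usage), &vram_usage) == 0)
		printf("VRAM in use: %llu bytes\n",
		       (unsigned long long)vram_usage);

	if (amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(mem), &mem) == 0)
		printf("VRAM usable: %llu bytes\n",
		       (unsigned long long)mem.vram.usable_heap_size);
}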
@@ -469,10 +620,13 @@
 		struct drm_amdgpu_info_vram_gtt vram_gtt;
 
 		vram_gtt.vram_size = adev->gmc.real_vram_size -
-			atomic64_read(&adev->vram_pin_size);
-		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
-			atomic64_read(&adev->visible_pin_size);
-		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+			atomic64_read(&adev->vram_pin_size) -
+			AMDGPU_VM_RESERVED_VRAM;
+		vram_gtt.vram_cpu_accessible_size =
+			min(adev->gmc.visible_vram_size -
+			    atomic64_read(&adev->visible_pin_size),
+			    vram_gtt.vram_size);
+		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
 		vram_gtt.gtt_size *= PAGE_SIZE;
 		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
 		return copy_to_user(out, &vram_gtt,
@@ -480,30 +634,36 @@
 	}
 	case AMDGPU_INFO_MEMORY: {
 		struct drm_amdgpu_memory_info mem;
-
+		struct ttm_resource_manager *vram_man =
+			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+		struct ttm_resource_manager *gtt_man =
+			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 		memset(&mem, 0, sizeof(mem));
 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
 		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
-			atomic64_read(&adev->vram_pin_size);
+			atomic64_read(&adev->vram_pin_size) -
+			AMDGPU_VM_RESERVED_VRAM;
 		mem.vram.heap_usage =
-			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+			amdgpu_vram_mgr_usage(vram_man);
 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
 
 		mem.cpu_accessible_vram.total_heap_size =
 			adev->gmc.visible_vram_size;
-		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
-			atomic64_read(&adev->visible_pin_size);
+		mem.cpu_accessible_vram.usable_heap_size =
+			min(adev->gmc.visible_vram_size -
+			    atomic64_read(&adev->visible_pin_size),
+			    mem.vram.usable_heap_size);
 		mem.cpu_accessible_vram.heap_usage =
-			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+			amdgpu_vram_mgr_vis_usage(vram_man);
 		mem.cpu_accessible_vram.max_allocation =
 			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
 
-		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+		mem.gtt.total_heap_size = gtt_man->size;
 		mem.gtt.total_heap_size *= PAGE_SIZE;
 		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
 			atomic64_read(&adev->gart_pin_size);
 		mem.gtt.heap_usage =
-			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+			amdgpu_gtt_mgr_usage(gtt_man);
 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
 
 		return copy_to_user(out, &mem,
@@ -539,15 +699,19 @@
 			return -ENOMEM;
 		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
-		for (i = 0; i < info->read_mmr_reg.count; i++)
+		amdgpu_gfx_off_ctrl(adev, false);
+		for (i = 0; i < info->read_mmr_reg.count; i++) {
 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
 						      info->read_mmr_reg.dword_offset + i,
 						      &regs[i])) {
 				DRM_DEBUG_KMS("unallowed offset %#x\n",
 					      info->read_mmr_reg.dword_offset + i);
 				kfree(regs);
+				amdgpu_gfx_off_ctrl(adev, true);
 				return -EFAULT;
 			}
+		}
+		amdgpu_gfx_off_ctrl(adev, true);
 		n = copy_to_user(out, regs, min(size, alloc_size));
 		kfree(regs);
 		return n ? -EFAULT : 0;
@@ -581,27 +745,30 @@
 		dev_info.ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
-		if (amdgpu_sriov_vf(adev))
+		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+		if (amdgpu_is_tmz(adev))
+			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
 
 		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 		vm_size -= AMDGPU_VA_RESERVED_SIZE;
 
 		/* Older VCE FW versions are buggy and can handle only 40bits */
-		if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
+		if (adev->vce.fw_version &&
+		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
 			vm_size = min(vm_size, 1ULL << 40);
 
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max =
-			min(vm_size, AMDGPU_VA_HOLE_START);
+			min(vm_size, AMDGPU_GMC_HOLE_START);
 
-		if (vm_size > AMDGPU_VA_HOLE_START) {
-			dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
-			dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
+		if (vm_size > AMDGPU_GMC_HOLE_START) {
+			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
+			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
 		}
-		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+		dev_info.virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
-		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+		dev_info.gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info.cu_active_number = adev->gfx.cu_info.number;
 		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
 		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
@@ -614,17 +781,6 @@
 		dev_info.vce_harvest_config = adev->vce.harvest_config;
 		dev_info.gc_double_offchip_lds_buf =
 			adev->gfx.config.double_offchip_lds_buf;
-
-		if (amdgpu_ngg) {
-			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
-			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
-			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
-			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
-			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
-			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
-			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
-			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
-		}
 		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
 		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
 		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
@@ -632,6 +788,12 @@
 		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
 		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
 		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
+
+		if (adev->family >= AMDGPU_FAMILY_NV)
+			dev_info.pa_sc_tile_steering_override =
+				adev->gfx.config.pa_sc_tile_steering_override;
+
+		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
 
 		return copy_to_user(out, &dev_info,
 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
@@ -793,6 +955,18 @@
 	case AMDGPU_INFO_VRAM_LOST_COUNTER:
 		ui32 = atomic_read(&adev->vram_lost_counter);
 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
+		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+		uint64_t ras_mask;
+
+		if (!ras)
+			return -EINVAL;
+		ras_mask = (uint64_t)ras->supported << 32 | ras->features;
+
+		return copy_to_user(out, &ras_mask,
+				min_t(u64, size, sizeof(ras_mask))) ?
+			-EFAULT : 0;
+	}
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 		return -EINVAL;
@@ -828,12 +1002,18 @@
  */
 int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv;
 	int r, pasid;
 
 	/* Ensure IB tests are run on ring */
-	flush_delayed_work(&adev->late_init_work);
+	flush_delayed_work(&adev->delayed_init_work);
+
+
+	if (amdgpu_ras_intr_triggered()) {
+		DRM_ERROR("RAS Intr triggered, device disabled!!");
+		return -EHWPOISON;
+	}
 
 	file_priv->driver_priv = NULL;
 
@@ -862,8 +1042,11 @@
 			goto error_vm;
 	}
 
-	if (amdgpu_sriov_vf(adev)) {
-		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
 		if (r)
 			goto error_vm;
 	}
@@ -904,11 +1087,11 @@
 void amdgpu_driver_postclose_kms(struct drm_device *dev,
 				 struct drm_file *file_priv)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_bo_list *list;
 	struct amdgpu_bo *pd;
-	unsigned int pasid;
+	u32 pasid;
 	int handle;
 
 	if (!fpriv)
@@ -916,14 +1099,14 @@
 
 	pm_runtime_get_sync(dev->dev);
 
-	if (adev->asic_type != CHIP_RAVEN) {
+	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
 		amdgpu_uvd_free_handles(adev, file_priv);
+	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
 		amdgpu_vce_free_handles(adev, file_priv);
-	}
 
 	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
 
-	if (amdgpu_sriov_vf(adev)) {
+	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		/* TODO: how to handle reserve failure */
 		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
 		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
@@ -934,11 +1117,11 @@
 	pasid = fpriv->vm.pasid;
 	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
 
-	amdgpu_vm_fini(adev, &fpriv->vm);
 	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	if (pasid)
-		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
+		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
 	amdgpu_bo_unref(&pd);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -960,15 +1143,16 @@
 /**
  * amdgpu_get_vblank_counter_kms - get frame count
  *
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int vpos, hpos, stat;
 	u32 count;
 
@@ -1027,15 +1211,16 @@
 /**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
- * @dev: drm dev pointer
- * @pipe: crtc to enable vblank interrupt for
+ * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
 
 	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
@@ -1044,14 +1229,15 @@
 /**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
- * @dev: drm dev pointer
- * @pipe: crtc to disable vblank interrupt for
+ * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_device *dev = crtc->dev;
+	unsigned int pipe = crtc->index;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
 
 	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
@@ -1087,7 +1273,7 @@
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_info_firmware fw_info;
 	struct drm_amdgpu_query_fw query_fw;
 	struct atom_context *ctx = adev->mode_info.atom_context;
@@ -1183,8 +1369,7 @@
 		   fw_info.feature, fw_info.ver);
 
 	/* MEC2 */
-	if (adev->asic_type == CHIP_KAVERI ||
-	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+	if (adev->gfx.mec2_fw) {
 		query_fw.index = 1;
 		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
 		if (ret)
@@ -1209,6 +1394,34 @@
 		return ret;
 	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
+
+	query_fw.fw_type = AMDGPU_INFO_FW_TA;
+	for (i = 0; i < 4; i++) {
+		query_fw.index = i;
+		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+		if (ret)
+			continue;
+		switch (query_fw.index) {
+		case 0:
+			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+				   "RAS", fw_info.feature, fw_info.ver);
+			break;
+		case 1:
+			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+				   "XGMI", fw_info.feature, fw_info.ver);
+			break;
+		case 2:
+			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+				   "HDCP", fw_info.feature, fw_info.ver);
+			break;
+		case 3:
+			seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+				   "DTM", fw_info.feature, fw_info.ver);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
 
 	/* SMC */
 	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
@@ -1237,6 +1450,22 @@
 	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
 		   fw_info.feature, fw_info.ver);
 
+	/* DMCU */
+	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* DMCUB */
+	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
 
 	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
 
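A closing note on the AMDGPU_INFO_RAS_ENABLED_FEATURES case added earlier: the kernel packs ras->supported into the high 32 bits and ras->features into the low 32 bits of a single value. A hedged sketch of the userspace-side unpacking; decode_ras_mask() is a made-up name:

/* Sketch: splitting the packed RAS mask back into its two words. */
#include <stdint.h>

static void decode_ras_mask(uint64_t ras_mask,
			    uint32_t *supported, uint32_t *features)
{
	*supported = (uint32_t)(ras_mask >> 32);		/* high word: supported blocks */
	*features  = (uint32_t)(ras_mask & 0xffffffffu);	/* low word: enabled features */
}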