2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/gpu/drm/radeon/radeon_device.c
@@ -25,16 +25,23 @@
  * Alex Deucher
  * Jerome Glisse
  */
+
 #include <linux/console.h>
-#include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_cache.h>
-#include <drm/radeon_drm.h>
-#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
-#include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/vgaarb.h>
+
+#include <drm/drm_cache.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/radeon_drm.h>
+
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "atom.h"
@@ -1015,6 +1022,7 @@
 {
 	if (rdev->mode_info.atom_context) {
 		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context->iio);
 	}
 	kfree(rdev->mode_info.atom_context);
 	rdev->mode_info.atom_context = NULL;
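
Note: atom_context owns two separately allocated tables, and only ->scratch was freed here, leaking the ->iio indirect-register table on every teardown. A minimal sketch of the resulting cleanup path, assuming the surrounding function is radeon_atombios_fini() as the context lines suggest (the function name and allocation site are inferred, not shown in this diff):

static void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context->iio);	/* was leaked */
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
}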
@@ -1256,7 +1264,7 @@
 	 * locking inversion with the driver load path. And the access here is
 	 * completely racy anyway. So don't bother with locking for now.
 	 */
-	return dev->open_count == 0;
+	return atomic_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
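
Note: drm_device::open_count is now an atomic_t in the DRM core, so the unlocked read switches to atomic_read(); the result is still racy by design, per the comment above. For context, a sketch of the vga_switcheroo can_switch callback this return sits in (shape inferred from the context lines, not part of the diff):

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* vga_switcheroo may switch the GPU away only when nothing has
	 * the DRM node open; unlocked on purpose (see comment above). */
	return atomic_read(&dev->open_count) == 0;
}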
@@ -1318,8 +1326,6 @@
 	init_rwsem(&rdev->pm.mclk_lock);
 	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
-	mutex_init(&rdev->mn_lock);
-	hash_init(rdev->mn_hash);
 	r = radeon_gem_init(rdev);
 	if (r)
 		return r;
@@ -1358,36 +1364,29 @@
 	else
 		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
 
-	/* set DMA mask + need_dma32 flags.
+	/* set DMA mask.
 	 * PCIE - can handle 40-bits.
 	 * IGP - can handle 40-bits
 	 * AGP - generally dma32 is safest
 	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
 	 */
-	rdev->need_dma32 = false;
+	dma_bits = 40;
 	if (rdev->flags & RADEON_IS_AGP)
-		rdev->need_dma32 = true;
+		dma_bits = 32;
 	if ((rdev->flags & RADEON_IS_PCI) &&
 	    (rdev->family <= CHIP_RS740))
-		rdev->need_dma32 = true;
+		dma_bits = 32;
 #ifdef CONFIG_PPC64
 	if (rdev->family == CHIP_CEDAR)
-		rdev->need_dma32 = true;
+		dma_bits = 32;
 #endif
 
-	dma_bits = rdev->need_dma32 ? 32 : 40;
-	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
 	if (r) {
-		rdev->need_dma32 = true;
-		dma_bits = 32;
 		pr_warn("radeon: No suitable DMA available\n");
+		return r;
 	}
-	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
-	if (r) {
-		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
-		pr_warn("radeon: No coherent DMA available\n");
-	}
-	rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
 
 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
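
Note: dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, collapsing the old pci_set_dma_mask()/pci_set_consistent_dma_mask() pair and its 32-bit fallback dance into a single error path; failure now aborts device init instead of silently limping along. drm_need_swiotlb() likewise replaces the open-coded drm_get_max_iomem() comparison. A minimal sketch of the consolidated pattern (hypothetical helper name, not part of the patch):

#include <linux/dma-mapping.h>
#include <drm/drm_cache.h>	/* drm_need_swiotlb() */

static int example_setup_dma(struct radeon_device *rdev, int dma_bits)
{
	/* One call covers both masks; the &pdev->dev form replaces the
	 * deprecated pci_set_*_dma_mask() wrappers. */
	int r = dma_set_mask_and_coherent(&rdev->pdev->dev,
					  DMA_BIT_MASK(dma_bits));
	if (r)
		return r;	/* no usable DMA: fail init outright */

	/* true when some memory lies above the mask, so TTM must bounce
	 * buffers through swiotlb */
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
	return 0;
}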
@@ -1625,6 +1624,9 @@
 		if (r) {
 			/* delay GPU reset to resume */
 			radeon_fence_driver_force_completion(rdev, i);
+		} else {
+			/* finish executing delayed work */
+			flush_delayed_work(&rdev->fence_drv[i].lockup_work);
 		}
 	}
 
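
Note: for rings whose fences drained cleanly, the per-ring lockup watchdog (a delayed work) may still be queued; flush_delayed_work() fires any pending timer early and waits for the handler to finish, so the watchdog cannot run against a suspended GPU. A sketch of the suspend-side loop this hunk completes (loop bounds inferred from the radeon fence code, not shown in the diff):

static void example_quiesce_fences(struct radeon_device *rdev)
{
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* wait failed: delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		} else {
			/* ring is idle: make sure the lockup watchdog
			 * has finished before powering down */
			flush_delayed_work(&rdev->fence_drv[i].lockup_work);
		}
	}
}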