forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -3,10 +3,16 @@
  * Copyright (C) 2015-2018 Etnaviv Project
  */
 
+#include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/delay.h>
 #include <linux/dma-fence.h>
-#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
 #include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
@@ -36,6 +42,8 @@
 
 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
 {
+	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+
 	switch (param) {
 	case ETNAVIV_PARAM_GPU_MODEL:
 		*value = gpu->identity.model;
@@ -139,6 +147,13 @@
 
 	case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
 		*value = gpu->identity.varyings_count;
+		break;
+
+	case ETNAVIV_PARAM_SOFTPIN_START_ADDR:
+		if (priv->mmu_global->version == ETNAVIV_IOMMU_V2)
+			*value = ETNAVIV_SOFTPIN_START_ADDRESS;
+		else
+			*value = ~0ULL;
 		break;
 
 	default:
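
Note: the new ETNAVIV_PARAM_SOFTPIN_START_ADDR lets userspace discover where the softpin address space begins; ~0ULL signals that the core (MMUv1) cannot do softpin. A minimal userspace sketch of the query, assuming libdrm's xf86drm.h and the kernel's etnaviv UAPI header (the render node path and include paths are illustrative):

	/* sketch: query ETNAVIV_PARAM_SOFTPIN_START_ADDR via GET_PARAM */
	#include <fcntl.h>
	#include <stdio.h>
	#include <xf86drm.h>
	#include <drm/etnaviv_drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/renderD128", O_RDWR); /* illustrative node */
		struct drm_etnaviv_param req = {
			.pipe = 0,
			.param = ETNAVIV_PARAM_SOFTPIN_START_ADDR,
		};

		if (fd < 0 || drmIoctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req))
			return 1;
		if (req.value == ~0ULL)
			printf("softpin not supported (MMUv1)\n");
		else
			printf("softpin window starts at 0x%llx\n",
			       (unsigned long long)req.value);
		return 0;
	}
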
@@ -318,9 +333,20 @@
 		gpu->identity.revision = etnaviv_field(chipIdentity,
 					 VIVS_HI_CHIP_IDENTITY_REVISION);
 	} else {
+		u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 
 		gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
 		gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
+		gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
+
+		/*
+		 * Reading these two registers on GC600 rev 0x19 results in an
+		 * unhandled fault: external abort on non-linefetch
+		 */
+		if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
+			gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
+			gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
+		}
 
 		/*
 		 * !!!! HACK ALERT !!!!
@@ -335,7 +361,6 @@
 
 		/* Another special case */
 		if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
-			u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
 			u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 
 			if (chipDate == 0x20080814 && chipTime == 0x12051100) {
@@ -358,11 +383,18 @@
 			gpu->identity.model = chipModel_GC3000;
 			gpu->identity.revision &= 0xffff;
 		}
+
+		if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
+			gpu->identity.eco_id = 1;
+
+		if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
+			gpu->identity.eco_id = 1;
 	}
 
 	dev_info(gpu->dev, "model: GC%x, revision: %x\n",
 		 gpu->identity.model, gpu->identity.revision);
 
+	gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
 	/*
 	 * If there is a match in the HWDB, we aren't interested in the
 	 * remaining register values, as they might be wrong.
@@ -410,7 +442,7 @@
 	}
 
 	/* GC600 idle register reports zero bits where modules aren't present */
-	if (gpu->identity.model == chipModel_GC600) {
+	if (gpu->identity.model == chipModel_GC600)
 		gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
 				 VIVS_HI_IDLE_STATE_RA |
 				 VIVS_HI_IDLE_STATE_SE |
@@ -419,9 +451,6 @@
 				 VIVS_HI_IDLE_STATE_PE |
 				 VIVS_HI_IDLE_STATE_DE |
 				 VIVS_HI_IDLE_STATE_FE;
-	} else {
-		gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
-	}
 
 	etnaviv_hw_specs(gpu);
 }
@@ -493,7 +522,7 @@
 		/* read idle register. */
 		idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 
-		/* try reseting again if FE it not idle */
+		/* try resetting again if FE is not idle */
 		if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
 			dev_dbg(gpu->dev, "FE is not idle\n");
 			continue;
@@ -531,6 +560,12 @@
 
 	/* We rely on the GPU running, so program the clock */
 	etnaviv_gpu_update_clock(gpu);
+
+	gpu->fe_running = false;
+	gpu->exec_state = -1;
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+	gpu->mmu_context = NULL;
 
 	return 0;
 }
@@ -594,6 +629,25 @@
 			  VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
 			  VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
 	}
+
+	gpu->fe_running = true;
+}
+
+static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
+					  struct etnaviv_iommu_context *context)
+{
+	u16 prefetch;
+	u32 address;
+
+	/* setup the MMU */
+	etnaviv_iommu_restore(gpu, context);
+
+	/* Start command processor */
+	prefetch = etnaviv_buffer_init(gpu);
+	address = etnaviv_cmdbuf_get_va(&gpu->buffer,
+					&gpu->mmu_context->cmdbuf_mapping);
+
+	etnaviv_gpu_start_fe(gpu, address, prefetch);
 }
 
 static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
@@ -629,8 +683,6 @@
 
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
-	u16 prefetch;
-
 	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
 	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
 	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
@@ -676,19 +728,12 @@
 	/* setup the pulse eater */
 	etnaviv_gpu_setup_pulse_eater(gpu);
 
-	/* setup the MMU */
-	etnaviv_iommu_restore(gpu);
-
-	/* Start command processor */
-	prefetch = etnaviv_buffer_init(gpu);
-
 	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
-	etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
-			     prefetch);
 }
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 {
+	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
 	int ret, i;
 
 	ret = pm_runtime_get_sync(gpu->dev);
@@ -714,28 +759,6 @@
 	}
 
 	/*
-	 * Set the GPU linear window to be at the end of the DMA window, where
-	 * the CMA area is likely to reside. This ensures that we are able to
-	 * map the command buffers while having the linear window overlap as
-	 * much RAM as possible, so we can optimize mappings for other buffers.
-	 *
-	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
-	 * to different views of the memory on the individual engines.
-	 */
-	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
-	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
-		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
-		if (dma_mask < PHYS_OFFSET + SZ_2G)
-			gpu->memory_base = PHYS_OFFSET;
-		else
-			gpu->memory_base = dma_mask - SZ_2G + 1;
-	} else if (PHYS_OFFSET >= SZ_2G) {
-		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
-		gpu->memory_base = PHYS_OFFSET;
-		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
-	}
-
-	/*
 	 * On cores with security features supported, we claim control over the
 	 * security states.
 	 */
@@ -749,34 +772,46 @@
 		goto fail;
 	}
 
-	gpu->mmu = etnaviv_iommu_new(gpu);
-	if (IS_ERR(gpu->mmu)) {
-		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
-		ret = PTR_ERR(gpu->mmu);
+	ret = etnaviv_iommu_global_init(gpu);
+	if (ret)
 		goto fail;
+
+	/*
+	 * Set the GPU linear window to be at the end of the DMA window, where
+	 * the CMA area is likely to reside. This ensures that we are able to
+	 * map the command buffers while having the linear window overlap as
+	 * much RAM as possible, so we can optimize mappings for other buffers.
+	 *
+	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+	 * to different views of the memory on the individual engines.
+	 */
+	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+		if (dma_mask < PHYS_OFFSET + SZ_2G)
+			priv->mmu_global->memory_base = PHYS_OFFSET;
+		else
+			priv->mmu_global->memory_base = dma_mask - SZ_2G + 1;
+	} else if (PHYS_OFFSET >= SZ_2G) {
+		dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
+		priv->mmu_global->memory_base = PHYS_OFFSET;
+		gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 	}
 
-	gpu->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(gpu);
-	if (IS_ERR(gpu->cmdbuf_suballoc)) {
-		dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
-		ret = PTR_ERR(gpu->cmdbuf_suballoc);
-		goto destroy_iommu;
-	}
+	/*
+	 * If the GPU is part of a system with DMA addressing limitations,
+	 * request pages for our SHM backend buffers from the DMA32 zone to
+	 * hopefully avoid performance killing SWIOTLB bounce buffering.
+	 */
+	if (dma_addressing_limited(gpu->dev))
+		priv->shm_gfp_mask |= GFP_DMA32;
 
 	/* Create buffer: */
-	ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
+	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
 				  PAGE_SIZE);
 	if (ret) {
 		dev_err(gpu->dev, "could not create command buffer\n");
-		goto destroy_suballoc;
-	}
-
-	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
-	    etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
-		ret = -EINVAL;
-		dev_err(gpu->dev,
-			"command buffer outside valid memory window\n");
-		goto free_buffer;
+		goto fail;
 	}
 
 	/* Setup event management */
789824 /* Now program the hardware */
790825 mutex_lock(&gpu->lock);
791826 etnaviv_gpu_hw_init(gpu);
792
- gpu->exec_state = -1;
793827 mutex_unlock(&gpu->lock);
794828
795829 pm_runtime_mark_last_busy(gpu->dev);
796830 pm_runtime_put_autosuspend(gpu->dev);
797831
832
+ gpu->initialized = true;
833
+
798834 return 0;
799835
800
-free_buffer:
801
- etnaviv_cmdbuf_free(&gpu->buffer);
802
- gpu->buffer.suballoc = NULL;
803
-destroy_suballoc:
804
- etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
805
- gpu->cmdbuf_suballoc = NULL;
806
-destroy_iommu:
807
- etnaviv_iommu_destroy(gpu->mmu);
808
- gpu->mmu = NULL;
809836 fail:
810837 pm_runtime_mark_last_busy(gpu->dev);
811838 pm_put:
@@ -857,6 +884,13 @@
 	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 
 	verify_dma(gpu, &debug);
+
+	seq_puts(m, "\tidentity\n");
+	seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
+	seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
+	seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
+	seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
+	seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
 
 	seq_puts(m, "\tfeatures\n");
 	seq_printf(m, "\t major_features: 0x%08x\n",
@@ -937,6 +971,20 @@
 		seq_puts(m, "\t FP is not idle\n");
 	if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
 		seq_puts(m, "\t TS is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_BL) == 0)
+		seq_puts(m, "\t BL is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_ASYNCFE) == 0)
+		seq_puts(m, "\t ASYNCFE is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_MC) == 0)
+		seq_puts(m, "\t MC is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_PPA) == 0)
+		seq_puts(m, "\t PPA is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_WD) == 0)
+		seq_puts(m, "\t WD is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_NN) == 0)
+		seq_puts(m, "\t NN is not idle\n");
+	if ((idle & VIVS_HI_IDLE_STATE_TP) == 0)
+		seq_puts(m, "\t TP is not idle\n");
 	if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
 		seq_puts(m, "\t AXI low power mode\n");
 
@@ -981,7 +1029,6 @@
 
 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 {
-	unsigned long flags;
 	unsigned int i = 0;
 
 	dev_err(gpu->dev, "recover hung GPU!\n");
@@ -994,16 +1041,13 @@
 	etnaviv_hw_reset(gpu);
 
 	/* complete all events, the GPU won't do it after the reset */
-	spin_lock_irqsave(&gpu->event_spinlock, flags);
+	spin_lock(&gpu->event_spinlock);
 	for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
 		complete(&gpu->event_free);
 	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
-	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-	gpu->completed_fence = gpu->active_fence;
+	spin_unlock(&gpu->event_spinlock);
 
 	etnaviv_gpu_hw_init(gpu);
-	gpu->lastctx = NULL;
-	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
 	pm_runtime_mark_last_busy(gpu->dev);
10381082 {
10391083 struct etnaviv_fence *f = to_etnaviv_fence(fence);
10401084
1041
- return fence_completed(f->gpu, f->base.seqno);
1085
+ return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
10421086 }
10431087
10441088 static void etnaviv_fence_release(struct dma_fence *fence)
@@ -1077,6 +1121,12 @@
 	return &f->base;
 }
 
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
+	return (s32)(a - b) > 0;
+}
+
 /*
  * event management:
  */
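
Note: both the new signaled test ((s32)(completed_fence - seqno) >= 0) and fence_after() use the signed-difference idiom, which stays correct when the 32-bit seqno wraps around. A self-contained demonstration (illustrative values):

	/* sketch: wraparound-safe ordering on u32 sequence numbers */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	static inline bool fence_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		assert(fence_after(2, 1));		/* ordinary ordering */
		assert(!fence_after(1, 2));
		assert(fence_after(0, 0xffffffffu));	/* 0 follows the wrap */
		assert(!fence_after(0xffffffffu, 0));
		return 0;
	}
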
@@ -1084,7 +1134,7 @@
 static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
 	unsigned int *events)
 {
-	unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
+	unsigned long timeout = msecs_to_jiffies(10 * 10000);
 	unsigned i, acquired = 0;
 
 	for (i = 0; i < nr_events; i++) {
@@ -1101,7 +1151,7 @@
 		timeout = ret;
 	}
 
-	spin_lock_irqsave(&gpu->event_spinlock, flags);
+	spin_lock(&gpu->event_spinlock);
 
 	for (i = 0; i < nr_events; i++) {
 		int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
@@ -1111,7 +1161,7 @@
 		set_bit(event, gpu->event_bitmap);
 	}
 
-	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+	spin_unlock(&gpu->event_spinlock);
 
 	return 0;
 
@@ -1124,18 +1174,11 @@
 
 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&gpu->event_spinlock, flags);
-
 	if (!test_bit(event, gpu->event_bitmap)) {
 		dev_warn(gpu->dev, "event %u is already marked as free",
 			 event);
-		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 	} else {
 		clear_bit(event, gpu->event_bitmap);
-		spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-
 		complete(&gpu->event_free);
 	}
 }
@@ -1144,7 +1187,7 @@
  * Cmdstream submission/retirement:
  */
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-	u32 id, struct timespec *timeout)
+	u32 id, struct drm_etnaviv_timespec *timeout)
 {
 	struct dma_fence *fence;
 	int ret;
@@ -1191,7 +1234,8 @@
  * that lock in this function while waiting.
  */
 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
-	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
+	struct etnaviv_gem_object *etnaviv_obj,
+	struct drm_etnaviv_timespec *timeout)
 {
 	unsigned long remaining;
 	long ret;
@@ -1315,7 +1359,12 @@
 		goto out_unlock;
 	}
 
-	gpu->active_fence = gpu_fence->seqno;
+	if (!gpu->fe_running)
+		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
+
+	if (submit->prev_mmu_context)
+		etnaviv_iommu_context_put(submit->prev_mmu_context);
+	submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
 
 	if (submit->nr_pmrs) {
 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1326,8 +1375,8 @@
 
 	gpu->event[event[0]].fence = gpu_fence;
 	submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
-	etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
-			     &submit->cmdbuf);
+	etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
+			     event[0], &submit->cmdbuf);
 
 	if (submit->nr_pmrs) {
 		gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
@@ -1456,55 +1505,40 @@
 {
 	int ret;
 
-	if (gpu->clk_reg) {
-		ret = clk_prepare_enable(gpu->clk_reg);
-		if (ret)
-			return ret;
-	}
+	ret = clk_prepare_enable(gpu->clk_reg);
+	if (ret)
+		return ret;
 
-	if (gpu->clk_bus) {
-		ret = clk_prepare_enable(gpu->clk_bus);
-		if (ret)
-			goto disable_clk_reg;
-	}
+	ret = clk_prepare_enable(gpu->clk_bus);
+	if (ret)
+		goto disable_clk_reg;
 
-	if (gpu->clk_core) {
-		ret = clk_prepare_enable(gpu->clk_core);
-		if (ret)
-			goto disable_clk_bus;
-	}
+	ret = clk_prepare_enable(gpu->clk_core);
+	if (ret)
+		goto disable_clk_bus;
 
-	if (gpu->clk_shader) {
-		ret = clk_prepare_enable(gpu->clk_shader);
-		if (ret)
-			goto disable_clk_core;
-	}
+	ret = clk_prepare_enable(gpu->clk_shader);
+	if (ret)
+		goto disable_clk_core;
 
 	return 0;
 
 disable_clk_core:
-	if (gpu->clk_core)
-		clk_disable_unprepare(gpu->clk_core);
+	clk_disable_unprepare(gpu->clk_core);
 disable_clk_bus:
-	if (gpu->clk_bus)
-		clk_disable_unprepare(gpu->clk_bus);
+	clk_disable_unprepare(gpu->clk_bus);
 disable_clk_reg:
-	if (gpu->clk_reg)
-		clk_disable_unprepare(gpu->clk_reg);
+	clk_disable_unprepare(gpu->clk_reg);
 
 	return ret;
 }
 
 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
 {
-	if (gpu->clk_shader)
-		clk_disable_unprepare(gpu->clk_shader);
-	if (gpu->clk_core)
-		clk_disable_unprepare(gpu->clk_core);
-	if (gpu->clk_bus)
-		clk_disable_unprepare(gpu->clk_bus);
-	if (gpu->clk_reg)
-		clk_disable_unprepare(gpu->clk_reg);
+	clk_disable_unprepare(gpu->clk_shader);
+	clk_disable_unprepare(gpu->clk_core);
+	clk_disable_unprepare(gpu->clk_bus);
+	clk_disable_unprepare(gpu->clk_reg);
 
 	return 0;
 }
@@ -1532,7 +1566,7 @@
 
 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-	if (gpu->buffer.suballoc) {
+	if (gpu->initialized && gpu->fe_running) {
 		/* Replace the last WAIT with END */
 		mutex_lock(&gpu->lock);
 		etnaviv_buffer_end(gpu);
@@ -1544,7 +1578,11 @@
 		 * we fail, just warn and continue.
 		 */
 		etnaviv_gpu_wait_idle(gpu, 100);
+
+		gpu->fe_running = false;
 	}
+
+	gpu->exec_state = -1;
 
 	return etnaviv_gpu_clk_disable(gpu);
 }
@@ -1560,9 +1598,6 @@
 
 	etnaviv_gpu_update_clock(gpu);
 	etnaviv_gpu_hw_init(gpu);
-
-	gpu->lastctx = NULL;
-	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
 
@@ -1692,17 +1727,13 @@
 	etnaviv_gpu_hw_suspend(gpu);
 #endif
 
-	if (gpu->buffer.suballoc)
+	if (gpu->mmu_context)
+		etnaviv_iommu_context_put(gpu->mmu_context);
+
+	if (gpu->initialized) {
 		etnaviv_cmdbuf_free(&gpu->buffer);
-
-	if (gpu->cmdbuf_suballoc) {
-		etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
-		gpu->cmdbuf_suballoc = NULL;
-	}
-
-	if (gpu->mmu) {
-		etnaviv_iommu_destroy(gpu->mmu);
-		gpu->mmu = NULL;
+		etnaviv_iommu_global_fini(gpu);
+		gpu->initialized = false;
 	}
 
 	gpu->drm = NULL;
@@ -1730,7 +1761,6 @@
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
-	struct resource *res;
 	int err;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1742,8 +1772,7 @@
 	mutex_init(&gpu->fence_lock);
 
 	/* Map registers: */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	gpu->mmio = devm_ioremap_resource(&pdev->dev, res);
+	gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(gpu->mmio))
 		return PTR_ERR(gpu->mmio);
 
@@ -1762,26 +1791,26 @@
 	}
 
 	/* Get Clocks: */
-	gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
+	gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
 	DBG("clk_reg: %p", gpu->clk_reg);
 	if (IS_ERR(gpu->clk_reg))
-		gpu->clk_reg = NULL;
+		return PTR_ERR(gpu->clk_reg);
 
-	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
+	gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
 	DBG("clk_bus: %p", gpu->clk_bus);
 	if (IS_ERR(gpu->clk_bus))
-		gpu->clk_bus = NULL;
+		return PTR_ERR(gpu->clk_bus);
 
 	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
 	DBG("clk_core: %p", gpu->clk_core);
 	if (IS_ERR(gpu->clk_core))
-		gpu->clk_core = NULL;
+		return PTR_ERR(gpu->clk_core);
 	gpu->base_rate_core = clk_get_rate(gpu->clk_core);
 
-	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
+	gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
 	DBG("clk_shader: %p", gpu->clk_shader);
 	if (IS_ERR(gpu->clk_shader))
-		gpu->clk_shader = NULL;
+		return PTR_ERR(gpu->clk_shader);
 	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
 
 	/* TODO: figure out max mapped size */
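
Note: devm_clk_get_optional() returns NULL rather than an error when the clock is simply absent from the device tree, and the common clock framework treats a NULL struct clk as a no-op in clk_prepare_enable()/clk_disable_unprepare(). That is what allows both the probe-time NULL fallbacks here and the per-clock NULL checks in etnaviv_gpu_clk_enable()/_disable() to be dropped. A minimal sketch of the pattern (hypothetical driver and clock name):

	/* sketch: optional clock handling; "aux" is a hypothetical clock name */
	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *aux;
		int ret;

		aux = devm_clk_get_optional(&pdev->dev, "aux");
		if (IS_ERR(aux))		/* real errors only, e.g. -EPROBE_DEFER */
			return PTR_ERR(aux);

		ret = clk_prepare_enable(aux);	/* NULL clk: returns 0, does nothing */
		if (ret)
			return ret;

		clk_disable_unprepare(aux);	/* NULL clk: no-op */
		return 0;
	}
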
@@ -1818,15 +1847,19 @@
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	u32 idle, mask;
 
-	/* If we have outstanding fences, we're not idle */
-	if (gpu->completed_fence != gpu->active_fence)
+	/* If there are any jobs in the HW queue, we're not idle */
+	if (atomic_read(&gpu->sched.hw_rq_count))
 		return -EBUSY;
 
-	/* Check whether the hardware (except FE) is idle */
-	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
+	/* Check whether the hardware (except FE and MC) is idle */
+	mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
+				  VIVS_HI_IDLE_STATE_MC);
 	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
-	if (idle != mask)
+	if (idle != mask) {
+		dev_warn_ratelimited(dev, "GPU not yet idle, mask: 0x%08x\n",
+				     idle);
 		return -EBUSY;
+	}
 
 	return etnaviv_gpu_hw_suspend(gpu);
 }
@@ -1841,7 +1874,7 @@
 		return ret;
 
 	/* Re-initialise the basic hardware state */
-	if (gpu->drm && gpu->buffer.suballoc) {
+	if (gpu->drm && gpu->initialized) {
 		ret = etnaviv_gpu_hw_resume(gpu);
 		if (ret) {
 			etnaviv_gpu_clk_disable(gpu);