forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
....@@ -4,15 +4,16 @@
44 #ifndef _A6XX_GMU_H_
55 #define _A6XX_GMU_H_
66
7
+#include <linux/iopoll.h>
78 #include <linux/interrupt.h>
89 #include "msm_drv.h"
910 #include "a6xx_hfi.h"
1011
/*
 * A buffer object shared with the GMU: the GEM object backing it, its
 * kernel mapping, and the address the GMU sees it at.
 */
struct a6xx_gmu_bo {
	struct drm_gem_object *obj;	/* backing GEM object */
	void *virt;			/* CPU-side kernel mapping of the buffer */
	size_t size;			/* size in bytes */
	u64 iova;			/* IO virtual address — presumably in the GMU's
					 * own address space; confirm against the
					 * allocation path in a6xx_gmu.c */
};
1718
1819 /*
....@@ -25,9 +26,6 @@
2526
2627 /* the GMU is coming up for the first time or back from a power collapse */
2728 #define GMU_COLD_BOOT 1
28
-
29
-/* The GMU is being soft reset after a fault */
30
-#define GMU_RESET 2
3129
3230 /*
3331 * These define the level of control that the GMU has - the higher the number
....@@ -46,25 +44,31 @@
4644 struct a6xx_gmu {
4745 struct device *dev;
4846
47
+ struct msm_gem_address_space *aspace;
48
+
4949 void * __iomem mmio;
50
- void * __iomem pdc_mmio;
50
+ void * __iomem rscc;
5151
5252 int hfi_irq;
5353 int gmu_irq;
5454
55
- struct regulator *gx;
56
-
57
- struct iommu_domain *domain;
58
- u64 uncached_iova_base;
55
+ struct device *gxpd;
5956
6057 int idle_level;
6158
62
- struct a6xx_gmu_bo *hfi;
63
- struct a6xx_gmu_bo *debug;
59
+ struct a6xx_gmu_bo hfi;
60
+ struct a6xx_gmu_bo debug;
61
+ struct a6xx_gmu_bo icache;
62
+ struct a6xx_gmu_bo dcache;
63
+ struct a6xx_gmu_bo dummy;
64
+ struct a6xx_gmu_bo log;
6465
6566 int nr_clocks;
6667 struct clk_bulk_data *clocks;
6768 struct clk *core_clk;
69
+
70
+ /* current performance index set externally */
71
+ int current_perf_index;
6872
6973 int nr_gpu_freqs;
7074 unsigned long gpu_freqs[16];
....@@ -74,9 +78,13 @@
7478 unsigned long gmu_freqs[4];
7579 u32 cx_arc_votes[4];
7680
81
+ unsigned long freq;
82
+
7783 struct a6xx_hfi_queue queues[2];
7884
79
- struct tasklet_struct hfi_tasklet;
85
+ bool initialized;
86
+ bool hung;
87
+ bool legacy; /* a618 or a630 */
8088 };
8189
8290 static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
....@@ -89,9 +97,11 @@
8997 return msm_writel(value, gmu->mmio + (offset << 2));
9098 }
9199
92
-static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
100
+static inline void
101
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
93102 {
94
- return msm_writel(value, gmu->pdc_mmio + (offset << 2));
103
+ memcpy_toio(gmu->mmio + (offset << 2), data, size);
104
+ wmb();
95105 }
96106
97107 static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
....@@ -103,8 +113,32 @@
103113 gmu_write(gmu, reg, val | or);
104114 }
105115
116
+static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
117
+{
118
+ u64 val;
119
+
120
+ val = (u64) msm_readl(gmu->mmio + (lo << 2));
121
+ val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
122
+
123
+ return val;
124
+}
125
+
/*
 * Poll GMU register @addr (a register index) until @cond holds for
 * @val, sleeping @interval us between reads, for at most @timeout us.
 */
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
129
+
130
+static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
131
+{
132
+ return msm_readl(gmu->rscc + (offset << 2));
133
+}
134
+
135
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
136
+{
137
+ return msm_writel(value, gmu->rscc + (offset << 2));
138
+}
139
+
/* Like gmu_poll_timeout(), but polls a register in the RSCC region. */
#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
		interval, timeout)
109143
110144 /*
....@@ -122,6 +156,7 @@
122156 GMU_OOB_BOOT_SLUMBER = 0,
123157 GMU_OOB_GPU_SET,
124158 GMU_OOB_DCVS_SET,
159
+ GMU_OOB_PERFCOUNTER_SET,
125160 };
126161
127162 /* These are the interrupt / ack bits for each OOB request that are set
....@@ -152,11 +187,25 @@
152187 #define GMU_OOB_GPU_SET_ACK 24
153188 #define GMU_OOB_GPU_SET_CLEAR 24
154189
190
+#define GMU_OOB_GPU_SET_REQUEST_NEW 30
191
+#define GMU_OOB_GPU_SET_ACK_NEW 31
192
+#define GMU_OOB_GPU_SET_CLEAR_NEW 31
193
+
194
+#define GMU_OOB_PERFCOUNTER_REQUEST 17
195
+#define GMU_OOB_PERFCOUNTER_ACK 25
196
+#define GMU_OOB_PERFCOUNTER_CLEAR 25
197
+
198
+#define GMU_OOB_PERFCOUNTER_REQUEST_NEW 28
199
+#define GMU_OOB_PERFCOUNTER_ACK_NEW 30
200
+#define GMU_OOB_PERFCOUNTER_CLEAR_NEW 30
155201
156202 void a6xx_hfi_init(struct a6xx_gmu *gmu);
157203 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
158204 void a6xx_hfi_stop(struct a6xx_gmu *gmu);
205
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
206
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
159207
160
-void a6xx_hfi_task(unsigned long data);
208
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
209
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
161210
162211 #endif