forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
....@@ -20,17 +20,23 @@
2020 * OTHER DEALINGS IN THE SOFTWARE.
2121 *
2222 */
23
+
24
+#include <linux/delay.h>
2325 #include <linux/kernel.h>
2426 #include <linux/firmware.h>
25
-#include <drm/drmP.h>
27
+#include <linux/module.h>
28
+#include <linux/pci.h>
29
+
2630 #include "amdgpu.h"
2731 #include "amdgpu_gfx.h"
2832 #include "soc15.h"
2933 #include "soc15d.h"
3034 #include "amdgpu_atomfirmware.h"
35
+#include "amdgpu_pm.h"
3136
3237 #include "gc/gc_9_0_offset.h"
3338 #include "gc/gc_9_0_sh_mask.h"
39
+
3440 #include "vega10_enum.h"
3541 #include "hdp/hdp_4_0_offset.h"
3642
....@@ -40,17 +46,21 @@
4046
4147 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
4248
49
+#include "amdgpu_ras.h"
50
+
51
+#include "gfx_v9_4.h"
52
+#include "gfx_v9_0.h"
53
+
54
+#include "asic_reg/pwr/pwr_10_0_offset.h"
55
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
56
+
#define GFX9_NUM_GFX_RINGS     1
/* Per-queue MEC HPD allocation; was 2048, doubled to 4096 in this revision. */
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

/* GCEA probe map register offset; programmed by golden_settings_gc_9_1_rn. */
#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0
5464
5565 MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
5666 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
....@@ -80,6 +90,425 @@
8090 MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
8191 MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
8292
93
+MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
94
+MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
95
+MODULE_FIRMWARE("amdgpu/picasso_me.bin");
96
+MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
97
+MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
98
+MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
99
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
100
+
101
+MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
102
+MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
103
+MODULE_FIRMWARE("amdgpu/raven2_me.bin");
104
+MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
105
+MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
106
+MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
107
+MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
108
+
109
+MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
110
+MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
111
+MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
112
+
113
+MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
114
+MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
115
+MODULE_FIRMWARE("amdgpu/renoir_me.bin");
116
+MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
117
+MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
118
+MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
119
+
120
+MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
121
+MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
122
+MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
123
+MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
124
+MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
125
+MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
126
+
127
/* Arcturus-specific TCP channel steering register offsets. */
#define mmTCP_CHAN_STEER_0_ARCT								0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX							0
#define mmTCP_CHAN_STEER_1_ARCT								0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX							0
#define mmTCP_CHAN_STEER_2_ARCT								0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX							0
#define mmTCP_CHAN_STEER_3_ARCT								0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX							0
#define mmTCP_CHAN_STEER_4_ARCT								0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX							0
#define mmTCP_CHAN_STEER_5_ARCT								0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX							0

/* Renoir golden TSC counter register offsets. */
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir                0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX       1
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1
144
+
145
/*
 * GFX RAS sub-block indices as understood by the TA (trusted application)
 * firmware.  Each IP sub-range is bracketed by *_INDEX_START/*_INDEX_END
 * aliases; the relative order and values form the TA ABI, so entries must
 * not be reordered or removed.
 */
enum ta_ras_gfx_subblock {
	/* CPC */
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF */
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG */
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS */
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI */
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ */
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP */
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges) */
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0 */
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1 */
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2 */
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA */
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA */
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges) */
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0 */
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1 */
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2 */
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3 */
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4 */
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI */
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP */
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD */
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges) */
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0 */
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1 */
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2 */
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank */
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker */
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache */
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};
346
+
347
/*
 * One entry of the GFX RAS sub-block table: maps a printable name and
 * the TA-side enum index to bitmasks describing which error types the
 * hardware and software support for that sub-block (packing defined by
 * AMDGPU_RAS_SUB_BLOCK).
 *
 * Fix: @name was "unsigned char *" but is always initialized from a
 * stringified identifier (a string literal, i.e. const char[]); use
 * const char * to avoid the pointer-sign mismatch and accidental writes.
 */
struct ras_gfx_subblock {
	const char *name;            /* stringified sub-block identifier */
	int ta_subblock;             /* ta_ras_gfx_subblock index */
	int hw_supported_error_type; /* bits 0..3 from macro args a..d */
	int sw_supported_error_type; /* bits packed from macro args e..h */
};
353
+
354
/*
 * Build one ras_gfx_subblocks[] entry, indexed by the driver-side
 * AMDGPU_RAS_BLOCK__<subblock> constant and carrying the TA-side
 * TA_RAS_BLOCK__<subblock> value plus two capability bitmasks:
 *   hw_supported_error_type = a | b<<1 | c<<2 | d<<3
 *   sw_supported_error_type = e<<1 | f<<3 | g | h<<2
 * (note the deliberately non-monotonic bit positions in the sw mask,
 * preserved exactly from the original packing).
 */
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)	\
	[AMDGPU_RAS_BLOCK__##subblock] = {			\
		#subblock,					\
		TA_RAS_BLOCK__##subblock,			\
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),	\
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),	\
	}
361
+
362
+static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
363
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
364
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
365
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
366
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
367
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
368
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
369
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
370
+ AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
371
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
372
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
373
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
374
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
375
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
376
+ AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
377
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
378
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
379
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
380
+ 0),
381
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
382
+ 0),
383
+ AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
384
+ AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
385
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
386
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
387
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
388
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
389
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
390
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
391
+ 0, 0),
392
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
393
+ 0),
394
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
395
+ 0, 0),
396
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
397
+ 0),
398
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
399
+ 0, 0),
400
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
401
+ 0),
402
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
403
+ 1),
404
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
405
+ 0, 0, 0),
406
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
407
+ 0),
408
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
409
+ 0),
410
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
411
+ 0),
412
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
413
+ 0),
414
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
415
+ 0),
416
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
417
+ 0, 0),
418
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
419
+ 0),
420
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
421
+ 0),
422
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
423
+ 0, 0, 0),
424
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
425
+ 0),
426
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
427
+ 0),
428
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
429
+ 0),
430
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
431
+ 0),
432
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
433
+ 0),
434
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
435
+ 0, 0),
436
+ AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
437
+ 0),
438
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
439
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
440
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
441
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
442
+ AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
443
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
444
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
445
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
446
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
447
+ 1),
448
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
449
+ 1),
450
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
451
+ 1),
452
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
453
+ 0),
454
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
455
+ 0),
456
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
457
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
458
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
459
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
460
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
461
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
462
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
463
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
464
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
465
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
466
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
467
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
468
+ 0),
469
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
470
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
471
+ 0),
472
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
473
+ 0, 0),
474
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
475
+ 0),
476
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
477
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
478
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
479
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
480
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
481
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
482
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
483
+ AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
484
+ AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
485
+ AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
486
+ AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
487
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
488
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
489
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
490
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
491
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
492
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
493
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
494
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
495
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
496
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
497
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
498
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
499
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
500
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
501
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
502
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
503
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
504
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
505
+ AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
506
+ AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
507
+ AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
508
+ AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
509
+ AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
510
+};
511
+
83512 static const struct soc15_reg_golden golden_settings_gc_9_0[] =
84513 {
85514 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
....@@ -91,14 +520,17 @@
91520 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
92521 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
93522 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
94
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
95
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
523
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
524
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
96525 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
97526 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
98527 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
99528 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
100529 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
101
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
530
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
531
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
532
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
533
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
102534 };
103535
104536 static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
....@@ -160,7 +592,10 @@
160592 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
161593 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
162594 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
163
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
595
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
596
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
597
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
598
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
164599 };
165600
166601 static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
....@@ -174,8 +609,48 @@
174609 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
175610 };
176611
612
+static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
613
+{
614
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
615
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
616
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
617
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
618
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
619
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
620
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
621
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
622
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
623
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
624
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
625
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
626
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
627
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
628
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
629
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
630
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
631
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
632
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
633
+};
634
+
635
+static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
636
+{
637
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
638
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
639
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
640
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
641
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
642
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
643
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
644
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
645
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
646
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
647
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
648
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
649
+};
650
+
177651 static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
178652 {
653
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
179654 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
180655 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
181656 };
....@@ -211,7 +686,30 @@
211686 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
212687 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
213688 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
214
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
689
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
690
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
691
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
692
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
693
+};
694
+
695
+static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
696
+{
697
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
698
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
699
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
700
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
701
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
702
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
703
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
704
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
705
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
706
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
707
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
708
+};
709
+
710
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
711
+ {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
712
+ {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
215713 };
216714
217715 static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
....@@ -238,9 +736,63 @@
238736 mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
239737 };
240738
739
+static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
740
+{
741
+ static void *scratch_reg0;
742
+ static void *scratch_reg1;
743
+ static void *scratch_reg2;
744
+ static void *scratch_reg3;
745
+ static void *spare_int;
746
+ static uint32_t grbm_cntl;
747
+ static uint32_t grbm_idx;
748
+
749
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
750
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
751
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
752
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
753
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
754
+
755
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
756
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
757
+
758
+ if (amdgpu_sriov_runtime(adev)) {
759
+ pr_err("shouldn't call rlcg write register during runtime\n");
760
+ return;
761
+ }
762
+
763
+ if (offset == grbm_cntl || offset == grbm_idx) {
764
+ if (offset == grbm_cntl)
765
+ writel(v, scratch_reg2);
766
+ else if (offset == grbm_idx)
767
+ writel(v, scratch_reg3);
768
+
769
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
770
+ } else {
771
+ uint32_t i = 0;
772
+ uint32_t retries = 50000;
773
+
774
+ writel(v, scratch_reg0);
775
+ writel(offset | 0x80000000, scratch_reg1);
776
+ writel(1, spare_int);
777
+ for (i = 0; i < retries; i++) {
778
+ u32 tmp;
779
+
780
+ tmp = readl(scratch_reg1);
781
+ if (!(tmp & 0x80000000))
782
+ break;
783
+
784
+ udelay(10);
785
+ }
786
+ if (i >= retries)
787
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
788
+ }
789
+
790
+}
791
+
/* Per-ASIC golden GB_ADDR_CONFIG values (consumers not visible in this chunk). */
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
244796
245797 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
246798 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
....@@ -249,19 +801,152 @@
249801 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
250802 struct amdgpu_cu_info *cu_info);
251803 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
252
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
253804 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
805
+static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
806
+static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
807
+ void *ras_error_status);
808
+static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
809
+ void *inject_if);
810
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
811
+
812
/*
 * Emit a PM4 SET_RESOURCES packet (8 dwords) on the KIQ ring, telling the
 * CP which compute queues the kernel owns via @queue_mask.  GWS, OAC and
 * GDS resources are all left at zero here.
 */
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				       uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask:0 queue_type:0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
829
+
830
/*
 * Emit a PM4 MAP_QUEUES packet (7 dwords) on the KIQ ring, asking the CP to
 * map @ring's compute queue.  The MQD address and the write-pointer polling
 * address are taken from the ring's MQD BO and the device writeback area.
 */
static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				    struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	/* wptr polling slot lives in the writeback page, 4 bytes per offset */
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	/* engine select: 4 for a GFX ring, 0 for compute */
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  /* ring->me is 1-based; the packet field is 0-based */
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /*queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
860
+
861
/*
 * Emit a PM4 UNMAP_QUEUES packet (6 dwords) on the KIQ ring for @ring.
 * For PREEMPT_QUEUES_NO_UNMAP the trailing dwords carry the fence
 * address/sequence the CP signals on completion; otherwise they are zero.
 */
static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring,
				      enum amdgpu_unmap_queues_action action,
				      u64 gpu_addr, u64 seq)
{
	/* engine select: 4 for a GFX ring, 0 for compute */
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}
887
+
888
/*
 * Emit a PM4 QUERY_STATUS packet (7 dwords) on the KIQ ring, asking the CP
 * to write the sequence value @seq to @addr when @ring's queue (selected by
 * doorbell offset) has been processed.
 */
static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring,
				      u64 addr,
				      u64 seq)
{
	/* engine select: 4 for a GFX ring, 0 for compute */
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
909
+
910
/*
 * Emit a PM4 INVALIDATE_TLBS packet (2 dwords) on the KIQ ring for the
 * given @pasid, with the requested @flush_type, optionally across all
 * VM hubs (@all_hub).
 */
static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
921
+
922
/*
 * KIQ PM4 packet emitters for GFX v9.  The *_size fields are the number of
 * ring dwords each emitter writes (packet header included) and must match
 * the amdgpu_ring_write() counts in the functions above.
 */
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};
934
+
935
/* Install the GFX v9 KIQ PM4 function table on the device's KIQ. */
static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}
254939
255940 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
256941 {
257942 switch (adev->asic_type) {
258943 case CHIP_VEGA10:
259944 soc15_program_register_sequence(adev,
260
- golden_settings_gc_9_0,
261
- ARRAY_SIZE(golden_settings_gc_9_0));
945
+ golden_settings_gc_9_0,
946
+ ARRAY_SIZE(golden_settings_gc_9_0));
262947 soc15_program_register_sequence(adev,
263
- golden_settings_gc_9_0_vg10,
264
- ARRAY_SIZE(golden_settings_gc_9_0_vg10));
948
+ golden_settings_gc_9_0_vg10,
949
+ ARRAY_SIZE(golden_settings_gc_9_0_vg10));
265950 break;
266951 case CHIP_VEGA12:
267952 soc15_program_register_sequence(adev,
....@@ -279,20 +964,35 @@
279964 golden_settings_gc_9_0_vg20,
280965 ARRAY_SIZE(golden_settings_gc_9_0_vg20));
281966 break;
282
- case CHIP_RAVEN:
967
+ case CHIP_ARCTURUS:
283968 soc15_program_register_sequence(adev,
284
- golden_settings_gc_9_1,
285
- ARRAY_SIZE(golden_settings_gc_9_1));
286
- soc15_program_register_sequence(adev,
287
- golden_settings_gc_9_1_rv1,
288
- ARRAY_SIZE(golden_settings_gc_9_1_rv1));
969
+ golden_settings_gc_9_4_1_arct,
970
+ ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
289971 break;
972
+ case CHIP_RAVEN:
973
+ soc15_program_register_sequence(adev, golden_settings_gc_9_1,
974
+ ARRAY_SIZE(golden_settings_gc_9_1));
975
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
976
+ soc15_program_register_sequence(adev,
977
+ golden_settings_gc_9_1_rv2,
978
+ ARRAY_SIZE(golden_settings_gc_9_1_rv2));
979
+ else
980
+ soc15_program_register_sequence(adev,
981
+ golden_settings_gc_9_1_rv1,
982
+ ARRAY_SIZE(golden_settings_gc_9_1_rv1));
983
+ break;
984
+ case CHIP_RENOIR:
985
+ soc15_program_register_sequence(adev,
986
+ golden_settings_gc_9_1_rn,
987
+ ARRAY_SIZE(golden_settings_gc_9_1_rn));
988
+ return; /* for renoir, don't need common goldensetting */
290989 default:
291990 break;
292991 }
293992
294
- soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
295
- (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
993
+ if (adev->asic_type != CHIP_ARCTURUS)
994
+ soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
995
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
296996 }
297997
298998 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
....@@ -345,18 +1045,14 @@
3451045 int r;
3461046
3471047 r = amdgpu_gfx_scratch_get(adev, &scratch);
348
- if (r) {
349
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
1048
+ if (r)
3501049 return r;
351
- }
1050
+
3521051 WREG32(scratch, 0xCAFEDEAD);
3531052 r = amdgpu_ring_alloc(ring, 3);
354
- if (r) {
355
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
356
- ring->idx, r);
357
- amdgpu_gfx_scratch_free(adev, scratch);
358
- return r;
359
- }
1053
+ if (r)
1054
+ goto error_free_scratch;
1055
+
3601056 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3611057 amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
3621058 amdgpu_ring_write(ring, 0xDEADBEEF);
....@@ -366,16 +1062,13 @@
3661062 tmp = RREG32(scratch);
3671063 if (tmp == 0xDEADBEEF)
3681064 break;
369
- DRM_UDELAY(1);
1065
+ udelay(1);
3701066 }
371
- if (i < adev->usec_timeout) {
372
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
373
- ring->idx, i);
374
- } else {
375
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
376
- ring->idx, scratch, tmp);
377
- r = -EINVAL;
378
- }
1067
+
1068
+ if (i >= adev->usec_timeout)
1069
+ r = -ETIMEDOUT;
1070
+
1071
+error_free_scratch:
3791072 amdgpu_gfx_scratch_free(adev, scratch);
3801073 return r;
3811074 }
....@@ -392,19 +1085,17 @@
3921085 long r;
3931086
3941087 r = amdgpu_device_wb_get(adev, &index);
395
- if (r) {
396
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
1088
+ if (r)
3971089 return r;
398
- }
3991090
4001091 gpu_addr = adev->wb.gpu_addr + (index * 4);
4011092 adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
4021093 memset(&ib, 0, sizeof(ib));
403
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
404
- if (r) {
405
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
1094
+ r = amdgpu_ib_get(adev, NULL, 16,
1095
+ AMDGPU_IB_POOL_DIRECT, &ib);
1096
+ if (r)
4061097 goto err1;
407
- }
1098
+
4081099 ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
4091100 ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
4101101 ib.ptr[2] = lower_32_bits(gpu_addr);
....@@ -418,22 +1109,17 @@
4181109
4191110 r = dma_fence_wait_timeout(f, false, timeout);
4201111 if (r == 0) {
421
- DRM_ERROR("amdgpu: IB test timed out.\n");
422
- r = -ETIMEDOUT;
423
- goto err2;
1112
+ r = -ETIMEDOUT;
1113
+ goto err2;
4241114 } else if (r < 0) {
425
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
426
- goto err2;
1115
+ goto err2;
4271116 }
4281117
4291118 tmp = adev->wb.wb[index];
430
- if (tmp == 0xDEADBEEF) {
431
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
432
- r = 0;
433
- } else {
434
- DRM_ERROR("ib test on ring %d failed\n", ring->idx);
435
- r = -EINVAL;
436
- }
1119
+ if (tmp == 0xDEADBEEF)
1120
+ r = 0;
1121
+ else
1122
+ r = -EINVAL;
4371123
4381124 err2:
4391125 amdgpu_ib_free(adev, &ib, NULL);
....@@ -483,38 +1169,168 @@
4831169 le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
4841170 }
4851171
486
-static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1172
/*
 * Set adev->gfx.me_fw_write_wait / mec_fw_write_wait when the loaded CP
 * firmware meets the per-ASIC minimum version AND feature-version pairs
 * below.  NOTE(review): the flag names suggest the firmware's combined
 * write-then-wait register packet support — confirm against CP firmware
 * release notes.  A one-time warning is printed (except on Arcturus) when
 * the firmware is older than the recommended baseline.
 */
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
	adev->gfx.me_fw_write_wait = false;
	adev->gfx.mec_fw_write_wait = false;

	if ((adev->asic_type != CHIP_ARCTURUS) &&
	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
	     (adev->gfx.mec_feature_version < 46) ||
	     (adev->gfx.pfp_fw_version < 0x000000b7) ||
	     (adev->gfx.pfp_feature_version < 46)))
		DRM_WARN_ONCE("CP firmware version too old, please update!");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000193) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA12:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000196) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_VEGA20:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 44) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
		    (adev->gfx.pfp_feature_version >= 44))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000197) &&
		    (adev->gfx.mec_feature_version >= 44))
			adev->gfx.mec_fw_write_wait = true;
		break;
	case CHIP_RAVEN:
		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
		    (adev->gfx.me_feature_version >= 42) &&
		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
		    (adev->gfx.pfp_feature_version >= 42))
			adev->gfx.me_fw_write_wait = true;

		if ((adev->gfx.mec_fw_version >= 0x00000192) &&
		    (adev->gfx.mec_feature_version >= 42))
			adev->gfx.mec_fw_write_wait = true;
		break;
	default:
		/* newer ASICs are assumed to ship capable firmware */
		adev->gfx.me_fw_write_wait = true;
		adev->gfx.mec_fw_write_wait = true;
		break;
	}
}
1235
+
1236
/*
 * PCI identity of a board on which GFXOFF must be disabled; matched
 * field-for-field against the device in gfx_v9_0_should_disable_gfxoff().
 */
struct amdgpu_gfxoff_quirk {
	u16 chip_vendor;	/* PCI vendor ID */
	u16 chip_device;	/* PCI device ID */
	u16 subsys_vendor;	/* PCI subsystem vendor ID */
	u16 subsys_device;	/* PCI subsystem device ID */
	u8 revision;		/* PCI revision ID */
};
1243
+
1244
/* Boards with broken GFXOFF; terminated by an all-zero sentinel entry. */
static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
	{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
	/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
	/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
	{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
	{ 0, 0, 0, 0, 0 },
};
1255
+
1256
+static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1257
+{
1258
+ const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1259
+
1260
+ while (p && p->chip_device != 0) {
1261
+ if (pdev->vendor == p->chip_vendor &&
1262
+ pdev->device == p->chip_device &&
1263
+ pdev->subsystem_vendor == p->subsys_vendor &&
1264
+ pdev->subsystem_device == p->subsys_device &&
1265
+ pdev->revision == p->revision) {
1266
+ return true;
1267
+ }
1268
+ ++p;
1269
+ }
1270
+ return false;
1271
+}
1272
+
1273
+static bool is_raven_kicker(struct amdgpu_device *adev)
1274
+{
1275
+ if (adev->pm.fw_version >= 0x41e2b)
1276
+ return true;
1277
+ else
1278
+ return false;
1279
+}
1280
+
1281
+static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
1282
+{
1283
+ if ((adev->asic_type == CHIP_RENOIR) &&
1284
+ (adev->gfx.me_fw_version >= 0x000000a5) &&
1285
+ (adev->gfx.me_feature_version >= 52))
1286
+ return true;
1287
+ else
1288
+ return false;
1289
+}
1290
+
1291
/*
 * Decide whether GFXOFF stays enabled for this device and, on APUs, which
 * extra powergating flags it implies.  Clears PP_GFXOFF_MASK for quirked
 * boards and for Raven parts whose RLC firmware is too old.
 */
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		break;
	case CHIP_RAVEN:
		/*
		 * Original (non-Raven2/Picasso) Raven needs a new enough RLC:
		 * either a kicker SMC with rlc >= 531, plus a v2.1 RLC with
		 * feature version >= 1 — otherwise GFXOFF is disabled.
		 */
		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
		    ((!is_raven_kicker(adev) &&
		      adev->gfx.rlc_fw_version < 531) ||
		     (adev->gfx.rlc_feature_version < 1) ||
		     !adev->gfx.rlc.is_rlc_v2_1))
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

		/* GFXOFF on an APU implies GFX/CP powergating support */
		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	case CHIP_RENOIR:
		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		break;
	}
}
1325
+
1326
+static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1327
+ const char *chip_name)
1328
+{
4891329 char fw_name[30];
4901330 int err;
4911331 struct amdgpu_firmware_info *info = NULL;
4921332 const struct common_firmware_header *header = NULL;
4931333 const struct gfx_firmware_header_v1_0 *cp_hdr;
494
- const struct rlc_firmware_header_v2_0 *rlc_hdr;
495
- unsigned int *tmp = NULL;
496
- unsigned int i = 0;
497
- uint16_t version_major;
498
- uint16_t version_minor;
499
-
500
- DRM_DEBUG("\n");
501
-
502
- switch (adev->asic_type) {
503
- case CHIP_VEGA10:
504
- chip_name = "vega10";
505
- break;
506
- case CHIP_VEGA12:
507
- chip_name = "vega12";
508
- break;
509
- case CHIP_VEGA20:
510
- chip_name = "vega20";
511
- break;
512
- case CHIP_RAVEN:
513
- chip_name = "raven";
514
- break;
515
- default:
516
- BUG();
517
- }
5181334
5191335 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
5201336 err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
....@@ -549,7 +1365,78 @@
5491365 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
5501366 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
5511367
552
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1368
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1369
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1370
+ info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1371
+ info->fw = adev->gfx.pfp_fw;
1372
+ header = (const struct common_firmware_header *)info->fw->data;
1373
+ adev->firmware.fw_size +=
1374
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1375
+
1376
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1377
+ info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1378
+ info->fw = adev->gfx.me_fw;
1379
+ header = (const struct common_firmware_header *)info->fw->data;
1380
+ adev->firmware.fw_size +=
1381
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1382
+
1383
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1384
+ info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1385
+ info->fw = adev->gfx.ce_fw;
1386
+ header = (const struct common_firmware_header *)info->fw->data;
1387
+ adev->firmware.fw_size +=
1388
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1389
+ }
1390
+
1391
+out:
1392
+ if (err) {
1393
+ dev_err(adev->dev,
1394
+ "gfx9: Failed to load firmware \"%s\"\n",
1395
+ fw_name);
1396
+ release_firmware(adev->gfx.pfp_fw);
1397
+ adev->gfx.pfp_fw = NULL;
1398
+ release_firmware(adev->gfx.me_fw);
1399
+ adev->gfx.me_fw = NULL;
1400
+ release_firmware(adev->gfx.ce_fw);
1401
+ adev->gfx.ce_fw = NULL;
1402
+ }
1403
+ return err;
1404
+}
1405
+
1406
+static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1407
+ const char *chip_name)
1408
+{
1409
+ char fw_name[30];
1410
+ int err;
1411
+ struct amdgpu_firmware_info *info = NULL;
1412
+ const struct common_firmware_header *header = NULL;
1413
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
1414
+ unsigned int *tmp = NULL;
1415
+ unsigned int i = 0;
1416
+ uint16_t version_major;
1417
+ uint16_t version_minor;
1418
+ uint32_t smu_version;
1419
+
1420
+ /*
1421
+ * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
1422
+ * instead of picasso_rlc.bin.
1423
+ * Judgment method:
1424
+ * PCO AM4: revision >= 0xC8 && revision <= 0xCF
1425
+ * or revision >= 0xD8 && revision <= 0xDF
1426
+ * otherwise is PCO FP5
1427
+ */
1428
+ if (!strcmp(chip_name, "picasso") &&
1429
+ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1430
+ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1431
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1432
+ else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1433
+ (smu_version >= 0x41e2b))
1434
+ /**
1435
+ *SMC is loaded by SBIOS on APU and it's able to get the SMU version directly.
1436
+ */
1437
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1438
+ else
1439
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
5531440 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
5541441 if (err)
5551442 goto out;
....@@ -591,18 +1478,70 @@
5911478
5921479 tmp = (unsigned int *)((uintptr_t)rlc_hdr +
5931480 le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
594
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
1481
+ for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
5951482 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
5961483
5971484 adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
5981485
5991486 tmp = (unsigned int *)((uintptr_t)rlc_hdr +
6001487 le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
601
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
1488
+ for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
6021489 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
6031490
6041491 if (adev->gfx.rlc.is_rlc_v2_1)
6051492 gfx_v9_0_init_rlc_ext_microcode(adev);
1493
+
1494
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1495
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1496
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1497
+ info->fw = adev->gfx.rlc_fw;
1498
+ header = (const struct common_firmware_header *)info->fw->data;
1499
+ adev->firmware.fw_size +=
1500
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1501
+
1502
+ if (adev->gfx.rlc.is_rlc_v2_1 &&
1503
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1504
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1505
+ adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1506
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1507
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1508
+ info->fw = adev->gfx.rlc_fw;
1509
+ adev->firmware.fw_size +=
1510
+ ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1511
+
1512
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1513
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1514
+ info->fw = adev->gfx.rlc_fw;
1515
+ adev->firmware.fw_size +=
1516
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1517
+
1518
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1519
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1520
+ info->fw = adev->gfx.rlc_fw;
1521
+ adev->firmware.fw_size +=
1522
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1523
+ }
1524
+ }
1525
+
1526
+out:
1527
+ if (err) {
1528
+ dev_err(adev->dev,
1529
+ "gfx9: Failed to load firmware \"%s\"\n",
1530
+ fw_name);
1531
+ release_firmware(adev->gfx.rlc_fw);
1532
+ adev->gfx.rlc_fw = NULL;
1533
+ }
1534
+ return err;
1535
+}
1536
+
1537
+static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1538
+ const char *chip_name)
1539
+{
1540
+ char fw_name[30];
1541
+ int err;
1542
+ struct amdgpu_firmware_info *info = NULL;
1543
+ const struct common_firmware_header *header = NULL;
1544
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
6061545
6071546 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
6081547 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
....@@ -634,57 +1573,6 @@
6341573 }
6351574
6361575 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
637
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
638
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
639
- info->fw = adev->gfx.pfp_fw;
640
- header = (const struct common_firmware_header *)info->fw->data;
641
- adev->firmware.fw_size +=
642
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
643
-
644
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
645
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
646
- info->fw = adev->gfx.me_fw;
647
- header = (const struct common_firmware_header *)info->fw->data;
648
- adev->firmware.fw_size +=
649
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
650
-
651
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
652
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
653
- info->fw = adev->gfx.ce_fw;
654
- header = (const struct common_firmware_header *)info->fw->data;
655
- adev->firmware.fw_size +=
656
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
657
-
658
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
659
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
660
- info->fw = adev->gfx.rlc_fw;
661
- header = (const struct common_firmware_header *)info->fw->data;
662
- adev->firmware.fw_size +=
663
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
664
-
665
- if (adev->gfx.rlc.is_rlc_v2_1 &&
666
- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
667
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
668
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
669
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
670
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
671
- info->fw = adev->gfx.rlc_fw;
672
- adev->firmware.fw_size +=
673
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
674
-
675
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
676
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
677
- info->fw = adev->gfx.rlc_fw;
678
- adev->firmware.fw_size +=
679
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
680
-
681
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
682
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
683
- info->fw = adev->gfx.rlc_fw;
684
- adev->firmware.fw_size +=
685
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
686
- }
687
-
6881576 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
6891577 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
6901578 info->fw = adev->gfx.mec_fw;
....@@ -707,34 +1595,90 @@
7071595 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
7081596 adev->firmware.fw_size +=
7091597 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
710
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
711
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
712
- info->fw = adev->gfx.mec2_fw;
713
- adev->firmware.fw_size +=
714
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
715
- }
7161598
1599
+ /* TODO: Determine if MEC2 JT FW loading can be removed
1600
+ for all GFX V9 asic and above */
1601
+ if (adev->asic_type != CHIP_ARCTURUS &&
1602
+ adev->asic_type != CHIP_RENOIR) {
1603
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1604
+ info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1605
+ info->fw = adev->gfx.mec2_fw;
1606
+ adev->firmware.fw_size +=
1607
+ ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1608
+ PAGE_SIZE);
1609
+ }
1610
+ }
7171611 }
7181612
7191613 out:
1614
+ gfx_v9_0_check_if_need_gfxoff(adev);
1615
+ gfx_v9_0_check_fw_write_wait(adev);
7201616 if (err) {
7211617 dev_err(adev->dev,
7221618 "gfx9: Failed to load firmware \"%s\"\n",
7231619 fw_name);
724
- release_firmware(adev->gfx.pfp_fw);
725
- adev->gfx.pfp_fw = NULL;
726
- release_firmware(adev->gfx.me_fw);
727
- adev->gfx.me_fw = NULL;
728
- release_firmware(adev->gfx.ce_fw);
729
- adev->gfx.ce_fw = NULL;
730
- release_firmware(adev->gfx.rlc_fw);
731
- adev->gfx.rlc_fw = NULL;
7321620 release_firmware(adev->gfx.mec_fw);
7331621 adev->gfx.mec_fw = NULL;
7341622 release_firmware(adev->gfx.mec2_fw);
7351623 adev->gfx.mec2_fw = NULL;
7361624 }
7371625 return err;
1626
+}
1627
+
1628
+static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1629
+{
1630
+ const char *chip_name;
1631
+ int r;
1632
+
1633
+ DRM_DEBUG("\n");
1634
+
1635
+ switch (adev->asic_type) {
1636
+ case CHIP_VEGA10:
1637
+ chip_name = "vega10";
1638
+ break;
1639
+ case CHIP_VEGA12:
1640
+ chip_name = "vega12";
1641
+ break;
1642
+ case CHIP_VEGA20:
1643
+ chip_name = "vega20";
1644
+ break;
1645
+ case CHIP_RAVEN:
1646
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1647
+ chip_name = "raven2";
1648
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1649
+ chip_name = "picasso";
1650
+ else
1651
+ chip_name = "raven";
1652
+ break;
1653
+ case CHIP_ARCTURUS:
1654
+ chip_name = "arcturus";
1655
+ break;
1656
+ case CHIP_RENOIR:
1657
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
1658
+ chip_name = "renoir";
1659
+ else
1660
+ chip_name = "green_sardine";
1661
+ break;
1662
+ default:
1663
+ BUG();
1664
+ }
1665
+
1666
+ /* No CPG in Arcturus */
1667
+ if (adev->asic_type != CHIP_ARCTURUS) {
1668
+ r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1669
+ if (r)
1670
+ return r;
1671
+ }
1672
+
1673
+ r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1674
+ if (r)
1675
+ return r;
1676
+
1677
+ r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1678
+ if (r)
1679
+ return r;
1680
+
1681
+ return r;
7381682 }
7391683
7401684 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
....@@ -806,6 +1750,50 @@
8061750 buffer[count++] = cpu_to_le32(0);
8071751 }
8081752
1753
/*
 * Program the per-SE/SH always-on CU masks.  For each shader array the
 * first always_on_cu_num present CUs are kept always on (written to
 * RLC_LB_ALWAYS_ACTIVE_CU_MASK); the first pg_always_on_cu_num of those
 * are additionally written to RLC_PG_ALWAYS_ON_CU_MASK.  The resulting
 * bitmap is cached in cu_info->ao_cu_bitmap.  Runs under grbm_idx_mutex
 * because it steers GRBM_GFX_INDEX per SE/SH.
 */
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	uint32_t pg_always_on_cu_num = 2;
	uint32_t always_on_cu_num;
	uint32_t i, j, k;
	uint32_t mask, cu_bitmap, counter;

	/* per-family budget of always-on CUs */
	if (adev->flags & AMD_IS_APU)
		always_on_cu_num = 4;
	else if (adev->asic_type == CHIP_VEGA12)
		always_on_cu_num = 8;
	else
		always_on_cu_num = 12;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			cu_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
				if (cu_info->bitmap[i][j] & mask) {
					/* PG mask holds only the first
					 * pg_always_on_cu_num CUs */
					if (counter == pg_always_on_cu_num)
						WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
					if (counter < always_on_cu_num)
						cu_bitmap |= mask;
					else
						break;
					counter++;
				}
				mask <<= 1;
			}

			WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
			cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
		}
	}
	/* restore broadcast mode */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}
1796
+
8091797 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
8101798 {
8111799 uint32_t data;
....@@ -839,8 +1827,10 @@
8391827 data |= 0x00C00000;
8401828 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
8411829
842
- /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
843
- WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);
1830
+ /*
1831
+ * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1832
+ * programmed in gfx_v9_0_init_always_on_cu_mask()
1833
+ */
8441834
8451835 /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
8461836 * but used for RLC_LB_CNTL configuration */
....@@ -849,6 +1839,57 @@
8491839 data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
8501840 WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
8511841 mutex_unlock(&adev->grbm_idx_mutex);
1842
+
1843
+ gfx_v9_0_init_always_on_cu_mask(adev);
1844
+}
1845
+
1846
+static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1847
+{
1848
+ uint32_t data;
1849
+
1850
+ /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1851
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1852
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1853
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1854
+ WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1855
+
1856
+ /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1857
+ WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1858
+
1859
+ /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1860
+ WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1861
+
1862
+ mutex_lock(&adev->grbm_idx_mutex);
1863
+ /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
1864
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1865
+ WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1866
+
1867
+ /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1868
+ data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1869
+ data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1870
+ data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1871
+ WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1872
+
1873
+ /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1874
+ data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1875
+ data &= 0x0000FFFF;
1876
+ data |= 0x00C00000;
1877
+ WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1878
+
1879
+ /*
1880
+ * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1881
+ * programmed in gfx_v9_0_init_always_on_cu_mask()
1882
+ */
1883
+
1884
+ /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
1885
+ * but used for RLC_LB_CNTL configuration */
1886
+ data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1887
+ data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1888
+ data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1889
+ WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1890
+ mutex_unlock(&adev->grbm_idx_mutex);
1891
+
1892
+ gfx_v9_0_init_always_on_cu_mask(adev);
8521893 }
8531894
8541895 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
....@@ -856,85 +1897,13 @@
8561897 WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
8571898 }
8581899
859
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
1900
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
8601901 {
861
- const __le32 *fw_data;
862
- volatile u32 *dst_ptr;
863
- int me, i, max_me = 5;
864
- u32 bo_offset = 0;
865
- u32 table_offset, table_size;
866
-
867
- /* write the cp table buffer */
868
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
869
- for (me = 0; me < max_me; me++) {
870
- if (me == 0) {
871
- const struct gfx_firmware_header_v1_0 *hdr =
872
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
873
- fw_data = (const __le32 *)
874
- (adev->gfx.ce_fw->data +
875
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
876
- table_offset = le32_to_cpu(hdr->jt_offset);
877
- table_size = le32_to_cpu(hdr->jt_size);
878
- } else if (me == 1) {
879
- const struct gfx_firmware_header_v1_0 *hdr =
880
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
881
- fw_data = (const __le32 *)
882
- (adev->gfx.pfp_fw->data +
883
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
884
- table_offset = le32_to_cpu(hdr->jt_offset);
885
- table_size = le32_to_cpu(hdr->jt_size);
886
- } else if (me == 2) {
887
- const struct gfx_firmware_header_v1_0 *hdr =
888
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
889
- fw_data = (const __le32 *)
890
- (adev->gfx.me_fw->data +
891
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
892
- table_offset = le32_to_cpu(hdr->jt_offset);
893
- table_size = le32_to_cpu(hdr->jt_size);
894
- } else if (me == 3) {
895
- const struct gfx_firmware_header_v1_0 *hdr =
896
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
897
- fw_data = (const __le32 *)
898
- (adev->gfx.mec_fw->data +
899
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
900
- table_offset = le32_to_cpu(hdr->jt_offset);
901
- table_size = le32_to_cpu(hdr->jt_size);
902
- } else if (me == 4) {
903
- const struct gfx_firmware_header_v1_0 *hdr =
904
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
905
- fw_data = (const __le32 *)
906
- (adev->gfx.mec2_fw->data +
907
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
908
- table_offset = le32_to_cpu(hdr->jt_offset);
909
- table_size = le32_to_cpu(hdr->jt_size);
910
- }
911
-
912
- for (i = 0; i < table_size; i ++) {
913
- dst_ptr[bo_offset + i] =
914
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
915
- }
916
-
917
- bo_offset += table_size;
918
- }
919
-}
920
-
921
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
922
-{
923
- /* clear state block */
924
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
925
- &adev->gfx.rlc.clear_state_gpu_addr,
926
- (void **)&adev->gfx.rlc.cs_ptr);
927
-
928
- /* jump table block */
929
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
930
- &adev->gfx.rlc.cp_table_gpu_addr,
931
- (void **)&adev->gfx.rlc.cp_table_ptr);
1902
+ return 5;
9321903 }
9331904
9341905 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
9351906 {
936
- volatile u32 *dst_ptr;
937
- u32 dws;
9381907 const struct cs_section_def *cs_data;
9391908 int r;
9401909
....@@ -943,83 +1912,36 @@
9431912 cs_data = adev->gfx.rlc.cs_data;
9441913
9451914 if (cs_data) {
946
- /* clear state block */
947
- adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
948
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
949
- AMDGPU_GEM_DOMAIN_VRAM,
950
- &adev->gfx.rlc.clear_state_obj,
951
- &adev->gfx.rlc.clear_state_gpu_addr,
952
- (void **)&adev->gfx.rlc.cs_ptr);
953
- if (r) {
954
- dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
955
- r);
956
- gfx_v9_0_rlc_fini(adev);
1915
+ /* init clear state block */
1916
+ r = amdgpu_gfx_rlc_init_csb(adev);
1917
+ if (r)
9571918 return r;
958
- }
959
- /* set up the cs buffer */
960
- dst_ptr = adev->gfx.rlc.cs_ptr;
961
- gfx_v9_0_get_csb_buffer(adev, dst_ptr);
962
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
963
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
964
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
9651919 }
9661920
967
- if (adev->asic_type == CHIP_RAVEN) {
1921
+ if (adev->flags & AMD_IS_APU) {
9681922 /* TODO: double check the cp_table_size for RV */
9691923 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
970
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
971
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
972
- &adev->gfx.rlc.cp_table_obj,
973
- &adev->gfx.rlc.cp_table_gpu_addr,
974
- (void **)&adev->gfx.rlc.cp_table_ptr);
975
- if (r) {
976
- dev_err(adev->dev,
977
- "(%d) failed to create cp table bo\n", r);
978
- gfx_v9_0_rlc_fini(adev);
1924
+ r = amdgpu_gfx_rlc_init_cpt(adev);
1925
+ if (r)
9791926 return r;
980
- }
981
-
982
- rv_init_cp_jump_table(adev);
983
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
984
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
985
-
986
- gfx_v9_0_init_lbpw(adev);
9871927 }
1928
+
1929
+ switch (adev->asic_type) {
1930
+ case CHIP_RAVEN:
1931
+ gfx_v9_0_init_lbpw(adev);
1932
+ break;
1933
+ case CHIP_VEGA20:
1934
+ gfx_v9_4_init_lbpw(adev);
1935
+ break;
1936
+ default:
1937
+ break;
1938
+ }
1939
+
1940
+ /* init spm vmid with 0xf */
1941
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
1942
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
9881943
9891944 return 0;
990
-}
991
-
992
-static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
993
-{
994
- int r;
995
-
996
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
997
- if (unlikely(r != 0))
998
- return r;
999
-
1000
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
1001
- AMDGPU_GEM_DOMAIN_VRAM);
1002
- if (!r)
1003
- adev->gfx.rlc.clear_state_gpu_addr =
1004
- amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
1005
-
1006
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1007
-
1008
- return r;
1009
-}
1010
-
1011
-static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
1012
-{
1013
- int r;
1014
-
1015
- if (!adev->gfx.rlc.clear_state_obj)
1016
- return;
1017
-
1018
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
1019
- if (likely(r == 0)) {
1020
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
1021
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1022
- }
10231945 }
10241946
10251947 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
....@@ -1044,29 +1966,30 @@
10441966 /* take ownership of the relevant compute queues */
10451967 amdgpu_gfx_compute_queue_acquire(adev);
10461968 mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1969
+ if (mec_hpd_size) {
1970
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1971
+ AMDGPU_GEM_DOMAIN_VRAM,
1972
+ &adev->gfx.mec.hpd_eop_obj,
1973
+ &adev->gfx.mec.hpd_eop_gpu_addr,
1974
+ (void **)&hpd);
1975
+ if (r) {
1976
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1977
+ gfx_v9_0_mec_fini(adev);
1978
+ return r;
1979
+ }
10471980
1048
- r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1049
- AMDGPU_GEM_DOMAIN_GTT,
1050
- &adev->gfx.mec.hpd_eop_obj,
1051
- &adev->gfx.mec.hpd_eop_gpu_addr,
1052
- (void **)&hpd);
1053
- if (r) {
1054
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1055
- gfx_v9_0_mec_fini(adev);
1056
- return r;
1981
+ memset(hpd, 0, mec_hpd_size);
1982
+
1983
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1984
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
10571985 }
1058
-
1059
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
1060
-
1061
- amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1062
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
10631986
10641987 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
10651988
10661989 fw_data = (const __le32 *)
10671990 (adev->gfx.mec_fw->data +
10681991 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1069
- fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
1992
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
10701993
10711994 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
10721995 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
....@@ -1089,7 +2012,7 @@
10892012
10902013 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
10912014 {
1092
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
2015
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
10932016 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
10942017 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
10952018 (address << SQ_IND_INDEX__INDEX__SHIFT) |
....@@ -1101,7 +2024,7 @@
11012024 uint32_t wave, uint32_t thread,
11022025 uint32_t regno, uint32_t num, uint32_t *out)
11032026 {
1104
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
2027
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
11052028 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
11062029 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
11072030 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
....@@ -1152,9 +2075,9 @@
11522075 }
11532076
11542077 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1155
- u32 me, u32 pipe, u32 q)
2078
+ u32 me, u32 pipe, u32 q, u32 vm)
11562079 {
1157
- soc15_grbm_select(adev, me, pipe, q, 0);
2080
+ soc15_grbm_select(adev, me, pipe, q, vm);
11582081 }
11592082
11602083 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
....@@ -1163,7 +2086,23 @@
11632086 .read_wave_data = &gfx_v9_0_read_wave_data,
11642087 .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
11652088 .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1166
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
2089
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2090
+ .ras_error_inject = &gfx_v9_0_ras_error_inject,
2091
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2092
+ .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2093
+};
2094
+
2095
+static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
2096
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2097
+ .select_se_sh = &gfx_v9_0_select_se_sh,
2098
+ .read_wave_data = &gfx_v9_0_read_wave_data,
2099
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2100
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2101
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2102
+ .ras_error_inject = &gfx_v9_4_ras_error_inject,
2103
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
2104
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
2105
+ .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
11672106 };
11682107
11692108 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
....@@ -1211,7 +2150,31 @@
12112150 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
12122151 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
12132152 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1214
- gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2153
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2154
+ gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2155
+ else
2156
+ gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2157
+ break;
2158
+ case CHIP_ARCTURUS:
2159
+ adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
2160
+ adev->gfx.config.max_hw_contexts = 8;
2161
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2162
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2163
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2164
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2165
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2166
+ gb_addr_config &= ~0xf3e777ff;
2167
+ gb_addr_config |= 0x22014042;
2168
+ break;
2169
+ case CHIP_RENOIR:
2170
+ adev->gfx.config.max_hw_contexts = 8;
2171
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2172
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2173
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2174
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2175
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2176
+ gb_addr_config &= ~0xf3e777ff;
2177
+ gb_addr_config |= 0x22010042;
12152178 break;
12162179 default:
12172180 BUG();
....@@ -1258,198 +2221,12 @@
12582221 return 0;
12592222 }
12602223
1261
-static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
1262
- struct amdgpu_ngg_buf *ngg_buf,
1263
- int size_se,
1264
- int default_size_se)
1265
-{
1266
- int r;
1267
-
1268
- if (size_se < 0) {
1269
- dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
1270
- return -EINVAL;
1271
- }
1272
- size_se = size_se ? size_se : default_size_se;
1273
-
1274
- ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
1275
- r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
1276
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1277
- &ngg_buf->bo,
1278
- &ngg_buf->gpu_addr,
1279
- NULL);
1280
- if (r) {
1281
- dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
1282
- return r;
1283
- }
1284
- ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
1285
-
1286
- return r;
1287
-}
1288
-
1289
-static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
1290
-{
1291
- int i;
1292
-
1293
- for (i = 0; i < NGG_BUF_MAX; i++)
1294
- amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
1295
- &adev->gfx.ngg.buf[i].gpu_addr,
1296
- NULL);
1297
-
1298
- memset(&adev->gfx.ngg.buf[0], 0,
1299
- sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
1300
-
1301
- adev->gfx.ngg.init = false;
1302
-
1303
- return 0;
1304
-}
1305
-
1306
-static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
1307
-{
1308
- int r;
1309
-
1310
- if (!amdgpu_ngg || adev->gfx.ngg.init == true)
1311
- return 0;
1312
-
1313
- /* GDS reserve memory: 64 bytes alignment */
1314
- adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
1315
- adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
1316
- adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
1317
- adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
1318
- adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
1319
-
1320
- /* Primitive Buffer */
1321
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
1322
- amdgpu_prim_buf_per_se,
1323
- 64 * 1024);
1324
- if (r) {
1325
- dev_err(adev->dev, "Failed to create Primitive Buffer\n");
1326
- goto err;
1327
- }
1328
-
1329
- /* Position Buffer */
1330
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
1331
- amdgpu_pos_buf_per_se,
1332
- 256 * 1024);
1333
- if (r) {
1334
- dev_err(adev->dev, "Failed to create Position Buffer\n");
1335
- goto err;
1336
- }
1337
-
1338
- /* Control Sideband */
1339
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
1340
- amdgpu_cntl_sb_buf_per_se,
1341
- 256);
1342
- if (r) {
1343
- dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
1344
- goto err;
1345
- }
1346
-
1347
- /* Parameter Cache, not created by default */
1348
- if (amdgpu_param_buf_per_se <= 0)
1349
- goto out;
1350
-
1351
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
1352
- amdgpu_param_buf_per_se,
1353
- 512 * 1024);
1354
- if (r) {
1355
- dev_err(adev->dev, "Failed to create Parameter Cache\n");
1356
- goto err;
1357
- }
1358
-
1359
-out:
1360
- adev->gfx.ngg.init = true;
1361
- return 0;
1362
-err:
1363
- gfx_v9_0_ngg_fini(adev);
1364
- return r;
1365
-}
1366
-
1367
-static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1368
-{
1369
- struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1370
- int r;
1371
- u32 data, base;
1372
-
1373
- if (!amdgpu_ngg)
1374
- return 0;
1375
-
1376
- /* Program buffer size */
1377
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
1378
- adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
1379
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
1380
- adev->gfx.ngg.buf[NGG_POS].size >> 8);
1381
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1382
-
1383
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
1384
- adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
1385
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
1386
- adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
1387
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1388
-
1389
- /* Program buffer base address */
1390
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1391
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1392
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1393
-
1394
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1395
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1396
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1397
-
1398
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1399
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1400
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1401
-
1402
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1403
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1404
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1405
-
1406
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1407
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1408
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1409
-
1410
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1411
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1412
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1413
-
1414
- /* Clear GDS reserved memory */
1415
- r = amdgpu_ring_alloc(ring, 17);
1416
- if (r) {
1417
- DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
1418
- ring->idx, r);
1419
- return r;
1420
- }
1421
-
1422
- gfx_v9_0_write_data_to_reg(ring, 0, false,
1423
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
1424
- (adev->gds.mem.total_size +
1425
- adev->gfx.ngg.gds_reserve_size) >>
1426
- AMDGPU_GDS_SHIFT);
1427
-
1428
- amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1429
- amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1430
- PACKET3_DMA_DATA_DST_SEL(1) |
1431
- PACKET3_DMA_DATA_SRC_SEL(2)));
1432
- amdgpu_ring_write(ring, 0);
1433
- amdgpu_ring_write(ring, 0);
1434
- amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1435
- amdgpu_ring_write(ring, 0);
1436
- amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
1437
- adev->gfx.ngg.gds_reserve_size);
1438
-
1439
- gfx_v9_0_write_data_to_reg(ring, 0, false,
1440
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
1441
-
1442
- amdgpu_ring_commit(ring);
1443
-
1444
- return 0;
1445
-}
1446
-
14472224 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
14482225 int mec, int pipe, int queue)
14492226 {
1450
- int r;
14512227 unsigned irq_type;
14522228 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2229
+ unsigned int hw_prio;
14532230
14542231 ring = &adev->gfx.compute_ring[ring_id];
14552232
....@@ -1460,7 +2237,7 @@
14602237
14612238 ring->ring_obj = NULL;
14622239 ring->use_doorbell = true;
1463
- ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
2240
+ ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
14642241 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
14652242 + (ring_id * GFX9_MEC_HPD_SIZE);
14662243 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
....@@ -1468,15 +2245,12 @@
14682245 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
14692246 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
14702247 + ring->pipe;
1471
-
2248
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
2249
+ ring->queue) ?
2250
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
14722251 /* type-2 packets are deprecated on MEC, use type-3 instead */
1473
- r = amdgpu_ring_init(adev, ring, 1024,
1474
- &adev->gfx.eop_irq, irq_type);
1475
- if (r)
1476
- return r;
1477
-
1478
-
1479
- return 0;
2252
+ return amdgpu_ring_init(adev, ring, 1024,
2253
+ &adev->gfx.eop_irq, irq_type, hw_prio);
14802254 }
14812255
14822256 static int gfx_v9_0_sw_init(void *handle)
....@@ -1491,6 +2265,8 @@
14912265 case CHIP_VEGA12:
14922266 case CHIP_VEGA20:
14932267 case CHIP_RAVEN:
2268
+ case CHIP_ARCTURUS:
2269
+ case CHIP_RENOIR:
14942270 adev->gfx.mec.num_mec = 2;
14952271 break;
14962272 default:
....@@ -1500,11 +2276,6 @@
15002276
15012277 adev->gfx.mec.num_pipe_per_mec = 4;
15022278 adev->gfx.mec.num_queue_per_pipe = 8;
1503
-
1504
- /* KIQ event */
1505
- r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
1506
- if (r)
1507
- return r;
15082279
15092280 /* EOP Event */
15102281 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
....@@ -1523,6 +2294,18 @@
15232294 if (r)
15242295 return r;
15252296
2297
+ /* ECC error */
2298
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2299
+ &adev->gfx.cp_ecc_error_irq);
2300
+ if (r)
2301
+ return r;
2302
+
2303
+ /* FUE error */
2304
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2305
+ &adev->gfx.cp_ecc_error_irq);
2306
+ if (r)
2307
+ return r;
2308
+
15262309 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
15272310
15282311 gfx_v9_0_scratch_init(adev);
....@@ -1533,7 +2316,7 @@
15332316 return r;
15342317 }
15352318
1536
- r = gfx_v9_0_rlc_init(adev);
2319
+ r = adev->gfx.rlc.funcs->init(adev);
15372320 if (r) {
15382321 DRM_ERROR("Failed to init rlc BOs!\n");
15392322 return r;
....@@ -1554,9 +2337,11 @@
15542337 else
15552338 sprintf(ring->name, "gfx_%d", i);
15562339 ring->use_doorbell = true;
1557
- ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
2340
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
15582341 r = amdgpu_ring_init(adev, ring, 1024,
1559
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
2342
+ &adev->gfx.eop_irq,
2343
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2344
+ AMDGPU_RING_PRIO_DEFAULT);
15602345 if (r)
15612346 return r;
15622347 }
....@@ -1592,36 +2377,13 @@
15922377 return r;
15932378
15942379 /* create MQD for all compute queues as wel as KIQ for SRIOV case */
1595
- r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1596
- if (r)
1597
- return r;
1598
-
1599
- /* reserve GDS, GWS and OA resource for gfx */
1600
- r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
1601
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
1602
- &adev->gds.gds_gfx_bo, NULL, NULL);
1603
- if (r)
1604
- return r;
1605
-
1606
- r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
1607
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
1608
- &adev->gds.gws_gfx_bo, NULL, NULL);
1609
- if (r)
1610
- return r;
1611
-
1612
- r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
1613
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
1614
- &adev->gds.oa_gfx_bo, NULL, NULL);
2380
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
16152381 if (r)
16162382 return r;
16172383
16182384 adev->gfx.ce_ram_size = 0x8000;
16192385
16202386 r = gfx_v9_0_gpu_early_init(adev);
1621
- if (r)
1622
- return r;
1623
-
1624
- r = gfx_v9_0_ngg_init(adev);
16252387 if (r)
16262388 return r;
16272389
....@@ -1634,25 +2396,20 @@
16342396 int i;
16352397 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
16362398
1637
- amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1638
- amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1639
- amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
2399
+ amdgpu_gfx_ras_fini(adev);
16402400
16412401 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
16422402 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
16432403 for (i = 0; i < adev->gfx.num_compute_rings; i++)
16442404 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
16452405
1646
- amdgpu_gfx_compute_mqd_sw_fini(adev);
1647
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
2406
+ amdgpu_gfx_mqd_sw_fini(adev);
2407
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
16482408 amdgpu_gfx_kiq_fini(adev);
16492409
16502410 gfx_v9_0_mec_fini(adev);
1651
- gfx_v9_0_ngg_fini(adev);
1652
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1653
- &adev->gfx.rlc.clear_state_gpu_addr,
1654
- (void **)&adev->gfx.rlc.cs_ptr);
1655
- if (adev->asic_type == CHIP_RAVEN) {
2411
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2412
+ if (adev->flags & AMD_IS_APU) {
16562413 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
16572414 &adev->gfx.rlc.cp_table_gpu_addr,
16582415 (void **)&adev->gfx.rlc.cp_table_ptr);
....@@ -1668,7 +2425,8 @@
16682425 /* TODO */
16692426 }
16702427
1671
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
2428
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2429
+ u32 instance)
16722430 {
16732431 u32 data;
16742432
....@@ -1687,7 +2445,7 @@
16872445 else
16882446 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
16892447
1690
- WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
2448
+ WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
16912449 }
16922450
16932451 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
....@@ -1731,8 +2489,6 @@
17312489 }
17322490
17332491 #define DEFAULT_SH_MEM_BASES (0x6000)
1734
-#define FIRST_COMPUTE_VMID (8)
1735
-#define LAST_COMPUTE_VMID (16)
17362492 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
17372493 {
17382494 int i;
....@@ -1752,49 +2508,97 @@
17522508 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
17532509
17542510 mutex_lock(&adev->srbm_mutex);
1755
- for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2511
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
17562512 soc15_grbm_select(adev, 0, 0, 0, i);
17572513 /* CP and shaders */
1758
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1759
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2514
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2515
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
17602516 }
17612517 soc15_grbm_select(adev, 0, 0, 0, 0);
17622518 mutex_unlock(&adev->srbm_mutex);
2519
+
2520
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2521
+ acccess. These should be enabled by FW for target VMIDs. */
2522
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2523
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2524
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2525
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2526
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2527
+ }
17632528 }
17642529
1765
-static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
2530
+static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2531
+{
2532
+ int vmid;
2533
+
2534
+ /*
2535
+ * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2536
+ * access. Compute VMIDs should be enabled by FW for target VMIDs,
2537
+ * the driver can enable them for graphics. VMID0 should maintain
2538
+ * access so that HWS firmware can save/restore entries.
2539
+ */
2540
+ for (vmid = 1; vmid < 16; vmid++) {
2541
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2542
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2543
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2544
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2545
+ }
2546
+}
2547
+
2548
+static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2549
+{
2550
+ uint32_t tmp;
2551
+
2552
+ switch (adev->asic_type) {
2553
+ case CHIP_ARCTURUS:
2554
+ tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2555
+ tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2556
+ DISABLE_BARRIER_WAITCNT, 1);
2557
+ WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2558
+ break;
2559
+ default:
2560
+ break;
2561
+ }
2562
+}
2563
+
2564
+static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
17662565 {
17672566 u32 tmp;
17682567 int i;
17692568
1770
- WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2569
+ WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
17712570
17722571 gfx_v9_0_tiling_mode_table_init(adev);
17732572
1774
- gfx_v9_0_setup_rb(adev);
2573
+ if (adev->gfx.num_gfx_rings)
2574
+ gfx_v9_0_setup_rb(adev);
17752575 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
17762576 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
17772577
17782578 /* XXX SH_MEM regs */
17792579 /* where to put LDS, scratch, GPUVM in FSA64 space */
17802580 mutex_lock(&adev->srbm_mutex);
1781
- for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
2581
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
17822582 soc15_grbm_select(adev, 0, 0, 0, i);
17832583 /* CP and shaders */
17842584 if (i == 0) {
17852585 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
17862586 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1787
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1788
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
2587
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2588
+ !!adev->gmc.noretry);
2589
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2590
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
17892591 } else {
17902592 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
17912593 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1792
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
2594
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2595
+ !!adev->gmc.noretry);
2596
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
17932597 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
17942598 (adev->gmc.private_aperture_start >> 48));
17952599 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
17962600 (adev->gmc.shared_aperture_start >> 48));
1797
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
2601
+ WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
17982602 }
17992603 }
18002604 soc15_grbm_select(adev, 0, 0, 0, 0);
....@@ -1802,6 +2606,8 @@
18022606 mutex_unlock(&adev->srbm_mutex);
18032607
18042608 gfx_v9_0_init_compute_vmid(adev);
2609
+ gfx_v9_0_init_gds_vmid(adev);
2610
+ gfx_v9_0_init_sq_config(adev);
18052611 }
18062612
18072613 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
....@@ -1857,12 +2663,13 @@
18572663
18582664 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
18592665 {
2666
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
18602667 /* csib */
1861
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2668
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
18622669 adev->gfx.rlc.clear_state_gpu_addr >> 32);
1863
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2670
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
18642671 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1865
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2672
+ WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
18662673 adev->gfx.rlc.clear_state_size);
18672674 }
18682675
....@@ -1916,11 +2723,10 @@
19162723 u32 tmp = 0;
19172724
19182725 u32 *register_list_format =
1919
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2726
+ kmemdup(adev->gfx.rlc.register_list_format,
2727
+ adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
19202728 if (!register_list_format)
19212729 return -ENOMEM;
1922
- memcpy(register_list_format, adev->gfx.rlc.register_list_format,
1923
- adev->gfx.rlc.reg_list_format_size_bytes);
19242730
19252731 /* setup unique_indirect_regs array and indirect_start_offsets array */
19262732 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
....@@ -2019,7 +2825,7 @@
20192825 uint32_t default_data = 0;
20202826
20212827 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2022
- if (enable == true) {
2828
+ if (enable) {
20232829 /* enable GFXIP control over CGPG */
20242830 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
20252831 if(default_data != data)
....@@ -2075,8 +2881,8 @@
20752881 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
20762882 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
20772883 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2078
-
2079
- pwr_10_0_gfxip_control_over_cgpg(adev, true);
2884
+ if (adev->asic_type != CHIP_RENOIR)
2885
+ pwr_10_0_gfxip_control_over_cgpg(adev, true);
20802886 }
20812887 }
20822888
....@@ -2187,7 +2993,8 @@
21872993 * And it's needed by gfxoff feature.
21882994 */
21892995 if (adev->gfx.rlc.is_rlc_v2_1) {
2190
- if (adev->asic_type == CHIP_VEGA12)
2996
+ if (adev->asic_type == CHIP_VEGA12 ||
2997
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
21912998 gfx_v9_1_init_rlc_save_restore_list(adev);
21922999 gfx_v9_0_enable_save_restore_machine(adev);
21933000 }
....@@ -2285,12 +3092,10 @@
22853092 return 0;
22863093 }
22873094
2288
- gfx_v9_0_rlc_stop(adev);
3095
+ adev->gfx.rlc.funcs->stop(adev);
22893096
22903097 /* disable CG */
22913098 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2292
-
2293
- gfx_v9_0_rlc_reset(adev);
22943099
22953100 gfx_v9_0_init_pg(adev);
22963101
....@@ -2301,31 +3106,36 @@
23013106 return r;
23023107 }
23033108
2304
- if (adev->asic_type == CHIP_RAVEN) {
2305
- if (amdgpu_lbpw != 0)
3109
+ switch (adev->asic_type) {
3110
+ case CHIP_RAVEN:
3111
+ if (amdgpu_lbpw == 0)
3112
+ gfx_v9_0_enable_lbpw(adev, false);
3113
+ else
3114
+ gfx_v9_0_enable_lbpw(adev, true);
3115
+ break;
3116
+ case CHIP_VEGA20:
3117
+ if (amdgpu_lbpw > 0)
23063118 gfx_v9_0_enable_lbpw(adev, true);
23073119 else
23083120 gfx_v9_0_enable_lbpw(adev, false);
3121
+ break;
3122
+ default:
3123
+ break;
23093124 }
23103125
2311
- gfx_v9_0_rlc_start(adev);
3126
+ adev->gfx.rlc.funcs->start(adev);
23123127
23133128 return 0;
23143129 }
23153130
23163131 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
23173132 {
2318
- int i;
23193133 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
23203134
23213135 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
23223136 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
23233137 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2324
- if (!enable) {
2325
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2326
- adev->gfx.gfx_ring[i].ready = false;
2327
- }
2328
- WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
3138
+ WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
23293139 udelay(50);
23303140 }
23313141
....@@ -2513,23 +3323,19 @@
25133323
25143324 /* start the ring */
25153325 gfx_v9_0_cp_gfx_start(adev);
2516
- ring->ready = true;
3326
+ ring->sched.ready = true;
25173327
25183328 return 0;
25193329 }
25203330
25213331 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
25223332 {
2523
- int i;
2524
-
25253333 if (enable) {
2526
- WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
3334
+ WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
25273335 } else {
2528
- WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
3336
+ WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
25293337 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2530
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
2531
- adev->gfx.compute_ring[i].ready = false;
2532
- adev->gfx.kiq.ring.ready = false;
3338
+ adev->gfx.kiq.ring.sched.ready = false;
25333339 }
25343340 udelay(50);
25353341 }
....@@ -2586,100 +3392,24 @@
25863392 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
25873393 tmp &= 0xffffff00;
25883394 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2589
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3395
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
25903396 tmp |= 0x80;
2591
- WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3397
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
25923398 }
25933399
2594
-static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
3400
+static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
25953401 {
2596
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2597
- uint32_t scratch, tmp = 0;
2598
- uint64_t queue_mask = 0;
2599
- int r, i;
3402
+ struct amdgpu_device *adev = ring->adev;
26003403
2601
- for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2602
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2603
- continue;
2604
-
2605
- /* This situation may be hit in the future if a new HW
2606
- * generation exposes more than 64 queues. If so, the
2607
- * definition of queue_mask needs updating */
2608
- if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2609
- DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2610
- break;
3404
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3405
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev,
3406
+ ring->pipe,
3407
+ ring->queue)) {
3408
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3409
+ mqd->cp_hqd_queue_priority =
3410
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
26113411 }
2612
-
2613
- queue_mask |= (1ull << i);
26143412 }
2615
-
2616
- r = amdgpu_gfx_scratch_get(adev, &scratch);
2617
- if (r) {
2618
- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2619
- return r;
2620
- }
2621
- WREG32(scratch, 0xCAFEDEAD);
2622
-
2623
- r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2624
- if (r) {
2625
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2626
- amdgpu_gfx_scratch_free(adev, scratch);
2627
- return r;
2628
- }
2629
-
2630
- /* set resources */
2631
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2632
- amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2633
- PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2634
- amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2635
- amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2636
- amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2637
- amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2638
- amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2639
- amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2640
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2641
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2642
- uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2643
- uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2644
-
2645
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2646
- /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
2647
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
2648
- PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2649
- PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2650
- PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2651
- PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2652
- PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2653
- PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2654
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2655
- PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2656
- PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2657
- amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2658
- amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2659
- amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2660
- amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2661
- amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2662
- }
2663
- /* write to scratch for completion */
2664
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2665
- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2666
- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2667
- amdgpu_ring_commit(kiq_ring);
2668
-
2669
- for (i = 0; i < adev->usec_timeout; i++) {
2670
- tmp = RREG32(scratch);
2671
- if (tmp == 0xDEADBEEF)
2672
- break;
2673
- DRM_UDELAY(1);
2674
- }
2675
- if (i >= adev->usec_timeout) {
2676
- DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2677
- scratch, tmp);
2678
- r = -EINVAL;
2679
- }
2680
- amdgpu_gfx_scratch_free(adev, scratch);
2681
-
2682
- return r;
26833413 }
26843414
26853415 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
....@@ -2695,6 +3425,10 @@
26953425 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
26963426 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
26973427 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3428
+ mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3429
+ mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3430
+ mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3431
+ mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
26983432 mqd->compute_misc_reserved = 0x00000003;
26993433
27003434 mqd->dynamic_cu_mask_addr_lo =
....@@ -2814,8 +3548,15 @@
28143548 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
28153549 mqd->cp_hqd_ib_control = tmp;
28163550
2817
- /* activate the queue */
2818
- mqd->cp_hqd_active = 1;
3551
+ /* set static priority for a queue/ring */
3552
+ gfx_v9_0_mqd_set_priority(ring, mqd);
3553
+ mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
3554
+
3555
+ /* map_queues packet doesn't need activate the queue,
3556
+ * so only kiq need set this field.
3557
+ */
3558
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3559
+ mqd->cp_hqd_active = 1;
28193560
28203561 return 0;
28213562 }
....@@ -2829,94 +3570,103 @@
28293570 /* disable wptr polling */
28303571 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
28313572
2832
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3573
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
28333574 mqd->cp_hqd_eop_base_addr_lo);
2834
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3575
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
28353576 mqd->cp_hqd_eop_base_addr_hi);
28363577
28373578 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2838
- WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
3579
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
28393580 mqd->cp_hqd_eop_control);
28403581
28413582 /* enable doorbell? */
2842
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3583
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
28433584 mqd->cp_hqd_pq_doorbell_control);
28443585
28453586 /* disable the queue if it's active */
28463587 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2847
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3588
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
28483589 for (j = 0; j < adev->usec_timeout; j++) {
28493590 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
28503591 break;
28513592 udelay(1);
28523593 }
2853
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3594
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
28543595 mqd->cp_hqd_dequeue_request);
2855
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3596
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
28563597 mqd->cp_hqd_pq_rptr);
2857
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3598
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
28583599 mqd->cp_hqd_pq_wptr_lo);
2859
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3600
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
28603601 mqd->cp_hqd_pq_wptr_hi);
28613602 }
28623603
28633604 /* set the pointer to the MQD */
2864
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3605
+ WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
28653606 mqd->cp_mqd_base_addr_lo);
2866
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3607
+ WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
28673608 mqd->cp_mqd_base_addr_hi);
28683609
28693610 /* set MQD vmid to 0 */
2870
- WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3611
+ WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
28713612 mqd->cp_mqd_control);
28723613
28733614 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
2874
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3615
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
28753616 mqd->cp_hqd_pq_base_lo);
2876
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3617
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
28773618 mqd->cp_hqd_pq_base_hi);
28783619
28793620 /* set up the HQD, this is similar to CP_RB0_CNTL */
2880
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3621
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
28813622 mqd->cp_hqd_pq_control);
28823623
28833624 /* set the wb address whether it's enabled or not */
2884
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3625
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
28853626 mqd->cp_hqd_pq_rptr_report_addr_lo);
2886
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3627
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
28873628 mqd->cp_hqd_pq_rptr_report_addr_hi);
28883629
28893630 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2890
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3631
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
28913632 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2892
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3633
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
28933634 mqd->cp_hqd_pq_wptr_poll_addr_hi);
28943635
28953636 /* enable the doorbell if requested */
28963637 if (ring->use_doorbell) {
28973638 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2898
- (AMDGPU_DOORBELL64_KIQ *2) << 2);
2899
- WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2900
- (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
3639
+ (adev->doorbell_index.kiq * 2) << 2);
3640
+ /* If GC has entered CGPG, ringing doorbell > first page
3641
+ * doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
3642
+ * workaround this issue. And this change has to align with firmware
3643
+ * update.
3644
+ */
3645
+ if (check_if_enlarge_doorbell_range(adev))
3646
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3647
+ (adev->doorbell.size - 4));
3648
+ else
3649
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3650
+ (adev->doorbell_index.userqueue_end * 2) << 2);
29013651 }
29023652
2903
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3653
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
29043654 mqd->cp_hqd_pq_doorbell_control);
29053655
29063656 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2907
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3657
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
29083658 mqd->cp_hqd_pq_wptr_lo);
2909
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3659
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
29103660 mqd->cp_hqd_pq_wptr_hi);
29113661
29123662 /* set the vmid for the queue */
2913
- WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3663
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
29143664
2915
- WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3665
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
29163666 mqd->cp_hqd_persistent_state);
29173667
29183668 /* activate the queue */
2919
- WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3669
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
29203670 mqd->cp_hqd_active);
29213671
29223672 if (ring->use_doorbell)
....@@ -2933,7 +3683,7 @@
29333683 /* disable the queue if it's active */
29343684 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
29353685
2936
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3686
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
29373687
29383688 for (j = 0; j < adev->usec_timeout; j++) {
29393689 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
....@@ -2945,21 +3695,21 @@
29453695 DRM_DEBUG("KIQ dequeue request failed.\n");
29463696
29473697 /* Manual disable if dequeue request times out */
2948
- WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
3698
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
29493699 }
29503700
2951
- WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3701
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
29523702 0);
29533703 }
29543704
2955
- WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
2956
- WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
2957
- WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
2958
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2959
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
2960
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
2961
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
2962
- WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3705
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3706
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3707
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3708
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3709
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3710
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3711
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3712
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
29633713
29643714 return 0;
29653715 }
....@@ -2972,7 +3722,7 @@
29723722
29733723 gfx_v9_0_kiq_setting(ring);
29743724
2975
- if (adev->in_gpu_reset) { /* for GPU_RESET case */
3725
+ if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
29763726 /* reset MQD to a clean status */
29773727 if (adev->gfx.mec.mqd_backup[mqd_idx])
29783728 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
....@@ -3010,7 +3760,7 @@
30103760 struct v9_mqd *mqd = ring->mqd_ptr;
30113761 int mqd_idx = ring - &adev->gfx.compute_ring[0];
30123762
3013
- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3763
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
30143764 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
30153765 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
30163766 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
....@@ -3022,13 +3772,14 @@
30223772
30233773 if (adev->gfx.mec.mqd_backup[mqd_idx])
30243774 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3025
- } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3775
+ } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
30263776 /* reset MQD to a clean status */
30273777 if (adev->gfx.mec.mqd_backup[mqd_idx])
30283778 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
30293779
30303780 /* reset ring buffer */
30313781 ring->wptr = 0;
3782
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
30323783 amdgpu_ring_clear_ring(ring);
30333784 } else {
30343785 amdgpu_ring_clear_ring(ring);
....@@ -3039,26 +3790,35 @@
30393790
30403791 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
30413792 {
3042
- struct amdgpu_ring *ring = NULL;
3043
- int r = 0, i;
3044
-
3045
- gfx_v9_0_cp_compute_enable(adev, true);
3793
+ struct amdgpu_ring *ring;
3794
+ int r;
30463795
30473796 ring = &adev->gfx.kiq.ring;
30483797
30493798 r = amdgpu_bo_reserve(ring->mqd_obj, false);
30503799 if (unlikely(r != 0))
3051
- goto done;
3800
+ return r;
30523801
30533802 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3054
- if (!r) {
3055
- r = gfx_v9_0_kiq_init_queue(ring);
3056
- amdgpu_bo_kunmap(ring->mqd_obj);
3057
- ring->mqd_ptr = NULL;
3803
+ if (unlikely(r != 0)) {
3804
+ amdgpu_bo_unreserve(ring->mqd_obj);
3805
+ return r;
30583806 }
3807
+
3808
+ gfx_v9_0_kiq_init_queue(ring);
3809
+ amdgpu_bo_kunmap(ring->mqd_obj);
3810
+ ring->mqd_ptr = NULL;
30593811 amdgpu_bo_unreserve(ring->mqd_obj);
3060
- if (r)
3061
- goto done;
3812
+ ring->sched.ready = true;
3813
+ return 0;
3814
+}
3815
+
3816
+static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3817
+{
3818
+ struct amdgpu_ring *ring = NULL;
3819
+ int r = 0, i;
3820
+
3821
+ gfx_v9_0_cp_compute_enable(adev, true);
30623822
30633823 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
30643824 ring = &adev->gfx.compute_ring[i];
....@@ -3077,7 +3837,7 @@
30773837 goto done;
30783838 }
30793839
3080
- r = gfx_v9_0_kiq_kcq_enable(adev);
3840
+ r = amdgpu_gfx_enable_kcq(adev);
30813841 done:
30823842 return r;
30833843 }
....@@ -3091,44 +3851,42 @@
30913851 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
30923852
30933853 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3094
- /* legacy firmware loading */
3095
- r = gfx_v9_0_cp_gfx_load_microcode(adev);
3096
- if (r)
3097
- return r;
3854
+ if (adev->asic_type != CHIP_ARCTURUS) {
3855
+ /* legacy firmware loading */
3856
+ r = gfx_v9_0_cp_gfx_load_microcode(adev);
3857
+ if (r)
3858
+ return r;
3859
+ }
30983860
30993861 r = gfx_v9_0_cp_compute_load_microcode(adev);
31003862 if (r)
31013863 return r;
31023864 }
31033865
3104
- r = gfx_v9_0_cp_gfx_resume(adev);
3105
- if (r)
3106
- return r;
3107
-
31083866 r = gfx_v9_0_kiq_resume(adev);
31093867 if (r)
31103868 return r;
31113869
3112
- ring = &adev->gfx.gfx_ring[0];
3113
- r = amdgpu_ring_test_ring(ring);
3114
- if (r) {
3115
- ring->ready = false;
3116
- return r;
3870
+ if (adev->asic_type != CHIP_ARCTURUS) {
3871
+ r = gfx_v9_0_cp_gfx_resume(adev);
3872
+ if (r)
3873
+ return r;
31173874 }
31183875
3119
- ring = &adev->gfx.kiq.ring;
3120
- ring->ready = true;
3121
- r = amdgpu_ring_test_ring(ring);
3876
+ r = gfx_v9_0_kcq_resume(adev);
31223877 if (r)
3123
- ring->ready = false;
3878
+ return r;
3879
+
3880
+ if (adev->asic_type != CHIP_ARCTURUS) {
3881
+ ring = &adev->gfx.gfx_ring[0];
3882
+ r = amdgpu_ring_test_helper(ring);
3883
+ if (r)
3884
+ return r;
3885
+ }
31243886
31253887 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
31263888 ring = &adev->gfx.compute_ring[i];
3127
-
3128
- ring->ready = true;
3129
- r = amdgpu_ring_test_ring(ring);
3130
- if (r)
3131
- ring->ready = false;
3889
+ amdgpu_ring_test_helper(ring);
31323890 }
31333891
31343892 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
....@@ -3136,9 +3894,27 @@
31363894 return 0;
31373895 }
31383896
3897
+static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3898
+{
3899
+ u32 tmp;
3900
+
3901
+ if (adev->asic_type != CHIP_ARCTURUS)
3902
+ return;
3903
+
3904
+ tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3905
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3906
+ adev->df.hash_status.hash_64k);
3907
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3908
+ adev->df.hash_status.hash_2m);
3909
+ tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3910
+ adev->df.hash_status.hash_1g);
3911
+ WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3912
+}
3913
+
31393914 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
31403915 {
3141
- gfx_v9_0_cp_gfx_enable(adev, enable);
3916
+ if (adev->asic_type != CHIP_ARCTURUS)
3917
+ gfx_v9_0_cp_gfx_enable(adev, enable);
31423918 gfx_v9_0_cp_compute_enable(adev, enable);
31433919 }
31443920
....@@ -3147,15 +3923,14 @@
31473923 int r;
31483924 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
31493925
3150
- gfx_v9_0_init_golden_registers(adev);
3926
+ if (!amdgpu_sriov_vf(adev))
3927
+ gfx_v9_0_init_golden_registers(adev);
31513928
3152
- gfx_v9_0_gpu_init(adev);
3929
+ gfx_v9_0_constants_init(adev);
31533930
3154
- r = gfx_v9_0_csb_vram_pin(adev);
3155
- if (r)
3156
- return r;
3931
+ gfx_v9_0_init_tcp_config(adev);
31573932
3158
- r = gfx_v9_0_rlc_resume(adev);
3933
+ r = adev->gfx.rlc.funcs->resume(adev);
31593934 if (r)
31603935 return r;
31613936
....@@ -3163,78 +3938,22 @@
31633938 if (r)
31643939 return r;
31653940
3166
- r = gfx_v9_0_ngg_en(adev);
3167
- if (r)
3168
- return r;
3169
-
3170
- return r;
3171
-}
3172
-
3173
-static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
3174
-{
3175
- struct amdgpu_device *adev = kiq_ring->adev;
3176
- uint32_t scratch, tmp = 0;
3177
- int r, i;
3178
-
3179
- r = amdgpu_gfx_scratch_get(adev, &scratch);
3180
- if (r) {
3181
- DRM_ERROR("Failed to get scratch reg (%d).\n", r);
3182
- return r;
3183
- }
3184
- WREG32(scratch, 0xCAFEDEAD);
3185
-
3186
- r = amdgpu_ring_alloc(kiq_ring, 10);
3187
- if (r) {
3188
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3189
- amdgpu_gfx_scratch_free(adev, scratch);
3190
- return r;
3191
- }
3192
-
3193
- /* unmap queues */
3194
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3195
- amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3196
- PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3197
- PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3198
- PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3199
- PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3200
- amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3201
- amdgpu_ring_write(kiq_ring, 0);
3202
- amdgpu_ring_write(kiq_ring, 0);
3203
- amdgpu_ring_write(kiq_ring, 0);
3204
- /* write to scratch for completion */
3205
- amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3206
- amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
3207
- amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
3208
- amdgpu_ring_commit(kiq_ring);
3209
-
3210
- for (i = 0; i < adev->usec_timeout; i++) {
3211
- tmp = RREG32(scratch);
3212
- if (tmp == 0xDEADBEEF)
3213
- break;
3214
- DRM_UDELAY(1);
3215
- }
3216
- if (i >= adev->usec_timeout) {
3217
- DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
3218
- r = -EINVAL;
3219
- }
3220
- amdgpu_gfx_scratch_free(adev, scratch);
32213941 return r;
32223942 }
32233943
32243944 static int gfx_v9_0_hw_fini(void *handle)
32253945 {
32263946 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3227
- int i;
32283947
3229
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
3230
- AMD_PG_STATE_UNGATE);
3231
-
3948
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
3949
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
32323950 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
32333951 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
32343952
3235
- /* disable KCQ to avoid CPC touch memory not valid anymore */
3236
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
3237
- gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
3953
+ /* DF freeze and kcq disable will fail */
3954
+ if (!amdgpu_ras_intr_triggered())
3955
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
3956
+ amdgpu_gfx_disable_kcq(adev);
32383957
32393958 if (amdgpu_sriov_vf(adev)) {
32403959 gfx_v9_0_cp_gfx_enable(adev, false);
....@@ -3250,7 +3969,7 @@
32503969 /* Use deinitialize sequence from CAIL when unbinding device from driver,
32513970 * otherwise KIQ is hanging when binding back
32523971 */
3253
- if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3972
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
32543973 mutex_lock(&adev->srbm_mutex);
32553974 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
32563975 adev->gfx.kiq.ring.pipe,
....@@ -3261,29 +3980,19 @@
32613980 }
32623981
32633982 gfx_v9_0_cp_enable(adev, false);
3264
- gfx_v9_0_rlc_stop(adev);
3265
-
3266
- gfx_v9_0_csb_vram_unpin(adev);
3983
+ adev->gfx.rlc.funcs->stop(adev);
32673984
32683985 return 0;
32693986 }
32703987
32713988 static int gfx_v9_0_suspend(void *handle)
32723989 {
3273
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3274
-
3275
- adev->gfx.in_suspend = true;
3276
- return gfx_v9_0_hw_fini(adev);
3990
+ return gfx_v9_0_hw_fini(handle);
32773991 }
32783992
32793993 static int gfx_v9_0_resume(void *handle)
32803994 {
3281
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3282
- int r;
3283
-
3284
- r = gfx_v9_0_hw_init(adev);
3285
- adev->gfx.in_suspend = false;
3286
- return r;
3995
+ return gfx_v9_0_hw_init(handle);
32873996 }
32883997
32893998 static bool gfx_v9_0_is_idle(void *handle)
....@@ -3344,10 +4053,11 @@
33444053
33454054 if (grbm_soft_reset) {
33464055 /* stop the rlc */
3347
- gfx_v9_0_rlc_stop(adev);
4056
+ adev->gfx.rlc.funcs->stop(adev);
33484057
3349
- /* Disable GFX parsing/prefetching */
3350
- gfx_v9_0_cp_gfx_enable(adev, false);
4058
+ if (adev->asic_type != CHIP_ARCTURUS)
4059
+ /* Disable GFX parsing/prefetching */
4060
+ gfx_v9_0_cp_gfx_enable(adev, false);
33514061
33524062 /* Disable MEC parsing/prefetching */
33534063 gfx_v9_0_cp_compute_enable(adev, false);
....@@ -3372,15 +4082,114 @@
33724082 return 0;
33734083 }
33744084
4085
+static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4086
+{
4087
+ signed long r, cnt = 0;
4088
+ unsigned long flags;
4089
+ uint32_t seq, reg_val_offs = 0;
4090
+ uint64_t value = 0;
4091
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4092
+ struct amdgpu_ring *ring = &kiq->ring;
4093
+
4094
+ BUG_ON(!ring->funcs->emit_rreg);
4095
+
4096
+ spin_lock_irqsave(&kiq->ring_lock, flags);
4097
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4098
+ pr_err("critical bug! too many kiq readers\n");
4099
+ goto failed_unlock;
4100
+ }
4101
+ amdgpu_ring_alloc(ring, 32);
4102
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4103
+ amdgpu_ring_write(ring, 9 | /* src: register*/
4104
+ (5 << 8) | /* dst: memory */
4105
+ (1 << 16) | /* count sel */
4106
+ (1 << 20)); /* write confirm */
4107
+ amdgpu_ring_write(ring, 0);
4108
+ amdgpu_ring_write(ring, 0);
4109
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4110
+ reg_val_offs * 4));
4111
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4112
+ reg_val_offs * 4));
4113
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4114
+ if (r)
4115
+ goto failed_undo;
4116
+
4117
+ amdgpu_ring_commit(ring);
4118
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
4119
+
4120
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4121
+
4122
+ /* don't wait anymore for gpu reset case because this way may
4123
+ * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
4124
+ * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
4125
+ * never return if we keep waiting in virt_kiq_rreg, which cause
4126
+ * gpu_recover() hang there.
4127
+ *
4128
+ * also don't wait anymore for IRQ context
4129
+ * */
4130
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
4131
+ goto failed_kiq_read;
4132
+
4133
+ might_sleep();
4134
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4135
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4136
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4137
+ }
4138
+
4139
+ if (cnt > MAX_KIQ_REG_TRY)
4140
+ goto failed_kiq_read;
4141
+
4142
+ mb();
4143
+ value = (uint64_t)adev->wb.wb[reg_val_offs] |
4144
+ (uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
4145
+ amdgpu_device_wb_free(adev, reg_val_offs);
4146
+ return value;
4147
+
4148
+failed_undo:
4149
+ amdgpu_ring_undo(ring);
4150
+failed_unlock:
4151
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
4152
+failed_kiq_read:
4153
+ if (reg_val_offs)
4154
+ amdgpu_device_wb_free(adev, reg_val_offs);
4155
+ pr_err("failed to read gpu clock\n");
4156
+ return ~0;
4157
+}
4158
+
33754159 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
33764160 {
3377
- uint64_t clock;
4161
+ uint64_t clock, clock_lo, clock_hi, hi_check;
33784162
3379
- mutex_lock(&adev->gfx.gpu_clock_mutex);
3380
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3381
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3382
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3383
- mutex_unlock(&adev->gfx.gpu_clock_mutex);
4163
+ switch (adev->asic_type) {
4164
+ case CHIP_RENOIR:
4165
+ preempt_disable();
4166
+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4167
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4168
+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
4169
+ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
4170
+ * roughly every 42 seconds.
4171
+ */
4172
+ if (hi_check != clock_hi) {
4173
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
4174
+ clock_hi = hi_check;
4175
+ }
4176
+ preempt_enable();
4177
+ clock = clock_lo | (clock_hi << 32ULL);
4178
+ break;
4179
+ default:
4180
+ amdgpu_gfx_off_ctrl(adev, false);
4181
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
4182
+ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4183
+ clock = gfx_v9_0_kiq_read_clock(adev);
4184
+ } else {
4185
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4186
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4187
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4188
+ }
4189
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
4190
+ amdgpu_gfx_off_ctrl(adev, true);
4191
+ break;
4192
+ }
33844193 return clock;
33854194 }
33864195
....@@ -3391,15 +4200,6 @@
33914200 uint32_t oa_base, uint32_t oa_size)
33924201 {
33934202 struct amdgpu_device *adev = ring->adev;
3394
-
3395
- gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3396
- gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3397
-
3398
- gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3399
- gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3400
-
3401
- oa_base = oa_base >> AMDGPU_OA_SHIFT;
3402
- oa_size = oa_size >> AMDGPU_OA_SHIFT;
34034203
34044204 /* GDS Base */
34054205 gfx_v9_0_write_data_to_reg(ring, 0, false,
....@@ -3422,16 +4222,508 @@
34224222 (1 << (oa_size + oa_base)) - (1 << oa_base));
34234223 }
34244224
4225
+static const u32 vgpr_init_compute_shader[] =
4226
+{
4227
+ 0xb07c0000, 0xbe8000ff,
4228
+ 0x000000f8, 0xbf110800,
4229
+ 0x7e000280, 0x7e020280,
4230
+ 0x7e040280, 0x7e060280,
4231
+ 0x7e080280, 0x7e0a0280,
4232
+ 0x7e0c0280, 0x7e0e0280,
4233
+ 0x80808800, 0xbe803200,
4234
+ 0xbf84fff5, 0xbf9c0000,
4235
+ 0xd28c0001, 0x0001007f,
4236
+ 0xd28d0001, 0x0002027e,
4237
+ 0x10020288, 0xb8810904,
4238
+ 0xb7814000, 0xd1196a01,
4239
+ 0x00000301, 0xbe800087,
4240
+ 0xbefc00c1, 0xd89c4000,
4241
+ 0x00020201, 0xd89cc080,
4242
+ 0x00040401, 0x320202ff,
4243
+ 0x00000800, 0x80808100,
4244
+ 0xbf84fff8, 0x7e020280,
4245
+ 0xbf810000, 0x00000000,
4246
+};
4247
+
4248
+static const u32 sgpr_init_compute_shader[] =
4249
+{
4250
+ 0xb07c0000, 0xbe8000ff,
4251
+ 0x0000005f, 0xbee50080,
4252
+ 0xbe812c65, 0xbe822c65,
4253
+ 0xbe832c65, 0xbe842c65,
4254
+ 0xbe852c65, 0xb77c0005,
4255
+ 0x80808500, 0xbf84fff8,
4256
+ 0xbe800080, 0xbf810000,
4257
+};
4258
+
4259
+static const u32 vgpr_init_compute_shader_arcturus[] = {
4260
+ 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4261
+ 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4262
+ 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4263
+ 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4264
+ 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4265
+ 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4266
+ 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4267
+ 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4268
+ 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4269
+ 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4270
+ 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4271
+ 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4272
+ 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4273
+ 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4274
+ 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4275
+ 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4276
+ 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4277
+ 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4278
+ 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4279
+ 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4280
+ 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4281
+ 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4282
+ 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4283
+ 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4284
+ 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4285
+ 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4286
+ 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4287
+ 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4288
+ 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4289
+ 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4290
+ 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4291
+ 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4292
+ 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4293
+ 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4294
+ 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4295
+ 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4296
+ 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4297
+ 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4298
+ 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4299
+ 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4300
+ 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4301
+ 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4302
+ 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4303
+ 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4304
+ 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4305
+ 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4306
+ 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4307
+ 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4308
+ 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4309
+ 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4310
+ 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4311
+ 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4312
+ 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4313
+ 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4314
+ 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4315
+ 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4316
+ 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4317
+ 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4318
+ 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4319
+ 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4320
+ 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4321
+ 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4322
+ 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4323
+ 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4324
+ 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4325
+ 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4326
+ 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4327
+ 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4328
+ 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4329
+ 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4330
+ 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4331
+ 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4332
+ 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4333
+ 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4334
+ 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4335
+ 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4336
+ 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4337
+ 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4338
+ 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4339
+ 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4340
+ 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4341
+ 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4342
+ 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4343
+ 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4344
+ 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4345
+ 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4346
+ 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4347
+ 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4348
+ 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4349
+ 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4350
+ 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4351
+ 0xbf84fff8, 0xbf810000,
4352
+};
4353
+
4354
+/* When below register arrays changed, please update gpr_reg_size,
4355
+ and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds,
4356
+ to cover all gfx9 ASICs */
4357
+static const struct soc15_reg_entry vgpr_init_regs[] = {
4358
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4359
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4360
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4361
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4362
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4363
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
4364
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4365
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4366
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4367
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4368
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4369
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4370
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4371
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4372
+};
4373
+
4374
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4375
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4376
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4377
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4378
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4379
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4380
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
4381
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4382
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4383
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4384
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4385
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4386
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4387
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4388
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4389
+};
4390
+
4391
+static const struct soc15_reg_entry sgpr1_init_regs[] = {
4392
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4393
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4394
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4395
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4396
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4397
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4398
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4399
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4400
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4401
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4402
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4403
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4404
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4405
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4406
+};
4407
+
4408
+static const struct soc15_reg_entry sgpr2_init_regs[] = {
4409
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4410
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4411
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4412
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4413
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4414
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4415
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4416
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4417
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4418
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4419
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4420
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4421
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4422
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4423
+};
4424
+
4425
+static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4426
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4427
+ { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4428
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4429
+ { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4430
+ { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4431
+ { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4432
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4433
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4434
+ { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4435
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4436
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4437
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4438
+ { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4439
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4440
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4441
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4442
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4443
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4444
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4445
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4446
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4447
+ { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4448
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4449
+ { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4450
+ { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4451
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4452
+ { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4453
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4454
+ { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4455
+ { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4456
+ { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4457
+ { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4458
+ { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4459
+};
4460
+
4461
+static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4462
+{
4463
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4464
+ int i, r;
4465
+
4466
+ /* only support when RAS is enabled */
4467
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4468
+ return 0;
4469
+
4470
+ r = amdgpu_ring_alloc(ring, 7);
4471
+ if (r) {
4472
+ DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4473
+ ring->name, r);
4474
+ return r;
4475
+ }
4476
+
4477
+ WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4478
+ WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4479
+
4480
+ amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4481
+ amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4482
+ PACKET3_DMA_DATA_DST_SEL(1) |
4483
+ PACKET3_DMA_DATA_SRC_SEL(2) |
4484
+ PACKET3_DMA_DATA_ENGINE(0)));
4485
+ amdgpu_ring_write(ring, 0);
4486
+ amdgpu_ring_write(ring, 0);
4487
+ amdgpu_ring_write(ring, 0);
4488
+ amdgpu_ring_write(ring, 0);
4489
+ amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4490
+ adev->gds.gds_size);
4491
+
4492
+ amdgpu_ring_commit(ring);
4493
+
4494
+ for (i = 0; i < adev->usec_timeout; i++) {
4495
+ if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4496
+ break;
4497
+ udelay(1);
4498
+ }
4499
+
4500
+ if (i >= adev->usec_timeout)
4501
+ r = -ETIMEDOUT;
4502
+
4503
+ WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4504
+
4505
+ return r;
4506
+}
4507
+
4508
+static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4509
+{
4510
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4511
+ struct amdgpu_ib ib;
4512
+ struct dma_fence *f = NULL;
4513
+ int r, i;
4514
+ unsigned total_size, vgpr_offset, sgpr_offset;
4515
+ u64 gpu_addr;
4516
+
4517
+ int compute_dim_x = adev->gfx.config.max_shader_engines *
4518
+ adev->gfx.config.max_cu_per_sh *
4519
+ adev->gfx.config.max_sh_per_se;
4520
+ int sgpr_work_group_size = 5;
4521
+ int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4522
+ int vgpr_init_shader_size;
4523
+ const u32 *vgpr_init_shader_ptr;
4524
+ const struct soc15_reg_entry *vgpr_init_regs_ptr;
4525
+
4526
+ /* only support when RAS is enabled */
4527
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4528
+ return 0;
4529
+
4530
+ /* bail if the compute ring is not ready */
4531
+ if (!ring->sched.ready)
4532
+ return 0;
4533
+
4534
+ if (adev->asic_type == CHIP_ARCTURUS) {
4535
+ vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4536
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4537
+ vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4538
+ } else {
4539
+ vgpr_init_shader_ptr = vgpr_init_compute_shader;
4540
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4541
+ vgpr_init_regs_ptr = vgpr_init_regs;
4542
+ }
4543
+
4544
+ total_size =
4545
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4546
+ total_size +=
4547
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4548
+ total_size +=
4549
+ (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4550
+ total_size = ALIGN(total_size, 256);
4551
+ vgpr_offset = total_size;
4552
+ total_size += ALIGN(vgpr_init_shader_size, 256);
4553
+ sgpr_offset = total_size;
4554
+ total_size += sizeof(sgpr_init_compute_shader);
4555
+
4556
+ /* allocate an indirect buffer to put the commands in */
4557
+ memset(&ib, 0, sizeof(ib));
4558
+ r = amdgpu_ib_get(adev, NULL, total_size,
4559
+ AMDGPU_IB_POOL_DIRECT, &ib);
4560
+ if (r) {
4561
+ DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4562
+ return r;
4563
+ }
4564
+
4565
+ /* load the compute shaders */
4566
+ for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4567
+ ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4568
+
4569
+ for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4570
+ ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4571
+
4572
+ /* init the ib length to 0 */
4573
+ ib.length_dw = 0;
4574
+
4575
+ /* VGPR */
4576
+ /* write the register state for the compute dispatch */
4577
+ for (i = 0; i < gpr_reg_size; i++) {
4578
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4579
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4580
+ - PACKET3_SET_SH_REG_START;
4581
+ ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4582
+ }
4583
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4584
+ gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4585
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4586
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4587
+ - PACKET3_SET_SH_REG_START;
4588
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4589
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4590
+
4591
+ /* write dispatch packet */
4592
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4593
+ ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4594
+ ib.ptr[ib.length_dw++] = 1; /* y */
4595
+ ib.ptr[ib.length_dw++] = 1; /* z */
4596
+ ib.ptr[ib.length_dw++] =
4597
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4598
+
4599
+ /* write CS partial flush packet */
4600
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4601
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4602
+
4603
+ /* SGPR1 */
4604
+ /* write the register state for the compute dispatch */
4605
+ for (i = 0; i < gpr_reg_size; i++) {
4606
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4607
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4608
+ - PACKET3_SET_SH_REG_START;
4609
+ ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4610
+ }
4611
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4612
+ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4613
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4614
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4615
+ - PACKET3_SET_SH_REG_START;
4616
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4617
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4618
+
4619
+ /* write dispatch packet */
4620
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4621
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4622
+ ib.ptr[ib.length_dw++] = 1; /* y */
4623
+ ib.ptr[ib.length_dw++] = 1; /* z */
4624
+ ib.ptr[ib.length_dw++] =
4625
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4626
+
4627
+ /* write CS partial flush packet */
4628
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4629
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4630
+
4631
+ /* SGPR2 */
4632
+ /* write the register state for the compute dispatch */
4633
+ for (i = 0; i < gpr_reg_size; i++) {
4634
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4635
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4636
+ - PACKET3_SET_SH_REG_START;
4637
+ ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4638
+ }
4639
+ /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4640
+ gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4641
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4642
+ ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4643
+ - PACKET3_SET_SH_REG_START;
4644
+ ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4645
+ ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4646
+
4647
+ /* write dispatch packet */
4648
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4649
+ ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4650
+ ib.ptr[ib.length_dw++] = 1; /* y */
4651
+ ib.ptr[ib.length_dw++] = 1; /* z */
4652
+ ib.ptr[ib.length_dw++] =
4653
+ REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4654
+
4655
+ /* write CS partial flush packet */
4656
+ ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4657
+ ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4658
+
4659
+ /* shedule the ib on the ring */
4660
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4661
+ if (r) {
4662
+ DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4663
+ goto fail;
4664
+ }
4665
+
4666
+ /* wait for the GPU to finish processing the IB */
4667
+ r = dma_fence_wait(f, false);
4668
+ if (r) {
4669
+ DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4670
+ goto fail;
4671
+ }
4672
+
4673
+fail:
4674
+ amdgpu_ib_free(adev, &ib, NULL);
4675
+ dma_fence_put(f);
4676
+
4677
+ return r;
4678
+}
4679
+
34254680 static int gfx_v9_0_early_init(void *handle)
34264681 {
34274682 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
34284683
3429
- adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3430
- adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4684
+ if (adev->asic_type == CHIP_ARCTURUS)
4685
+ adev->gfx.num_gfx_rings = 0;
4686
+ else
4687
+ adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4688
+ adev->gfx.num_compute_rings = amdgpu_num_kcq;
4689
+ gfx_v9_0_set_kiq_pm4_funcs(adev);
34314690 gfx_v9_0_set_ring_funcs(adev);
34324691 gfx_v9_0_set_irq_funcs(adev);
34334692 gfx_v9_0_set_gds_init(adev);
34344693 gfx_v9_0_set_rlc_funcs(adev);
4694
+
4695
+ return 0;
4696
+}
4697
+
4698
+static int gfx_v9_0_ecc_late_init(void *handle)
4699
+{
4700
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4701
+ int r;
4702
+
4703
+ /*
4704
+ * Temp workaround to fix the issue that CP firmware fails to
4705
+ * update read pointer when CPDMA is writing clearing operation
4706
+ * to GDS in suspend/resume sequence on several cards. So just
4707
+ * limit this operation in cold boot sequence.
4708
+ */
4709
+ if (!adev->in_suspend) {
4710
+ r = gfx_v9_0_do_edc_gds_workarounds(adev);
4711
+ if (r)
4712
+ return r;
4713
+ }
4714
+
4715
+ /* requires IBs so do in late init after IB pool is initialized */
4716
+ r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4717
+ if (r)
4718
+ return r;
4719
+
4720
+ if (adev->gfx.funcs &&
4721
+ adev->gfx.funcs->reset_ras_error_count)
4722
+ adev->gfx.funcs->reset_ras_error_count(adev);
4723
+
4724
+ r = amdgpu_gfx_ras_late_init(adev);
4725
+ if (r)
4726
+ return r;
34354727
34364728 return 0;
34374729 }
....@@ -3449,67 +4741,54 @@
34494741 if (r)
34504742 return r;
34514743
4744
+ r = gfx_v9_0_ecc_late_init(handle);
4745
+ if (r)
4746
+ return r;
4747
+
34524748 return 0;
34534749 }
34544750
3455
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
4751
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
34564752 {
3457
- uint32_t rlc_setting, data;
3458
- unsigned i;
3459
-
3460
- if (adev->gfx.rlc.in_safe_mode)
3461
- return;
4753
+ uint32_t rlc_setting;
34624754
34634755 /* if RLC is not enabled, do nothing */
34644756 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
34654757 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3466
- return;
4758
+ return false;
34674759
3468
- if (adev->cg_flags &
3469
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3470
- AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3471
- data = RLC_SAFE_MODE__CMD_MASK;
3472
- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3473
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4760
+ return true;
4761
+}
34744762
3475
- /* wait for RLC_SAFE_MODE */
3476
- for (i = 0; i < adev->usec_timeout; i++) {
3477
- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3478
- break;
3479
- udelay(1);
3480
- }
3481
- adev->gfx.rlc.in_safe_mode = true;
4763
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4764
+{
4765
+ uint32_t data;
4766
+ unsigned i;
4767
+
4768
+ data = RLC_SAFE_MODE__CMD_MASK;
4769
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4770
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4771
+
4772
+ /* wait for RLC_SAFE_MODE */
4773
+ for (i = 0; i < adev->usec_timeout; i++) {
4774
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4775
+ break;
4776
+ udelay(1);
34824777 }
34834778 }
34844779
3485
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
4780
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
34864781 {
3487
- uint32_t rlc_setting, data;
4782
+ uint32_t data;
34884783
3489
- if (!adev->gfx.rlc.in_safe_mode)
3490
- return;
3491
-
3492
- /* if RLC is not enabled, do nothing */
3493
- rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3494
- if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3495
- return;
3496
-
3497
- if (adev->cg_flags &
3498
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3499
- /*
3500
- * Try to exit safe mode only if it is already in safe
3501
- * mode.
3502
- */
3503
- data = RLC_SAFE_MODE__CMD_MASK;
3504
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3505
- adev->gfx.rlc.in_safe_mode = false;
3506
- }
4784
+ data = RLC_SAFE_MODE__CMD_MASK;
4785
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
35074786 }
35084787
35094788 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
35104789 bool enable)
35114790 {
3512
- gfx_v9_0_enter_rlc_safe_mode(adev);
4791
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
35134792
35144793 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
35154794 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
....@@ -3517,10 +4796,11 @@
35174796 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
35184797 } else {
35194798 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3520
- gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4799
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4800
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
35214801 }
35224802
3523
- gfx_v9_0_exit_rlc_safe_mode(adev);
4803
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
35244804 }
35254805
35264806 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
....@@ -3546,6 +4826,8 @@
35464826 bool enable)
35474827 {
35484828 uint32_t data, def;
4829
+
4830
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
35494831
35504832 /* It is disabled by HW by default */
35514833 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
....@@ -3611,6 +4893,8 @@
36114893 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
36124894 }
36134895 }
4896
+
4897
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
36144898 }
36154899
36164900 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
....@@ -3618,10 +4902,13 @@
36184902 {
36194903 uint32_t data, def;
36204904
3621
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
4905
+ if (adev->asic_type == CHIP_ARCTURUS)
4906
+ return;
4907
+
4908
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
36224909
36234910 /* Enable 3D CGCG/CGLS */
3624
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4911
+ if (enable) {
36254912 /* write cmd to clear cgcg/cgls ov */
36264913 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
36274914 /* unset CGCG override */
....@@ -3633,8 +4920,12 @@
36334920 /* enable 3Dcgcg FSM(0x0000363f) */
36344921 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
36354922
3636
- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3637
- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4923
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4924
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4925
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4926
+ else
4927
+ data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
4928
+
36384929 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
36394930 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
36404931 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
....@@ -3658,7 +4949,7 @@
36584949 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
36594950 }
36604951
3661
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
4952
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
36624953 }
36634954
36644955 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
....@@ -3666,7 +4957,7 @@
36664957 {
36674958 uint32_t def, data;
36684959
3669
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
4960
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
36704961
36714962 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
36724963 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
....@@ -3683,8 +4974,12 @@
36834974 /* enable cgcg FSM(0x0000363F) */
36844975 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
36854976
3686
- data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3687
- RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4977
+ if (adev->asic_type == CHIP_ARCTURUS)
4978
+ data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4979
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4980
+ else
4981
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4982
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
36884983 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
36894984 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
36904985 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
....@@ -3706,7 +5001,7 @@
37065001 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
37075002 }
37085003
3709
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
5004
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
37105005 }
37115006
37125007 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
....@@ -3734,19 +5029,83 @@
37345029 return 0;
37355030 }
37365031
5032
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
5033
+{
5034
+ u32 reg, data;
5035
+
5036
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
5037
+ if (amdgpu_sriov_is_pp_one_vf(adev))
5038
+ data = RREG32_NO_KIQ(reg);
5039
+ else
5040
+ data = RREG32(reg);
5041
+
5042
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
5043
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
5044
+
5045
+ if (amdgpu_sriov_is_pp_one_vf(adev))
5046
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5047
+ else
5048
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5049
+}
5050
+
5051
+static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5052
+ uint32_t offset,
5053
+ struct soc15_reg_rlcg *entries, int arr_size)
5054
+{
5055
+ int i;
5056
+ uint32_t reg;
5057
+
5058
+ if (!entries)
5059
+ return false;
5060
+
5061
+ for (i = 0; i < arr_size; i++) {
5062
+ const struct soc15_reg_rlcg *entry;
5063
+
5064
+ entry = &entries[i];
5065
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5066
+ if (offset == reg)
5067
+ return true;
5068
+ }
5069
+
5070
+ return false;
5071
+}
5072
+
5073
+static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5074
+{
5075
+ return gfx_v9_0_check_rlcg_range(adev, offset,
5076
+ (void *)rlcg_access_gc_9_0,
5077
+ ARRAY_SIZE(rlcg_access_gc_9_0));
5078
+}
5079
+
37375080 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3738
- .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3739
- .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
5081
+ .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5082
+ .set_safe_mode = gfx_v9_0_set_safe_mode,
5083
+ .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5084
+ .init = gfx_v9_0_rlc_init,
5085
+ .get_csb_size = gfx_v9_0_get_csb_size,
5086
+ .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5087
+ .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5088
+ .resume = gfx_v9_0_rlc_resume,
5089
+ .stop = gfx_v9_0_rlc_stop,
5090
+ .reset = gfx_v9_0_rlc_reset,
5091
+ .start = gfx_v9_0_rlc_start,
5092
+ .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5093
+ .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5094
+ .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
37405095 };
37415096
37425097 static int gfx_v9_0_set_powergating_state(void *handle,
37435098 enum amd_powergating_state state)
37445099 {
37455100 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3746
- bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
5101
+ bool enable = (state == AMD_PG_STATE_GATE);
37475102
37485103 switch (adev->asic_type) {
37495104 case CHIP_RAVEN:
5105
+ case CHIP_RENOIR:
5106
+ if (!enable)
5107
+ amdgpu_gfx_off_ctrl(adev, false);
5108
+
37505109 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
37515110 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
37525111 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
....@@ -3766,14 +5125,11 @@
37665125 /* update mgcg state */
37675126 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
37685127
3769
- /* set gfx off through smu */
3770
- if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
3771
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
5128
+ if (enable)
5129
+ amdgpu_gfx_off_ctrl(adev, true);
37725130 break;
37735131 case CHIP_VEGA12:
3774
- /* set gfx off through smu */
3775
- if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
3776
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
5132
+ amdgpu_gfx_off_ctrl(adev, enable);
37775133 break;
37785134 default:
37795135 break;
....@@ -3795,8 +5151,10 @@
37955151 case CHIP_VEGA12:
37965152 case CHIP_VEGA20:
37975153 case CHIP_RAVEN:
5154
+ case CHIP_ARCTURUS:
5155
+ case CHIP_RENOIR:
37985156 gfx_v9_0_update_gfx_clock_gating(adev,
3799
- state == AMD_CG_STATE_GATE ? true : false);
5157
+ state == AMD_CG_STATE_GATE);
38005158 break;
38015159 default:
38025160 break;
....@@ -3813,12 +5171,12 @@
38135171 *flags = 0;
38145172
38155173 /* AMD_CG_SUPPORT_GFX_MGCG */
3816
- data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
5174
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
38175175 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
38185176 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
38195177
38205178 /* AMD_CG_SUPPORT_GFX_CGCG */
3821
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
5179
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
38225180 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
38235181 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
38245182
....@@ -3827,23 +5185,25 @@
38275185 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
38285186
38295187 /* AMD_CG_SUPPORT_GFX_RLC_LS */
3830
- data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
5188
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
38315189 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
38325190 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
38335191
38345192 /* AMD_CG_SUPPORT_GFX_CP_LS */
3835
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
5193
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
38365194 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
38375195 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
38385196
3839
- /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3840
- data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3841
- if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3842
- *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5197
+ if (adev->asic_type != CHIP_ARCTURUS) {
5198
+ /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5199
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5200
+ if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5201
+ *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
38435202
3844
- /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3845
- if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3846
- *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5203
+ /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5204
+ if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5205
+ *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5206
+ }
38475207 }
38485208
38495209 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
....@@ -3885,7 +5245,7 @@
38855245 {
38865246 struct amdgpu_device *adev = ring->adev;
38875247 u32 ref_and_mask, reg_mem_engine;
3888
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
5248
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
38895249
38905250 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
38915251 switch (ring->me) {
....@@ -3905,15 +5265,17 @@
39055265 }
39065266
39075267 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3908
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
3909
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
5268
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5269
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
39105270 ref_and_mask, ref_and_mask, 0x20);
39115271 }
39125272
39135273 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3914
- struct amdgpu_ib *ib,
3915
- unsigned vmid, bool ctx_switch)
5274
+ struct amdgpu_job *job,
5275
+ struct amdgpu_ib *ib,
5276
+ uint32_t flags)
39165277 {
5278
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
39175279 u32 header, control = 0;
39185280
39195281 if (ib->flags & AMDGPU_IB_FLAG_CE)
....@@ -3926,7 +5288,7 @@
39265288 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
39275289 control |= INDIRECT_BUFFER_PRE_ENB(1);
39285290
3929
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
5291
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
39305292 gfx_v9_0_ring_emit_de_meta(ring);
39315293 }
39325294
....@@ -3942,20 +5304,38 @@
39425304 }
39435305
39445306 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3945
- struct amdgpu_ib *ib,
3946
- unsigned vmid, bool ctx_switch)
5307
+ struct amdgpu_job *job,
5308
+ struct amdgpu_ib *ib,
5309
+ uint32_t flags)
39475310 {
3948
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5311
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5312
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
39495313
3950
- amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5314
+ /* Currently, there is a high possibility to get wave ID mismatch
5315
+ * between ME and GDS, leading to a hw deadlock, because ME generates
5316
+ * different wave IDs than the GDS expects. This situation happens
5317
+ * randomly when at least 5 compute pipes use GDS ordered append.
5318
+ * The wave IDs generated by ME are also wrong after suspend/resume.
5319
+ * Those are probably bugs somewhere else in the kernel driver.
5320
+ *
5321
+ * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5322
+ * GDS to 0 for this ring (me/pipe).
5323
+ */
5324
+ if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5325
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5326
+ amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5327
+ amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5328
+ }
5329
+
5330
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
39515331 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3952
- amdgpu_ring_write(ring,
5332
+ amdgpu_ring_write(ring,
39535333 #ifdef __BIG_ENDIAN
3954
- (2 << 0) |
5334
+ (2 << 0) |
39555335 #endif
3956
- lower_32_bits(ib->gpu_addr));
3957
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3958
- amdgpu_ring_write(ring, control);
5336
+ lower_32_bits(ib->gpu_addr));
5337
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5338
+ amdgpu_ring_write(ring, control);
39595339 }
39605340
39615341 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
....@@ -4031,105 +5411,6 @@
40315411 else
40325412 BUG();
40335413 return wptr;
4034
-}
4035
-
4036
-static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
4037
- bool acquire)
4038
-{
4039
- struct amdgpu_device *adev = ring->adev;
4040
- int pipe_num, tmp, reg;
4041
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
4042
-
4043
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
4044
-
4045
- /* first me only has 2 entries, GFX and HP3D */
4046
- if (ring->me > 0)
4047
- pipe_num -= 2;
4048
-
4049
- reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
4050
- tmp = RREG32(reg);
4051
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
4052
- WREG32(reg, tmp);
4053
-}
4054
-
4055
-static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
4056
- struct amdgpu_ring *ring,
4057
- bool acquire)
4058
-{
4059
- int i, pipe;
4060
- bool reserve;
4061
- struct amdgpu_ring *iring;
4062
-
4063
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
4064
- pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
4065
- if (acquire)
4066
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4067
- else
4068
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4069
-
4070
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
4071
- /* Clear all reservations - everyone reacquires all resources */
4072
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
4073
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
4074
- true);
4075
-
4076
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
4077
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
4078
- true);
4079
- } else {
4080
- /* Lower all pipes without a current reservation */
4081
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
4082
- iring = &adev->gfx.gfx_ring[i];
4083
- pipe = amdgpu_gfx_queue_to_bit(adev,
4084
- iring->me,
4085
- iring->pipe,
4086
- 0);
4087
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4088
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4089
- }
4090
-
4091
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
4092
- iring = &adev->gfx.compute_ring[i];
4093
- pipe = amdgpu_gfx_queue_to_bit(adev,
4094
- iring->me,
4095
- iring->pipe,
4096
- 0);
4097
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4098
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4099
- }
4100
- }
4101
-
4102
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
4103
-}
4104
-
4105
-static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
4106
- struct amdgpu_ring *ring,
4107
- bool acquire)
4108
-{
4109
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
4110
- uint32_t queue_priority = acquire ? 0xf : 0x0;
4111
-
4112
- mutex_lock(&adev->srbm_mutex);
4113
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4114
-
4115
- WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
4116
- WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
4117
-
4118
- soc15_grbm_select(adev, 0, 0, 0, 0);
4119
- mutex_unlock(&adev->srbm_mutex);
4120
-}
4121
-
4122
-static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
4123
- enum drm_sched_priority priority)
4124
-{
4125
- struct amdgpu_device *adev = ring->adev;
4126
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
4127
-
4128
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
4129
- return;
4130
-
4131
- gfx_v9_0_hqd_set_priority(adev, ring, acquire);
4132
- gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
41335414 }
41345415
41355416 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
....@@ -4219,10 +5500,13 @@
42195500 amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
42205501 }
42215502
4222
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
5503
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5504
+ bool secure)
42235505 {
5506
+ uint32_t v = secure ? FRAME_TMZ : 0;
5507
+
42245508 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4225
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
5509
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
42265510 }
42275511
42285512 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
....@@ -4231,8 +5515,6 @@
42315515
42325516 if (amdgpu_sriov_vf(ring->adev))
42335517 gfx_v9_0_ring_emit_ce_meta(ring);
4234
-
4235
- gfx_v9_0_ring_emit_tmz(ring, true);
42365518
42375519 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
42385520 if (flags & AMDGPU_HAVE_CTX_SWITCH) {
....@@ -4284,7 +5566,8 @@
42845566 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
42855567 }
42865568
4287
-static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
5569
+static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5570
+ uint32_t reg_val_offs)
42885571 {
42895572 struct amdgpu_device *adev = ring->adev;
42905573
....@@ -4295,9 +5578,9 @@
42955578 amdgpu_ring_write(ring, reg);
42965579 amdgpu_ring_write(ring, 0);
42975580 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4298
- adev->virt.reg_val_offs * 4));
5581
+ reg_val_offs * 4));
42995582 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4300
- adev->virt.reg_val_offs * 4));
5583
+ reg_val_offs * 4));
43015584 }
43025585
43035586 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
....@@ -4334,13 +5617,28 @@
43345617 uint32_t ref, uint32_t mask)
43355618 {
43365619 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5620
+ struct amdgpu_device *adev = ring->adev;
5621
+ bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5622
+ adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
43375623
4338
- if (amdgpu_sriov_vf(ring->adev))
5624
+ if (fw_version_ok)
43395625 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
43405626 ref, mask, 0x20);
43415627 else
43425628 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
43435629 ref, mask);
5630
+}
5631
+
5632
+static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5633
+{
5634
+ struct amdgpu_device *adev = ring->adev;
5635
+ uint32_t value = 0;
5636
+
5637
+ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5638
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5639
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5640
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5641
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
43445642 }
43455643
43465644 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
....@@ -4448,13 +5746,52 @@
44485746 return 0;
44495747 }
44505748
5749
+#define ENABLE_ECC_ON_ME_PIPE(me, pipe) \
5750
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5751
+ CP_ECC_ERROR_INT_ENABLE, 1)
5752
+
5753
+#define DISABLE_ECC_ON_ME_PIPE(me, pipe) \
5754
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5755
+ CP_ECC_ERROR_INT_ENABLE, 0)
5756
+
5757
+static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5758
+ struct amdgpu_irq_src *source,
5759
+ unsigned type,
5760
+ enum amdgpu_interrupt_state state)
5761
+{
5762
+ switch (state) {
5763
+ case AMDGPU_IRQ_STATE_DISABLE:
5764
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5765
+ CP_ECC_ERROR_INT_ENABLE, 0);
5766
+ DISABLE_ECC_ON_ME_PIPE(1, 0);
5767
+ DISABLE_ECC_ON_ME_PIPE(1, 1);
5768
+ DISABLE_ECC_ON_ME_PIPE(1, 2);
5769
+ DISABLE_ECC_ON_ME_PIPE(1, 3);
5770
+ break;
5771
+
5772
+ case AMDGPU_IRQ_STATE_ENABLE:
5773
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5774
+ CP_ECC_ERROR_INT_ENABLE, 1);
5775
+ ENABLE_ECC_ON_ME_PIPE(1, 0);
5776
+ ENABLE_ECC_ON_ME_PIPE(1, 1);
5777
+ ENABLE_ECC_ON_ME_PIPE(1, 2);
5778
+ ENABLE_ECC_ON_ME_PIPE(1, 3);
5779
+ break;
5780
+ default:
5781
+ break;
5782
+ }
5783
+
5784
+ return 0;
5785
+}
5786
+
5787
+
44515788 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
44525789 struct amdgpu_irq_src *src,
44535790 unsigned type,
44545791 enum amdgpu_interrupt_state state)
44555792 {
44565793 switch (type) {
4457
- case AMDGPU_CP_IRQ_GFX_EOP:
5794
+ case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
44585795 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
44595796 break;
44605797 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
....@@ -4519,12 +5856,39 @@
45195856 return 0;
45205857 }
45215858
5859
+static void gfx_v9_0_fault(struct amdgpu_device *adev,
5860
+ struct amdgpu_iv_entry *entry)
5861
+{
5862
+ u8 me_id, pipe_id, queue_id;
5863
+ struct amdgpu_ring *ring;
5864
+ int i;
5865
+
5866
+ me_id = (entry->ring_id & 0x0c) >> 2;
5867
+ pipe_id = (entry->ring_id & 0x03) >> 0;
5868
+ queue_id = (entry->ring_id & 0x70) >> 4;
5869
+
5870
+ switch (me_id) {
5871
+ case 0:
5872
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5873
+ break;
5874
+ case 1:
5875
+ case 2:
5876
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5877
+ ring = &adev->gfx.compute_ring[i];
5878
+ if (ring->me == me_id && ring->pipe == pipe_id &&
5879
+ ring->queue == queue_id)
5880
+ drm_sched_fault(&ring->sched);
5881
+ }
5882
+ break;
5883
+ }
5884
+}
5885
+
45225886 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
45235887 struct amdgpu_irq_src *source,
45245888 struct amdgpu_iv_entry *entry)
45255889 {
45265890 DRM_ERROR("Illegal register access in command stream\n");
4527
- schedule_work(&adev->reset_work);
5891
+ gfx_v9_0_fault(adev, entry);
45285892 return 0;
45295893 }
45305894
....@@ -4533,70 +5897,822 @@
45335897 struct amdgpu_iv_entry *entry)
45345898 {
45355899 DRM_ERROR("Illegal instruction in command stream\n");
4536
- schedule_work(&adev->reset_work);
5900
+ gfx_v9_0_fault(adev, entry);
45375901 return 0;
45385902 }
45395903
4540
-static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
4541
- struct amdgpu_irq_src *src,
4542
- unsigned int type,
4543
- enum amdgpu_interrupt_state state)
4544
-{
4545
- uint32_t tmp, target;
4546
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
45475904
4548
- if (ring->me == 1)
4549
- target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4550
- else
4551
- target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
4552
- target += ring->pipe;
4553
-
4554
- switch (type) {
4555
- case AMDGPU_CP_KIQ_IRQ_DRIVER0:
4556
- if (state == AMDGPU_IRQ_STATE_DISABLE) {
4557
- tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4558
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4559
- GENERIC2_INT_ENABLE, 0);
4560
- WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4561
-
4562
- tmp = RREG32(target);
4563
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4564
- GENERIC2_INT_ENABLE, 0);
4565
- WREG32(target, tmp);
4566
- } else {
4567
- tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4568
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4569
- GENERIC2_INT_ENABLE, 1);
4570
- WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4571
-
4572
- tmp = RREG32(target);
4573
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4574
- GENERIC2_INT_ENABLE, 1);
4575
- WREG32(target, tmp);
4576
- }
4577
- break;
4578
- default:
4579
- BUG(); /* kiq only support GENERIC2_INT now */
4580
- break;
5905
+static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5906
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5907
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5908
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5909
+ },
5910
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5911
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5912
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5913
+ },
5914
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5915
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5916
+ 0, 0
5917
+ },
5918
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5919
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5920
+ 0, 0
5921
+ },
5922
+ { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5923
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5924
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5925
+ },
5926
+ { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5927
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5928
+ 0, 0
5929
+ },
5930
+ { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5931
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5932
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5933
+ },
5934
+ { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5935
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5936
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5937
+ },
5938
+ { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5939
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5940
+ 0, 0
5941
+ },
5942
+ { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5943
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5944
+ 0, 0
5945
+ },
5946
+ { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5947
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5948
+ 0, 0
5949
+ },
5950
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5951
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5952
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5953
+ },
5954
+ { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5955
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5956
+ 0, 0
5957
+ },
5958
+ { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5959
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5960
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5961
+ },
5962
+ { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5963
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5964
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5965
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5966
+ },
5967
+ { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5968
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5969
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5970
+ 0, 0
5971
+ },
5972
+ { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
5973
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5974
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
5975
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
5976
+ },
5977
+ { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
5978
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5979
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
5980
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
5981
+ },
5982
+ { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
5983
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5984
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
5985
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
5986
+ },
5987
+ { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
5988
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5989
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
5990
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
5991
+ },
5992
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
5993
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
5994
+ 0, 0
5995
+ },
5996
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5997
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
5998
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
5999
+ },
6000
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6001
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
6002
+ 0, 0
6003
+ },
6004
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6005
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
6006
+ 0, 0
6007
+ },
6008
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6009
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
6010
+ 0, 0
6011
+ },
6012
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
6013
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
6014
+ 0, 0
6015
+ },
6016
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6017
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
6018
+ 0, 0
6019
+ },
6020
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
6021
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
6022
+ 0, 0
6023
+ },
6024
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6025
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
6026
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
6027
+ },
6028
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6029
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
6030
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
6031
+ },
6032
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6033
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
6034
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
6035
+ },
6036
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6037
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
6038
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
6039
+ },
6040
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6041
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
6042
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
6043
+ },
6044
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6045
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6046
+ 0, 0
6047
+ },
6048
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6049
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6050
+ 0, 0
6051
+ },
6052
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6053
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6054
+ 0, 0
6055
+ },
6056
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6057
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6058
+ 0, 0
6059
+ },
6060
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6061
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6062
+ 0, 0
6063
+ },
6064
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6065
+ SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6066
+ 0, 0
6067
+ },
6068
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6069
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6070
+ 0, 0
6071
+ },
6072
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6073
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6074
+ 0, 0
6075
+ },
6076
+ { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6077
+ SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6078
+ 0, 0
6079
+ },
6080
+ { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6081
+ SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6082
+ 0, 0
6083
+ },
6084
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6085
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6086
+ 0, 0
6087
+ },
6088
+ { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6089
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6090
+ 0, 0
6091
+ },
6092
+ { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6093
+ SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6094
+ 0, 0
6095
+ },
6096
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6097
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6098
+ 0, 0
6099
+ },
6100
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6101
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6102
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6103
+ },
6104
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6105
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6106
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6107
+ },
6108
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6109
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6110
+ 0, 0
6111
+ },
6112
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6113
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6114
+ 0, 0
6115
+ },
6116
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6117
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6118
+ 0, 0
6119
+ },
6120
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6121
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6122
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6123
+ },
6124
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6125
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6126
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6127
+ },
6128
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6129
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6130
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6131
+ },
6132
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6133
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6134
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6135
+ },
6136
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6137
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6138
+ 0, 0
6139
+ },
6140
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6141
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6142
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6143
+ },
6144
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6145
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6146
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6147
+ },
6148
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6149
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6150
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6151
+ },
6152
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6153
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6154
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6155
+ },
6156
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6157
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6158
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6159
+ },
6160
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6161
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6162
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6163
+ },
6164
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6165
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6166
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6167
+ },
6168
+ { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6169
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6170
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6171
+ },
6172
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6173
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6174
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6175
+ },
6176
+ { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6177
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6178
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6179
+ },
6180
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6181
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6182
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6183
+ },
6184
+ { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6185
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6186
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6187
+ },
6188
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6189
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6190
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6191
+ },
6192
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6193
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6194
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6195
+ },
6196
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6197
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6198
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6199
+ },
6200
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6201
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6202
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6203
+ },
6204
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6205
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6206
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6207
+ },
6208
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6209
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6210
+ 0, 0
6211
+ },
6212
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6213
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6214
+ 0, 0
6215
+ },
6216
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6217
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6218
+ 0, 0
6219
+ },
6220
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6221
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6222
+ 0, 0
6223
+ },
6224
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6225
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6226
+ 0, 0
6227
+ },
6228
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6229
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6230
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6231
+ },
6232
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6233
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6234
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6235
+ },
6236
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6237
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6238
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6239
+ },
6240
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6241
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6242
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6243
+ },
6244
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6245
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6246
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6247
+ },
6248
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6249
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6250
+ 0, 0
6251
+ },
6252
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6253
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6254
+ 0, 0
6255
+ },
6256
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6257
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6258
+ 0, 0
6259
+ },
6260
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6261
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6262
+ 0, 0
6263
+ },
6264
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6265
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6266
+ 0, 0
6267
+ },
6268
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6269
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6270
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6271
+ },
6272
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6273
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6274
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6275
+ },
6276
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6277
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6278
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6279
+ },
6280
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6281
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6282
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6283
+ },
6284
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6285
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6286
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6287
+ },
6288
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6289
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6290
+ 0, 0
6291
+ },
6292
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6293
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6294
+ 0, 0
6295
+ },
6296
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6297
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6298
+ 0, 0
6299
+ },
6300
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6301
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6302
+ 0, 0
6303
+ },
6304
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6305
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6306
+ 0, 0
6307
+ },
6308
+ { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6309
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6310
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6311
+ },
6312
+ { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6313
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6314
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6315
+ },
6316
+ { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6317
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6318
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6319
+ },
6320
+ { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6321
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6322
+ 0, 0
6323
+ },
6324
+ { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6325
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6326
+ 0, 0
6327
+ },
6328
+ { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6329
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6330
+ 0, 0
6331
+ },
6332
+ { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6333
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6334
+ 0, 0
6335
+ },
6336
+ { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6337
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6338
+ 0, 0
6339
+ },
6340
+ { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6341
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6342
+ 0, 0
45816343 }
6344
+};
6345
+
6346
+static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6347
+ void *inject_if)
6348
+{
6349
+ struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6350
+ int ret;
6351
+ struct ta_ras_trigger_error_input block_info = { 0 };
6352
+
6353
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6354
+ return -EINVAL;
6355
+
6356
+ if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6357
+ return -EINVAL;
6358
+
6359
+ if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6360
+ return -EPERM;
6361
+
6362
+ if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6363
+ info->head.type)) {
6364
+ DRM_ERROR("GFX Subblock %s, hardware do not support type 0x%x\n",
6365
+ ras_gfx_subblocks[info->head.sub_block_index].name,
6366
+ info->head.type);
6367
+ return -EPERM;
6368
+ }
6369
+
6370
+ if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6371
+ info->head.type)) {
6372
+ DRM_ERROR("GFX Subblock %s, driver do not support type 0x%x\n",
6373
+ ras_gfx_subblocks[info->head.sub_block_index].name,
6374
+ info->head.type);
6375
+ return -EPERM;
6376
+ }
6377
+
6378
+ block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6379
+ block_info.sub_block_index =
6380
+ ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6381
+ block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6382
+ block_info.address = info->address;
6383
+ block_info.value = info->value;
6384
+
6385
+ mutex_lock(&adev->grbm_idx_mutex);
6386
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
6387
+ mutex_unlock(&adev->grbm_idx_mutex);
6388
+
6389
+ return ret;
6390
+}
6391
+
6392
+static const char *vml2_mems[] = {
6393
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6394
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6395
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6396
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6397
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6398
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6399
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6400
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6401
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6402
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6403
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6404
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6405
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6406
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6407
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6408
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6409
+};
6410
+
6411
+static const char *vml2_walker_mems[] = {
6412
+ "UTC_VML2_CACHE_PDE0_MEM0",
6413
+ "UTC_VML2_CACHE_PDE0_MEM1",
6414
+ "UTC_VML2_CACHE_PDE1_MEM0",
6415
+ "UTC_VML2_CACHE_PDE1_MEM1",
6416
+ "UTC_VML2_CACHE_PDE2_MEM0",
6417
+ "UTC_VML2_CACHE_PDE2_MEM1",
6418
+ "UTC_VML2_RDIF_LOG_FIFO",
6419
+};
6420
+
6421
+static const char *atc_l2_cache_2m_mems[] = {
6422
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6423
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6424
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6425
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6426
+};
6427
+
6428
+static const char *atc_l2_cache_4k_mems[] = {
6429
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6430
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6431
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6432
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6433
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6434
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6435
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6436
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6437
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6438
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6439
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6440
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6441
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6442
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6443
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6444
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6445
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6446
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6447
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6448
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6449
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6450
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6451
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6452
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6453
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6454
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6455
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6456
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6457
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6458
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6459
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6460
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6461
+};
6462
+
6463
+static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6464
+ struct ras_err_data *err_data)
6465
+{
6466
+ uint32_t i, data;
6467
+ uint32_t sec_count, ded_count;
6468
+
6469
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6470
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6471
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6472
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6473
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6474
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6475
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6476
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6477
+
6478
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6479
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6480
+ data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6481
+
6482
+ sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6483
+ if (sec_count) {
6484
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6485
+ "SEC %d\n", i, vml2_mems[i], sec_count);
6486
+ err_data->ce_count += sec_count;
6487
+ }
6488
+
6489
+ ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6490
+ if (ded_count) {
6491
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6492
+ "DED %d\n", i, vml2_mems[i], ded_count);
6493
+ err_data->ue_count += ded_count;
6494
+ }
6495
+ }
6496
+
6497
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6498
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6499
+ data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6500
+
6501
+ sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6502
+ SEC_COUNT);
6503
+ if (sec_count) {
6504
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6505
+ "SEC %d\n", i, vml2_walker_mems[i], sec_count);
6506
+ err_data->ce_count += sec_count;
6507
+ }
6508
+
6509
+ ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6510
+ DED_COUNT);
6511
+ if (ded_count) {
6512
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6513
+ "DED %d\n", i, vml2_walker_mems[i], ded_count);
6514
+ err_data->ue_count += ded_count;
6515
+ }
6516
+ }
6517
+
6518
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6519
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6520
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6521
+
6522
+ sec_count = (data & 0x00006000L) >> 0xd;
6523
+ if (sec_count) {
6524
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6525
+ "SEC %d\n", i, atc_l2_cache_2m_mems[i],
6526
+ sec_count);
6527
+ err_data->ce_count += sec_count;
6528
+ }
6529
+ }
6530
+
6531
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6532
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6533
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6534
+
6535
+ sec_count = (data & 0x00006000L) >> 0xd;
6536
+ if (sec_count) {
6537
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6538
+ "SEC %d\n", i, atc_l2_cache_4k_mems[i],
6539
+ sec_count);
6540
+ err_data->ce_count += sec_count;
6541
+ }
6542
+
6543
+ ded_count = (data & 0x00018000L) >> 0xf;
6544
+ if (ded_count) {
6545
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6546
+ "DED %d\n", i, atc_l2_cache_4k_mems[i],
6547
+ ded_count);
6548
+ err_data->ue_count += ded_count;
6549
+ }
6550
+ }
6551
+
6552
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6553
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6554
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6555
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6556
+
45826557 return 0;
45836558 }
45846559
4585
-static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
4586
- struct amdgpu_irq_src *source,
4587
- struct amdgpu_iv_entry *entry)
6560
+static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6561
+ const struct soc15_reg_entry *reg,
6562
+ uint32_t se_id, uint32_t inst_id, uint32_t value,
6563
+ uint32_t *sec_count, uint32_t *ded_count)
45886564 {
4589
- u8 me_id, pipe_id, queue_id;
4590
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
6565
+ uint32_t i;
6566
+ uint32_t sec_cnt, ded_cnt;
45916567
4592
- me_id = (entry->ring_id & 0x0c) >> 2;
4593
- pipe_id = (entry->ring_id & 0x03) >> 0;
4594
- queue_id = (entry->ring_id & 0x70) >> 4;
4595
- DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
4596
- me_id, pipe_id, queue_id);
6568
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6569
+ if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6570
+ gfx_v9_0_ras_fields[i].seg != reg->seg ||
6571
+ gfx_v9_0_ras_fields[i].inst != reg->inst)
6572
+ continue;
45976573
4598
- amdgpu_fence_process(ring);
6574
+ sec_cnt = (value &
6575
+ gfx_v9_0_ras_fields[i].sec_count_mask) >>
6576
+ gfx_v9_0_ras_fields[i].sec_count_shift;
6577
+ if (sec_cnt) {
6578
+ dev_info(adev->dev, "GFX SubBlock %s, "
6579
+ "Instance[%d][%d], SEC %d\n",
6580
+ gfx_v9_0_ras_fields[i].name,
6581
+ se_id, inst_id,
6582
+ sec_cnt);
6583
+ *sec_count += sec_cnt;
6584
+ }
6585
+
6586
+ ded_cnt = (value &
6587
+ gfx_v9_0_ras_fields[i].ded_count_mask) >>
6588
+ gfx_v9_0_ras_fields[i].ded_count_shift;
6589
+ if (ded_cnt) {
6590
+ dev_info(adev->dev, "GFX SubBlock %s, "
6591
+ "Instance[%d][%d], DED %d\n",
6592
+ gfx_v9_0_ras_fields[i].name,
6593
+ se_id, inst_id,
6594
+ ded_cnt);
6595
+ *ded_count += ded_cnt;
6596
+ }
6597
+ }
6598
+
45996599 return 0;
6600
+}
6601
+
6602
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6603
+{
6604
+ int i, j, k;
6605
+
6606
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6607
+ return;
6608
+
6609
+ /* read back registers to clear the counters */
6610
+ mutex_lock(&adev->grbm_idx_mutex);
6611
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6612
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6613
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6614
+ gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6615
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6616
+ }
6617
+ }
6618
+ }
6619
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6620
+ mutex_unlock(&adev->grbm_idx_mutex);
6621
+
6622
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6623
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6624
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6625
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6626
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6627
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6628
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6629
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6630
+
6631
+ for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6632
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6633
+ RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6634
+ }
6635
+
6636
+ for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6637
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6638
+ RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6639
+ }
6640
+
6641
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6642
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6643
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6644
+ }
6645
+
6646
+ for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6647
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6648
+ RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6649
+ }
6650
+
6651
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6652
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6653
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6654
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6655
+}
6656
+
6657
+static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6658
+ void *ras_error_status)
6659
+{
6660
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6661
+ uint32_t sec_count = 0, ded_count = 0;
6662
+ uint32_t i, j, k;
6663
+ uint32_t reg_value;
6664
+
6665
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6666
+ return -EINVAL;
6667
+
6668
+ err_data->ue_count = 0;
6669
+ err_data->ce_count = 0;
6670
+
6671
+ mutex_lock(&adev->grbm_idx_mutex);
6672
+
6673
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6674
+ for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6675
+ for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6676
+ gfx_v9_0_select_se_sh(adev, j, 0, k);
6677
+ reg_value =
6678
+ RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6679
+ if (reg_value)
6680
+ gfx_v9_0_ras_error_count(adev,
6681
+ &gfx_v9_0_edc_counter_regs[i],
6682
+ j, k, reg_value,
6683
+ &sec_count, &ded_count);
6684
+ }
6685
+ }
6686
+ }
6687
+
6688
+ err_data->ce_count += sec_count;
6689
+ err_data->ue_count += ded_count;
6690
+
6691
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6692
+ mutex_unlock(&adev->grbm_idx_mutex);
6693
+
6694
+ gfx_v9_0_query_utc_edc_status(adev, err_data);
6695
+
6696
+ return 0;
6697
+}
6698
+
6699
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
6700
+{
6701
+ const unsigned int cp_coher_cntl =
6702
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
6703
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
6704
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
6705
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
6706
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
6707
+
6708
+ /* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */
6709
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6710
+ amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
6711
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
6712
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
6713
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6714
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
6715
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
46006716 }
46016717
46026718 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
....@@ -4622,7 +6738,7 @@
46226738 .align_mask = 0xff,
46236739 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
46246740 .support_64bit_ptrs = true,
4625
- .vmhub = AMDGPU_GFXHUB,
6741
+ .vmhub = AMDGPU_GFXHUB_0,
46266742 .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
46276743 .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
46286744 .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
....@@ -4645,7 +6761,8 @@
46456761 3 + /* CNTX_CTRL */
46466762 5 + /* HDP_INVL */
46476763 8 + 8 + /* FENCE x2 */
4648
- 2, /* SWITCH_BUFFER */
6764
+ 2 + /* SWITCH_BUFFER */
6765
+ 7, /* gfx_v9_0_emit_mem_sync */
46496766 .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
46506767 .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
46516768 .emit_fence = gfx_v9_0_ring_emit_fence,
....@@ -4661,10 +6778,12 @@
46616778 .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
46626779 .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
46636780 .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
4664
- .emit_tmz = gfx_v9_0_ring_emit_tmz,
6781
+ .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
46656782 .emit_wreg = gfx_v9_0_ring_emit_wreg,
46666783 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
46676784 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6785
+ .soft_recovery = gfx_v9_0_ring_soft_recovery,
6786
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
46686787 };
46696788
46706789 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
....@@ -4672,7 +6791,7 @@
46726791 .align_mask = 0xff,
46736792 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
46746793 .support_64bit_ptrs = true,
4675
- .vmhub = AMDGPU_GFXHUB,
6794
+ .vmhub = AMDGPU_GFXHUB_0,
46766795 .get_rptr = gfx_v9_0_ring_get_rptr_compute,
46776796 .get_wptr = gfx_v9_0_ring_get_wptr_compute,
46786797 .set_wptr = gfx_v9_0_ring_set_wptr_compute,
....@@ -4684,8 +6803,9 @@
46846803 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
46856804 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
46866805 2 + /* gfx_v9_0_ring_emit_vm_flush */
4687
- 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
4688
- .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
6806
+ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6807
+ 7, /* gfx_v9_0_emit_mem_sync */
6808
+ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
46896809 .emit_ib = gfx_v9_0_ring_emit_ib_compute,
46906810 .emit_fence = gfx_v9_0_ring_emit_fence,
46916811 .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
....@@ -4696,10 +6816,10 @@
46966816 .test_ib = gfx_v9_0_ring_test_ib,
46976817 .insert_nop = amdgpu_ring_insert_nop,
46986818 .pad_ib = amdgpu_ring_generic_pad_ib,
4699
- .set_priority = gfx_v9_0_ring_set_priority_compute,
47006819 .emit_wreg = gfx_v9_0_ring_emit_wreg,
47016820 .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
47026821 .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6822
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
47036823 };
47046824
47056825 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
....@@ -4707,7 +6827,7 @@
47076827 .align_mask = 0xff,
47086828 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
47096829 .support_64bit_ptrs = true,
4710
- .vmhub = AMDGPU_GFXHUB,
6830
+ .vmhub = AMDGPU_GFXHUB_0,
47116831 .get_rptr = gfx_v9_0_ring_get_rptr_compute,
47126832 .get_wptr = gfx_v9_0_ring_get_wptr_compute,
47136833 .set_wptr = gfx_v9_0_ring_set_wptr_compute,
....@@ -4720,11 +6840,9 @@
47206840 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
47216841 2 + /* gfx_v9_0_ring_emit_vm_flush */
47226842 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
4723
- .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4724
- .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6843
+ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
47256844 .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
47266845 .test_ring = gfx_v9_0_ring_test_ring,
4727
- .test_ib = gfx_v9_0_ring_test_ib,
47286846 .insert_nop = amdgpu_ring_insert_nop,
47296847 .pad_ib = amdgpu_ring_generic_pad_ib,
47306848 .emit_rreg = gfx_v9_0_ring_emit_rreg,
....@@ -4746,11 +6864,6 @@
47466864 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
47476865 }
47486866
4749
-static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
4750
- .set = gfx_v9_0_kiq_set_interrupt_state,
4751
- .process = gfx_v9_0_kiq_irq,
4752
-};
4753
-
47546867 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
47556868 .set = gfx_v9_0_set_eop_interrupt_state,
47566869 .process = gfx_v9_0_eop_irq,
....@@ -4766,6 +6879,12 @@
47666879 .process = gfx_v9_0_priv_inst_irq,
47676880 };
47686881
6882
+static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6883
+ .set = gfx_v9_0_set_cp_ecc_error_state,
6884
+ .process = amdgpu_gfx_cp_ecc_error_irq,
6885
+};
6886
+
6887
+
47696888 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
47706889 {
47716890 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
....@@ -4777,8 +6896,8 @@
47776896 adev->gfx.priv_inst_irq.num_types = 1;
47786897 adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
47796898
4780
- adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
4781
- adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
6899
+ adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
6900
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
47826901 }
47836902
47846903 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
....@@ -4788,6 +6907,8 @@
47886907 case CHIP_VEGA12:
47896908 case CHIP_VEGA20:
47906909 case CHIP_RAVEN:
6910
+ case CHIP_ARCTURUS:
6911
+ case CHIP_RENOIR:
47916912 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
47926913 break;
47936914 default:
....@@ -4798,29 +6919,46 @@
47986919 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
47996920 {
48006921 /* init asci gds info */
4801
- adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
4802
- adev->gds.gws.total_size = 64;
4803
- adev->gds.oa.total_size = 16;
4804
-
4805
- if (adev->gds.mem.total_size == 64 * 1024) {
4806
- adev->gds.mem.gfx_partition_size = 4096;
4807
- adev->gds.mem.cs_partition_size = 4096;
4808
-
4809
- adev->gds.gws.gfx_partition_size = 4;
4810
- adev->gds.gws.cs_partition_size = 4;
4811
-
4812
- adev->gds.oa.gfx_partition_size = 4;
4813
- adev->gds.oa.cs_partition_size = 1;
4814
- } else {
4815
- adev->gds.mem.gfx_partition_size = 1024;
4816
- adev->gds.mem.cs_partition_size = 1024;
4817
-
4818
- adev->gds.gws.gfx_partition_size = 16;
4819
- adev->gds.gws.cs_partition_size = 16;
4820
-
4821
- adev->gds.oa.gfx_partition_size = 4;
4822
- adev->gds.oa.cs_partition_size = 4;
6922
+ switch (adev->asic_type) {
6923
+ case CHIP_VEGA10:
6924
+ case CHIP_VEGA12:
6925
+ case CHIP_VEGA20:
6926
+ adev->gds.gds_size = 0x10000;
6927
+ break;
6928
+ case CHIP_RAVEN:
6929
+ case CHIP_ARCTURUS:
6930
+ adev->gds.gds_size = 0x1000;
6931
+ break;
6932
+ default:
6933
+ adev->gds.gds_size = 0x10000;
6934
+ break;
48236935 }
6936
+
6937
+ switch (adev->asic_type) {
6938
+ case CHIP_VEGA10:
6939
+ case CHIP_VEGA20:
6940
+ adev->gds.gds_compute_max_wave_id = 0x7ff;
6941
+ break;
6942
+ case CHIP_VEGA12:
6943
+ adev->gds.gds_compute_max_wave_id = 0x27f;
6944
+ break;
6945
+ case CHIP_RAVEN:
6946
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
6947
+ adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
6948
+ else
6949
+ adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
6950
+ break;
6951
+ case CHIP_ARCTURUS:
6952
+ adev->gds.gds_compute_max_wave_id = 0xfff;
6953
+ break;
6954
+ default:
6955
+ /* this really depends on the chip */
6956
+ adev->gds.gds_compute_max_wave_id = 0x7ff;
6957
+ break;
6958
+ }
6959
+
6960
+ adev->gds.gws_size = 64;
6961
+ adev->gds.oa_size = 16;
48246962 }
48256963
48266964 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
....@@ -4857,12 +6995,21 @@
48576995 {
48586996 int i, j, k, counter, active_cu_number = 0;
48596997 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4860
- unsigned disable_masks[4 * 2];
6998
+ unsigned disable_masks[4 * 4];
48616999
48627000 if (!adev || !cu_info)
48637001 return -EINVAL;
48647002
4865
- amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
7003
+ /*
7004
+ * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
7005
+ */
7006
+ if (adev->gfx.config.max_shader_engines *
7007
+ adev->gfx.config.max_sh_per_se > 16)
7008
+ return -EINVAL;
7009
+
7010
+ amdgpu_gfx_parse_disable_cu(disable_masks,
7011
+ adev->gfx.config.max_shader_engines,
7012
+ adev->gfx.config.max_sh_per_se);
48667013
48677014 mutex_lock(&adev->grbm_idx_mutex);
48687015 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
....@@ -4871,11 +7018,23 @@
48717018 ao_bitmap = 0;
48727019 counter = 0;
48737020 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
4874
- if (i < 4 && j < 2)
4875
- gfx_v9_0_set_user_cu_inactive_bitmap(
4876
- adev, disable_masks[i * 2 + j]);
7021
+ gfx_v9_0_set_user_cu_inactive_bitmap(
7022
+ adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
48777023 bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
4878
- cu_info->bitmap[i][j] = bitmap;
7024
+
7025
+ /*
7026
+ * The bitmap(and ao_cu_bitmap) in cu_info structure is
7027
+ * 4x4 size array, and it's usually suitable for Vega
7028
+ * ASICs which has 4*2 SE/SH layout.
7029
+ * But for Arcturus, SE/SH layout is changed to 8*1.
7030
+ * To mostly reduce the impact, we make it compatible
7031
+ * with current bitmap array as below:
7032
+ * SE4,SH0 --> bitmap[0][1]
7033
+ * SE5,SH0 --> bitmap[1][1]
7034
+ * SE6,SH0 --> bitmap[2][1]
7035
+ * SE7,SH0 --> bitmap[3][1]
7036
+ */
7037
+ cu_info->bitmap[i % 4][j + i / 4] = bitmap;
48797038
48807039 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
48817040 if (bitmap & mask) {
....@@ -4888,7 +7047,7 @@
48887047 active_cu_number += counter;
48897048 if (i < 2 && j < 2)
48907049 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4891
- cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
7050
+ cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
48927051 }
48937052 }
48947053 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);