From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 31 Jan 2024 03:29:01 +0000
Subject: [PATCH] drm/amdkfd: update gfx9 CWSR trap handler (drop EMU/debug hacks, add XNACK save workaround and Arcturus ACC VGPR support)

---
 kernel/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm |  614 +++++++++++++++++++------------------------------------
 1 file changed, 213 insertions(+), 401 deletions(-)

diff --git a/kernel/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/kernel/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 0bb9c57..75f29d1 100644
--- a/kernel/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/kernel/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -24,76 +24,9 @@
  * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
  */
 
-/* HW (GFX9) source code for CWSR trap handler */
-/* Version 18 + multiple trap handler */
-
-// this performance-optimal version was originally from Seven Xu at SRDC
-
-// Revision #18	 --...
-/* Rev History
-** #1. Branch from gc dv.   //gfxip/gfx9/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(merged, skipped-already fixed by PV)
-** #4. SR Memory Layout:
-**			 1. VGPR-SGPR-HWREG-{LDS}
-**			 2. tba_hi.bits.26 - reconfigured as the first-wave-in-TG bit, to defer the LDS save for a threadgroup.. performance concern..
-** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
-** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
-** #7. Update: 1. don't barrier if noLDS
-** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
-**	       2. Fix SQ issue by s_sleep 2
-** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
-**	       2. optimize s_buffer save by burst 16sgprs...
-** #10. Update 1. Optimize restore sgpr by burst 16 sgprs.
-** #11. Update 1. Add 2 more timestamp for debug version
-** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
-** #13. Integ  1. Always use MUBUF for PV trap shader...
-** #14. Update 1. s_buffer_store soft clause...
-** #15. Update 1. PERF - scalar write with glc:0/mtype0 to allow L2 combine. Large perf improvement.
-** #16. Update 1. PERF - UNROLL LDS_DMA saved 2500 cycles in IP tree
-** #17. Update 1. FUNC - LDS_DMA has issues under ATC; replaced with ds_read/buffer_store for the save part [TODO: restore part]
-**	       2. PERF - Save LDS before saving VGPRs to cover the long LDS save latency...
-** #18. Update 1. FUNC - Implicitly restore STATUS.VCCZ, which is not writable by s_setreg_b32
-**	       2. FUNC - Handle non-CWSR traps
-*/
-
-var G8SR_WDMEM_HWREG_OFFSET = 0
-var G8SR_WDMEM_SGPR_OFFSET  = 128  // in bytes
-
-// Keep these definitions the same as in the app shader; these 2 timestamps are part of the app shader and should come before any save and after restore.
-
-var G8SR_DEBUG_TIMESTAMP = 0
-var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4	// ts_save_d timestamp offset relative to SGPR_SR_memory_offset
-var s_g8sr_ts_save_s	= s[34:35]   // save start
-var s_g8sr_ts_sq_save_msg  = s[36:37]	// The save shader send SAVEWAVE msg to spi
-var s_g8sr_ts_spi_wrexec   = s[38:39]	// the SPI write the sr address to SQ
-var s_g8sr_ts_save_d	= s[40:41]   // save end
-var s_g8sr_ts_restore_s = s[42:43]   // restore start
-var s_g8sr_ts_restore_d = s[44:45]   // restore end
-
-var G8SR_VGPR_SR_IN_DWX4 = 0
-var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000	 // DWx4 stride is 4*4Bytes
-var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4  = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
-
-
-/*************************************************************************/
-/*		    control on how to run the shader			 */
-/*************************************************************************/
-//any hack needed to run this code in EMU (either because various EMU code is not ready, or because there is no compute save & restore in the EMU run)
-var EMU_RUN_HACK		    =	0
-var EMU_RUN_HACK_RESTORE_NORMAL	    =	0
-var EMU_RUN_HACK_SAVE_NORMAL_EXIT   =	0
-var EMU_RUN_HACK_SAVE_SINGLE_WAVE   =	0
-var EMU_RUN_HACK_SAVE_FIRST_TIME    =	0		    //for interrupted restore in which the first save is through EMU_RUN_HACK
-var SAVE_LDS			    =	1
-var WG_BASE_ADDR_LO		    =	0x9000a000
-var WG_BASE_ADDR_HI		    =	0x0
-var WAVE_SPACE			    =	0x5000		    //memory size that each wave occupies in workgroup state mem
-var CTX_SAVE_CONTROL		    =	0x0
-var CTX_RESTORE_CONTROL		    =	CTX_SAVE_CONTROL
-var SIM_RUN_HACK		    =	0		    //any hack needed to run this code in SIM (either because various RTL code is not ready, or because there is no compute save & restore in the RTL run)
-var SGPR_SAVE_USE_SQC		    =	1		    //use SQC D$ to do the write
-var USE_MTBUF_INSTEAD_OF_MUBUF	    =	0		    //TC EMU currently asserts on 0; the DFMT field is overloaded to carry 4 more bits of stride for MUBUF opcodes
-var SWIZZLE_EN			    =	0		    //whether we use swizzled buffer addressing
 var ACK_SQC_STORE		    =	1		    //workaround for suspected SQC store bug causing incorrect stores under concurrency
+var SAVE_AFTER_XNACK_ERROR	    =	1		    //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
+var SINGLE_STEP_MISSED_WORKAROUND   =	1		    //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
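+// SAVE_AFTER_XNACK_ERROR: when STATUS.ALLOW_REPLAY=0, TCP (vector memory) stores fail after an XNACK error,
+// so the save path falls back to scalar SQC stores (see check_if_tcp_store_ok).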
 
 /**************************************************************************/
 /*			variables					  */
@@ -107,6 +40,7 @@
 var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE    = 1
 var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT  = 3
 var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE   = 29
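+// ALLOW_REPLAY is bit[22] of SQ_WAVE_STATUS.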
+var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK    = 0x400000
 
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT	= 12
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE	= 9
@@ -127,11 +61,14 @@
 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT	=   11
 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE	=   21
 var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK	=   0x800
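+// XNACK_ERROR is bit[28] of SQ_WAVE_TRAPSTS.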
+var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK	=   0x10000000
 
 var SQ_WAVE_IB_STS_RCNT_SHIFT		=   16			//FIXME
 var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT	=   15			//FIXME
 var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK	= 0x1F8000
 var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG	= 0x00007FFF	//FIXME
+
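+// DEBUG_EN is bit[11] of SQ_WAVE_MODE.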
+var SQ_WAVE_MODE_DEBUG_EN_MASK		=   0x800
 
 var SQ_BUF_RSRC_WORD1_ATC_SHIFT	    =	24
 var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT   =	27
@@ -150,10 +87,10 @@
 var S_SAVE_SPI_INIT_FIRST_WAVE_MASK	=   0x04000000		//bit[26]: FirstWaveInTG
 var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT	=   26
 
-var S_SAVE_PC_HI_RCNT_SHIFT		=   28			//FIXME	 check with Brian to ensure all fields other than PC[47:0] can be used
-var S_SAVE_PC_HI_RCNT_MASK		=   0xF0000000		//FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT	=   27			//FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK	=   0x08000000		//FIXME
+var S_SAVE_PC_HI_RCNT_SHIFT		=   27			//FIXME	 check with Brian to ensure all fields other than PC[47:0] can be used
+var S_SAVE_PC_HI_RCNT_MASK		=   0xF8000000		//FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT	=   26			//FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_MASK	=   0x04000000		//FIXME
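+// RCNT widens from 4 to 5 bits ([31:27]) to match IB_STS.RCNT ([20:16]); FIRST_REPLAY moves down to bit[26].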
 
 var s_save_spi_init_lo		    =	exec_lo
 var s_save_spi_init_hi		    =	exec_hi
@@ -162,8 +99,8 @@
 var s_save_pc_hi	    =	ttmp1
 var s_save_exec_lo	    =	ttmp2
 var s_save_exec_hi	    =	ttmp3
-var s_save_tmp		    =	ttmp4
-var s_save_trapsts	    =	ttmp5		//not really used until the end of the SAVE routine
+var s_save_tmp		    =	ttmp14
+var s_save_trapsts	    =	ttmp15		//not really used until the end of the SAVE routine
 var s_save_xnack_mask_lo    =	ttmp6
 var s_save_xnack_mask_hi    =	ttmp7
 var s_save_buf_rsrc0	    =	ttmp8
@@ -171,9 +108,9 @@
 var s_save_buf_rsrc2	    =	ttmp10
 var s_save_buf_rsrc3	    =	ttmp11
 var s_save_status	    =	ttmp12
-var s_save_mem_offset	    =	ttmp14
+var s_save_mem_offset	    =	ttmp4
 var s_save_alloc_size	    =	s_save_trapsts		//conflict
-var s_save_m0		    =	ttmp15
+var s_save_m0		    =	ttmp5
 var s_save_ttmps_lo	    =	s_save_tmp		//no conflict
 var s_save_ttmps_hi	    =	s_save_trapsts		//no conflict
 
@@ -197,20 +134,22 @@
 var s_restore_spi_init_hi		    =	exec_hi
 
 var s_restore_mem_offset	=   ttmp12
+var s_restore_accvgpr_offset	=   ttmp13
 var s_restore_alloc_size	=   ttmp3
 var s_restore_tmp		=   ttmp2
 var s_restore_mem_offset_save	=   s_restore_tmp	//no conflict
+var s_restore_accvgpr_offset_save = ttmp7
 
 var s_restore_m0	    =	s_restore_alloc_size	//no conflict
 
-var s_restore_mode	    =	ttmp7
+var s_restore_mode	    =	s_restore_accvgpr_offset_save
 
 var s_restore_pc_lo	    =	ttmp0
 var s_restore_pc_hi	    =	ttmp1
-var s_restore_exec_lo	    =	ttmp14
-var s_restore_exec_hi	    = 	ttmp15
-var s_restore_status	    =	ttmp4
-var s_restore_trapsts	    =	ttmp5
+var s_restore_exec_lo	    =	ttmp4
+var s_restore_exec_hi	    = 	ttmp5
+var s_restore_status	    =	ttmp14
+var s_restore_trapsts	    =	ttmp15
 var s_restore_xnack_mask_lo =	xnack_mask_lo
 var s_restore_xnack_mask_hi =	xnack_mask_hi
 var s_restore_buf_rsrc0	    =	ttmp8
@@ -226,20 +165,11 @@
 /* Shader Main*/
 
 shader main
-  asic(GFX9)
+  asic(DEFAULT)
   type(CS)
 
 
-    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))		    //hack to use trap_id for determining save/restore
-	//FIXME VCCZ un-init assertion s_getreg_b32	s_save_status, hwreg(HW_REG_STATUS)	    //save STATUS since we will change SCC
-	s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000		    //change SCC
-	s_cmp_eq_u32 s_save_tmp, 0x007e0000			    //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
-	s_cbranch_scc0 L_JUMP_TO_RESTORE			    //do not need to recover STATUS here  since we are going to RESTORE
-	//FIXME	 s_setreg_b32	hwreg(HW_REG_STATUS),	s_save_status	    //need to recover STATUS since we are going to SAVE
-	s_branch L_SKIP_RESTORE					    //NOT restore, SAVE actually
-    else
 	s_branch L_SKIP_RESTORE					    //NOT restore. might be a regular trap or save
-    end
 
 L_JUMP_TO_RESTORE:
     s_branch L_RESTORE						    //restore
@@ -248,12 +178,29 @@
 
     s_getreg_b32    s_save_status, hwreg(HW_REG_STATUS)				    //save STATUS since we will change SCC
     s_andn2_b32	    s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK	    //check whether this is for save
+
+if SINGLE_STEP_MISSED_WORKAROUND
+    // No single step exceptions if MODE.DEBUG_EN=0.
+    s_getreg_b32    ttmp2, hwreg(HW_REG_MODE)
+    s_and_b32       ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+    s_cbranch_scc0  L_NO_SINGLE_STEP_WORKAROUND
+
+    // Second-level trap already handled exception if STATUS.HALT=1.
+    s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+    // Prioritize single step exception over context save.
+    // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+    s_cbranch_scc0  L_FETCH_2ND_TRAP
+
+L_NO_SINGLE_STEP_WORKAROUND:
+end
+
     s_getreg_b32    s_save_trapsts, hwreg(HW_REG_TRAPSTS)
     s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK    //check whether this is for save
     s_cbranch_scc1  L_SAVE					//this is the operation for save
 
     // *********    Handle non-CWSR traps	*******************
-if (!EMU_RUN_HACK)
+
     // Illegal instruction is a non-maskable exception which blocks context save.
     // Halt the wavefront and return from the trap.
     s_and_b32       ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
@@ -266,10 +213,16 @@
 
 L_HALT_WAVE:
     // If STATUS.HALT is set then this fault must come from SQC instruction fetch.
-    // We cannot prevent further faults so just terminate the wavefront.
+    // We cannot prevent further faults. Spin wait until context saved.
     s_and_b32       ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
     s_cbranch_scc0  L_NOT_ALREADY_HALTED
-    s_endpgm
+
+L_WAIT_CTX_SAVE:
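+    // Poll until TRAPSTS.SAVECTX is raised; each s_sleep 0x10 idles roughly 16*64 clocks.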
+    s_sleep         0x10
+    s_getreg_b32    ttmp2, hwreg(HW_REG_TRAPSTS)
+    s_and_b32       ttmp2, ttmp2, SQ_WAVE_TRAPSTS_SAVECTX_MASK
+    s_cbranch_scc0  L_WAIT_CTX_SAVE
+
 L_NOT_ALREADY_HALTED:
     s_or_b32        s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
 
@@ -293,12 +246,12 @@
     // Read second-level TBA/TMA from first-level TMA and jump if available.
     // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
     // ttmp12 holds SQ_WAVE_STATUS
-    s_getreg_b32    ttmp4, hwreg(HW_REG_SQ_SHADER_TMA_LO)
-    s_getreg_b32    ttmp5, hwreg(HW_REG_SQ_SHADER_TMA_HI)
-    s_lshl_b64      [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8
-    s_load_dwordx2  [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA
+    s_getreg_b32    ttmp14, hwreg(HW_REG_SQ_SHADER_TMA_LO)
+    s_getreg_b32    ttmp15, hwreg(HW_REG_SQ_SHADER_TMA_HI)
+    s_lshl_b64      [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8
+    s_load_dwordx2  [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
     s_waitcnt       lgkmcnt(0)
-    s_load_dwordx2  [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA
+    s_load_dwordx2  [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
     s_waitcnt       lgkmcnt(0)
     s_and_b64       [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]
     s_cbranch_scc0  L_NO_NEXT_TRAP // second-level trap handler has not been set
@@ -324,7 +277,7 @@
     set_status_without_spi_prio(s_save_status, ttmp2)
 
     s_rfe_b64       [ttmp0, ttmp1]
-end
+
     // *********	End handling of non-CWSR traps	 *******************
 
 /**************************************************************************/
@@ -332,12 +285,6 @@
 /**************************************************************************/
 
 L_SAVE:
-
-if G8SR_DEBUG_TIMESTAMP
-	s_memrealtime	s_g8sr_ts_save_s
-	s_waitcnt lgkmcnt(0)	     //FIXME, will cause xnack??
-end
-
     s_and_b32	    s_save_pc_hi, s_save_pc_hi, 0x0000ffff    //pc[47:32]
 
     s_mov_b32	    s_save_tmp, 0							    //clear saveCtx bit
@@ -359,16 +306,7 @@
     s_mov_b32	    s_save_exec_hi, exec_hi
     s_mov_b64	    exec,   0x0								    //clear EXEC to get ready to receive
 
-if G8SR_DEBUG_TIMESTAMP
-	s_memrealtime  s_g8sr_ts_sq_save_msg
-	s_waitcnt lgkmcnt(0)
-end
-
-    if (EMU_RUN_HACK)
-
-    else
 	s_sendmsg   sendmsg(MSG_SAVEWAVE)  //send SPI a message and wait for SPI's write to EXEC
-    end
 
     // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
     s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
@@ -377,35 +315,9 @@
   L_SLEEP:
     s_sleep 0x2		       // sleep 1 (64clk) is not enough for 8 waves per SIMD and can hang SQ: the 7th/8th waves cannot win arbitration to execute while the other waves sit in this sleep loop waiting for wrexec!=0
 
-    if (EMU_RUN_HACK)
-
-    else
 	s_cbranch_execz L_SLEEP
-    end
 
-if G8SR_DEBUG_TIMESTAMP
-	s_memrealtime  s_g8sr_ts_spi_wrexec
-	s_waitcnt lgkmcnt(0)
-end
-
-    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
-	//calculate wd_addr using absolute thread id
-	v_readlane_b32 s_save_tmp, v9, 0
-	s_lshr_b32 s_save_tmp, s_save_tmp, 6
-	s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
-	s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
-	s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
-	s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
-    else
-    end
-    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
-	s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
-	s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
-	s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
-    else
-    end
-
-    // Save trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+    // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
     // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
     get_vgpr_size_bytes(s_save_ttmps_lo)
     get_sgpr_size_bytes(s_save_ttmps_hi)
@@ -413,13 +325,11 @@
     s_add_u32	    s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo
     s_addc_u32	    s_save_ttmps_hi, s_save_spi_init_hi, 0x0
     s_and_b32	    s_save_ttmps_hi, s_save_ttmps_hi, 0xFFFF
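+    // ttmpN is saved at byte offset 0x40 + N*4: ttmp4-ttmp7 at 0x50, ttmp8-ttmp11 at 0x60, ttmp13 at 0x74.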
-    s_store_dwordx2 [ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x40 glc:1
+    s_store_dwordx4 [ttmp4, ttmp5, ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x50 glc:1
     ack_sqc_store_workaround()
-    s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x48 glc:1
+    s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x60 glc:1
     ack_sqc_store_workaround()
-    s_store_dword   ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x58 glc:1
-    ack_sqc_store_workaround()
-    s_store_dwordx2 [ttmp14, ttmp15], [s_save_ttmps_lo, s_save_ttmps_hi], 0x5C glc:1
+    s_store_dword   ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x74 glc:1
     ack_sqc_store_workaround()
 
     /*	    setup Resource Constants    */
@@ -455,20 +365,10 @@
 
 
     s_mov_b32	    s_save_buf_rsrc2, 0x4				//NUM_RECORDS	in bytes
-    if (SWIZZLE_EN)
-	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
-    end
 
 
     write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)			//M0
-
-    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
-	s_add_u32 s_save_pc_lo, s_save_pc_lo, 4		    //pc[31:0]+4
-	s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0	    //carry bit over
-    end
-
     write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)		    //PC
     write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
     write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)		//EXEC
@@ -506,17 +406,9 @@
     s_add_u32	    s_save_alloc_size, s_save_alloc_size, 1
     s_lshl_b32	    s_save_alloc_size, s_save_alloc_size, 4			    //Number of SGPRs = (sgpr_size + 1) * 16   (non-zero value)
 
-    if (SGPR_SAVE_USE_SQC)
 	s_lshl_b32	s_save_buf_rsrc2,   s_save_alloc_size, 2		    //NUM_RECORDS in bytes
-    else
-	s_lshl_b32	s_save_buf_rsrc2,   s_save_alloc_size, 8		    //NUM_RECORDS in bytes (64 threads)
-    end
 
-    if (SWIZZLE_EN)
-	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
-    end
 
 
     // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
@@ -559,30 +451,25 @@
     s_mov_b32	    xnack_mask_lo, 0x0
     s_mov_b32	    xnack_mask_hi, 0x0
 
-    if (SWIZZLE_EN)
-	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
-    end
 
 
     // VGPR Allocated in 4-GPR granularity
 
-if G8SR_VGPR_SR_IN_DWX4
-	// the const stride for DWx4 is 4*4 bytes
-	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
-	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes
+if SAVE_AFTER_XNACK_ERROR
+	check_if_tcp_store_ok()
+	s_cbranch_scc1 L_SAVE_FIRST_VGPRS_WITH_TCP
 
-	buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+	write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+	s_branch L_SAVE_LDS
 
-	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
-	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes
-else
+L_SAVE_FIRST_VGPRS_WITH_TCP:
+end
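+	// TCP stores are safe here; fall through to the buffer_store fast path for v[0:3].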
+
 	buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
 	buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256
 	buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*2
 	buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*3
-end
 
 
 
@@ -617,66 +504,34 @@
     s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
 
 
-    if (SWIZZLE_EN)
-	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0	      //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_save_buf_rsrc2,  0x1000000		      //NUM_RECORDS in bytes
-    end
 
     s_mov_b32	    m0, 0x0						  //lds_offset initial value = 0
 
 
-var LDS_DMA_ENABLE = 0
-var UNROLL = 0
-if UNROLL==0 && LDS_DMA_ENABLE==1
-	s_mov_b32  s3, 256*2
-	s_nop 0
-	s_nop 0
-	s_nop 0
-  L_SAVE_LDS_LOOP:
-	//TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
-    if (SAVE_LDS)     //SPI always alloc LDS space in 128DW granularity
-	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1		// first 64DW
-	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
-    end
-
-    s_add_u32	    m0, m0, s3						//every buffer_store_lds does 256 bytes
-    s_add_u32	    s_save_mem_offset, s_save_mem_offset, s3				//mem offset increased by 256 bytes
-    s_cmp_lt_u32    m0, s_save_alloc_size						//scc=(m0 < s_save_alloc_size) ? 1 : 0
-    s_cbranch_scc1  L_SAVE_LDS_LOOP							//LDS save is complete?
-
-elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROLL, has icache miss
-      // store from highest LDS address to lowest
-      s_mov_b32	 s3, 256*2
-      s_sub_u32	 m0, s_save_alloc_size, s3
-      s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
-      s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9   // how many 128-DW chunks...
-      s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size   // store from highest addr to lowest
-      s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4   // PC offset increment, each LDS save block costs 6*4 bytes of instructions
-      s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4   // 3 DW for the 2 instructions below: s_addc and s_setpc
-      s_nop 0
-      s_nop 0
-      s_nop 0	//pad 3 dw to let LDS_DMA align with 64Bytes
-      s_getpc_b64 s[0:1]			      // reuse s[0:1], since s[0:1] already saved
-      s_add_u32	  s0, s0,s_save_alloc_size
-      s_addc_u32  s1, s1, 0
-      s_setpc_b64 s[0:1]
-
-
-       for var i =0; i< 128; i++
-	    // be careful to make here a 64Byte aligned address, which could improve performance...
-	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0		// first 64DW
-	    buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256		  // second 64DW
-
-	if i!=127
-	s_sub_u32  m0, m0, s3	   // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e.  pack more LDS_DMA inst to one Cacheline
-	    s_sub_u32  s_save_mem_offset, s_save_mem_offset,  s3
-	    end
-       end
-
-else   // BUFFER_STORE
       v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
       v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2	// tid
+
+if SAVE_AFTER_XNACK_ERROR
+	check_if_tcp_store_ok()
+	s_cbranch_scc1 L_SAVE_LDS_WITH_TCP
+
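+	// v2 = tid*4, each lane's LDS byte address; every loop pass covers 0x200 bytes (64 lanes x 2 dwords).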
+	v_lshlrev_b32 v2, 2, v3
+L_SAVE_LDS_LOOP_SQC:
+	ds_read2_b32 v[0:1], v2 offset0:0 offset1:0x40
+	s_waitcnt lgkmcnt(0)
+
+	write_vgprs_to_mem_with_sqc(v0, 2, s_save_buf_rsrc0, s_save_mem_offset)
+
+	v_add_u32 v2, 0x200, v2
+	v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+	s_cbranch_vccnz L_SAVE_LDS_LOOP_SQC
+
+	s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_WITH_TCP:
+end
+
       v_mul_i32_i24 v2, v3, 8	// tid*8
       v_mov_b32 v3, 256*2
       s_mov_b32 m0, 0x10000
@@ -697,8 +552,6 @@
       // restore rsrc3
       s_mov_b32 s_save_buf_rsrc3, s0
 
-end
-
 L_SAVE_LDS_DONE:
 
 
@@ -716,44 +569,9 @@
     s_add_u32	    s_save_alloc_size, s_save_alloc_size, 1
     s_lshl_b32	    s_save_alloc_size, s_save_alloc_size, 2			    //Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)	  //FIXME for GFX, zero is possible
     s_lshl_b32	    s_save_buf_rsrc2,  s_save_alloc_size, 8			    //NUM_RECORDS in bytes (64 threads*4)
-    if (SWIZZLE_EN)
-	s_add_u32	s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_save_buf_rsrc2,  0x1000000				    //NUM_RECORDS in bytes
-    end
 
 
-    // VGPR Allocated in 4-GPR granularity
-
-if G8SR_VGPR_SR_IN_DWX4
-	// the const stride for DWx4 is 4*4 bytes
-	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
-	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes
-
-	s_mov_b32	  m0, 4	    // skip first 4 VGPRs
-	s_cmp_lt_u32	  m0, s_save_alloc_size
-	s_cbranch_scc0	  L_SAVE_VGPR_LOOP_END	    // no more vgprs
-
-	s_set_gpr_idx_on  m0, 0x1   // This will change M0
-	s_add_u32	  s_save_alloc_size, s_save_alloc_size, 0x1000	// because above inst change m0
-L_SAVE_VGPR_LOOP:
-	v_mov_b32	  v0, v0   // v0 = v[0+m0]
-	v_mov_b32	  v1, v1
-	v_mov_b32	  v2, v2
-	v_mov_b32	  v3, v3
-
-
-	buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
-	s_add_u32	  m0, m0, 4
-	s_add_u32	  s_save_mem_offset, s_save_mem_offset, 256*4
-	s_cmp_lt_u32	  m0, s_save_alloc_size
-    s_cbranch_scc1  L_SAVE_VGPR_LOOP						    //VGPR save is complete?
-    s_set_gpr_idx_off
-L_SAVE_VGPR_LOOP_END:
-
-	s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
-	s_or_b32  s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE  // reset const stride to 4 bytes
-else
     // VGPR store using dw burst
     s_mov_b32	      m0, 0x4	//VGPR initial index value = 4 (v[0:3] saved earlier)
     s_cmp_lt_u32      m0, s_save_alloc_size
@@ -763,57 +581,82 @@
     s_set_gpr_idx_on	m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
     s_add_u32	    s_save_alloc_size, s_save_alloc_size, 0x1000		    //add 0x1000 since we compare m0 against it later
 
+if SAVE_AFTER_XNACK_ERROR
+	check_if_tcp_store_ok()
+	s_cbranch_scc1 L_SAVE_VGPR_LOOP
+
+L_SAVE_VGPR_LOOP_SQC:
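+	// VGPR indexing is still on, so v0 in the helper resolves to v[m0]; four VGPRs are written per pass.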
+	write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+
+	s_add_u32 m0, m0, 4
+	s_cmp_lt_u32 m0, s_save_alloc_size
+	s_cbranch_scc1 L_SAVE_VGPR_LOOP_SQC
+
+	s_set_gpr_idx_off
+	s_branch L_SAVE_VGPR_END
+end
+
   L_SAVE_VGPR_LOOP:
     v_mov_b32	    v0, v0		//v0 = v[0+m0]
     v_mov_b32	    v1, v1		//v0 = v[0+m0]
     v_mov_b32	    v2, v2		//v0 = v[0+m0]
     v_mov_b32	    v3, v3		//v0 = v[0+m0]
 
-    if(USE_MTBUF_INSTEAD_OF_MUBUF)
-	tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
-    else
 	buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
 	buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256
 	buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*2
 	buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1  offset:256*3
-    end
 
     s_add_u32	    m0, m0, 4							    //next vgpr index
     s_add_u32	    s_save_mem_offset, s_save_mem_offset, 256*4			    //every buffer_store_dword does 256 bytes
     s_cmp_lt_u32    m0, s_save_alloc_size					    //scc = (m0 < s_save_alloc_size) ? 1 : 0
     s_cbranch_scc1  L_SAVE_VGPR_LOOP						    //VGPR save is complete?
     s_set_gpr_idx_off
-end
 
 L_SAVE_VGPR_END:
 
+if ASIC_TARGET_ARCTURUS
+    // Save ACC VGPRs
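+    // s_save_mem_offset continues from the architectural VGPRs, so the ACC VGPRs land directly after them;
+    // the restore path depends on this layout.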
+    s_mov_b32 m0, 0x0 //VGPR initial index value =0
+    s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
 
+if SAVE_AFTER_XNACK_ERROR
+    check_if_tcp_store_ok()
+    s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP
 
-
-
-
-    /*	   S_PGM_END_SAVED  */				    //FIXME  graphics ONLY
-    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
-	s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff    //pc[47:32]
-	s_add_u32 s_save_pc_lo, s_save_pc_lo, 4		    //pc[31:0]+4
-	s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0	    //carry bit over
-	s_rfe_b64 s_save_pc_lo				    //Return to the main shader program
-    else
+L_SAVE_ACCVGPR_LOOP_SQC:
+    for var vgpr = 0; vgpr < 4; ++ vgpr
+        v_accvgpr_read v[vgpr], acc[vgpr]  // v[N] = acc[N+m0]
     end
 
-// Save Done timestamp
-if G8SR_DEBUG_TIMESTAMP
-	s_memrealtime	s_g8sr_ts_save_d
-	// SGPR SR memory offset : size(VGPR)
-	get_vgpr_size_bytes(s_save_mem_offset)
-	s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
-	s_waitcnt lgkmcnt(0)	     //FIXME, will cause xnack??
-	// Need reset rsrc2??
-	s_mov_b32 m0, s_save_mem_offset
-	s_mov_b32 s_save_buf_rsrc2,  0x1000000					//NUM_RECORDS in bytes
-	s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0	    glc:1
+    write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+
+    s_add_u32 m0, m0, 4
+    s_cmp_lt_u32 m0, s_save_alloc_size
+    s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP_SQC
+
+    s_set_gpr_idx_off
+    s_branch L_SAVE_ACCVGPR_END
 end
 
+L_SAVE_ACCVGPR_LOOP:
+    for var vgpr = 0; vgpr < 4; ++ vgpr
+        v_accvgpr_read v[vgpr], acc[vgpr]  // v[N] = acc[N+m0]
+    end
+
+    buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+    buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+    buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+    buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+
+    s_add_u32 m0, m0, 4
+    s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
+    s_cmp_lt_u32 m0, s_save_alloc_size
+    s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP
+    s_set_gpr_idx_off
+
+L_SAVE_ACCVGPR_END:
+end
 
     s_branch	L_END_PGM
 
@@ -825,27 +668,6 @@
 
 L_RESTORE:
     /*	    Setup Resource Constants    */
-    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
-	//calculate wd_addr using absolute thread id
-	v_readlane_b32 s_restore_tmp, v9, 0
-	s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
-	s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
-	s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
-	s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
-	s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
-    else
-    end
-
-if G8SR_DEBUG_TIMESTAMP
-	s_memrealtime	s_g8sr_ts_restore_s
-	s_waitcnt lgkmcnt(0)	     //FIXME, will cause xnack??
-	// tma_lo/hi are sgpr 110, 111, which will not be used in the 112-SGPR allocation case...
-	s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
-	s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1]   //back up ts to ttmp0/1, since exec will be finally restored..
-end
-
-
-
     s_mov_b32	    s_restore_buf_rsrc0,    s_restore_spi_init_lo							    //base_addr_lo
     s_and_b32	    s_restore_buf_rsrc1,    s_restore_spi_init_hi, 0x0000FFFF						    //base_addr_hi
     s_or_b32	    s_restore_buf_rsrc1,    s_restore_buf_rsrc1,  S_RESTORE_BUF_RSRC_WORD1_STRIDE
@@ -887,18 +709,12 @@
     s_add_u32  s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()	     //FIXME, Check if offset overflow???
 
 
-    if (SWIZZLE_EN)
-	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
-    end
     s_mov_b32	    m0, 0x0								    //lds_offset initial value = 0
 
   L_RESTORE_LDS_LOOP:
-    if (SAVE_LDS)
 	buffer_load_dword   v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1		       // first 64DW
 	buffer_load_dword   v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256	       // second 64DW
-    end
     s_add_u32	    m0, m0, 256*2						// 128 DW
     s_add_u32	    s_restore_mem_offset, s_restore_mem_offset, 256*2		//mem offset increased by 128DW
     s_cmp_lt_u32    m0, s_restore_alloc_size					//scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -917,56 +733,43 @@
     s_add_u32	    s_restore_alloc_size, s_restore_alloc_size, 1
     s_lshl_b32	    s_restore_alloc_size, s_restore_alloc_size, 2			    //Number of VGPRs = (vgpr_size + 1) * 4    (non-zero value)
     s_lshl_b32	    s_restore_buf_rsrc2,  s_restore_alloc_size, 8			    //NUM_RECORDS in bytes (64 threads*4)
-    if (SWIZZLE_EN)
-	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
+
+if ASIC_TARGET_ARCTURUS
+    s_mov_b32	    s_restore_accvgpr_offset, s_restore_buf_rsrc2                           //ACC VGPRs at end of VGPRs
+end
+
 	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
-    end
 
-if G8SR_VGPR_SR_IN_DWX4
-     get_vgpr_size_bytes(s_restore_mem_offset)
-     s_sub_u32	       s_restore_mem_offset, s_restore_mem_offset, 256*4
-
-     // the const stride for DWx4 is 4*4 bytes
-     s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
-     s_or_b32  s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4  // const stride to 4*4 bytes
-
-     s_mov_b32	       m0, s_restore_alloc_size
-     s_set_gpr_idx_on  m0, 0x8	  // Note.. This will change m0
-
-L_RESTORE_VGPR_LOOP:
-     buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
-     s_waitcnt vmcnt(0)
-     s_sub_u32	       m0, m0, 4
-     v_mov_b32	       v0, v0	// v[0+m0] = v0
-     v_mov_b32	       v1, v1
-     v_mov_b32	       v2, v2
-     v_mov_b32	       v3, v3
-     s_sub_u32	       s_restore_mem_offset, s_restore_mem_offset, 256*4
-     s_cmp_eq_u32      m0, 0x8000
-     s_cbranch_scc0    L_RESTORE_VGPR_LOOP
-     s_set_gpr_idx_off
-
-     s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF   // reset const stride to 0
-     s_or_b32  s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE  // const stride to 4*4 bytes
-
-else
     // VGPR load using dw burst
     s_mov_b32	    s_restore_mem_offset_save, s_restore_mem_offset	// restore start with v1, v0 will be the last
     s_add_u32	    s_restore_mem_offset, s_restore_mem_offset, 256*4
+if ASIC_TARGET_ARCTURUS
+    s_mov_b32	    s_restore_accvgpr_offset_save, s_restore_accvgpr_offset
+    s_add_u32	    s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4
+end
     s_mov_b32	    m0, 4				//VGPR initial index value = 4
     s_set_gpr_idx_on  m0, 0x8			    //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
     s_add_u32	    s_restore_alloc_size, s_restore_alloc_size, 0x8000			    //add 0x8000 since we compare m0 against it later
 
   L_RESTORE_VGPR_LOOP:
-    if(USE_MTBUF_INSTEAD_OF_MUBUF)
-	tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
-    else
+
+if ASIC_TARGET_ARCTURUS
+	buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1
+	buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256
+	buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*2
+	buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*3
+	s_add_u32 s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4
+	s_waitcnt vmcnt(0)
+
+	for var vgpr = 0; vgpr < 4; ++ vgpr
+		v_accvgpr_write acc[vgpr], v[vgpr]
+	end
+end
+
 	buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
 	buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
 	buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
 	buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
-    end
     s_waitcnt	    vmcnt(0)								    //ensure data ready
     v_mov_b32	    v0, v0								    //v[0+m0] = v0
     v_mov_b32	    v1, v1
@@ -978,16 +781,22 @@
     s_cbranch_scc1  L_RESTORE_VGPR_LOOP							    //VGPR restore (except v0) is complete?
     s_set_gpr_idx_off
 											    /* VGPR restore on v0 */
-    if(USE_MTBUF_INSTEAD_OF_MUBUF)
-	tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
-    else
+if ASIC_TARGET_ARCTURUS
+	buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1
+	buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256
+	buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*2
+	buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*3
+	s_waitcnt vmcnt(0)
+
+	for var vgpr = 0; vgpr < 4; ++ vgpr
+		v_accvgpr_write acc[vgpr], v[vgpr]
+	end
+end
+
 	buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1
 	buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256
 	buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256*2
 	buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save    slc:1 glc:1 offset:256*3
-    end
-
-end
 
     /*		restore SGPRs	    */
     //////////////////////////////
@@ -1003,16 +812,8 @@
     s_add_u32	    s_restore_alloc_size, s_restore_alloc_size, 1
     s_lshl_b32	    s_restore_alloc_size, s_restore_alloc_size, 4			    //Number of SGPRs = (sgpr_size + 1) * 16   (non-zero value)
 
-    if (SGPR_SAVE_USE_SQC)
 	s_lshl_b32	s_restore_buf_rsrc2,	s_restore_alloc_size, 2			    //NUM_RECORDS in bytes
-    else
-	s_lshl_b32	s_restore_buf_rsrc2,	s_restore_alloc_size, 8			    //NUM_RECORDS in bytes (64 threads)
-    end
-    if (SWIZZLE_EN)
-	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
-    end
 
     s_mov_b32 m0, s_restore_alloc_size
 
@@ -1040,11 +841,6 @@
   L_RESTORE_HWREG:
 
 
-if G8SR_DEBUG_TIMESTAMP
-      s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
-      s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
-end
-
     // HWREG SR memory offset : size(VGPR)+size(SGPR)
     get_vgpr_size_bytes(s_restore_mem_offset)
     get_sgpr_size_bytes(s_restore_tmp)
@@ -1052,11 +848,7 @@
 
 
     s_mov_b32	    s_restore_buf_rsrc2, 0x4						    //NUM_RECORDS   in bytes
-    if (SWIZZLE_EN)
-	s_add_u32	s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0			    //FIXME need to use swizzle to enable bounds checking?
-    else
 	s_mov_b32	s_restore_buf_rsrc2,  0x1000000					    //NUM_RECORDS in bytes
-    end
 
     read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)		    //M0
     read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)		//PC
@@ -1071,16 +863,6 @@
 
     s_waitcnt	    lgkmcnt(0)											    //from now on, it is safe to restore STATUS and IB_STS
 
-    //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
-    if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
-	s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8		 //pc[31:0]+8	  //two back-to-back s_trap are used (first for save and second for restore)
-	s_addc_u32  s_restore_pc_hi, s_restore_pc_hi, 0x0	 //carry bit over
-    end
-    if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
-	s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4		 //pc[31:0]+4	  // save is hack through s_trap but restore is normal
-	s_addc_u32  s_restore_pc_hi, s_restore_pc_hi, 0x0	 //carry bit over
-    end
-
     s_mov_b32	    m0,		s_restore_m0
     s_mov_b32	    exec_lo,	s_restore_exec_lo
     s_mov_b32	    exec_hi,	s_restore_exec_hi
@@ -1093,7 +875,7 @@
     //s_setreg_b32  hwreg(HW_REG_TRAPSTS),  s_restore_trapsts	   //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
     s_setreg_b32    hwreg(HW_REG_MODE),	    s_restore_mode
 
-    // Restore trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic
+    // Restore trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
     // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
     get_vgpr_size_bytes(s_restore_ttmps_lo)
     get_sgpr_size_bytes(s_restore_ttmps_hi)
@@ -1101,10 +883,9 @@
     s_add_u32	    s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0
     s_addc_u32	    s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0
     s_and_b32	    s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF
-    s_load_dwordx2  [ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x40 glc:1
-    s_load_dwordx4  [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x48 glc:1
-    s_load_dword    ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x58 glc:1
-    s_load_dwordx2  [ttmp14, ttmp15], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x5C glc:1
+    s_load_dwordx4  [ttmp4, ttmp5, ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x50 glc:1
+    s_load_dwordx4  [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x60 glc:1
+    s_load_dword    ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x74 glc:1
     s_waitcnt	    lgkmcnt(0)
 
     //reuse s_restore_m0 as a temp register
@@ -1127,11 +908,6 @@
     set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
 
     s_barrier							//barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
-
-if G8SR_DEBUG_TIMESTAMP
-    s_memrealtime s_g8sr_ts_restore_d
-    s_waitcnt lgkmcnt(0)
-end
 
 //  s_rfe_b64 s_restore_pc_lo					//Return to the main shader program and resume execution
     s_rfe_restore_b64  s_restore_pc_lo, s_restore_m0		// s_restore_m0[0] is used to set STATUS.inst_atc
@@ -1187,7 +963,39 @@
     s_sub_u32	    s_mem_offset, s_mem_offset, 4*16
 end
 
+function check_if_tcp_store_ok
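+	// Leaves SCC=1 if TCP stores are usable, SCC=0 if the SQC fallback must be used.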
+	// If STATUS.ALLOW_REPLAY=0 and TRAPSTS.XNACK_ERROR=1 then TCP stores will fail.
+	s_and_b32 s_save_tmp, s_save_status, SQ_WAVE_STATUS_ALLOW_REPLAY_MASK
+	s_cbranch_scc1 L_TCP_STORE_CHECK_DONE
 
+	s_getreg_b32 s_save_tmp, hwreg(HW_REG_TRAPSTS)
+	s_andn2_b32 s_save_tmp, SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK, s_save_tmp
+
+L_TCP_STORE_CHECK_DONE:
+end
+
+function write_vgpr_to_mem_with_sqc(v, s_rsrc, s_mem_offset)
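+	// Writes one VGPR (all 64 lanes) via scalar stores: 16 passes of 4 lanes each, 256 bytes in total.
+	// Clobbers s0-s4, which the save path has already written out by this point.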
+	s_mov_b32 s4, 0
+
+L_WRITE_VGPR_LANE_LOOP:
+	for var lane = 0; lane < 4; ++ lane
+		v_readlane_b32 s[lane], v, s4
+		s_add_u32 s4, s4, 1
+	end
+
+	s_buffer_store_dwordx4 s[0:3], s_rsrc, s_mem_offset glc:1
+	ack_sqc_store_workaround()
+
+	s_add_u32 s_mem_offset, s_mem_offset, 0x10
+	s_cmp_eq_u32 s4, 0x40
+	s_cbranch_scc0 L_WRITE_VGPR_LANE_LOOP
+end
+
+function write_vgprs_to_mem_with_sqc(v, n_vgprs, s_rsrc, s_mem_offset)
+	for var vgpr = 0; vgpr < n_vgprs; ++ vgpr
+		write_vgpr_to_mem_with_sqc(v[vgpr], s_rsrc, s_mem_offset)
+	end
+end
 
 function get_lds_size_bytes(s_lds_size_byte)
     // SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW
@@ -1199,6 +1007,10 @@
     s_getreg_b32   s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)	 //vpgr_size
     s_add_u32	   s_vgpr_size_byte, s_vgpr_size_byte, 1
     s_lshl_b32	   s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4	(non-zero value)   //FIXME for GFX, zero is possible
+
+if ASIC_TARGET_ARCTURUS
+    s_lshl_b32     s_vgpr_size_byte, s_vgpr_size_byte, 1  // Double size for ACC VGPRs
+end
 end
 
 function get_sgpr_size_bytes(s_sgpr_size_byte)

--
Gitblit v1.6.2