From 61598093bbdd283a7edc367d900f223070ead8d2 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:43:03 +0000
Subject: [PATCH] bifrost: update tl/mali_kbase_tracepoints.c tracepoint definitions

---
 kernel/drivers/gpu/arm/bifrost/tl/mali_kbase_tracepoints.c | 2294 ++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 1649 insertions(+), 645 deletions(-)

diff --git a/kernel/drivers/gpu/arm/bifrost/tl/mali_kbase_tracepoints.c b/kernel/drivers/gpu/arm/bifrost/tl/mali_kbase_tracepoints.c
index 2c0de01..f62c755 100644
--- a/kernel/drivers/gpu/arm/bifrost/tl/mali_kbase_tracepoints.c
+++ b/kernel/drivers/gpu/arm/bifrost/tl/mali_kbase_tracepoints.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -20,7 +20,7 @@
  */
 
 /*
- * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py.
+ * THIS FILE IS AUTOGENERATED BY generate_tracepoints.py.
  * DO NOT EDIT.
  */
 
@@ -30,7 +30,7 @@
 
 /* clang-format off */
 
-/* Message ids of trace events that are recorded in the timeline stream. */
+/* Message ids of trace events that are recorded in the obj stream. */
 enum tl_msg_id_obj {
 	KBASE_TL_NEW_CTX,
 	KBASE_TL_NEW_GPU,
@@ -52,10 +52,6 @@
 	KBASE_TL_RET_ATOM_AS,
 	KBASE_TL_NRET_ATOM_AS,
 	KBASE_TL_ATTRIB_ATOM_CONFIG,
-	KBASE_TL_ATTRIB_ATOM_PRIORITY,
-	KBASE_TL_ATTRIB_ATOM_STATE,
-	KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
-	KBASE_TL_ATTRIB_ATOM_JIT,
 	KBASE_TL_JIT_USEDPAGES,
 	KBASE_TL_ATTRIB_ATOM_JITALLOCINFO,
 	KBASE_TL_ATTRIB_ATOM_JITFREEINFO,
@@ -71,9 +67,29 @@
 	KBASE_TL_ARBITER_STOPPED,
 	KBASE_TL_ARBITER_REQUESTED,
 	KBASE_JD_GPU_SOFT_RESET,
+	KBASE_JD_TILER_HEAP_CHUNK_ALLOC,
+	KBASE_TL_JS_SCHED_START,
+	KBASE_TL_JS_SCHED_END,
+	KBASE_TL_JD_SUBMIT_ATOM_START,
+	KBASE_TL_JD_SUBMIT_ATOM_END,
+	KBASE_TL_JD_DONE_NO_LOCK_START,
+	KBASE_TL_JD_DONE_NO_LOCK_END,
+	KBASE_TL_JD_DONE_START,
+	KBASE_TL_JD_DONE_END,
+	KBASE_TL_JD_ATOM_COMPLETE,
+	KBASE_TL_RUN_ATOM_START,
+	KBASE_TL_RUN_ATOM_END,
+	KBASE_TL_ATTRIB_ATOM_PRIORITY,
+	KBASE_TL_ATTRIB_ATOM_STATE,
+	KBASE_TL_ATTRIB_ATOM_PRIORITIZED,
+	KBASE_TL_ATTRIB_ATOM_JIT,
 	KBASE_TL_KBASE_NEW_DEVICE,
+	KBASE_TL_KBASE_GPUCMDQUEUE_KICK,
 	KBASE_TL_KBASE_DEVICE_PROGRAM_CSG,
 	KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG,
+	KBASE_TL_KBASE_DEVICE_HALTING_CSG,
+	KBASE_TL_KBASE_DEVICE_SUSPEND_CSG,
+	KBASE_TL_KBASE_DEVICE_CSG_IDLE,
 	KBASE_TL_KBASE_NEW_CTX,
 	KBASE_TL_KBASE_DEL_CTX,
 	KBASE_TL_KBASE_CTX_ASSIGN_AS,
@@ -84,17 +100,19 @@
 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT,
 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT,
 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET,
+	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION,
+	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION,
 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT,
 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT,
 	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE,
-	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER,
-	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND,
 	KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
 	KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
 	KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC,
 	KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE,
 	KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE,
 	KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE,
+	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER,
+	KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START,
@@ -102,6 +120,9 @@
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET,
+	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START,
+	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END,
+	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START,
@@ -119,25 +140,15 @@
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START,
 	KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END,
+	KBASE_TL_KBASE_CSFFW_FW_RELOADING,
+	KBASE_TL_KBASE_CSFFW_FW_ENABLING,
+	KBASE_TL_KBASE_CSFFW_FW_REQUEST_SLEEP,
+	KBASE_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP,
+	KBASE_TL_KBASE_CSFFW_FW_REQUEST_HALT,
+	KBASE_TL_KBASE_CSFFW_FW_DISABLING,
+	KBASE_TL_KBASE_CSFFW_FW_OFF,
 	KBASE_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW,
-	KBASE_TL_KBASE_CSFFW_RESET,
 	KBASE_OBJ_MSG_COUNT,
-};
-
-/* Message ids of trace events that are recorded in the auxiliary stream. */
-enum tl_msg_id_aux {
-	KBASE_AUX_PM_STATE,
-	KBASE_AUX_PAGEFAULT,
-	KBASE_AUX_PAGESALLOC,
-	KBASE_AUX_DEVFREQ_TARGET,
-	KBASE_AUX_PROTECTED_ENTER_START,
-	KBASE_AUX_PROTECTED_ENTER_END,
-	KBASE_AUX_PROTECTED_LEAVE_START,
-	KBASE_AUX_PROTECTED_LEAVE_END,
-	KBASE_AUX_JIT_STATS,
-	KBASE_AUX_TILER_HEAP_STATS,
-	KBASE_AUX_EVENT_JOB_SLOT,
-	KBASE_AUX_MSG_COUNT,
 };
 
 #define OBJ_TP_LIST \
@@ -221,22 +232,6 @@
 		"atom job slot attributes", \
 		"@pLLI", \
 		"atom,descriptor,affinity,config") \
-	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITY, \
-		"atom priority", \
-		"@pI", \
-		"atom,prio") \
-	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_STATE, \
-		"atom state", \
-		"@pI", \
-		"atom,state") \
-	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITIZED, \
-		"atom caused priority change", \
-		"@p", \
-		"atom") \
-	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_JIT, \
-		"jit done for atom", \
-		"@pLLILILLL", \
-		"atom,edit_addr,new_addr,jit_flags,mem_flags,j_id,com_pgs,extent,va_pgs") \
 	TRACEPOINT_DESC(KBASE_TL_JIT_USEDPAGES, \
 		"used pages for jit", \
 		"@LI", \
@@ -297,16 +292,96 @@
 		"gpu soft reset", \
 		"@p", \
 		"gpu") \
+	TRACEPOINT_DESC(KBASE_JD_TILER_HEAP_CHUNK_ALLOC, \
+		"Tiler Heap Chunk Allocation", \
+		"@ILL", \
+		"ctx_nr,heap_id,chunk_va") \
+	TRACEPOINT_DESC(KBASE_TL_JS_SCHED_START, \
+		"Scheduling starts", \
+		"@I", \
+		"dummy") \
+	TRACEPOINT_DESC(KBASE_TL_JS_SCHED_END, \
+		"Scheduling ends", \
+		"@I", \
+		"dummy") \
+	TRACEPOINT_DESC(KBASE_TL_JD_SUBMIT_ATOM_START, \
+		"Submitting an atom starts", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_JD_SUBMIT_ATOM_END, \
+		"Submitting an atom ends", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_JD_DONE_NO_LOCK_START, \
+		"Within function kbase_jd_done_nolock", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_JD_DONE_NO_LOCK_END, \
+		"Within function kbase_jd_done_nolock - end", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_JD_DONE_START, \
+		"Start of kbase_jd_done", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_JD_DONE_END, \
+		"End of kbase_jd_done", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_JD_ATOM_COMPLETE, \
+		"Atom marked complete", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_RUN_ATOM_START, \
+		"Running of atom starts", \
+		"@pI", \
+		"atom,atom_nr") \
+	TRACEPOINT_DESC(KBASE_TL_RUN_ATOM_END, \
+		"Running of atom ends", \
+		"@pI", \
+		"atom,atom_nr") \
+	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITY, \
+		"atom priority", \
+		"@pI", \
+		"atom,prio") \
+	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_STATE, \
+		"atom state", \
+		"@pI", \
+		"atom,state") \
+	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_PRIORITIZED, \
+		"atom caused priority change", \
+		"@p", \
+		"atom") \
+	TRACEPOINT_DESC(KBASE_TL_ATTRIB_ATOM_JIT, \
+		"jit done for atom", \
+		"@pLLILILLL", \
+		"atom,edit_addr,new_addr,jit_flags,mem_flags,j_id,com_pgs,extent,va_pgs") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_NEW_DEVICE, \
 		"New KBase Device", \
-		"@IIII", \
-		"kbase_device_id,kbase_device_gpu_core_count,kbase_device_max_num_csgs,kbase_device_as_count") \
+		"@IIIIIII", \
+		"kbase_device_id,kbase_device_gpu_core_count,kbase_device_max_num_csgs,kbase_device_as_count,kbase_device_sb_entry_count,kbase_device_has_cross_stream_sync,kbase_device_supports_gpu_sleep") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_GPUCMDQUEUE_KICK, \
+		"Kernel receives a request to process new GPU queue instructions", \
+		"@IL", \
+		"kernel_ctx_id,buffer_gpu_addr") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_PROGRAM_CSG, \
 		"CSG is programmed to a slot", \
-		"@III", \
-		"kbase_device_id,gpu_cmdq_grp_handle,kbase_device_csg_slot_index") \
+		"@IIIII", \
+		"kbase_device_id,kernel_ctx_id,gpu_cmdq_grp_handle,kbase_device_csg_slot_index,kbase_device_csg_slot_resuming") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG, \
 		"CSG is deprogrammed from a slot", \
+		"@II", \
+		"kbase_device_id,kbase_device_csg_slot_index") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_HALTING_CSG, \
+		"CSG is halting", \
+		"@III", \
+		"kbase_device_id,kbase_device_csg_slot_index,kbase_device_csg_slot_suspending") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_SUSPEND_CSG, \
+		"CSG is suspended", \
+		"@II", \
+		"kbase_device_id,kbase_device_csg_slot_index") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_DEVICE_CSG_IDLE, \
+		"KBase device is notified that CSG is idle.", \
 		"@II", \
 		"kbase_device_id,kbase_device_csg_slot_index") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_NEW_CTX, \
@@ -327,8 +402,8 @@
 		"kernel_ctx_id") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_NEW_KCPUQUEUE, \
 		"New KCPU Queue", \
-		"@pII", \
-		"kcpu_queue,kernel_ctx_id,kcpuq_num_pending_cmds") \
+		"@pIII", \
+		"kcpu_queue,kcpu_queue_id,kernel_ctx_id,kcpuq_num_pending_cmds") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_DEL_KCPUQUEUE, \
 		"Delete KCPU Queue", \
 		"@p", \
@@ -344,11 +419,19 @@
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT, \
 		"KCPU Queue enqueues Wait on Cross Queue Sync Object", \
 		"@pLII", \
-		"kcpu_queue,cqs_obj_gpu_addr,cqs_obj_compare_value,cqs_obj_inherit_error") \
+		"kcpu_queue,cqs_obj_gpu_addr,compare_value,inherit_error") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET, \
 		"KCPU Queue enqueues Set on Cross Queue Sync Object", \
 		"@pL", \
 		"kcpu_queue,cqs_obj_gpu_addr") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION, \
+		"KCPU Queue enqueues Wait Operation on Cross Queue Sync Object", \
+		"@pLLIII", \
+		"kcpu_queue,cqs_obj_gpu_addr,compare_value,condition,data_type,inherit_error") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION, \
+		"KCPU Queue enqueues Set Operation on Cross Queue Sync Object", \
+		"@pLLII", \
+		"kcpu_queue,cqs_obj_gpu_addr,value,operation,data_type") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT, \
 		"KCPU Queue enqueues Map Import", \
 		"@pL", \
@@ -361,14 +444,6 @@
 		"KCPU Queue enqueues Unmap Import ignoring reference count", \
 		"@pL", \
 		"kcpu_queue,map_import_buf_gpu_addr") \
-	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER, \
-		"KCPU Queue enqueues Error Barrier", \
-		"@p", \
-		"kcpu_queue") \
-	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND, \
-		"KCPU Queue enqueues Group Suspend", \
-		"@ppI", \
-		"kcpu_queue,group_suspend_buf,gpu_cmdq_grp_handle") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC, \
 		"Begin array of KCPU Queue enqueues JIT Alloc", \
 		"@p", \
@@ -393,6 +468,14 @@
 		"End array of KCPU Queue enqueues JIT Free", \
 		"@p", \
 		"kcpu_queue") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER, \
+		"KCPU Queue enqueues Error Barrier", \
+		"@p", \
+		"kcpu_queue") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND, \
+		"KCPU Queue enqueues Group Suspend", \
+		"@ppI", \
+		"kcpu_queue,group_suspend_buf,gpu_cmdq_grp_handle") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START, \
 		"KCPU Queue starts a Signal on Fence", \
 		"@p", \
@@ -410,15 +493,27 @@
 		"@pI", \
 		"kcpu_queue,execute_error") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START, \
-		"KCPU Queue starts a Wait on an array of Cross Queue Sync Objects", \
+		"KCPU Queue starts a Wait on Cross Queue Sync Object", \
 		"@p", \
 		"kcpu_queue") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END, \
-		"KCPU Queue ends a Wait on an array of Cross Queue Sync Objects", \
+		"KCPU Queue ends a Wait on Cross Queue Sync Object", \
 		"@pI", \
 		"kcpu_queue,execute_error") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET, \
-		"KCPU Queue executes a Set on an array of Cross Queue Sync Objects", \
+		"KCPU Queue executes a Set on Cross Queue Sync Object", \
+		"@pI", \
+		"kcpu_queue,execute_error") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START, \
+		"KCPU Queue starts a Wait Operation on Cross Queue Sync Object", \
+		"@p", \
+		"kcpu_queue") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END, \
+		"KCPU Queue ends a Wait Operation on Cross Queue Sync Object", \
+		"@pI", \
+		"kcpu_queue,execute_error") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION, \
+		"KCPU Queue executes a Set Operation on Cross Queue Sync Object", \
 		"@pI", \
 		"kcpu_queue,execute_error") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START, \
@@ -489,25 +584,66 @@
 		"KCPU Queue ends a group suspend", \
 		"@pI", \
 		"kcpu_queue,execute_error") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_RELOADING, \
+		"CSF FW is being reloaded", \
+		"@L", \
+		"csffw_cycle") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_ENABLING, \
+		"CSF FW is being enabled", \
+		"@L", \
+		"csffw_cycle") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_REQUEST_SLEEP, \
+		"CSF FW sleep is requested", \
+		"@L", \
+		"csffw_cycle") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP, \
+		"CSF FW wake up is requested", \
+		"@L", \
+		"csffw_cycle") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_REQUEST_HALT, \
+		"CSF FW halt is requested", \
+		"@L", \
+		"csffw_cycle") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_DISABLING, \
+		"CSF FW is being disabled", \
+		"@L", \
+		"csffw_cycle") \
+	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_FW_OFF, \
+		"CSF FW is off", \
+		"@L", \
+		"csffw_cycle") \
 	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW, \
 		"An overflow has happened with the CSFFW Timeline stream", \
 		"@LL", \
-		"csffw_timestamp,csffw_cycle") \
-	TRACEPOINT_DESC(KBASE_TL_KBASE_CSFFW_RESET, \
-		"A reset has happened with the CSFFW", \
-		"@L", \
-		"csffw_cycle") \
+		"csffw_timestamp,csffw_cycle")
 
-#define MIPE_HEADER_BLOB_VAR_NAME		__obj_desc_header
-#define MIPE_HEADER_STREAM_ID			TL_STREAM_ID_KERNEL
-#define MIPE_HEADER_PKT_CLASS			TL_PACKET_CLASS_OBJ
-#define MIPE_HEADER_TRACEPOINT_LIST		OBJ_TP_LIST
-#define MIPE_HEADER_TRACEPOINT_LIST_SIZE	KBASE_OBJ_MSG_COUNT
+#define MIPE_HEADER_BLOB_VAR_NAME        __obj_desc_header
+#define MIPE_HEADER_STREAM_ID            TL_STREAM_ID_KERNEL
+#define MIPE_HEADER_PKT_CLASS            TL_PACKET_CLASS_OBJ
+#define MIPE_HEADER_TRACEPOINT_LIST      OBJ_TP_LIST
+#define MIPE_HEADER_TRACEPOINT_LIST_SIZE KBASE_OBJ_MSG_COUNT
 
 #include "mali_kbase_mipe_gen_header.h"
 
 const char   *obj_desc_header = (const char *) &__obj_desc_header;
 const size_t  obj_desc_header_size = sizeof(__obj_desc_header);
+
+/* Message ids of trace events that are recorded in the aux stream. */
+enum tl_msg_id_aux {
+	KBASE_AUX_PM_STATE,
+	KBASE_AUX_PAGEFAULT,
+	KBASE_AUX_PAGESALLOC,
+	KBASE_AUX_DEVFREQ_TARGET,
+	KBASE_AUX_JIT_STATS,
+	KBASE_AUX_TILER_HEAP_STATS,
+	KBASE_AUX_EVENT_JOB_SLOT,
+	KBASE_AUX_PROTECTED_ENTER_START,
+	KBASE_AUX_PROTECTED_ENTER_END,
+	KBASE_AUX_MMU_COMMAND,
+	KBASE_AUX_PROTECTED_LEAVE_START,
+	KBASE_AUX_PROTECTED_LEAVE_END,
+	KBASE_AUX_MSG_COUNT,
+};
 
 #define AUX_TP_LIST \
 	TRACEPOINT_DESC(KBASE_AUX_PM_STATE, \
@@ -526,22 +662,6 @@
 		"New device frequency target", \
 		"@L", \
 		"target_freq") \
-	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_ENTER_START, \
-		"enter protected mode start", \
-		"@p", \
-		"gpu") \
-	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_ENTER_END, \
-		"enter protected mode end", \
-		"@p", \
-		"gpu") \
-	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_LEAVE_START, \
-		"leave protected mode start", \
-		"@p", \
-		"gpu") \
-	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_LEAVE_END, \
-		"leave protected mode end", \
-		"@p", \
-		"gpu") \
 	TRACEPOINT_DESC(KBASE_AUX_JIT_STATS, \
 		"per-bin JIT statistics", \
 		"@IIIIII", \
@@ -554,12 +674,32 @@
 		"event on a given job slot", \
 		"@pIII", \
 		"ctx,slot_nr,atom_nr,event") \
+	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_ENTER_START, \
+		"enter protected mode start", \
+		"@p", \
+		"gpu") \
+	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_ENTER_END, \
+		"enter protected mode end", \
+		"@p", \
+		"gpu") \
+	TRACEPOINT_DESC(KBASE_AUX_MMU_COMMAND, \
+		"mmu commands with synchronicity info", \
+		"@IIILI", \
+		"kernel_ctx_id,mmu_cmd_id,mmu_synchronicity,mmu_lock_addr,mmu_lock_page_num") \
+	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_LEAVE_START, \
+		"leave protected mode start", \
+		"@p", \
+		"gpu") \
+	TRACEPOINT_DESC(KBASE_AUX_PROTECTED_LEAVE_END, \
+		"leave protected mode end", \
+		"@p", \
+		"gpu")
 
-#define MIPE_HEADER_BLOB_VAR_NAME		__aux_desc_header
-#define MIPE_HEADER_STREAM_ID        		TL_STREAM_ID_KERNEL
-#define MIPE_HEADER_PKT_CLASS        		TL_PACKET_CLASS_AUX
-#define MIPE_HEADER_TRACEPOINT_LIST		AUX_TP_LIST
-#define MIPE_HEADER_TRACEPOINT_LIST_SIZE	KBASE_AUX_MSG_COUNT
+#define MIPE_HEADER_BLOB_VAR_NAME        __aux_desc_header
+#define MIPE_HEADER_STREAM_ID            TL_STREAM_ID_KERNEL
+#define MIPE_HEADER_PKT_CLASS            TL_PACKET_CLASS_AUX
+#define MIPE_HEADER_TRACEPOINT_LIST      AUX_TP_LIST
+#define MIPE_HEADER_TRACEPOINT_LIST_SIZE KBASE_AUX_MSG_COUNT
 
 #include "mali_kbase_mipe_gen_header.h"
 
@@ -570,7 +710,8 @@
 	struct kbase_tlstream *stream,
 	const void *ctx,
 	u32 ctx_nr,
-	u32 tgid)
+	u32 tgid
+)
 {
 	const u32 msg_id = KBASE_TL_NEW_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -600,7 +741,8 @@
 	struct kbase_tlstream *stream,
 	const void *gpu,
 	u32 gpu_id,
-	u32 core_count)
+	u32 core_count
+)
 {
 	const u32 msg_id = KBASE_TL_NEW_GPU;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -630,7 +772,8 @@
 	struct kbase_tlstream *stream,
 	const void *lpu,
 	u32 lpu_nr,
-	u32 lpu_fn)
+	u32 lpu_fn
+)
 {
 	const u32 msg_id = KBASE_TL_NEW_LPU;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -659,7 +802,8 @@
 void __kbase_tlstream_tl_new_atom(
 	struct kbase_tlstream *stream,
 	const void *atom,
-	u32 atom_nr)
+	u32 atom_nr
+)
 {
 	const u32 msg_id = KBASE_TL_NEW_ATOM;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -685,7 +829,8 @@
 void __kbase_tlstream_tl_new_as(
 	struct kbase_tlstream *stream,
 	const void *address_space,
-	u32 as_nr)
+	u32 as_nr
+)
 {
 	const u32 msg_id = KBASE_TL_NEW_AS;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -710,7 +855,8 @@
 
 void __kbase_tlstream_tl_del_ctx(
 	struct kbase_tlstream *stream,
-	const void *ctx)
+	const void *ctx
+)
 {
 	const u32 msg_id = KBASE_TL_DEL_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -732,7 +878,8 @@
 
 void __kbase_tlstream_tl_del_atom(
 	struct kbase_tlstream *stream,
-	const void *atom)
+	const void *atom
+)
 {
 	const u32 msg_id = KBASE_TL_DEL_ATOM;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -755,7 +902,8 @@
 void __kbase_tlstream_tl_lifelink_lpu_gpu(
 	struct kbase_tlstream *stream,
 	const void *lpu,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_TL_LIFELINK_LPU_GPU;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -781,7 +929,8 @@
 void __kbase_tlstream_tl_lifelink_as_gpu(
 	struct kbase_tlstream *stream,
 	const void *address_space,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_TL_LIFELINK_AS_GPU;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -807,7 +956,8 @@
 void __kbase_tlstream_tl_ret_ctx_lpu(
 	struct kbase_tlstream *stream,
 	const void *ctx,
-	const void *lpu)
+	const void *lpu
+)
 {
 	const u32 msg_id = KBASE_TL_RET_CTX_LPU;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -833,7 +983,8 @@
 void __kbase_tlstream_tl_ret_atom_ctx(
 	struct kbase_tlstream *stream,
 	const void *atom,
-	const void *ctx)
+	const void *ctx
+)
 {
 	const u32 msg_id = KBASE_TL_RET_ATOM_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -860,15 +1011,16 @@
 	struct kbase_tlstream *stream,
 	const void *atom,
 	const void *lpu,
-	const char *attrib_match_list)
+	const char *attrib_match_list
+)
 {
 	const u32 msg_id = KBASE_TL_RET_ATOM_LPU;
-	const size_t s0 = sizeof(u32) + sizeof(char)
+	const size_t s2 = sizeof(u32) + sizeof(char)
 		+ strnlen(attrib_match_list, STRLEN_MAX);
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
 		+ sizeof(atom)
 		+ sizeof(lpu)
-		+ s0
+		+ s2
 		;
 	char *buffer;
 	unsigned long acq_flags;
@@ -883,7 +1035,7 @@
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &lpu, sizeof(lpu));
 	pos = kbasep_serialize_string(buffer,
-		pos, attrib_match_list, s0);
+		pos, attrib_match_list, s2);
 
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
@@ -891,7 +1043,8 @@
 void __kbase_tlstream_tl_nret_ctx_lpu(
 	struct kbase_tlstream *stream,
 	const void *ctx,
-	const void *lpu)
+	const void *lpu
+)
 {
 	const u32 msg_id = KBASE_TL_NRET_CTX_LPU;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -917,7 +1070,8 @@
 void __kbase_tlstream_tl_nret_atom_ctx(
 	struct kbase_tlstream *stream,
 	const void *atom,
-	const void *ctx)
+	const void *ctx
+)
 {
 	const u32 msg_id = KBASE_TL_NRET_ATOM_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -943,7 +1097,8 @@
 void __kbase_tlstream_tl_nret_atom_lpu(
 	struct kbase_tlstream *stream,
 	const void *atom,
-	const void *lpu)
+	const void *lpu
+)
 {
 	const u32 msg_id = KBASE_TL_NRET_ATOM_LPU;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -969,7 +1124,8 @@
 void __kbase_tlstream_tl_ret_as_ctx(
 	struct kbase_tlstream *stream,
 	const void *address_space,
-	const void *ctx)
+	const void *ctx
+)
 {
 	const u32 msg_id = KBASE_TL_RET_AS_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -995,7 +1151,8 @@
 void __kbase_tlstream_tl_nret_as_ctx(
 	struct kbase_tlstream *stream,
 	const void *address_space,
-	const void *ctx)
+	const void *ctx
+)
 {
 	const u32 msg_id = KBASE_TL_NRET_AS_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1021,7 +1178,8 @@
 void __kbase_tlstream_tl_ret_atom_as(
 	struct kbase_tlstream *stream,
 	const void *atom,
-	const void *address_space)
+	const void *address_space
+)
 {
 	const u32 msg_id = KBASE_TL_RET_ATOM_AS;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1047,7 +1205,8 @@
 void __kbase_tlstream_tl_nret_atom_as(
 	struct kbase_tlstream *stream,
 	const void *atom,
-	const void *address_space)
+	const void *address_space
+)
 {
 	const u32 msg_id = KBASE_TL_NRET_ATOM_AS;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1075,7 +1234,8 @@
 	const void *atom,
 	u64 descriptor,
 	u64 affinity,
-	u32 config)
+	u32 config
+)
 {
 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_CONFIG;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1104,138 +1264,11 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
-void __kbase_tlstream_tl_attrib_atom_priority(
-	struct kbase_tlstream *stream,
-	const void *atom,
-	u32 prio)
-{
-	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(atom)
-		+ sizeof(prio)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &atom, sizeof(atom));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &prio, sizeof(prio));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_state(
-	struct kbase_tlstream *stream,
-	const void *atom,
-	u32 state)
-{
-	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(atom)
-		+ sizeof(state)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &atom, sizeof(atom));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &state, sizeof(state));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_prioritized(
-	struct kbase_tlstream *stream,
-	const void *atom)
-{
-	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(atom)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &atom, sizeof(atom));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_tl_attrib_atom_jit(
-	struct kbase_tlstream *stream,
-	const void *atom,
-	u64 edit_addr,
-	u64 new_addr,
-	u32 jit_flags,
-	u64 mem_flags,
-	u32 j_id,
-	u64 com_pgs,
-	u64 extent,
-	u64 va_pgs)
-{
-	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(atom)
-		+ sizeof(edit_addr)
-		+ sizeof(new_addr)
-		+ sizeof(jit_flags)
-		+ sizeof(mem_flags)
-		+ sizeof(j_id)
-		+ sizeof(com_pgs)
-		+ sizeof(extent)
-		+ sizeof(va_pgs)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &atom, sizeof(atom));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &edit_addr, sizeof(edit_addr));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &new_addr, sizeof(new_addr));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &jit_flags, sizeof(jit_flags));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &mem_flags, sizeof(mem_flags));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &j_id, sizeof(j_id));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &com_pgs, sizeof(com_pgs));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &extent, sizeof(extent));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &va_pgs, sizeof(va_pgs));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
 void __kbase_tlstream_tl_jit_usedpages(
 	struct kbase_tlstream *stream,
 	u64 used_pages,
-	u32 j_id)
+	u32 j_id
+)
 {
 	const u32 msg_id = KBASE_TL_JIT_USEDPAGES;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1268,7 +1301,8 @@
 	u32 bin_id,
 	u32 max_allocs,
 	u32 jit_flags,
-	u32 usg_id)
+	u32 usg_id
+)
 {
 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITALLOCINFO;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1315,7 +1349,8 @@
 void __kbase_tlstream_tl_attrib_atom_jitfreeinfo(
 	struct kbase_tlstream *stream,
 	const void *atom,
-	u32 j_id)
+	u32 j_id
+)
 {
 	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JITFREEINFO;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1343,7 +1378,8 @@
 	const void *address_space,
 	u64 transtab,
 	u64 memattr,
-	u64 transcfg)
+	u64 transcfg
+)
 {
 	const u32 msg_id = KBASE_TL_ATTRIB_AS_CONFIG;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1374,7 +1410,8 @@
 
 void __kbase_tlstream_tl_event_lpu_softstop(
 	struct kbase_tlstream *stream,
-	const void *lpu)
+	const void *lpu
+)
 {
 	const u32 msg_id = KBASE_TL_EVENT_LPU_SOFTSTOP;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1396,7 +1433,8 @@
 
 void __kbase_tlstream_tl_event_atom_softstop_ex(
 	struct kbase_tlstream *stream,
-	const void *atom)
+	const void *atom
+)
 {
 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_EX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1418,7 +1456,8 @@
 
 void __kbase_tlstream_tl_event_atom_softstop_issue(
 	struct kbase_tlstream *stream,
-	const void *atom)
+	const void *atom
+)
 {
 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1440,7 +1479,8 @@
 
 void __kbase_tlstream_tl_event_atom_softjob_start(
 	struct kbase_tlstream *stream,
-	const void *atom)
+	const void *atom
+)
 {
 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1462,7 +1502,8 @@
 
 void __kbase_tlstream_tl_event_atom_softjob_end(
 	struct kbase_tlstream *stream,
-	const void *atom)
+	const void *atom
+)
 {
 	const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1484,7 +1525,8 @@
 
 void __kbase_tlstream_tl_arbiter_granted(
 	struct kbase_tlstream *stream,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_TL_ARBITER_GRANTED;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1506,7 +1548,8 @@
 
 void __kbase_tlstream_tl_arbiter_started(
 	struct kbase_tlstream *stream,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_TL_ARBITER_STARTED;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1528,7 +1571,8 @@
 
 void __kbase_tlstream_tl_arbiter_stop_requested(
 	struct kbase_tlstream *stream,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_TL_ARBITER_STOP_REQUESTED;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1550,7 +1594,8 @@
 
 void __kbase_tlstream_tl_arbiter_stopped(
 	struct kbase_tlstream *stream,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_TL_ARBITER_STOPPED;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1572,7 +1617,8 @@
 
 void __kbase_tlstream_tl_arbiter_requested(
 	struct kbase_tlstream *stream,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_TL_ARBITER_REQUESTED;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1594,7 +1640,8 @@
 
 void __kbase_tlstream_jd_gpu_soft_reset(
 	struct kbase_tlstream *stream,
-	const void *gpu)
+	const void *gpu
+)
 {
 	const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1614,263 +1661,18 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
-void __kbase_tlstream_aux_pm_state(
-	struct kbase_tlstream *stream,
-	u32 core_type,
-	u64 core_state_bitset)
-{
-	const u32 msg_id = KBASE_AUX_PM_STATE;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(core_type)
-		+ sizeof(core_state_bitset)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &core_type, sizeof(core_type));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &core_state_bitset, sizeof(core_state_bitset));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_pagefault(
-	struct kbase_tlstream *stream,
-	u32 ctx_nr,
-	u32 as_nr,
-	u64 page_cnt_change)
-{
-	const u32 msg_id = KBASE_AUX_PAGEFAULT;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(ctx_nr)
-		+ sizeof(as_nr)
-		+ sizeof(page_cnt_change)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &ctx_nr, sizeof(ctx_nr));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &as_nr, sizeof(as_nr));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &page_cnt_change, sizeof(page_cnt_change));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_pagesalloc(
-	struct kbase_tlstream *stream,
-	u32 ctx_nr,
-	u64 page_cnt)
-{
-	const u32 msg_id = KBASE_AUX_PAGESALLOC;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(ctx_nr)
-		+ sizeof(page_cnt)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &ctx_nr, sizeof(ctx_nr));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &page_cnt, sizeof(page_cnt));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_devfreq_target(
-	struct kbase_tlstream *stream,
-	u64 target_freq)
-{
-	const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(target_freq)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &target_freq, sizeof(target_freq));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_protected_enter_start(
-	struct kbase_tlstream *stream,
-	const void *gpu)
-{
-	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(gpu)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &gpu, sizeof(gpu));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_protected_enter_end(
-	struct kbase_tlstream *stream,
-	const void *gpu)
-{
-	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(gpu)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &gpu, sizeof(gpu));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_protected_leave_start(
-	struct kbase_tlstream *stream,
-	const void *gpu)
-{
-	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(gpu)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &gpu, sizeof(gpu));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_protected_leave_end(
-	struct kbase_tlstream *stream,
-	const void *gpu)
-{
-	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(gpu)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &gpu, sizeof(gpu));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_jit_stats(
-	struct kbase_tlstream *stream,
-	u32 ctx_nr,
-	u32 bid,
-	u32 max_allocs,
-	u32 allocs,
-	u32 va_pages,
-	u32 ph_pages)
-{
-	const u32 msg_id = KBASE_AUX_JIT_STATS;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(ctx_nr)
-		+ sizeof(bid)
-		+ sizeof(max_allocs)
-		+ sizeof(allocs)
-		+ sizeof(va_pages)
-		+ sizeof(ph_pages)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &ctx_nr, sizeof(ctx_nr));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &bid, sizeof(bid));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &max_allocs, sizeof(max_allocs));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &allocs, sizeof(allocs));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &va_pages, sizeof(va_pages));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &ph_pages, sizeof(ph_pages));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_aux_tiler_heap_stats(
+void __kbase_tlstream_jd_tiler_heap_chunk_alloc(
 	struct kbase_tlstream *stream,
 	u32 ctx_nr,
 	u64 heap_id,
-	u32 va_pages,
-	u32 ph_pages,
-	u32 max_chunks,
-	u32 chunk_size,
-	u32 chunk_count,
-	u32 target_in_flight,
-	u32 nr_in_flight)
+	u64 chunk_va
+)
 {
-	const u32 msg_id = KBASE_AUX_TILER_HEAP_STATS;
+	const u32 msg_id = KBASE_JD_TILER_HEAP_CHUNK_ALLOC;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
 		+ sizeof(ctx_nr)
 		+ sizeof(heap_id)
-		+ sizeof(va_pages)
-		+ sizeof(ph_pages)
-		+ sizeof(max_chunks)
-		+ sizeof(chunk_size)
-		+ sizeof(chunk_count)
-		+ sizeof(target_in_flight)
-		+ sizeof(nr_in_flight)
+		+ sizeof(chunk_va)
 		;
 	char *buffer;
 	unsigned long acq_flags;
@@ -1885,36 +1687,19 @@
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &heap_id, sizeof(heap_id));
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &va_pages, sizeof(va_pages));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &ph_pages, sizeof(ph_pages));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &max_chunks, sizeof(max_chunks));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &chunk_size, sizeof(chunk_size));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &chunk_count, sizeof(chunk_count));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &target_in_flight, sizeof(target_in_flight));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &nr_in_flight, sizeof(nr_in_flight));
+		pos, &chunk_va, sizeof(chunk_va));
 
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
-void __kbase_tlstream_aux_event_job_slot(
+void __kbase_tlstream_tl_js_sched_start(
 	struct kbase_tlstream *stream,
-	const void *ctx,
-	u32 slot_nr,
-	u32 atom_nr,
-	u32 event)
+	u32 dummy
+)
 {
-	const u32 msg_id = KBASE_AUX_EVENT_JOB_SLOT;
+	const u32 msg_id = KBASE_TL_JS_SCHED_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(ctx)
-		+ sizeof(slot_nr)
-		+ sizeof(atom_nr)
-		+ sizeof(event)
+		+ sizeof(dummy)
 		;
 	char *buffer;
 	unsigned long acq_flags;
@@ -1925,13 +1710,377 @@
 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
 	pos = kbasep_serialize_timestamp(buffer, pos);
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &ctx, sizeof(ctx));
+		pos, &dummy, sizeof(dummy));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_js_sched_end(
+	struct kbase_tlstream *stream,
+	u32 dummy
+)
+{
+	const u32 msg_id = KBASE_TL_JS_SCHED_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(dummy)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &slot_nr, sizeof(slot_nr));
+		pos, &dummy, sizeof(dummy));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jd_submit_atom_start(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_JD_SUBMIT_ATOM_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jd_submit_atom_end(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_JD_SUBMIT_ATOM_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jd_done_no_lock_start(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_JD_DONE_NO_LOCK_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jd_done_no_lock_end(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_JD_DONE_NO_LOCK_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jd_done_start(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_JD_DONE_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jd_done_end(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_JD_DONE_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_jd_atom_complete(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_JD_ATOM_COMPLETE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_run_atom_start(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 atom_nr
+)
+{
+	const u32 msg_id = KBASE_TL_RUN_ATOM_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(atom_nr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &atom_nr, sizeof(atom_nr));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_run_atom_end(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 atom_nr
+)
+{
+	const u32 msg_id = KBASE_TL_RUN_ATOM_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(atom_nr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &event, sizeof(event));
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom_nr, sizeof(atom_nr));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_priority(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 prio
+)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITY;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(prio)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &prio, sizeof(prio));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_state(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u32 state
+)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_STATE;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(state)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &state, sizeof(state));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_prioritized(
+	struct kbase_tlstream *stream,
+	const void *atom
+)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_PRIORITIZED;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_attrib_atom_jit(
+	struct kbase_tlstream *stream,
+	const void *atom,
+	u64 edit_addr,
+	u64 new_addr,
+	u32 jit_flags,
+	u64 mem_flags,
+	u32 j_id,
+	u64 com_pgs,
+	u64 extent,
+	u64 va_pgs
+)
+{
+	const u32 msg_id = KBASE_TL_ATTRIB_ATOM_JIT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(atom)
+		+ sizeof(edit_addr)
+		+ sizeof(new_addr)
+		+ sizeof(jit_flags)
+		+ sizeof(mem_flags)
+		+ sizeof(j_id)
+		+ sizeof(com_pgs)
+		+ sizeof(extent)
+		+ sizeof(va_pgs)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom, sizeof(atom));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &edit_addr, sizeof(edit_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &new_addr, sizeof(new_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &jit_flags, sizeof(jit_flags));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &mem_flags, sizeof(mem_flags));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &j_id, sizeof(j_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &com_pgs, sizeof(com_pgs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &extent, sizeof(extent));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &va_pgs, sizeof(va_pgs));
 
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
@@ -1941,7 +2090,11 @@
 	u32 kbase_device_id,
 	u32 kbase_device_gpu_core_count,
 	u32 kbase_device_max_num_csgs,
-	u32 kbase_device_as_count)
+	u32 kbase_device_as_count,
+	u32 kbase_device_sb_entry_count,
+	u32 kbase_device_has_cross_stream_sync,
+	u32 kbase_device_supports_gpu_sleep
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_NEW_DEVICE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -1949,6 +2102,9 @@
 		+ sizeof(kbase_device_gpu_core_count)
 		+ sizeof(kbase_device_max_num_csgs)
 		+ sizeof(kbase_device_as_count)
+		+ sizeof(kbase_device_sb_entry_count)
+		+ sizeof(kbase_device_has_cross_stream_sync)
+		+ sizeof(kbase_device_supports_gpu_sleep)
 		;
 	char *buffer;
 	unsigned long acq_flags;
@@ -1966,6 +2122,39 @@
 		pos, &kbase_device_max_num_csgs, sizeof(kbase_device_max_num_csgs));
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &kbase_device_as_count, sizeof(kbase_device_as_count));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_sb_entry_count, sizeof(kbase_device_sb_entry_count));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_has_cross_stream_sync, sizeof(kbase_device_has_cross_stream_sync));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_supports_gpu_sleep, sizeof(kbase_device_supports_gpu_sleep));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_gpucmdqueue_kick(
+	struct kbase_tlstream *stream,
+	u32 kernel_ctx_id,
+	u64 buffer_gpu_addr
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_GPUCMDQUEUE_KICK;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kernel_ctx_id)
+		+ sizeof(buffer_gpu_addr)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &buffer_gpu_addr, sizeof(buffer_gpu_addr));
 
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
@@ -1973,13 +2162,51 @@
 void __kbase_tlstream_tl_kbase_device_program_csg(
 	struct kbase_tlstream *stream,
 	u32 kbase_device_id,
+	u32 kernel_ctx_id,
 	u32 gpu_cmdq_grp_handle,
-	u32 kbase_device_csg_slot_index)
+	u32 kbase_device_csg_slot_index,
+	u32 kbase_device_csg_slot_resuming
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_DEVICE_PROGRAM_CSG;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
 		+ sizeof(kbase_device_id)
+		+ sizeof(kernel_ctx_id)
 		+ sizeof(gpu_cmdq_grp_handle)
+		+ sizeof(kbase_device_csg_slot_index)
+		+ sizeof(kbase_device_csg_slot_resuming)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_id, sizeof(kbase_device_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_csg_slot_resuming, sizeof(kbase_device_csg_slot_resuming));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_device_deprogram_csg(
+	struct kbase_tlstream *stream,
+	u32 kbase_device_id,
+	u32 kbase_device_csg_slot_index
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kbase_device_id)
 		+ sizeof(kbase_device_csg_slot_index)
 		;
 	char *buffer;
@@ -1993,19 +2220,76 @@
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &kbase_device_id, sizeof(kbase_device_id));
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
+		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_device_halting_csg(
+	struct kbase_tlstream *stream,
+	u32 kbase_device_id,
+	u32 kbase_device_csg_slot_index,
+	u32 kbase_device_csg_slot_suspending
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_DEVICE_HALTING_CSG;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kbase_device_id)
+		+ sizeof(kbase_device_csg_slot_index)
+		+ sizeof(kbase_device_csg_slot_suspending)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_id, sizeof(kbase_device_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_csg_slot_suspending, sizeof(kbase_device_csg_slot_suspending));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_device_suspend_csg(
+	struct kbase_tlstream *stream,
+	u32 kbase_device_id,
+	u32 kbase_device_csg_slot_index
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_DEVICE_SUSPEND_CSG;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kbase_device_id)
+		+ sizeof(kbase_device_csg_slot_index)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kbase_device_id, sizeof(kbase_device_id));
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &kbase_device_csg_slot_index, sizeof(kbase_device_csg_slot_index));
 
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
-void __kbase_tlstream_tl_kbase_device_deprogram_csg(
+void __kbase_tlstream_tl_kbase_device_csg_idle(
 	struct kbase_tlstream *stream,
 	u32 kbase_device_id,
-	u32 kbase_device_csg_slot_index)
+	u32 kbase_device_csg_slot_index
+)
 {
-	const u32 msg_id = KBASE_TL_KBASE_DEVICE_DEPROGRAM_CSG;
+	const u32 msg_id = KBASE_TL_KBASE_DEVICE_CSG_IDLE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
 		+ sizeof(kbase_device_id)
 		+ sizeof(kbase_device_csg_slot_index)
@@ -2029,7 +2313,8 @@
 void __kbase_tlstream_tl_kbase_new_ctx(
 	struct kbase_tlstream *stream,
 	u32 kernel_ctx_id,
-	u32 kbase_device_id)
+	u32 kbase_device_id
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_NEW_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2054,7 +2339,8 @@
 
 void __kbase_tlstream_tl_kbase_del_ctx(
 	struct kbase_tlstream *stream,
-	u32 kernel_ctx_id)
+	u32 kernel_ctx_id
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_DEL_CTX;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2077,7 +2363,8 @@
 void __kbase_tlstream_tl_kbase_ctx_assign_as(
 	struct kbase_tlstream *stream,
 	u32 kernel_ctx_id,
-	u32 kbase_device_as_index)
+	u32 kbase_device_as_index
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_CTX_ASSIGN_AS;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2102,7 +2389,8 @@
 
 void __kbase_tlstream_tl_kbase_ctx_unassign_as(
 	struct kbase_tlstream *stream,
-	u32 kernel_ctx_id)
+	u32 kernel_ctx_id
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_CTX_UNASSIGN_AS;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2125,12 +2413,15 @@
 void __kbase_tlstream_tl_kbase_new_kcpuqueue(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
+	u32 kcpu_queue_id,
 	u32 kernel_ctx_id,
-	u32 kcpuq_num_pending_cmds)
+	u32 kcpuq_num_pending_cmds
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_NEW_KCPUQUEUE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
 		+ sizeof(kcpu_queue)
+		+ sizeof(kcpu_queue_id)
 		+ sizeof(kernel_ctx_id)
 		+ sizeof(kcpuq_num_pending_cmds)
 		;
@@ -2145,6 +2436,8 @@
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &kcpu_queue, sizeof(kcpu_queue));
 	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue_id, sizeof(kcpu_queue_id));
+	pos = kbasep_serialize_bytes(buffer,
 		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &kcpuq_num_pending_cmds, sizeof(kcpuq_num_pending_cmds));
@@ -2154,7 +2447,8 @@
 
 void __kbase_tlstream_tl_kbase_del_kcpuqueue(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_DEL_KCPUQUEUE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2177,7 +2471,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_signal(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	const void *fence)
+	const void *fence
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2203,7 +2498,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_fence_wait(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	const void *fence)
+	const void *fence
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_FENCE_WAIT;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2230,15 +2526,16 @@
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
 	u64 cqs_obj_gpu_addr,
-	u32 cqs_obj_compare_value,
-	u32 cqs_obj_inherit_error)
+	u32 compare_value,
+	u32 inherit_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
 		+ sizeof(kcpu_queue)
 		+ sizeof(cqs_obj_gpu_addr)
-		+ sizeof(cqs_obj_compare_value)
-		+ sizeof(cqs_obj_inherit_error)
+		+ sizeof(compare_value)
+		+ sizeof(inherit_error)
 		;
 	char *buffer;
 	unsigned long acq_flags;
@@ -2253,9 +2550,9 @@
 	pos = kbasep_serialize_bytes(buffer,
 		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &cqs_obj_compare_value, sizeof(cqs_obj_compare_value));
+		pos, &compare_value, sizeof(compare_value));
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &cqs_obj_inherit_error, sizeof(cqs_obj_inherit_error));
+		pos, &inherit_error, sizeof(inherit_error));
 
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
@@ -2263,7 +2560,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u64 cqs_obj_gpu_addr)
+	u64 cqs_obj_gpu_addr
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2286,10 +2584,93 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_wait_operation(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 cqs_obj_gpu_addr,
+	u64 compare_value,
+	u32 condition,
+	u32 data_type,
+	u32 inherit_error
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_WAIT_OPERATION;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(cqs_obj_gpu_addr)
+		+ sizeof(compare_value)
+		+ sizeof(condition)
+		+ sizeof(data_type)
+		+ sizeof(inherit_error)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &compare_value, sizeof(compare_value));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &condition, sizeof(condition));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &data_type, sizeof(data_type));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &inherit_error, sizeof(inherit_error));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_cqs_set_operation(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u64 cqs_obj_gpu_addr,
+	u64 value,
+	u32 operation,
+	u32 data_type
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_CQS_SET_OPERATION;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(cqs_obj_gpu_addr)
+		+ sizeof(value)
+		+ sizeof(operation)
+		+ sizeof(data_type)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &cqs_obj_gpu_addr, sizeof(cqs_obj_gpu_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &value, sizeof(value));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &operation, sizeof(operation));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &data_type, sizeof(data_type));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_map_import(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u64 map_import_buf_gpu_addr)
+	u64 map_import_buf_gpu_addr
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_MAP_IMPORT;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2315,7 +2696,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u64 map_import_buf_gpu_addr)
+	u64 map_import_buf_gpu_addr
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2341,7 +2723,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_unmap_import_force(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u64 map_import_buf_gpu_addr)
+	u64 map_import_buf_gpu_addr
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT_FORCE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2364,61 +2747,10 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
-void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier(
-	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
-{
-	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(kcpu_queue)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &kcpu_queue, sizeof(kcpu_queue));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
-void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend(
-	struct kbase_tlstream *stream,
-	const void *kcpu_queue,
-	const void *group_suspend_buf,
-	u32 gpu_cmdq_grp_handle)
-{
-	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND;
-	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(kcpu_queue)
-		+ sizeof(group_suspend_buf)
-		+ sizeof(gpu_cmdq_grp_handle)
-		;
-	char *buffer;
-	unsigned long acq_flags;
-	size_t pos = 0;
-
-	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
-
-	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
-	pos = kbasep_serialize_timestamp(buffer, pos);
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &kcpu_queue, sizeof(kcpu_queue));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &group_suspend_buf, sizeof(group_suspend_buf));
-	pos = kbasep_serialize_bytes(buffer,
-		pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
-
-	kbase_tlstream_msgbuf_release(stream, acq_flags);
-}
-
 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_alloc(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2449,7 +2781,8 @@
 	u32 jit_alloc_bin_id,
 	u32 jit_alloc_max_allocations,
 	u32 jit_alloc_flags,
-	u32 jit_alloc_usage_id)
+	u32 jit_alloc_usage_id
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2498,7 +2831,8 @@
 
 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_alloc(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2520,7 +2854,8 @@
 
 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_enqueue_jit_free(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2543,7 +2878,8 @@
 void __kbase_tlstream_tl_kbase_array_item_kcpuqueue_enqueue_jit_free(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 jit_alloc_jit_id)
+	u32 jit_alloc_jit_id
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2568,7 +2904,8 @@
 
 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_enqueue_jit_free(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2588,9 +2925,64 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_error_barrier(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_ERROR_BARRIER;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_enqueue_group_suspend(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	const void *group_suspend_buf,
+	u32 gpu_cmdq_grp_handle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_ENQUEUE_GROUP_SUSPEND;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(group_suspend_buf)
+		+ sizeof(gpu_cmdq_grp_handle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &group_suspend_buf, sizeof(group_suspend_buf));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu_cmdq_grp_handle, sizeof(gpu_cmdq_grp_handle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2613,7 +3005,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_signal_end(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2638,7 +3031,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2661,7 +3055,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_fence_wait_end(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_FENCE_WAIT_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2686,7 +3081,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2709,7 +3105,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_end(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2735,7 +3132,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2758,9 +3156,87 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_start(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_wait_operation_end(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u32 execute_error
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_WAIT_OPERATION_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(execute_error)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &execute_error, sizeof(execute_error));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_kcpuqueue_execute_cqs_set_operation(
+	struct kbase_tlstream *stream,
+	const void *kcpu_queue,
+	u32 execute_error
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_CQS_SET_OPERATION;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kcpu_queue)
+		+ sizeof(execute_error)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kcpu_queue, sizeof(kcpu_queue));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &execute_error, sizeof(execute_error));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2783,7 +3259,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_map_import_end(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_MAP_IMPORT_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2808,7 +3285,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2831,7 +3309,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_end(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2856,7 +3335,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2879,7 +3359,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_unmap_import_force_end(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_FORCE_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2904,7 +3385,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_alloc_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_ALLOC_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2926,7 +3408,8 @@
 
 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_alloc_end(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2951,7 +3434,8 @@
 	const void *kcpu_queue,
 	u32 execute_error,
 	u64 jit_alloc_gpu_alloc_addr,
-	u64 jit_alloc_mmu_flags)
+	u64 jit_alloc_mmu_flags
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -2982,7 +3466,8 @@
 
 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_alloc_end(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3004,7 +3489,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_jit_free_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_JIT_FREE_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3026,7 +3512,8 @@
 
 void __kbase_tlstream_tl_kbase_array_begin_kcpuqueue_execute_jit_free_end(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3050,7 +3537,8 @@
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
 	u32 execute_error,
-	u64 jit_free_pages_used)
+	u64 jit_free_pages_used
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3078,7 +3566,8 @@
 
 void __kbase_tlstream_tl_kbase_array_end_kcpuqueue_execute_jit_free_end(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3100,7 +3589,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_error_barrier(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_ERROR_BARRIER;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3122,7 +3612,8 @@
 
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_start(
 	struct kbase_tlstream *stream,
-	const void *kcpu_queue)
+	const void *kcpu_queue
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_START;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3145,7 +3636,8 @@
 void __kbase_tlstream_tl_kbase_kcpuqueue_execute_group_suspend_end(
 	struct kbase_tlstream *stream,
 	const void *kcpu_queue,
-	u32 execute_error)
+	u32 execute_error
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_KCPUQUEUE_EXECUTE_GROUP_SUSPEND_END;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3168,10 +3660,172 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
+void __kbase_tlstream_tl_kbase_csffw_fw_reloading(
+	struct kbase_tlstream *stream,
+	u64 csffw_cycle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_RELOADING;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(csffw_cycle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &csffw_cycle, sizeof(csffw_cycle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_csffw_fw_enabling(
+	struct kbase_tlstream *stream,
+	u64 csffw_cycle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_ENABLING;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(csffw_cycle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &csffw_cycle, sizeof(csffw_cycle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_csffw_fw_request_sleep(
+	struct kbase_tlstream *stream,
+	u64 csffw_cycle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_REQUEST_SLEEP;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(csffw_cycle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &csffw_cycle, sizeof(csffw_cycle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_csffw_fw_request_wakeup(
+	struct kbase_tlstream *stream,
+	u64 csffw_cycle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(csffw_cycle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &csffw_cycle, sizeof(csffw_cycle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_csffw_fw_request_halt(
+	struct kbase_tlstream *stream,
+	u64 csffw_cycle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_REQUEST_HALT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(csffw_cycle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &csffw_cycle, sizeof(csffw_cycle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_csffw_fw_disabling(
+	struct kbase_tlstream *stream,
+	u64 csffw_cycle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_DISABLING;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(csffw_cycle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &csffw_cycle, sizeof(csffw_cycle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_tl_kbase_csffw_fw_off(
+	struct kbase_tlstream *stream,
+	u64 csffw_cycle
+)
+{
+	const u32 msg_id = KBASE_TL_KBASE_CSFFW_FW_OFF;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(csffw_cycle)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &csffw_cycle, sizeof(csffw_cycle));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
 void __kbase_tlstream_tl_kbase_csffw_tlstream_overflow(
 	struct kbase_tlstream *stream,
 	u64 csffw_timestamp,
-	u64 csffw_cycle)
+	u64 csffw_cycle
+)
 {
 	const u32 msg_id = KBASE_TL_KBASE_CSFFW_TLSTREAM_OVERFLOW;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
@@ -3194,13 +3848,16 @@
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }
 
-void __kbase_tlstream_tl_kbase_csffw_reset(
+void __kbase_tlstream_aux_pm_state(
 	struct kbase_tlstream *stream,
-	u64 csffw_cycle)
+	u32 core_type,
+	u64 core_state_bitset
+)
 {
-	const u32 msg_id = KBASE_TL_KBASE_CSFFW_RESET;
+	const u32 msg_id = KBASE_AUX_PM_STATE;
 	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
-		+ sizeof(csffw_cycle)
+		+ sizeof(core_type)
+		+ sizeof(core_state_bitset)
 		;
 	char *buffer;
 	unsigned long acq_flags;
@@ -3211,7 +3868,354 @@
 	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
 	pos = kbasep_serialize_timestamp(buffer, pos);
 	pos = kbasep_serialize_bytes(buffer,
-		pos, &csffw_cycle, sizeof(csffw_cycle));
+		pos, &core_type, sizeof(core_type));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &core_state_bitset, sizeof(core_state_bitset));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagefault(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u32 as_nr,
+	u64 page_cnt_change
+)
+{
+	const u32 msg_id = KBASE_AUX_PAGEFAULT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx_nr)
+		+ sizeof(as_nr)
+		+ sizeof(page_cnt_change)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &as_nr, sizeof(as_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &page_cnt_change, sizeof(page_cnt_change));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_pagesalloc(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u64 page_cnt
+)
+{
+	const u32 msg_id = KBASE_AUX_PAGESALLOC;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx_nr)
+		+ sizeof(page_cnt)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &page_cnt, sizeof(page_cnt));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_devfreq_target(
+	struct kbase_tlstream *stream,
+	u64 target_freq
+)
+{
+	const u32 msg_id = KBASE_AUX_DEVFREQ_TARGET;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(target_freq)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &target_freq, sizeof(target_freq));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_jit_stats(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u32 bid,
+	u32 max_allocs,
+	u32 allocs,
+	u32 va_pages,
+	u32 ph_pages
+)
+{
+	const u32 msg_id = KBASE_AUX_JIT_STATS;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx_nr)
+		+ sizeof(bid)
+		+ sizeof(max_allocs)
+		+ sizeof(allocs)
+		+ sizeof(va_pages)
+		+ sizeof(ph_pages)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &bid, sizeof(bid));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &max_allocs, sizeof(max_allocs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &allocs, sizeof(allocs));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &va_pages, sizeof(va_pages));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ph_pages, sizeof(ph_pages));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_tiler_heap_stats(
+	struct kbase_tlstream *stream,
+	u32 ctx_nr,
+	u64 heap_id,
+	u32 va_pages,
+	u32 ph_pages,
+	u32 max_chunks,
+	u32 chunk_size,
+	u32 chunk_count,
+	u32 target_in_flight,
+	u32 nr_in_flight
+)
+{
+	const u32 msg_id = KBASE_AUX_TILER_HEAP_STATS;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx_nr)
+		+ sizeof(heap_id)
+		+ sizeof(va_pages)
+		+ sizeof(ph_pages)
+		+ sizeof(max_chunks)
+		+ sizeof(chunk_size)
+		+ sizeof(chunk_count)
+		+ sizeof(target_in_flight)
+		+ sizeof(nr_in_flight)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx_nr, sizeof(ctx_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &heap_id, sizeof(heap_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &va_pages, sizeof(va_pages));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ph_pages, sizeof(ph_pages));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &max_chunks, sizeof(max_chunks));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &chunk_size, sizeof(chunk_size));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &chunk_count, sizeof(chunk_count));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &target_in_flight, sizeof(target_in_flight));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &nr_in_flight, sizeof(nr_in_flight));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_event_job_slot(
+	struct kbase_tlstream *stream,
+	const void *ctx,
+	u32 slot_nr,
+	u32 atom_nr,
+	u32 event
+)
+{
+	const u32 msg_id = KBASE_AUX_EVENT_JOB_SLOT;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(ctx)
+		+ sizeof(slot_nr)
+		+ sizeof(atom_nr)
+		+ sizeof(event)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &ctx, sizeof(ctx));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &slot_nr, sizeof(slot_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &atom_nr, sizeof(atom_nr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &event, sizeof(event));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_start(
+	struct kbase_tlstream *stream,
+	const void *gpu
+)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_enter_end(
+	struct kbase_tlstream *stream,
+	const void *gpu
+)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_ENTER_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_mmu_command(
+	struct kbase_tlstream *stream,
+	u32 kernel_ctx_id,
+	u32 mmu_cmd_id,
+	u32 mmu_synchronicity,
+	u64 mmu_lock_addr,
+	u32 mmu_lock_page_num
+)
+{
+	const u32 msg_id = KBASE_AUX_MMU_COMMAND;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(kernel_ctx_id)
+		+ sizeof(mmu_cmd_id)
+		+ sizeof(mmu_synchronicity)
+		+ sizeof(mmu_lock_addr)
+		+ sizeof(mmu_lock_page_num)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &kernel_ctx_id, sizeof(kernel_ctx_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &mmu_cmd_id, sizeof(mmu_cmd_id));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &mmu_synchronicity, sizeof(mmu_synchronicity));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &mmu_lock_addr, sizeof(mmu_lock_addr));
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &mmu_lock_page_num, sizeof(mmu_lock_page_num));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_start(
+	struct kbase_tlstream *stream,
+	const void *gpu
+)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_START;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
+
+	kbase_tlstream_msgbuf_release(stream, acq_flags);
+}
+
+void __kbase_tlstream_aux_protected_leave_end(
+	struct kbase_tlstream *stream,
+	const void *gpu
+)
+{
+	const u32 msg_id = KBASE_AUX_PROTECTED_LEAVE_END;
+	const size_t msg_size = sizeof(msg_id) + sizeof(u64)
+		+ sizeof(gpu)
+		;
+	char *buffer;
+	unsigned long acq_flags;
+	size_t pos = 0;
+
+	buffer = kbase_tlstream_msgbuf_acquire(stream, msg_size, &acq_flags);
+
+	pos = kbasep_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+	pos = kbasep_serialize_timestamp(buffer, pos);
+	pos = kbasep_serialize_bytes(buffer,
+		pos, &gpu, sizeof(gpu));
 
 	kbase_tlstream_msgbuf_release(stream, acq_flags);
 }

--
Gitblit v1.6.2
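
Every emitter touched by this patch follows the same autogenerated shape: sum up the payload size as a chain of sizeof() terms (the extra sizeof(u64) is the timestamp slot), acquire a message buffer from the timeline stream under acq_flags, serialize the message id, a timestamp, and each argument in declaration order, then release the buffer. The snippet below is only a minimal, self-contained sketch of that pattern for illustration; demo_serialize_bytes, demo_tracepoint, the msg_id value, and the fixed 64-byte buffer are hypothetical stand-ins, not kbase symbols, and the real driver goes through kbase_tlstream_msgbuf_acquire()/release() and kbasep_serialize_timestamp() instead.

/*
 * Illustrative sketch only: mirrors the acquire/serialize/release shape of
 * the generated __kbase_tlstream_* emitters. All demo_* names are invented.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static size_t demo_serialize_bytes(char *buf, size_t pos,
				   const void *src, size_t len)
{
	/* kbasep_serialize_bytes() performs the same bounded copy-and-advance */
	memcpy(buf + pos, src, len);
	return pos + len;
}

static void demo_tracepoint(uint32_t device_id, uint32_t csg_slot_index)
{
	const uint32_t msg_id = 42;	/* stand-in for a KBASE_TL_* message id */
	const uint64_t timestamp = 0;	/* kbasep_serialize_timestamp() in kbase */
	const size_t msg_size = sizeof(msg_id) + sizeof(timestamp)
		+ sizeof(device_id)
		+ sizeof(csg_slot_index);
	char buffer[64];		/* kbase acquires this from the stream */
	size_t pos = 0;

	pos = demo_serialize_bytes(buffer, pos, &msg_id, sizeof(msg_id));
	pos = demo_serialize_bytes(buffer, pos, &timestamp, sizeof(timestamp));
	pos = demo_serialize_bytes(buffer, pos, &device_id, sizeof(device_id));
	pos = demo_serialize_bytes(buffer, pos, &csg_slot_index,
				   sizeof(csg_slot_index));

	printf("serialized %zu of %zu bytes\n", pos, msg_size);
}

int main(void)
{
	demo_tracepoint(0, 1);
	return 0;
}

Because every argument contributes exactly sizeof(arg) bytes and nothing is variable-length, msg_size can be computed as a constant sum before the buffer is acquired, which is why the generated functions in this patch never need to bounds-check individual writes.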