.. | .. |
---|
1 | 1 | // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note |
---|
2 | 2 | /* |
---|
3 | 3 | * |
---|
4 | | - * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved. |
---|
| 4 | + * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved. |
---|
5 | 5 | * |
---|
6 | 6 | * This program is free software and is provided to you under the terms of the |
---|
7 | 7 | * GNU General Public License version 2 as published by the Free Software |
---|
.. | .. |
---|
23 | 23 | #include <mali_kbase.h> |
---|
24 | 24 | #include <linux/seq_file.h> |
---|
25 | 25 | #include <linux/delay.h> |
---|
26 | | -#include <csf/mali_kbase_csf_trace_buffer.h> |
---|
| 26 | +#include <backend/gpu/mali_kbase_pm_internal.h> |
---|
27 | 27 | |
---|
28 | 28 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
---|
29 | 29 | #include "mali_kbase_csf_tl_reader.h" |
---|
| 30 | + |
---|
| 31 | +/* Wait time (in milliseconds) to be used cumulatively for all the CSG slots. |
---|
| 32 | + * Since scheduler lock is held when STATUS_UPDATE request is sent, there won't be |
---|
| 33 | + * any other Host request pending on the FW side and usually FW would be responsive |
---|
| 34 | + * to the Doorbell IRQs as it won't do any polling for a long time and also it won't |
---|
| 35 | + * have to wait for any HW state transition to complete for publishing the status. |
---|
| 36 | + * So it is reasonable to expect that handling of STATUS_UPDATE request would be |
---|
| 37 | + * relatively very quick. |
---|
| 38 | + */ |
---|
| 39 | +#define STATUS_UPDATE_WAIT_TIMEOUT 500 |
---|
| 40 | + |
---|
| 41 | +/* The bitmask of CSG slots for which the STATUS_UPDATE request completed. |
---|
| 42 | + * The access to it is serialized with scheduler lock, so at a time it would |
---|
| 43 | + * get used either for "active_groups" or per context "groups" debugfs file. |
---|
| 44 | + */ |
---|
| 45 | +static DECLARE_BITMAP(csg_slots_status_updated, MAX_SUPPORTED_CSGS); |
---|
| 46 | + |
---|
| 47 | +static |
---|
| 48 | +bool csg_slot_status_update_finish(struct kbase_device *kbdev, u32 csg_nr) |
---|
| 49 | +{ |
---|
| 50 | + struct kbase_csf_cmd_stream_group_info const *const ginfo = |
---|
| 51 | + &kbdev->csf.global_iface.groups[csg_nr]; |
---|
| 52 | + |
---|
| 53 | + return !((kbase_csf_firmware_csg_input_read(ginfo, CSG_REQ) ^ |
---|
| 54 | + kbase_csf_firmware_csg_output(ginfo, CSG_ACK)) & |
---|
| 55 | + CSG_REQ_STATUS_UPDATE_MASK); |
---|
| 56 | +} |
---|
| 57 | + |
---|
| 58 | +static |
---|
| 59 | +bool csg_slots_status_update_finish(struct kbase_device *kbdev, |
---|
| 60 | + const unsigned long *slots_mask) |
---|
| 61 | +{ |
---|
| 62 | + const u32 max_csg_slots = kbdev->csf.global_iface.group_num; |
---|
| 63 | + bool changed = false; |
---|
| 64 | + u32 csg_nr; |
---|
| 65 | + |
---|
| 66 | + lockdep_assert_held(&kbdev->csf.scheduler.lock); |
---|
| 67 | + |
---|
| 68 | + for_each_set_bit(csg_nr, slots_mask, max_csg_slots) { |
---|
| 69 | + if (csg_slot_status_update_finish(kbdev, csg_nr)) { |
---|
| 70 | + set_bit(csg_nr, csg_slots_status_updated); |
---|
| 71 | + changed = true; |
---|
| 72 | + } |
---|
| 73 | + } |
---|
| 74 | + |
---|
| 75 | + return changed; |
---|
| 76 | +} |
---|
| 77 | + |
---|
| 78 | +static void wait_csg_slots_status_update_finish(struct kbase_device *kbdev, |
---|
| 79 | + unsigned long *slots_mask) |
---|
| 80 | +{ |
---|
| 81 | + const u32 max_csg_slots = kbdev->csf.global_iface.group_num; |
---|
| 82 | + long remaining = kbase_csf_timeout_in_jiffies(STATUS_UPDATE_WAIT_TIMEOUT); |
---|
| 83 | + |
---|
| 84 | + lockdep_assert_held(&kbdev->csf.scheduler.lock); |
---|
| 85 | + |
---|
| 86 | + bitmap_zero(csg_slots_status_updated, max_csg_slots); |
---|
| 87 | + |
---|
| 88 | + while (!bitmap_empty(slots_mask, max_csg_slots) && remaining) { |
---|
| 89 | + remaining = wait_event_timeout(kbdev->csf.event_wait, |
---|
| 90 | + csg_slots_status_update_finish(kbdev, slots_mask), |
---|
| 91 | + remaining); |
---|
| 92 | + if (likely(remaining)) { |
---|
| 93 | + bitmap_andnot(slots_mask, slots_mask, |
---|
| 94 | + csg_slots_status_updated, max_csg_slots); |
---|
| 95 | + } else { |
---|
| 96 | + dev_warn(kbdev->dev, |
---|
| 97 | + "STATUS_UPDATE request timed out for slots 0x%lx", |
---|
| 98 | + slots_mask[0]); |
---|
| 99 | + } |
---|
| 100 | + } |
---|
| 101 | +} |
---|
| 102 | + |
---|
| 103 | +void kbase_csf_debugfs_update_active_groups_status(struct kbase_device *kbdev) |
---|
| 104 | +{ |
---|
| 105 | + u32 max_csg_slots = kbdev->csf.global_iface.group_num; |
---|
| 106 | + DECLARE_BITMAP(used_csgs, MAX_SUPPORTED_CSGS) = { 0 }; |
---|
| 107 | + u32 csg_nr; |
---|
| 108 | + unsigned long flags; |
---|
| 109 | + |
---|
| 110 | + lockdep_assert_held(&kbdev->csf.scheduler.lock); |
---|
| 111 | + |
---|
| 112 | + /* Global doorbell ring for CSG STATUS_UPDATE request or User doorbell |
---|
| 113 | + * ring for Extract offset update, shall not be made when MCU has been |
---|
| 114 | + * put to sleep otherwise it will undesirably make MCU exit the sleep |
---|
| 115 | + * state. Also it isn't really needed as FW will implicitly update the |
---|
| 116 | + * status of all on-slot groups when MCU sleep request is sent to it. |
---|
| 117 | + */ |
---|
| 118 | + if (kbdev->csf.scheduler.state == SCHED_SLEEPING) { |
---|
| 119 | + /* Wait for the MCU sleep request to complete. */ |
---|
| 120 | + kbase_pm_wait_for_desired_state(kbdev); |
---|
| 121 | + bitmap_copy(csg_slots_status_updated, |
---|
| 122 | + kbdev->csf.scheduler.csg_inuse_bitmap, max_csg_slots); |
---|
| 123 | + return; |
---|
| 124 | + } |
---|
| 125 | + |
---|
| 126 | + for (csg_nr = 0; csg_nr < max_csg_slots; csg_nr++) { |
---|
| 127 | + struct kbase_queue_group *const group = |
---|
| 128 | + kbdev->csf.scheduler.csg_slots[csg_nr].resident_group; |
---|
| 129 | + if (!group) |
---|
| 130 | + continue; |
---|
| 131 | + /* Ring the User doorbell for FW to update the Extract offset */ |
---|
| 132 | + kbase_csf_ring_doorbell(kbdev, group->doorbell_nr); |
---|
| 133 | + set_bit(csg_nr, used_csgs); |
---|
| 134 | + } |
---|
| 135 | + |
---|
| 136 | + /* Return early if there are no on-slot groups */ |
---|
| 137 | + if (bitmap_empty(used_csgs, max_csg_slots)) |
---|
| 138 | + return; |
---|
| 139 | + |
---|
| 140 | + kbase_csf_scheduler_spin_lock(kbdev, &flags); |
---|
| 141 | + for_each_set_bit(csg_nr, used_csgs, max_csg_slots) { |
---|
| 142 | + struct kbase_csf_cmd_stream_group_info const *const ginfo = |
---|
| 143 | + &kbdev->csf.global_iface.groups[csg_nr]; |
---|
| 144 | + kbase_csf_firmware_csg_input_mask(ginfo, CSG_REQ, |
---|
| 145 | + ~kbase_csf_firmware_csg_output(ginfo, CSG_ACK), |
---|
| 146 | + CSG_REQ_STATUS_UPDATE_MASK); |
---|
| 147 | + } |
---|
| 148 | + |
---|
| 149 | + BUILD_BUG_ON(MAX_SUPPORTED_CSGS > (sizeof(used_csgs[0]) * BITS_PER_BYTE)); |
---|
| 150 | + kbase_csf_ring_csg_slots_doorbell(kbdev, used_csgs[0]); |
---|
| 151 | + kbase_csf_scheduler_spin_unlock(kbdev, flags); |
---|
| 152 | + wait_csg_slots_status_update_finish(kbdev, used_csgs); |
---|
| 153 | + /* Wait for the User doorbell ring to take effect */ |
---|
| 154 | + msleep(100); |
---|
| 155 | +} |
---|
| 156 | + |
---|
| 157 | +#define MAX_SCHED_STATE_STRING_LEN (16) |
---|
| 158 | +static const char *scheduler_state_to_string(struct kbase_device *kbdev, |
---|
| 159 | + enum kbase_csf_scheduler_state sched_state) |
---|
| 160 | +{ |
---|
| 161 | + switch (sched_state) { |
---|
| 162 | + case SCHED_BUSY: |
---|
| 163 | + return "BUSY"; |
---|
| 164 | + case SCHED_INACTIVE: |
---|
| 165 | + return "INACTIVE"; |
---|
| 166 | + case SCHED_SUSPENDED: |
---|
| 167 | + return "SUSPENDED"; |
---|
| 168 | +#ifdef KBASE_PM_RUNTIME |
---|
| 169 | + case SCHED_SLEEPING: |
---|
| 170 | + return "SLEEPING"; |
---|
| 171 | +#endif |
---|
| 172 | + default: |
---|
| 173 | + dev_warn(kbdev->dev, "Unknown Scheduler state %d", sched_state); |
---|
| 174 | + return NULL; |
---|
| 175 | + } |
---|
| 176 | +} |
---|
30 | 177 | |
---|
31 | 178 | /** |
---|
32 | 179 | * blocked_reason_to_string() - Convert blocking reason id to a string |
---|
.. | .. |
---|
55 | 202 | return cs_blocked_reason[reason_id]; |
---|
56 | 203 | } |
---|
57 | 204 | |
---|
| 205 | +static bool sb_source_supported(u32 glb_version) |
---|
| 206 | +{ |
---|
| 207 | + bool supported = false; |
---|
| 208 | + |
---|
| 209 | + if (((GLB_VERSION_MAJOR_GET(glb_version) == 3) && |
---|
| 210 | + (GLB_VERSION_MINOR_GET(glb_version) >= 5)) || |
---|
| 211 | + ((GLB_VERSION_MAJOR_GET(glb_version) == 2) && |
---|
| 212 | + (GLB_VERSION_MINOR_GET(glb_version) >= 6)) || |
---|
| 213 | + ((GLB_VERSION_MAJOR_GET(glb_version) == 1) && |
---|
| 214 | + (GLB_VERSION_MINOR_GET(glb_version) >= 3))) |
---|
| 215 | + supported = true; |
---|
| 216 | + |
---|
| 217 | + return supported; |
---|
| 218 | +} |
---|
| 219 | + |
---|
58 | 220 | static void kbasep_csf_scheduler_dump_active_queue_cs_status_wait( |
---|
59 | | - struct seq_file *file, u32 wait_status, u32 wait_sync_value, |
---|
60 | | - u64 wait_sync_live_value, u64 wait_sync_pointer, u32 sb_status, |
---|
61 | | - u32 blocked_reason) |
---|
| 221 | + struct seq_file *file, u32 glb_version, u32 wait_status, u32 wait_sync_value, |
---|
| 222 | + u64 wait_sync_live_value, u64 wait_sync_pointer, u32 sb_status, u32 blocked_reason) |
---|
62 | 223 | { |
---|
63 | 224 | #define WAITING "Waiting" |
---|
64 | 225 | #define NOT_WAITING "Not waiting" |
---|
65 | 226 | |
---|
66 | 227 | seq_printf(file, "SB_MASK: %d\n", |
---|
67 | 228 | CS_STATUS_WAIT_SB_MASK_GET(wait_status)); |
---|
| 229 | + if (sb_source_supported(glb_version)) |
---|
| 230 | + seq_printf(file, "SB_SOURCE: %d\n", CS_STATUS_WAIT_SB_SOURCE_GET(wait_status)); |
---|
68 | 231 | seq_printf(file, "PROGRESS_WAIT: %s\n", |
---|
69 | 232 | CS_STATUS_WAIT_PROGRESS_WAIT_GET(wait_status) ? |
---|
70 | 233 | WAITING : NOT_WAITING); |
---|
.. | .. |
---|
134 | 297 | struct kbase_vmap_struct *mapping; |
---|
135 | 298 | u64 *evt; |
---|
136 | 299 | u64 wait_sync_live_value; |
---|
| 300 | + u32 glb_version; |
---|
137 | 301 | |
---|
138 | 302 | if (!queue) |
---|
139 | 303 | return; |
---|
140 | 304 | |
---|
| 305 | + glb_version = queue->kctx->kbdev->csf.global_iface.version; |
---|
| 306 | + |
---|
141 | 307 | if (WARN_ON(queue->csi_index == KBASEP_IF_NR_INVALID || |
---|
142 | 308 | !queue->group)) |
---|
143 | 309 | return; |
---|
144 | | - |
---|
145 | | - /* Ring the doorbell to have firmware update CS_EXTRACT */ |
---|
146 | | - kbase_csf_ring_cs_user_doorbell(queue->kctx->kbdev, queue); |
---|
147 | | - msleep(100); |
---|
148 | 310 | |
---|
149 | 311 | addr = (u32 *)queue->user_io_addr; |
---|
150 | 312 | cs_insert = addr[CS_INSERT_LO/4] | ((u64)addr[CS_INSERT_HI/4] << 32); |
---|
.. | .. |
---|
154 | 316 | cs_active = addr[CS_ACTIVE/4]; |
---|
155 | 317 | |
---|
156 | 318 | #define KBASEP_CSF_DEBUGFS_CS_HEADER_USER_IO \ |
---|
157 | | - "Bind Idx, Ringbuf addr, Prio, Insert offset, Extract offset, Active, Doorbell\n" |
---|
| 319 | + "Bind Idx, Ringbuf addr, Size, Prio, Insert offset, Extract offset, Active, Doorbell\n" |
---|
158 | 320 | |
---|
159 | | - seq_printf(file, KBASEP_CSF_DEBUGFS_CS_HEADER_USER_IO "%8d, %16llx, %4u, %16llx, %16llx, %6u, %8d\n", |
---|
160 | | - queue->csi_index, queue->base_addr, queue->priority, |
---|
161 | | - cs_insert, cs_extract, cs_active, queue->doorbell_nr); |
---|
| 321 | + seq_printf(file, KBASEP_CSF_DEBUGFS_CS_HEADER_USER_IO "%8d, %16llx, %8x, %4u, %16llx, %16llx, %6u, %8d\n", |
---|
| 322 | + queue->csi_index, queue->base_addr, |
---|
| 323 | + queue->size, |
---|
| 324 | + queue->priority, cs_insert, cs_extract, cs_active, queue->doorbell_nr); |
---|
162 | 325 | |
---|
163 | 326 | /* Print status information for blocked group waiting for sync object. For on-slot queues, |
---|
164 | 327 | * if cs_trace is enabled, dump the interface's cs_trace configuration. |
---|
165 | 328 | */ |
---|
166 | 329 | if (kbase_csf_scheduler_group_get_slot(queue->group) < 0) { |
---|
| 330 | + seq_printf(file, "SAVED_CMD_PTR: 0x%llx\n", queue->saved_cmd_ptr); |
---|
167 | 331 | if (CS_STATUS_WAIT_SYNC_WAIT_GET(queue->status_wait)) { |
---|
168 | 332 | wait_status = queue->status_wait; |
---|
169 | 333 | wait_sync_value = queue->sync_value; |
---|
.. | .. |
---|
180 | 344 | } |
---|
181 | 345 | |
---|
182 | 346 | kbasep_csf_scheduler_dump_active_queue_cs_status_wait( |
---|
183 | | - file, wait_status, wait_sync_value, |
---|
184 | | - wait_sync_live_value, wait_sync_pointer, |
---|
185 | | - sb_status, blocked_reason); |
---|
| 347 | + file, glb_version, wait_status, wait_sync_value, |
---|
| 348 | + wait_sync_live_value, wait_sync_pointer, sb_status, blocked_reason); |
---|
186 | 349 | } |
---|
187 | 350 | } else { |
---|
188 | 351 | struct kbase_device const *const kbdev = |
---|
.. | .. |
---|
237 | 400 | } |
---|
238 | 401 | |
---|
239 | 402 | kbasep_csf_scheduler_dump_active_queue_cs_status_wait( |
---|
240 | | - file, wait_status, wait_sync_value, |
---|
241 | | - wait_sync_live_value, wait_sync_pointer, sb_status, |
---|
242 | | - blocked_reason); |
---|
| 403 | + file, glb_version, wait_status, wait_sync_value, wait_sync_live_value, |
---|
| 404 | + wait_sync_pointer, sb_status, blocked_reason); |
---|
243 | 405 | /* Dealing with cs_trace */ |
---|
244 | 406 | if (kbase_csf_scheduler_queue_has_trace(queue)) |
---|
245 | 407 | kbasep_csf_scheduler_dump_active_cs_trace(file, stream); |
---|
.. | .. |
---|
250 | 412 | seq_puts(file, "\n"); |
---|
251 | 413 | } |
---|
252 | 414 | |
---|
253 | | -/* Waiting timeout for STATUS_UPDATE acknowledgment, in milliseconds */ |
---|
254 | | -#define CSF_STATUS_UPDATE_TO_MS (100) |
---|
255 | | - |
---|
256 | 415 | static void kbasep_csf_scheduler_dump_active_group(struct seq_file *file, |
---|
257 | 416 | struct kbase_queue_group *const group) |
---|
258 | 417 | { |
---|
259 | 418 | if (kbase_csf_scheduler_group_get_slot(group) >= 0) { |
---|
260 | 419 | struct kbase_device *const kbdev = group->kctx->kbdev; |
---|
261 | | - unsigned long flags; |
---|
262 | 420 | u32 ep_c, ep_r; |
---|
263 | 421 | char exclusive; |
---|
| 422 | + char idle = 'N'; |
---|
264 | 423 | struct kbase_csf_cmd_stream_group_info const *const ginfo = |
---|
265 | 424 | &kbdev->csf.global_iface.groups[group->csg_nr]; |
---|
266 | | - long remaining = |
---|
267 | | - kbase_csf_timeout_in_jiffies(CSF_STATUS_UPDATE_TO_MS); |
---|
268 | 425 | u8 slot_priority = |
---|
269 | 426 | kbdev->csf.scheduler.csg_slots[group->csg_nr].priority; |
---|
270 | | - |
---|
271 | | - kbase_csf_scheduler_spin_lock(kbdev, &flags); |
---|
272 | | - kbase_csf_firmware_csg_input_mask(ginfo, CSG_REQ, |
---|
273 | | - ~kbase_csf_firmware_csg_output(ginfo, CSG_ACK), |
---|
274 | | - CSG_REQ_STATUS_UPDATE_MASK); |
---|
275 | | - kbase_csf_scheduler_spin_unlock(kbdev, flags); |
---|
276 | | - kbase_csf_ring_csg_doorbell(kbdev, group->csg_nr); |
---|
277 | | - |
---|
278 | | - remaining = wait_event_timeout(kbdev->csf.event_wait, |
---|
279 | | - !((kbase_csf_firmware_csg_input_read(ginfo, CSG_REQ) ^ |
---|
280 | | - kbase_csf_firmware_csg_output(ginfo, CSG_ACK)) & |
---|
281 | | - CSG_REQ_STATUS_UPDATE_MASK), remaining); |
---|
282 | 427 | |
---|
283 | 428 | ep_c = kbase_csf_firmware_csg_output(ginfo, |
---|
284 | 429 | CSG_STATUS_EP_CURRENT); |
---|
.. | .. |
---|
291 | 436 | else |
---|
292 | 437 | exclusive = '0'; |
---|
293 | 438 | |
---|
294 | | - if (!remaining) { |
---|
295 | | - dev_err(kbdev->dev, |
---|
296 | | - "Timed out for STATUS_UPDATE on group %d on slot %d", |
---|
297 | | - group->handle, group->csg_nr); |
---|
| 439 | + if (kbase_csf_firmware_csg_output(ginfo, CSG_STATUS_STATE) & |
---|
| 440 | + CSG_STATUS_STATE_IDLE_MASK) |
---|
| 441 | + idle = 'Y'; |
---|
298 | 442 | |
---|
| 443 | + if (!test_bit(group->csg_nr, csg_slots_status_updated)) { |
---|
299 | 444 | seq_printf(file, "*** Warn: Timed out for STATUS_UPDATE on slot %d\n", |
---|
300 | 445 | group->csg_nr); |
---|
301 | | - seq_printf(file, "*** The following group-record is likely stale\n"); |
---|
| 446 | + seq_puts(file, "*** The following group-record is likely stale\n"); |
---|
302 | 447 | } |
---|
303 | 448 | |
---|
304 | | - seq_puts(file, "GroupID, CSG NR, CSG Prio, Run State, Priority, C_EP(Alloc/Req), F_EP(Alloc/Req), T_EP(Alloc/Req), Exclusive\n"); |
---|
305 | | - seq_printf(file, "%7d, %6d, %8d, %9d, %8d, %11d/%3d, %11d/%3d, %11d/%3d, %9c\n", |
---|
| 449 | + seq_puts(file, "GroupID, CSG NR, CSG Prio, Run State, Priority, C_EP(Alloc/Req), F_EP(Alloc/Req), T_EP(Alloc/Req), Exclusive, Idle\n"); |
---|
| 450 | + seq_printf(file, "%7d, %6d, %8d, %9d, %8d, %11d/%3d, %11d/%3d, %11d/%3d, %9c, %4c\n", |
---|
306 | 451 | group->handle, |
---|
307 | 452 | group->csg_nr, |
---|
308 | 453 | slot_priority, |
---|
.. | .. |
---|
314 | 459 | CSG_STATUS_EP_REQ_FRAGMENT_EP_GET(ep_r), |
---|
315 | 460 | CSG_STATUS_EP_CURRENT_TILER_EP_GET(ep_c), |
---|
316 | 461 | CSG_STATUS_EP_REQ_TILER_EP_GET(ep_r), |
---|
317 | | - exclusive); |
---|
| 462 | + exclusive, |
---|
| 463 | + idle); |
---|
318 | 464 | } else { |
---|
319 | 465 | seq_puts(file, "GroupID, CSG NR, Run State, Priority\n"); |
---|
320 | 466 | seq_printf(file, "%7d, %6d, %9d, %8d\n", |
---|
.. | .. |
---|
352 | 498 | { |
---|
353 | 499 | u32 gr; |
---|
354 | 500 | struct kbase_context *const kctx = file->private; |
---|
355 | | - struct kbase_device *const kbdev = kctx->kbdev; |
---|
| 501 | + struct kbase_device *kbdev; |
---|
356 | 502 | |
---|
357 | 503 | if (WARN_ON(!kctx)) |
---|
358 | 504 | return -EINVAL; |
---|
| 505 | + |
---|
| 506 | + kbdev = kctx->kbdev; |
---|
359 | 507 | |
---|
360 | 508 | seq_printf(file, "MALI_CSF_CSG_DEBUGFS_VERSION: v%u\n", |
---|
361 | 509 | MALI_CSF_CSG_DEBUGFS_VERSION); |
---|
362 | 510 | |
---|
363 | 511 | mutex_lock(&kctx->csf.lock); |
---|
364 | 512 | kbase_csf_scheduler_lock(kbdev); |
---|
| 513 | + kbase_csf_debugfs_update_active_groups_status(kbdev); |
---|
365 | 514 | for (gr = 0; gr < MAX_QUEUE_GROUP_NUM; gr++) { |
---|
366 | 515 | struct kbase_queue_group *const group = |
---|
367 | 516 | kctx->csf.queue_groups[gr]; |
---|
.. | .. |
---|
395 | 544 | MALI_CSF_CSG_DEBUGFS_VERSION); |
---|
396 | 545 | |
---|
397 | 546 | kbase_csf_scheduler_lock(kbdev); |
---|
| 547 | + kbase_csf_debugfs_update_active_groups_status(kbdev); |
---|
398 | 548 | for (csg_nr = 0; csg_nr < num_groups; csg_nr++) { |
---|
399 | 549 | struct kbase_queue_group *const group = |
---|
400 | 550 | kbdev->csf.scheduler.csg_slots[csg_nr].resident_group; |
---|
.. | .. |
---|
436 | 586 | void kbase_csf_queue_group_debugfs_init(struct kbase_context *kctx) |
---|
437 | 587 | { |
---|
438 | 588 | struct dentry *file; |
---|
439 | | -#if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE) |
---|
440 | 589 | const mode_t mode = 0444; |
---|
441 | | -#else |
---|
442 | | - const mode_t mode = 0400; |
---|
443 | | -#endif |
---|
444 | 590 | |
---|
445 | 591 | if (WARN_ON(!kctx || IS_ERR_OR_NULL(kctx->kctx_dentry))) |
---|
446 | 592 | return; |
---|
.. | .. |
---|
492 | 638 | return 0; |
---|
493 | 639 | } |
---|
494 | 640 | |
---|
495 | | -DEFINE_SIMPLE_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_enabled_fops, |
---|
496 | | - &kbasep_csf_debugfs_scheduling_timer_enabled_get, |
---|
497 | | - &kbasep_csf_debugfs_scheduling_timer_enabled_set, |
---|
498 | | - "%llu\n"); |
---|
499 | | -DEFINE_SIMPLE_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_kick_fops, |
---|
500 | | - NULL, |
---|
501 | | - &kbasep_csf_debugfs_scheduling_timer_kick_set, |
---|
502 | | - "%llu\n"); |
---|
| 641 | +DEFINE_DEBUGFS_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_enabled_fops, |
---|
| 642 | + &kbasep_csf_debugfs_scheduling_timer_enabled_get, |
---|
| 643 | + &kbasep_csf_debugfs_scheduling_timer_enabled_set, "%llu\n"); |
---|
| 644 | +DEFINE_DEBUGFS_ATTRIBUTE(kbasep_csf_debugfs_scheduling_timer_kick_fops, NULL, |
---|
| 645 | + &kbasep_csf_debugfs_scheduling_timer_kick_set, "%llu\n"); |
---|
503 | 646 | |
---|
504 | 647 | /** |
---|
505 | | - * kbase_csf_debugfs_scheduler_suspend_get() - get if the scheduler is suspended. |
---|
| 648 | + * kbase_csf_debugfs_scheduler_state_get() - Get the state of scheduler. |
---|
506 | 649 | * |
---|
507 | | - * @data: The debugfs dentry private data, a pointer to kbase_device |
---|
508 | | - * @val: The debugfs output value, boolean: 1 suspended, 0 otherwise |
---|
| 650 | + * @file: Object of the file that is being read. |
---|
| 651 | + * @user_buf: User buffer that contains the string. |
---|
| 652 | + * @count: Length of user buffer |
---|
| 653 | + * @ppos: Offset within file object |
---|
509 | 654 | * |
---|
510 | | - * Return: 0 |
---|
| 655 | + * This function will return the current Scheduler state to Userspace |
---|
| 656 | + * Scheduler may exit that state by the time the state string is received |
---|
| 657 | + * by the Userspace. |
---|
| 658 | + * |
---|
| 659 | + * Return: 0 if Scheduler was found in an unexpected state, or the |
---|
| 660 | + * size of the state string if it was copied successfully to the |
---|
| 661 | + * User buffer or a negative value in case of an error. |
---|
511 | 662 | */ |
---|
512 | | -static int kbase_csf_debugfs_scheduler_suspend_get( |
---|
513 | | - void *data, u64 *val) |
---|
| 663 | +static ssize_t kbase_csf_debugfs_scheduler_state_get(struct file *file, |
---|
| 664 | + char __user *user_buf, size_t count, loff_t *ppos) |
---|
514 | 665 | { |
---|
515 | | - struct kbase_device *kbdev = data; |
---|
| 666 | + struct kbase_device *kbdev = file->private_data; |
---|
516 | 667 | struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler; |
---|
| 668 | + const char *state_string; |
---|
517 | 669 | |
---|
518 | 670 | kbase_csf_scheduler_lock(kbdev); |
---|
519 | | - *val = (scheduler->state == SCHED_SUSPENDED); |
---|
| 671 | + state_string = scheduler_state_to_string(kbdev, scheduler->state); |
---|
520 | 672 | kbase_csf_scheduler_unlock(kbdev); |
---|
521 | 673 | |
---|
522 | | - return 0; |
---|
| 674 | + if (!state_string) |
---|
| 675 | + count = 0; |
---|
| 676 | + |
---|
| 677 | + return simple_read_from_buffer(user_buf, count, ppos, |
---|
| 678 | + state_string, strlen(state_string)); |
---|
523 | 679 | } |
---|
524 | 680 | |
---|
525 | 681 | /** |
---|
526 | | - * kbase_csf_debugfs_scheduler_suspend_set() - set the scheduler to suspended. |
---|
| 682 | + * kbase_csf_debugfs_scheduler_state_set() - Set the state of scheduler. |
---|
527 | 683 | * |
---|
528 | | - * @data: The debugfs dentry private data, a pointer to kbase_device |
---|
529 | | - * @val: The debugfs input value, boolean: 1 suspend, 0 otherwise |
---|
| 684 | + * @file: Object of the file that is being written to. |
---|
| 685 | + * @ubuf: User buffer that contains the string. |
---|
| 686 | + * @count: Length of user buffer |
---|
| 687 | + * @ppos: Offset within file object |
---|
530 | 688 | * |
---|
531 | | - * Return: Negative value if already in requested state, 0 otherwise. |
---|
| 689 | + * This function will update the Scheduler state as per the state string |
---|
| 690 | + * passed by the Userspace. Scheduler may or may not remain in new state |
---|
| 691 | + * for long. |
---|
| 692 | + * |
---|
| 693 | + * Return: Negative value if the string doesn't correspond to a valid Scheduler |
---|
| 694 | + * state or if copy from user buffer failed, otherwise the length of |
---|
| 695 | + * the User buffer. |
---|
532 | 696 | */ |
---|
533 | | -static int kbase_csf_debugfs_scheduler_suspend_set( |
---|
534 | | - void *data, u64 val) |
---|
| 697 | +static ssize_t kbase_csf_debugfs_scheduler_state_set(struct file *file, |
---|
| 698 | + const char __user *ubuf, size_t count, loff_t *ppos) |
---|
535 | 699 | { |
---|
536 | | - struct kbase_device *kbdev = data; |
---|
537 | | - struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler; |
---|
538 | | - enum kbase_csf_scheduler_state state; |
---|
| 700 | + struct kbase_device *kbdev = file->private_data; |
---|
| 701 | + char buf[MAX_SCHED_STATE_STRING_LEN]; |
---|
| 702 | + ssize_t ret = count; |
---|
539 | 703 | |
---|
540 | | - kbase_csf_scheduler_lock(kbdev); |
---|
541 | | - state = scheduler->state; |
---|
542 | | - kbase_csf_scheduler_unlock(kbdev); |
---|
| 704 | + CSTD_UNUSED(ppos); |
---|
543 | 705 | |
---|
544 | | - if (val && (state != SCHED_SUSPENDED)) |
---|
| 706 | + count = min_t(size_t, sizeof(buf) - 1, count); |
---|
| 707 | + if (copy_from_user(buf, ubuf, count)) |
---|
| 708 | + return -EFAULT; |
---|
| 709 | + |
---|
| 710 | + buf[count] = 0; |
---|
| 711 | + |
---|
| 712 | + if (sysfs_streq(buf, "SUSPENDED")) |
---|
545 | 713 | kbase_csf_scheduler_pm_suspend(kbdev); |
---|
546 | | - else if (!val && (state == SCHED_SUSPENDED)) |
---|
547 | | - kbase_csf_scheduler_pm_resume(kbdev); |
---|
548 | | - else |
---|
549 | | - return -1; |
---|
| 714 | +#ifdef KBASE_PM_RUNTIME |
---|
| 715 | + else if (sysfs_streq(buf, "SLEEPING")) |
---|
| 716 | + kbase_csf_scheduler_force_sleep(kbdev); |
---|
| 717 | +#endif |
---|
| 718 | + else if (sysfs_streq(buf, "INACTIVE")) |
---|
| 719 | + kbase_csf_scheduler_force_wakeup(kbdev); |
---|
| 720 | + else { |
---|
| 721 | + dev_dbg(kbdev->dev, "Bad scheduler state %s", buf); |
---|
| 722 | + ret = -EINVAL; |
---|
| 723 | + } |
---|
550 | 724 | |
---|
551 | | - return 0; |
---|
| 725 | + return ret; |
---|
552 | 726 | } |
---|
553 | 727 | |
---|
554 | | -DEFINE_SIMPLE_ATTRIBUTE(kbasep_csf_debugfs_scheduler_suspend_fops, |
---|
555 | | - &kbase_csf_debugfs_scheduler_suspend_get, |
---|
556 | | - &kbase_csf_debugfs_scheduler_suspend_set, |
---|
557 | | - "%llu\n"); |
---|
| 728 | +static const struct file_operations kbasep_csf_debugfs_scheduler_state_fops = { |
---|
| 729 | + .owner = THIS_MODULE, |
---|
| 730 | + .read = kbase_csf_debugfs_scheduler_state_get, |
---|
| 731 | + .write = kbase_csf_debugfs_scheduler_state_set, |
---|
| 732 | + .open = simple_open, |
---|
| 733 | + .llseek = default_llseek, |
---|
| 734 | +}; |
---|
558 | 735 | |
---|
559 | 736 | void kbase_csf_debugfs_init(struct kbase_device *kbdev) |
---|
560 | 737 | { |
---|
.. | .. |
---|
568 | 745 | debugfs_create_file("scheduling_timer_kick", 0200, |
---|
569 | 746 | kbdev->mali_debugfs_directory, kbdev, |
---|
570 | 747 | &kbasep_csf_debugfs_scheduling_timer_kick_fops); |
---|
571 | | - debugfs_create_file("scheduler_suspend", 0644, |
---|
| 748 | + debugfs_create_file("scheduler_state", 0644, |
---|
572 | 749 | kbdev->mali_debugfs_directory, kbdev, |
---|
573 | | - &kbasep_csf_debugfs_scheduler_suspend_fops); |
---|
| 750 | + &kbasep_csf_debugfs_scheduler_state_fops); |
---|
574 | 751 | |
---|
575 | 752 | kbase_csf_tl_reader_debugfs_init(kbdev); |
---|
576 | | - kbase_csf_firmware_trace_buffer_debugfs_init(kbdev); |
---|
577 | 753 | } |
---|
578 | 754 | |
---|
579 | 755 | #else |
---|