| .. | .. |
|---|
| 1 | 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
|---|
| 2 | 2 | /* |
|---|
| 3 | 3 | * |
|---|
| 4 | | - * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved. |
|---|
| 4 | + * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved. |
|---|
| 5 | 5 | * |
|---|
| 6 | 6 | * This program is free software and is provided to you under the terms of the |
|---|
| 7 | 7 | * GNU General Public License version 2 as published by the Free Software |
|---|
| .. | .. |
|---|
| 35 | 35 | /** |
|---|
| 36 | 36 | * kbase_pm_dev_idle - The GPU is idle. |
|---|
| 37 | 37 | * |
|---|
| 38 | | - * The OS may choose to turn off idle devices |
|---|
| 39 | | - * |
|---|
| 40 | 38 | * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 39 | + * |
|---|
| 40 | + * The OS may choose to turn off idle devices |
|---|
| 41 | 41 | */ |
|---|
| 42 | 42 | void kbase_pm_dev_idle(struct kbase_device *kbdev); |
|---|
| 43 | 43 | |
|---|
| 44 | 44 | /** |
|---|
| 45 | 45 | * kbase_pm_dev_activate - The GPU is active. |
|---|
| 46 | 46 | * |
|---|
| 47 | | - * The OS should avoid opportunistically turning off the GPU while it is active |
|---|
| 48 | | - * |
|---|
| 49 | 47 | * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 48 | + * |
|---|
| 49 | + * The OS should avoid opportunistically turning off the GPU while it is active |
|---|
| 50 | 50 | */ |
|---|
| 51 | 51 | void kbase_pm_dev_activate(struct kbase_device *kbdev); |
|---|
| 52 | 52 | |
|---|
| .. | .. |
|---|
| 54 | 54 | * kbase_pm_get_present_cores - Get details of the cores that are present in |
|---|
| 55 | 55 | * the device. |
|---|
| 56 | 56 | * |
|---|
| 57 | | - * This function can be called by the active power policy to return a bitmask of |
|---|
| 58 | | - * the cores (of a specified type) present in the GPU device and also a count of |
|---|
| 59 | | - * the number of cores. |
|---|
| 60 | | - * |
|---|
| 61 | 57 | * @kbdev: The kbase device structure for the device (must be a valid |
|---|
| 62 | 58 | * pointer) |
|---|
| 63 | 59 | * @type: The type of core (see the enum kbase_pm_core_type enumeration) |
|---|
| 60 | + * |
|---|
| 61 | + * This function can be called by the active power policy to return a bitmask of |
|---|
| 62 | + * the cores (of a specified type) present in the GPU device and also a count of |
|---|
| 63 | + * the number of cores. |
|---|
| 64 | 64 | * |
|---|
| 65 | 65 | * Return: The bit mask of cores present |
|---|
| 66 | 66 | */ |
|---|
| .. | .. |
|---|
| 71 | 71 | * kbase_pm_get_active_cores - Get details of the cores that are currently |
|---|
| 72 | 72 | * active in the device. |
|---|
| 73 | 73 | * |
|---|
| 74 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 75 | + * @type: The type of core (see the enum kbase_pm_core_type enumeration) |
|---|
| 76 | + * |
|---|
| 74 | 77 | * This function can be called by the active power policy to return a bitmask of |
|---|
| 75 | 78 | * the cores (of a specified type) that are actively processing work (i.e. |
|---|
| 76 | 79 | * turned on *and* busy). |
|---|
| 77 | | - * |
|---|
| 78 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 79 | | - * @type: The type of core (see the enum kbase_pm_core_type enumeration) |
|---|
| 80 | 80 | * |
|---|
| 81 | 81 | * Return: The bit mask of active cores |
|---|
| 82 | 82 | */ |
|---|
| .. | .. |
|---|
| 87 | 87 | * kbase_pm_get_trans_cores - Get details of the cores that are currently |
|---|
| 88 | 88 | * transitioning between power states. |
|---|
| 89 | 89 | * |
|---|
| 90 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 91 | + * @type: The type of core (see the enum kbase_pm_core_type enumeration) |
|---|
| 92 | + * |
|---|
| 90 | 93 | * This function can be called by the active power policy to return a bitmask of |
|---|
| 91 | 94 | * the cores (of a specified type) that are currently transitioning between |
|---|
| 92 | 95 | * power states. |
|---|
| 93 | | - * |
|---|
| 94 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 95 | | - * @type: The type of core (see the enum kbase_pm_core_type enumeration) |
|---|
| 96 | 96 | * |
|---|
| 97 | 97 | * Return: The bit mask of transitioning cores |
|---|
| 98 | 98 | */ |
|---|
| .. | .. |
|---|
| 103 | 103 | * kbase_pm_get_ready_cores - Get details of the cores that are currently |
|---|
| 104 | 104 | * powered and ready for jobs. |
|---|
| 105 | 105 | * |
|---|
| 106 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 107 | + * @type: The type of core (see the enum kbase_pm_core_type enumeration) |
|---|
| 108 | + * |
|---|
| 106 | 109 | * This function can be called by the active power policy to return a bitmask of |
|---|
| 107 | 110 | * the cores (of a specified type) that are powered and ready for jobs (they may |
|---|
| 108 | 111 | * or may not be currently executing jobs). |
|---|
| 109 | | - * |
|---|
| 110 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 111 | | - * @type: The type of core (see the enum kbase_pm_core_type enumeration) |
|---|
| 112 | 112 | * |
|---|
| 113 | 113 | * Return: The bit mask of ready cores |
|---|
| 114 | 114 | */ |
|---|
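Taken together, the four queries above give an active power policy a consistent view of core state. The sketch below is illustrative only and not part of this patch; it assumes the `KBASE_PM_CORE_SHADER` enumerator from `enum kbase_pm_core_type` and a `u64` bitmask return type for the query functions.

```c
/*
 * Illustrative sketch only (not part of the patch): how an active power
 * policy might combine the queries documented above. KBASE_PM_CORE_SHADER
 * and the u64 return type are assumptions based on enum kbase_pm_core_type.
 */
static u64 example_shader_cores_not_yet_ready(struct kbase_device *kbdev)
{
        u64 present = kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER);
        u64 ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
        u64 trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_SHADER);

        /* Cores that exist but are neither ready nor currently transitioning */
        return present & ~ready & ~trans;
}
```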
| .. | .. |
|---|
| 119 | 119 | * kbase_pm_clock_on - Turn the clock for the device on, and enable device |
|---|
| 120 | 120 | * interrupts. |
|---|
| 121 | 121 | * |
|---|
| 122 | | - * This function can be used by a power policy to turn the clock for the GPU on. |
|---|
| 123 | | - * It should be modified during integration to perform the necessary actions to |
|---|
| 124 | | - * ensure that the GPU is fully powered and clocked. |
|---|
| 125 | | - * |
|---|
| 126 | 122 | * @kbdev: The kbase device structure for the device (must be a valid |
|---|
| 127 | 123 | * pointer) |
|---|
| 128 | 124 | * @is_resume: true if clock on due to resume after suspend, false otherwise |
|---|
| 125 | + * |
|---|
| 126 | + * This function can be used by a power policy to turn the clock for the GPU on. |
|---|
| 127 | + * It should be modified during integration to perform the necessary actions to |
|---|
| 128 | + * ensure that the GPU is fully powered and clocked. |
|---|
| 129 | 129 | */ |
|---|
| 130 | 130 | void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume); |
|---|
| 131 | 131 | |
|---|
| .. | .. |
|---|
| 133 | 133 | * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the |
|---|
| 134 | 134 | * device off. |
|---|
| 135 | 135 | * |
|---|
| 136 | + * @kbdev: The kbase device structure for the device (must be a valid |
|---|
| 137 | + * pointer) |
|---|
| 138 | + * |
|---|
| 136 | 139 | * This function can be used by a power policy to turn the clock for the GPU |
|---|
| 137 | 140 | * off. It should be modified during integration to perform the necessary |
|---|
| 138 | 141 | * actions to turn the clock off (if this is possible in the integration). |
|---|
| 139 | 142 | * |
|---|
| 140 | | - * @kbdev: The kbase device structure for the device (must be a valid |
|---|
| 141 | | - * pointer) |
|---|
| 143 | + * If runtime PM is enabled and @power_runtime_gpu_idle_callback is used |
|---|
| 144 | + * then this function would usually be invoked from the runtime suspend |
|---|
| 145 | + * callback function. |
|---|
| 142 | 146 | * |
|---|
| 143 | 147 | * Return: true if clock was turned off, or |
|---|
| 144 | 148 | * false if clock can not be turned off due to pending page/bus fault |
|---|
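A minimal sketch of how a caller in the PM backend might honour the return value documented above; the helper name and the `dev_dbg()` message are illustrative, not part of the driver.

```c
/* Illustrative sketch: kbase_pm_clock_off() reports false while a page/bus
 * fault is still pending, in which case the clock has to stay on for now.
 */
static void example_try_clock_off(struct kbase_device *kbdev)
{
        if (!kbase_pm_clock_off(kbdev))
                dev_dbg(kbdev->dev, "clock off deferred: page/bus fault pending\n");
}
```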
| .. | .. |
|---|
| 149 | 153 | /** |
|---|
| 150 | 154 | * kbase_pm_enable_interrupts - Enable interrupts on the device. |
|---|
| 151 | 155 | * |
|---|
| 152 | | - * Interrupts are also enabled after a call to kbase_pm_clock_on(). |
|---|
| 153 | | - * |
|---|
| 154 | 156 | * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 157 | + * |
|---|
| 158 | + * Interrupts are also enabled after a call to kbase_pm_clock_on(). |
|---|
| 155 | 159 | */ |
|---|
| 156 | 160 | void kbase_pm_enable_interrupts(struct kbase_device *kbdev); |
|---|
| 157 | 161 | |
|---|
| 158 | 162 | /** |
|---|
| 159 | 163 | * kbase_pm_disable_interrupts - Disable interrupts on the device. |
|---|
| 160 | 164 | * |
|---|
| 165 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 166 | + * |
|---|
| 161 | 167 | * This prevents delivery of Power Management interrupts to the CPU so that |
|---|
| 162 | 168 | * kbase_pm_update_state() will not be called from the IRQ handler |
|---|
| 163 | 169 | * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called. |
|---|
| 164 | 170 | * |
|---|
| 165 | 171 | * Interrupts are also disabled after a call to kbase_pm_clock_off(). |
|---|
| 166 | | - * |
|---|
| 167 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 168 | 172 | */ |
|---|
| 169 | 173 | void kbase_pm_disable_interrupts(struct kbase_device *kbdev); |
|---|
| 170 | 174 | |
|---|
| .. | .. |
|---|
| 172 | 176 | * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts() |
|---|
| 173 | 177 | * that does not take the hwaccess_lock |
|---|
| 174 | 178 | * |
|---|
| 175 | | - * Caller must hold the hwaccess_lock. |
|---|
| 176 | | - * |
|---|
| 177 | 179 | * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 180 | + * |
|---|
| 181 | + * Caller must hold the hwaccess_lock. |
|---|
| 178 | 182 | */ |
|---|
| 179 | 183 | void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev); |
|---|
| 180 | 184 | |
|---|
| .. | .. |
|---|
| 193 | 197 | |
|---|
| 194 | 198 | /** |
|---|
| 195 | 199 | * kbase_pm_reset_done - The GPU has been reset successfully. |
|---|
| 200 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 196 | 201 | * |
|---|
| 197 | 202 | * This function must be called by the GPU interrupt handler when the |
|---|
| 198 | 203 | * RESET_COMPLETED bit is set. It signals to the power management initialization |
|---|
| 199 | 204 | * code that the GPU has been successfully reset. |
|---|
| 200 | | - * |
|---|
| 201 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 202 | 205 | */ |
|---|
| 203 | 206 | void kbase_pm_reset_done(struct kbase_device *kbdev); |
|---|
| 204 | 207 | |
|---|
| .. | .. |
|---|
| 206 | 209 | /** |
|---|
| 207 | 210 | * kbase_pm_wait_for_desired_state - Wait for the desired power state to be |
|---|
| 208 | 211 | * reached |
|---|
| 212 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 209 | 213 | * |
|---|
| 210 | 214 | * Wait for the L2 and MCU state machines to reach the states corresponding |
|---|
| 211 | 215 | * to the values of 'kbase_pm_is_l2_desired' and 'kbase_pm_is_mcu_desired'. |
|---|
| .. | .. |
|---|
| 220 | 224 | * power off in progress and kbase_pm_context_active() was called instead of |
|---|
| 221 | 225 | * kbase_csf_scheduler_pm_active(). |
|---|
| 222 | 226 | * |
|---|
| 223 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 224 | | - * |
|---|
| 225 | 227 | * Return: 0 on success, error code on error |
|---|
| 226 | 228 | */ |
|---|
| 227 | 229 | int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev); |
|---|
| .. | .. |
|---|
| 229 | 231 | /** |
|---|
| 230 | 232 | * kbase_pm_wait_for_desired_state - Wait for the desired power state to be |
|---|
| 231 | 233 | * reached |
|---|
| 234 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 232 | 235 | * |
|---|
| 233 | 236 | * Wait for the L2 and shader power state machines to reach the states |
|---|
| 234 | 237 | * corresponding to the values of 'l2_desired' and 'shaders_desired'. |
|---|
| .. | .. |
|---|
| 242 | 245 | * NOTE: This may not wait until the correct state is reached if there is a |
|---|
| 243 | 246 | * power off in progress. To correctly wait for the desired state the caller |
|---|
| 244 | 247 | * must ensure that this is not the case by, for example, calling |
|---|
| 245 | | - * kbase_pm_wait_for_poweroff_complete() |
|---|
| 246 | | - * |
|---|
| 247 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 248 | + * kbase_pm_wait_for_poweroff_work_complete() |
|---|
| 248 | 249 | * |
|---|
| 249 | 250 | * Return: 0 on success, error code on error |
|---|
| 250 | 251 | */ |
|---|
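The note above about a power off in progress is easy to get wrong, so here is a hedged sketch of the recommended ordering; the wrapper name is illustrative.

```c
/* Illustrative sketch: flush any in-flight power-off work first, then wait
 * for the L2/shader state machines to settle.
 */
static int example_wait_for_stable_power_state(struct kbase_device *kbdev)
{
        kbase_pm_wait_for_poweroff_work_complete(kbdev);

        /* 0 on success, error code otherwise */
        return kbase_pm_wait_for_desired_state(kbdev);
}
```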
| .. | .. |
|---|
| 254 | 255 | /** |
|---|
| 255 | 256 | * kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on |
|---|
| 256 | 257 | * |
|---|
| 258 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 259 | + * |
|---|
| 257 | 260 | * Wait for the L2 to be powered on, and for the L2 and the state machines of |
|---|
| 258 | 261 | * its dependent stack components to stabilise. |
|---|
| 259 | 262 | * |
|---|
| .. | .. |
|---|
| 262 | 265 | * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock, |
|---|
| 263 | 266 | * because this function will take that lock itself. |
|---|
| 264 | 267 | * |
|---|
| 265 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 266 | | - * |
|---|
| 267 | 268 | * Return: 0 on success, error code on error |
|---|
| 268 | 269 | */ |
|---|
| 269 | 270 | int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev); |
|---|
| 271 | + |
|---|
| 272 | +#if MALI_USE_CSF |
|---|
| 273 | +/** |
|---|
| 274 | + * kbase_pm_wait_for_cores_down_scale - Wait for the downscaling of shader cores |
|---|
| 275 | + * |
|---|
| 276 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 277 | + * |
|---|
| 278 | + * This function can be called to ensure that the downscaling of cores is |
|---|
| 279 | + * effectively complete and it would be safe to lower the voltage. |
|---|
| 280 | + * The function assumes that caller had exercised the MCU state machine for the |
|---|
| 281 | + * downscale request through the kbase_pm_update_state() function. |
|---|
| 282 | + * |
|---|
| 283 | + * This function needs to be used by the caller to safely wait for the completion |
|---|
| 284 | + * of the downscale request, instead of kbase_pm_wait_for_desired_state(). |
|---|
| 285 | + * The downscale request would trigger a state change in MCU state machine |
|---|
| 286 | + * and so when MCU reaches the stable ON state, it can be inferred that |
|---|
| 287 | + * downscaling is complete. But it has been observed that the wake up of the |
|---|
| 288 | + * waiting thread can get delayed by a few milliseconds and by the time the |
|---|
| 289 | + * thread wakes up the power down transition could have started (after the |
|---|
| 290 | + * completion of downscale request). |
|---|
| 291 | + * On the completion of power down transition another wake up signal would be |
|---|
| 292 | + * sent, but again by the time the thread wakes up the power up transition can begin. |
|---|
| 293 | + * And the power up transition could then get blocked inside the platform specific |
|---|
| 294 | + * callback_power_on() function due to the thread that called into Kbase (from the |
|---|
| 295 | + * platform specific code) to perform the downscaling and then ended up waiting |
|---|
| 296 | + * for the completion of the downscale request. |
|---|
| 297 | + * |
|---|
| 298 | + * Return: 0 on success, error code on error or remaining jiffies on timeout. |
|---|
| 299 | + */ |
|---|
| 300 | +int kbase_pm_wait_for_cores_down_scale(struct kbase_device *kbdev); |
|---|
| 301 | +#endif |
|---|
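As a usage illustration of the new interface above, the sketch below assumes the caller has already driven the MCU state machine for the down-scale request via kbase_pm_update_state(); the wrapper name and the warning message are not from the driver.

```c
#if MALI_USE_CSF
/* Illustrative sketch: make sure a shader-core down-scale has taken effect
 * before the platform code lowers the GPU voltage.
 */
static int example_wait_before_lowering_voltage(struct kbase_device *kbdev)
{
        int ret = kbase_pm_wait_for_cores_down_scale(kbdev);

        if (ret)
                dev_warn(kbdev->dev, "core down-scale wait returned %d\n", ret);

        return ret;
}
#endif
```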
| 270 | 302 | |
|---|
| 271 | 303 | /** |
|---|
| 272 | 304 | * kbase_pm_update_dynamic_cores_onoff - Update the L2 and shader power state |
|---|
| 273 | 305 | * machines after changing shader core |
|---|
| 274 | 306 | * availability |
|---|
| 307 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 275 | 308 | * |
|---|
| 276 | 309 | * It can be called in any status, so need to check the l2 and shader core |
|---|
| 277 | 310 | * power status in this function or it will break shader/l2 state machine |
|---|
| 278 | 311 | * |
|---|
| 279 | 312 | * Caller must hold hwaccess_lock |
|---|
| 280 | | - * |
|---|
| 281 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 282 | 313 | */ |
|---|
| 283 | 314 | void kbase_pm_update_dynamic_cores_onoff(struct kbase_device *kbdev); |
|---|
| 284 | 315 | |
|---|
| .. | .. |
|---|
| 301 | 332 | * kbase_pm_state_machine_init - Initialize the state machines, primarily the |
|---|
| 302 | 333 | * shader poweroff timer |
|---|
| 303 | 334 | * @kbdev: Device pointer |
|---|
| 335 | + * |
|---|
| 336 | + * Return: 0 on success, error code on error |
|---|
| 304 | 337 | */ |
|---|
| 305 | 338 | int kbase_pm_state_machine_init(struct kbase_device *kbdev); |
|---|
| 306 | 339 | |
|---|
| .. | .. |
|---|
| 314 | 347 | * kbase_pm_update_cores_state - Update the desired state of shader cores from |
|---|
| 315 | 348 | * the Power Policy, and begin any power |
|---|
| 316 | 349 | * transitions. |
|---|
| 350 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 317 | 351 | * |
|---|
| 318 | 352 | * This function will update the desired_xx_state members of |
|---|
| 319 | 353 | * struct kbase_pm_device_data by calling into the current Power Policy. It will |
|---|
| 320 | 354 | * then begin power transitions to make the hardware achieve the desired shader |
|---|
| 321 | 355 | * core state. |
|---|
| 322 | | - * |
|---|
| 323 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 324 | 356 | */ |
|---|
| 325 | 357 | void kbase_pm_update_cores_state(struct kbase_device *kbdev); |
|---|
| 326 | 358 | |
|---|
| 327 | 359 | /** |
|---|
| 328 | 360 | * kbasep_pm_metrics_init - Initialize the metrics gathering framework. |
|---|
| 361 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 329 | 362 | * |
|---|
| 330 | 363 | * This must be called before other metric gathering APIs are called. |
|---|
| 331 | 364 | * |
|---|
| 332 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 333 | 365 | * |
|---|
| 334 | 366 | * Return: 0 on success, error code on error |
|---|
| 335 | 367 | */ |
|---|
| .. | .. |
|---|
| 337 | 369 | |
|---|
| 338 | 370 | /** |
|---|
| 339 | 371 | * kbasep_pm_metrics_term - Terminate the metrics gathering framework. |
|---|
| 372 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 340 | 373 | * |
|---|
| 341 | 374 | * This must be called when metric gathering is no longer required. It is an |
|---|
| 342 | 375 | * error to call any metrics gathering function (other than |
|---|
| 343 | 376 | * kbasep_pm_metrics_init()) after calling this function. |
|---|
| 344 | | - * |
|---|
| 345 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 346 | 377 | */ |
|---|
| 347 | 378 | void kbasep_pm_metrics_term(struct kbase_device *kbdev); |
|---|
| 348 | 379 | |
|---|
| 349 | 380 | /** |
|---|
| 350 | 381 | * kbase_pm_report_vsync - Function to be called by the frame buffer driver to |
|---|
| 351 | 382 | * update the vsync metric. |
|---|
| 383 | + * @kbdev: The kbase device structure for the device (must be a |
|---|
| 384 | + * valid pointer) |
|---|
| 385 | + * @buffer_updated: True if the buffer has been updated on this VSync, |
|---|
| 386 | + * false otherwise |
|---|
| 352 | 387 | * |
|---|
| 353 | 388 | * This function should be called by the frame buffer driver to update whether |
|---|
| 354 | 389 | * the system is hitting the vsync target or not. buffer_updated should be true |
|---|
| 355 | 390 | * if the vsync corresponded with a new frame being displayed, otherwise it |
|---|
| 356 | 391 | * should be false. This function does not need to be called every vsync, but |
|---|
| 357 | 392 | * only when the value of @buffer_updated differs from a previous call. |
|---|
| 358 | | - * |
|---|
| 359 | | - * @kbdev: The kbase device structure for the device (must be a |
|---|
| 360 | | - * valid pointer) |
|---|
| 361 | | - * @buffer_updated: True if the buffer has been updated on this VSync, |
|---|
| 362 | | - * false otherwise |
|---|
| 363 | 393 | */ |
|---|
| 364 | 394 | void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated); |
|---|
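Since the comment above stresses that the call is only needed when the value changes, a hedged sketch of a frame buffer driver hook might look like this; the wrapper and its state tracking are illustrative.

```c
/* Illustrative sketch: only report to kbase when the "buffer updated"
 * state actually changes between vsyncs.
 */
static void example_on_vsync(struct kbase_device *kbdev, bool frame_was_new)
{
        static bool last_reported;

        if (frame_was_new != last_reported) {
                kbase_pm_report_vsync(kbdev, frame_was_new ? 1 : 0);
                last_reported = frame_was_new;
        }
}
```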
| 365 | 395 | |
|---|
| .. | .. |
|---|
| 377 | 407 | /** |
|---|
| 378 | 408 | * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is |
|---|
| 379 | 409 | * needed |
|---|
| 410 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 380 | 411 | * |
|---|
| 381 | 412 | * If the caller is the first caller then the GPU cycle counters will be enabled |
|---|
| 382 | 413 | * along with the l2 cache |
|---|
| .. | .. |
|---|
| 384 | 415 | * The GPU must be powered when calling this function (i.e. |
|---|
| 385 | 416 | * kbase_pm_context_active() must have been called). |
|---|
| 386 | 417 | * |
|---|
| 387 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 388 | 418 | */ |
|---|
| 389 | 419 | void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev); |
|---|
| 390 | 420 | |
|---|
| 391 | 421 | /** |
|---|
| 392 | 422 | * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is |
|---|
| 393 | 423 | * needed (l2 cache already on) |
|---|
| 424 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 394 | 425 | * |
|---|
| 395 | 426 | * This is a version of the above function |
|---|
| 396 | 427 | * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the |
|---|
| .. | .. |
|---|
| 401 | 432 | * The GPU must be powered when calling this function (i.e. |
|---|
| 402 | 433 | * kbase_pm_context_active() must have been called) and the l2 cache must be |
|---|
| 403 | 434 | * powered on. |
|---|
| 404 | | - * |
|---|
| 405 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 406 | 435 | */ |
|---|
| 407 | 436 | void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev); |
|---|
| 408 | 437 | |
|---|
| 409 | 438 | /** |
|---|
| 410 | 439 | * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no |
|---|
| 411 | 440 | * longer in use |
|---|
| 441 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 412 | 442 | * |
|---|
| 413 | 443 | * If the caller is the last caller then the GPU cycle counters will be |
|---|
| 414 | 444 | * disabled. A request must have been made before a call to this. |
|---|
| .. | .. |
|---|
| 416 | 446 | * Caller must not hold the hwaccess_lock, as it will be taken in this function. |
|---|
| 417 | 447 | * If the caller is already holding this lock then |
|---|
| 418 | 448 | * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead. |
|---|
| 419 | | - * |
|---|
| 420 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 421 | 449 | */ |
|---|
| 422 | 450 | void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev); |
|---|
| 423 | 451 | |
|---|
| 424 | 452 | /** |
|---|
| 425 | 453 | * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter() |
|---|
| 426 | 454 | * that does not take hwaccess_lock |
|---|
| 455 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 427 | 456 | * |
|---|
| 428 | 457 | * Caller must hold the hwaccess_lock. |
|---|
| 429 | | - * |
|---|
| 430 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 431 | 458 | */ |
|---|
| 432 | 459 | void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev); |
|---|
| 433 | 460 | |
|---|
| 434 | 461 | /** |
|---|
| 435 | | - * kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to |
|---|
| 436 | | - * complete |
|---|
| 462 | + * kbase_pm_wait_for_poweroff_work_complete - Wait for the poweroff workqueue to |
|---|
| 463 | + * complete |
|---|
| 437 | 464 | * |
|---|
| 438 | 465 | * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 466 | + * |
|---|
| 467 | + * This function effectively just waits for the @gpu_poweroff_wait_work work |
|---|
| 468 | + * item to complete, if it was enqueued. GPU may not have been powered down |
|---|
| 469 | + * before this function returns. |
|---|
| 439 | 470 | */ |
|---|
| 440 | | -void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev); |
|---|
| 471 | +void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev); |
|---|
| 472 | + |
|---|
| 473 | +/** |
|---|
| 474 | + * kbase_pm_wait_for_gpu_power_down - Wait for the GPU power down to complete |
|---|
| 475 | + * |
|---|
| 476 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 477 | + * |
|---|
| 478 | + * This function waits for the actual gpu power down to complete. |
|---|
| 479 | + */ |
|---|
| 480 | +void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev); |
|---|
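The two waits above give different guarantees, which the following sketch (illustrative only) makes explicit.

```c
/* Illustrative sketch: the first call only guarantees that the
 * gpu_poweroff_wait_work item has finished; the second that the GPU has
 * actually been powered down.
 */
static void example_wait_for_full_power_down(struct kbase_device *kbdev)
{
        kbase_pm_wait_for_poweroff_work_complete(kbdev);
        kbase_pm_wait_for_gpu_power_down(kbdev);
}
```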
| 441 | 481 | |
|---|
| 442 | 482 | /** |
|---|
| 443 | 483 | * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device |
|---|
| 484 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 444 | 485 | * |
|---|
| 445 | 486 | * Setup the power management callbacks and initialize/enable the runtime-pm |
|---|
| 446 | 487 | * for the Mali GPU platform device, using the callback function. This must be |
|---|
| 447 | 488 | * called before the kbase_pm_register_access_enable() function. |
|---|
| 448 | 489 | * |
|---|
| 449 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 490 | + * Return: 0 on success, error code on error |
|---|
| 450 | 491 | */ |
|---|
| 451 | 492 | int kbase_pm_runtime_init(struct kbase_device *kbdev); |
|---|
| 452 | 493 | |
|---|
| .. | .. |
|---|
| 459 | 500 | |
|---|
| 460 | 501 | /** |
|---|
| 461 | 502 | * kbase_pm_register_access_enable - Enable access to GPU registers |
|---|
| 503 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 462 | 504 | * |
|---|
| 463 | 505 | * Enables access to the GPU registers before power management has powered up |
|---|
| 464 | 506 | * the GPU with kbase_pm_powerup(). |
|---|
| .. | .. |
|---|
| 469 | 511 | * |
|---|
| 470 | 512 | * This should only be used before power management is powered up with |
|---|
| 471 | 513 | * kbase_pm_powerup() |
|---|
| 472 | | - * |
|---|
| 473 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 474 | 514 | */ |
|---|
| 475 | 515 | void kbase_pm_register_access_enable(struct kbase_device *kbdev); |
|---|
| 476 | 516 | |
|---|
| 477 | 517 | /** |
|---|
| 478 | 518 | * kbase_pm_register_access_disable - Disable early register access |
|---|
| 519 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 479 | 520 | * |
|---|
| 480 | 521 | * Disables access to the GPU registers enabled earlier by a call to |
|---|
| 481 | 522 | * kbase_pm_register_access_enable(). |
|---|
| .. | .. |
|---|
| 486 | 527 | * |
|---|
| 487 | 528 | * This should only be used before power management is powered up with |
|---|
| 488 | 529 | * kbase_pm_powerup() |
|---|
| 489 | | - * |
|---|
| 490 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 491 | 530 | */ |
|---|
| 492 | 531 | void kbase_pm_register_access_disable(struct kbase_device *kbdev); |
|---|
| 493 | 532 | |
|---|
| .. | .. |
|---|
| 498 | 537 | /** |
|---|
| 499 | 538 | * kbase_pm_metrics_is_active - Check if the power management metrics |
|---|
| 500 | 539 | * collection is active. |
|---|
| 540 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 501 | 541 | * |
|---|
| 502 | 542 | * Note that this returns if the power management metrics collection was |
|---|
| 503 | 543 | * active at the time of calling, it is possible that after the call the metrics |
|---|
| .. | .. |
|---|
| 505 | 545 | * |
|---|
| 506 | 546 | * The caller must handle the consequence that the state may have changed. |
|---|
| 507 | 547 | * |
|---|
| 508 | | - * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 509 | 548 | * Return: true if metrics collection was active else false. |
|---|
| 510 | 549 | */ |
|---|
| 511 | 550 | bool kbase_pm_metrics_is_active(struct kbase_device *kbdev); |
|---|
| .. | .. |
|---|
| 541 | 580 | /** |
|---|
| 542 | 581 | * kbase_platform_dvfs_event - Report utilisation to DVFS code for CSF GPU |
|---|
| 543 | 582 | * |
|---|
| 544 | | - * Function provided by platform specific code when DVFS is enabled to allow |
|---|
| 545 | | - * the power management metrics system to report utilisation. |
|---|
| 546 | | - * |
|---|
| 547 | 583 | * @kbdev: The kbase device structure for the device (must be a |
|---|
| 548 | 584 | * valid pointer) |
|---|
| 549 | 585 | * @utilisation: The current calculated utilisation by the metrics system. |
|---|
| 586 | + * |
|---|
| 587 | + * Function provided by platform specific code when DVFS is enabled to allow |
|---|
| 588 | + * the power management metrics system to report utilisation. |
|---|
| 589 | + * |
|---|
| 550 | 590 | * Return: Returns 0 on failure and non zero on success. |
|---|
| 551 | 591 | */ |
|---|
| 552 | 592 | int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation); |
|---|
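kbase_platform_dvfs_event() is supplied by platform-specific code, so a hedged sketch of a CSF-flavoured implementation is shown below; it matches the two-argument declaration above, while the JM build uses the four-argument variant documented next. The dev_dbg() placeholder stands in for a real governor hook.

```c
/* Illustrative sketch of a platform-provided implementation (CSF build). */
int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation)
{
        /* A real implementation would feed the utilisation value computed by
         * the metrics system into the platform's DVFS governor here.
         */
        dev_dbg(kbdev->dev, "GPU utilisation: %u\n", utilisation);

        return 1; /* non-zero indicates success, as documented above */
}
```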
| .. | .. |
|---|
| 554 | 594 | /** |
|---|
| 555 | 595 | * kbase_platform_dvfs_event - Report utilisation to DVFS code for JM GPU |
|---|
| 556 | 596 | * |
|---|
| 557 | | - * Function provided by platform specific code when DVFS is enabled to allow |
|---|
| 558 | | - * the power management metrics system to report utilisation. |
|---|
| 559 | | - * |
|---|
| 560 | 597 | * @kbdev: The kbase device structure for the device (must be a |
|---|
| 561 | 598 | * valid pointer) |
|---|
| 562 | 599 | * @utilisation: The current calculated utilisation by the metrics system. |
|---|
| 563 | 600 | * @util_gl_share: The current calculated gl share of utilisation. |
|---|
| 564 | 601 | * @util_cl_share: The current calculated cl share of utilisation per core |
|---|
| 565 | 602 | * group. |
|---|
| 603 | + * Function provided by platform specific code when DVFS is enabled to allow |
|---|
| 604 | + * the power management metrics system to report utilisation. |
|---|
| 605 | + * |
|---|
| 566 | 606 | * Return: Returns 0 on failure and non zero on success. |
|---|
| 567 | 607 | */ |
|---|
| 568 | 608 | int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation, |
|---|
| .. | .. |
|---|
| 635 | 675 | */ |
|---|
| 636 | 676 | void kbase_pm_reset_complete(struct kbase_device *kbdev); |
|---|
| 637 | 677 | |
|---|
| 678 | +#if !MALI_USE_CSF |
|---|
| 638 | 679 | /** |
|---|
| 639 | 680 | * kbase_pm_protected_override_enable - Enable the protected mode override |
|---|
| 640 | 681 | * @kbdev: Device pointer |
|---|
| .. | .. |
|---|
| 707 | 748 | * to enter protected mode. |
|---|
| 708 | 749 | */ |
|---|
| 709 | 750 | void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev); |
|---|
| 751 | +#endif |
|---|
| 710 | 752 | |
|---|
| 711 | 753 | /* If true, the driver should explicitly control corestack power management, |
|---|
| 712 | 754 | * instead of relying on the Power Domain Controller. |
|---|
| .. | .. |
|---|
| 735 | 777 | * Return: true if MCU needs to be enabled. |
|---|
| 736 | 778 | */ |
|---|
| 737 | 779 | bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev); |
|---|
| 780 | + |
|---|
| 781 | +/** |
|---|
| 782 | + * kbase_pm_is_mcu_inactive - Check if the MCU is inactive (i.e. either |
|---|
| 783 | + * it is disabled or it is in sleep) |
|---|
| 784 | + * |
|---|
| 785 | + * @kbdev: kbase device |
|---|
| 786 | + * @state: state of the MCU state machine. |
|---|
| 787 | + * |
|---|
| 788 | + * This function must be called with hwaccess_lock held. |
|---|
| 789 | + * L2 cache can be turned off if this function returns true. |
|---|
| 790 | + * |
|---|
| 791 | + * Return: true if MCU is inactive |
|---|
| 792 | + */ |
|---|
| 793 | +bool kbase_pm_is_mcu_inactive(struct kbase_device *kbdev, |
|---|
| 794 | + enum kbase_mcu_state state); |
|---|
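A hedged usage sketch for the new helper above: it assumes the state argument is taken from kbdev->pm.backend.mcu_state (the field used by the inline helpers later in this header) and that the caller already holds hwaccess_lock.

```c
#if MALI_USE_CSF
/* Illustrative sketch: decide whether the L2 may be turned off. */
static bool example_can_turn_off_l2(struct kbase_device *kbdev)
{
        lockdep_assert_held(&kbdev->hwaccess_lock);

        return kbase_pm_is_mcu_inactive(kbdev, kbdev->pm.backend.mcu_state);
}
#endif
```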
| 738 | 795 | |
|---|
| 739 | 796 | /** |
|---|
| 740 | 797 | * kbase_pm_idle_groups_sched_suspendable - Check whether the scheduler can be |
|---|
| .. | .. |
|---|
| 774 | 831 | |
|---|
| 775 | 832 | /** |
|---|
| 776 | 833 | * kbase_pm_no_mcu_core_pwroff - Check whether the PM is required to keep the |
|---|
| 777 | | - * MCU core powered in accordance to the active |
|---|
| 834 | + * MCU shader Core powered in accordance to the active |
|---|
| 778 | 835 | * power management policy |
|---|
| 779 | 836 | * |
|---|
| 780 | 837 | * @kbdev: Device pointer |
|---|
| .. | .. |
|---|
| 788 | 845 | return kbdev->pm.backend.csf_pm_sched_flags & |
|---|
| 789 | 846 | CSF_DYNAMIC_PM_CORE_KEEP_ON; |
|---|
| 790 | 847 | } |
|---|
| 848 | + |
|---|
| 849 | +/** |
|---|
| 850 | + * kbase_pm_mcu_is_in_desired_state - Check if MCU is in stable ON/OFF state. |
|---|
| 851 | + * |
|---|
| 852 | + * @kbdev: Device pointer |
|---|
| 853 | + * |
|---|
| 854 | + * Return: true if MCU is in stable ON/OFF state. |
|---|
| 855 | + */ |
|---|
| 856 | +static inline bool kbase_pm_mcu_is_in_desired_state(struct kbase_device *kbdev) |
|---|
| 857 | +{ |
|---|
| 858 | + bool in_desired_state = true; |
|---|
| 859 | + |
|---|
| 860 | + if (kbase_pm_is_mcu_desired(kbdev) && kbdev->pm.backend.mcu_state != KBASE_MCU_ON) |
|---|
| 861 | + in_desired_state = false; |
|---|
| 862 | + else if (!kbase_pm_is_mcu_desired(kbdev) && |
|---|
| 863 | + (kbdev->pm.backend.mcu_state != KBASE_MCU_OFF) && |
|---|
| 864 | + (kbdev->pm.backend.mcu_state != KBASE_MCU_IN_SLEEP)) |
|---|
| 865 | + in_desired_state = false; |
|---|
| 866 | + |
|---|
| 867 | + return in_desired_state; |
|---|
| 868 | +} |
|---|
| 869 | + |
|---|
| 791 | 870 | #endif |
|---|
| 871 | + |
|---|
| 872 | +/** |
|---|
| 873 | + * kbase_pm_l2_is_in_desired_state - Check if L2 is in stable ON/OFF state. |
|---|
| 874 | + * |
|---|
| 875 | + * @kbdev: Device pointer |
|---|
| 876 | + * |
|---|
| 877 | + * Return: true if L2 is in stable ON/OFF state. |
|---|
| 878 | + */ |
|---|
| 879 | +static inline bool kbase_pm_l2_is_in_desired_state(struct kbase_device *kbdev) |
|---|
| 880 | +{ |
|---|
| 881 | + bool in_desired_state = true; |
|---|
| 882 | + |
|---|
| 883 | + if (kbase_pm_is_l2_desired(kbdev) && kbdev->pm.backend.l2_state != KBASE_L2_ON) |
|---|
| 884 | + in_desired_state = false; |
|---|
| 885 | + else if (!kbase_pm_is_l2_desired(kbdev) && kbdev->pm.backend.l2_state != KBASE_L2_OFF) |
|---|
| 886 | + in_desired_state = false; |
|---|
| 887 | + |
|---|
| 888 | + return in_desired_state; |
|---|
| 889 | +} |
|---|
| 792 | 890 | |
|---|
| 793 | 891 | /** |
|---|
| 794 | 892 | * kbase_pm_lock - Lock all necessary mutexes to perform PM actions |
|---|
| .. | .. |
|---|
| 818 | 916 | #endif /* !MALI_USE_CSF */ |
|---|
| 819 | 917 | } |
|---|
| 820 | 918 | |
|---|
| 919 | +#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME) |
|---|
| 920 | +/** |
|---|
| 921 | + * kbase_pm_gpu_sleep_allowed - Check if the GPU is allowed to be put in sleep |
|---|
| 922 | + * |
|---|
| 923 | + * @kbdev: Device pointer |
|---|
| 924 | + * |
|---|
| 925 | + * This function is called on GPU idle notification and if it returns false then |
|---|
| 926 | + * GPU power down will be triggered by suspending the CSGs and halting the MCU. |
|---|
| 927 | + * |
|---|
| 928 | + * Return: true if the GPU is allowed to be in the sleep state. |
|---|
| 929 | + */ |
|---|
| 930 | +static inline bool kbase_pm_gpu_sleep_allowed(struct kbase_device *kbdev) |
|---|
| 931 | +{ |
|---|
| 932 | + /* If the autosuspend_delay has been set to 0 then it doesn't make |
|---|
| 933 | + * sense to first put the GPU to sleep state and then power it down, |
|---|
| 934 | + * instead it would be better to power it down right away. |
|---|
| 935 | + * Also need to do the same when autosuspend_delay is set to a negative |
|---|
| 936 | + * value, which implies that runtime pm is effectively disabled by the |
|---|
| 937 | + * kernel. |
|---|
| 938 | + * A high positive value of autosuspend_delay can be used to keep the |
|---|
| 939 | + * GPU in sleep state for a long time. |
|---|
| 940 | + */ |
|---|
| 941 | + if (unlikely(!kbdev->dev->power.autosuspend_delay || |
|---|
| 942 | + (kbdev->dev->power.autosuspend_delay < 0))) |
|---|
| 943 | + return false; |
|---|
| 944 | + |
|---|
| 945 | + return kbdev->pm.backend.gpu_sleep_supported; |
|---|
| 946 | +} |
|---|
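Because kbase_pm_gpu_sleep_allowed() keys off the runtime-PM autosuspend delay, a platform integration sketch is shown below; the 100 ms value is an arbitrary example, not a recommendation from the driver.

```c
#include <linux/pm_runtime.h>

/* Illustrative sketch: a zero or negative autosuspend delay makes
 * kbase_pm_gpu_sleep_allowed() return false, so the GPU is powered down
 * directly instead of being put into the sleep state.
 */
static void example_configure_gpu_autosuspend(struct kbase_device *kbdev)
{
        pm_runtime_set_autosuspend_delay(kbdev->dev, 100);
        pm_runtime_use_autosuspend(kbdev->dev);
}
```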
| 947 | + |
|---|
| 948 | +/** |
|---|
| 949 | + * kbase_pm_enable_db_mirror_interrupt - Enable the doorbell mirror interrupt to |
|---|
| 950 | + * detect the User doorbell rings. |
|---|
| 951 | + * |
|---|
| 952 | + * @kbdev: Device pointer |
|---|
| 953 | + * |
|---|
| 954 | + * This function is called just before sending the sleep request to MCU firmware |
|---|
| 955 | + * so that User doorbell rings can be detected whilst GPU remains in the sleep |
|---|
| 956 | + * state. |
|---|
| 957 | + * |
|---|
| 958 | + */ |
|---|
| 959 | +static inline void kbase_pm_enable_db_mirror_interrupt(struct kbase_device *kbdev) |
|---|
| 960 | +{ |
|---|
| 961 | + lockdep_assert_held(&kbdev->hwaccess_lock); |
|---|
| 962 | + |
|---|
| 963 | + if (!kbdev->pm.backend.db_mirror_interrupt_enabled) { |
|---|
| 964 | + u32 irq_mask = kbase_reg_read(kbdev, |
|---|
| 965 | + GPU_CONTROL_REG(GPU_IRQ_MASK)); |
|---|
| 966 | + |
|---|
| 967 | + WARN_ON(irq_mask & DOORBELL_MIRROR); |
|---|
| 968 | + |
|---|
| 969 | + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), |
|---|
| 970 | + irq_mask | DOORBELL_MIRROR); |
|---|
| 971 | + kbdev->pm.backend.db_mirror_interrupt_enabled = true; |
|---|
| 972 | + } |
|---|
| 973 | +} |
|---|
| 974 | + |
|---|
| 975 | +/** |
|---|
| 976 | + * kbase_pm_disable_db_mirror_interrupt - Disable the doorbell mirror interrupt. |
|---|
| 977 | + * |
|---|
| 978 | + * @kbdev: Device pointer |
|---|
| 979 | + * |
|---|
| 980 | + * This function is called when doorbell mirror interrupt is received or MCU |
|---|
| 981 | + * needs to be reactivated by enabling the doorbell notification. |
|---|
| 982 | + */ |
|---|
| 983 | +static inline void kbase_pm_disable_db_mirror_interrupt(struct kbase_device *kbdev) |
|---|
| 984 | +{ |
|---|
| 985 | + lockdep_assert_held(&kbdev->hwaccess_lock); |
|---|
| 986 | + |
|---|
| 987 | + if (kbdev->pm.backend.db_mirror_interrupt_enabled) { |
|---|
| 988 | + u32 irq_mask = kbase_reg_read(kbdev, |
|---|
| 989 | + GPU_CONTROL_REG(GPU_IRQ_MASK)); |
|---|
| 990 | + |
|---|
| 991 | + kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), |
|---|
| 992 | + irq_mask & ~DOORBELL_MIRROR); |
|---|
| 993 | + kbdev->pm.backend.db_mirror_interrupt_enabled = false; |
|---|
| 994 | + } |
|---|
| 995 | +} |
|---|
| 996 | +#endif |
|---|
| 997 | + |
|---|
| 998 | +/** |
|---|
| 999 | + * kbase_pm_l2_allow_mmu_page_migration - L2 state allows MMU page migration or not |
|---|
| 1000 | + * |
|---|
| 1001 | + * @kbdev: The kbase device structure for the device (must be a valid pointer) |
|---|
| 1002 | + * |
|---|
| 1003 | + * Check whether the L2 state is in power transition phase or not. If it is, the MMU |
|---|
| 1004 | + * page migration should be deferred. The caller must hold hwaccess_lock, and, if MMU |
|---|
| 1005 | + * page migration is intended, immediately start the MMU migration action without |
|---|
| 1006 | + * dropping the lock. When page migration begins, a flag is set in kbdev that would |
|---|
| 1007 | + * prevent the L2 state machine traversing into power transition phases, until |
|---|
| 1008 | + * the MMU migration action ends. |
|---|
| 1009 | + * |
|---|
| 1010 | + * Return: true if MMU page migration is allowed |
|---|
| 1011 | + */ |
|---|
| 1012 | +static inline bool kbase_pm_l2_allow_mmu_page_migration(struct kbase_device *kbdev) |
|---|
| 1013 | +{ |
|---|
| 1014 | + struct kbase_pm_backend_data *backend = &kbdev->pm.backend; |
|---|
| 1015 | + |
|---|
| 1016 | + lockdep_assert_held(&kbdev->hwaccess_lock); |
|---|
| 1017 | + |
|---|
| 1018 | + return (backend->l2_state != KBASE_L2_PEND_ON && backend->l2_state != KBASE_L2_PEND_OFF); |
|---|
| 1019 | +} |
|---|
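To make the locking requirement above concrete, here is a hedged sketch; example_start_page_migration() is a hypothetical helper, and the point is that the check and the start of the migration happen under a single hwaccess_lock hold.

```c
static void example_start_page_migration(struct kbase_device *kbdev); /* hypothetical */

/* Illustrative sketch: defer MMU page migration while the L2 is in a power
 * transition, otherwise start it without dropping hwaccess_lock.
 */
static void example_try_page_migration(struct kbase_device *kbdev)
{
        unsigned long flags;

        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
        if (kbase_pm_l2_allow_mmu_page_migration(kbdev))
                example_start_page_migration(kbdev);
        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
```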
| 1020 | + |
|---|
| 821 | 1021 | #endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */ |
|---|