.. | .. |
---|
1 | 1 | // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note |
---|
2 | 2 | /* |
---|
3 | 3 | * |
---|
4 | | - * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved. |
---|
| 4 | + * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved. |
---|
5 | 5 | * |
---|
6 | 6 | * This program is free software and is provided to you under the terms of the |
---|
7 | 7 | * GNU General Public License version 2 as published by the Free Software |
---|
.. | .. |
---|
36 | 36 | #include <linux/of.h> |
---|
37 | 37 | |
---|
38 | 38 | static const struct kbase_pm_policy *const all_policy_list[] = { |
---|
| 39 | +#if IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) |
---|
| 40 | + &kbase_pm_always_on_policy_ops, |
---|
39 | 41 | &kbase_pm_coarse_demand_policy_ops, |
---|
40 | | - &kbase_pm_always_on_policy_ops |
---|
| 42 | +#else /* CONFIG_MALI_BIFROST_NO_MALI */ |
---|
| 43 | + &kbase_pm_coarse_demand_policy_ops, |
---|
| 44 | + &kbase_pm_always_on_policy_ops, |
---|
| 45 | +#endif /* CONFIG_MALI_BIFROST_NO_MALI */ |
---|
41 | 46 | }; |
---|
42 | 47 | |
---|
43 | 48 | void kbase_pm_policy_init(struct kbase_device *kbdev) |
---|
.. | .. |
---|
175 | 180 | |
---|
176 | 181 | shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev); |
---|
177 | 182 | |
---|
178 | | - if (shaders_desired && kbase_pm_is_l2_desired(kbdev)) { |
---|
| 183 | + if (shaders_desired && kbase_pm_is_l2_desired(kbdev)) |
---|
179 | 184 | kbase_pm_update_state(kbdev); |
---|
180 | | - } |
---|
181 | 185 | #endif |
---|
182 | 186 | } |
---|
183 | 187 | |
---|
184 | 188 | void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev) |
---|
185 | 189 | { |
---|
186 | | - bool shaders_desired; |
---|
| 190 | + bool shaders_desired = false; |
---|
187 | 191 | |
---|
188 | 192 | lockdep_assert_held(&kbdev->hwaccess_lock); |
---|
189 | 193 | |
---|
.. | .. |
---|
192 | 196 | if (kbdev->pm.backend.poweroff_wait_in_progress) |
---|
193 | 197 | return; |
---|
194 | 198 | |
---|
| 199 | +#if !MALI_USE_CSF |
---|
195 | 200 | if (kbdev->pm.backend.protected_transition_override) |
---|
196 | 201 | /* We are trying to change in/out of protected mode - force all |
---|
197 | 202 | * cores off so that the L2 powers down |
---|
.. | .. |
---|
199 | 204 | shaders_desired = false; |
---|
200 | 205 | else |
---|
201 | 206 | shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev); |
---|
202 | | - |
---|
203 | | -#if MALI_USE_CSF |
---|
204 | | - /* On CSF GPUs, Host driver isn't supposed to do the power management |
---|
205 | | - * for shader cores. CSF firmware will power up the cores appropriately |
---|
206 | | - * and so from Driver's standpoint 'shaders_desired' flag shall always |
---|
207 | | - * remain 0. |
---|
208 | | - */ |
---|
209 | | - shaders_desired = false; |
---|
210 | 207 | #endif |
---|
| 208 | + |
---|
211 | 209 | if (kbdev->pm.backend.shaders_desired != shaders_desired) { |
---|
212 | 210 | KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL, kbdev->pm.backend.shaders_desired); |
---|
213 | 211 | |
---|
.. | .. |
---|
250 | 248 | #if MALI_USE_CSF |
---|
251 | 249 | static int policy_change_wait_for_L2_off(struct kbase_device *kbdev) |
---|
252 | 250 | { |
---|
253 | | -#define WAIT_DURATION_MS (3000) |
---|
254 | 251 | long remaining; |
---|
255 | | - long timeout = kbase_csf_timeout_in_jiffies(WAIT_DURATION_MS); |
---|
| 252 | + long timeout = kbase_csf_timeout_in_jiffies(kbase_get_timeout_ms(kbdev, CSF_PM_TIMEOUT)); |
---|
256 | 253 | int err = 0; |
---|
257 | 254 | |
---|
258 | 255 | /* Wait for L2 becoming off, by which the MCU is also implicitly off |
---|
.. | .. |
---|
295 | 292 | unsigned int new_policy_csf_pm_sched_flags; |
---|
296 | 293 | bool sched_suspend; |
---|
297 | 294 | bool reset_gpu = false; |
---|
| 295 | + bool reset_op_prevented = true; |
---|
| 296 | + struct kbase_csf_scheduler *scheduler = NULL; |
---|
298 | 297 | #endif |
---|
299 | 298 | |
---|
300 | 299 | KBASE_DEBUG_ASSERT(kbdev != NULL); |
---|
.. | .. |
---|
303 | 302 | KBASE_KTRACE_ADD(kbdev, PM_SET_POLICY, NULL, new_policy->id); |
---|
304 | 303 | |
---|
305 | 304 | #if MALI_USE_CSF |
---|
| 305 | + scheduler = &kbdev->csf.scheduler; |
---|
| 306 | + KBASE_DEBUG_ASSERT(scheduler != NULL); |
---|
| 307 | + |
---|
306 | 308 | /* Serialize calls on kbase_pm_set_policy() */ |
---|
307 | 309 | mutex_lock(&kbdev->pm.backend.policy_change_lock); |
---|
308 | 310 | |
---|
| 311 | + if (kbase_reset_gpu_prevent_and_wait(kbdev)) { |
---|
| 312 | + dev_warn(kbdev->dev, "Set PM policy failing to prevent gpu reset"); |
---|
| 313 | + reset_op_prevented = false; |
---|
| 314 | + } |
---|
| 315 | + |
---|
| 316 | + /* In case of CSF, the scheduler may be invoked to suspend. In that |
---|
| 317 | + * case, there is a risk that the L2 may be turned on by the time we |
---|
| 318 | + * check it here. So we hold the scheduler lock to avoid other operations |
---|
| 319 | + * interfering with the policy change and vice versa. |
---|
| 320 | + */ |
---|
| 321 | + mutex_lock(&scheduler->lock); |
---|
309 | 322 | spin_lock_irqsave(&kbdev->hwaccess_lock, flags); |
---|
310 | 323 | /* policy_change_clamp_state_to_off, when needed, is set/cleared in |
---|
311 | 324 | * this function, a very limited temporal scope for covering the |
---|
.. | .. |
---|
318 | 331 | * the always_on policy, reflected by the CSF_DYNAMIC_PM_CORE_KEEP_ON |
---|
319 | 332 | * flag bit. |
---|
320 | 333 | */ |
---|
321 | | - sched_suspend = kbdev->csf.firmware_inited && |
---|
| 334 | + sched_suspend = reset_op_prevented && |
---|
322 | 335 | (CSF_DYNAMIC_PM_CORE_KEEP_ON & |
---|
323 | | - (new_policy_csf_pm_sched_flags | |
---|
324 | | - kbdev->pm.backend.csf_pm_sched_flags)); |
---|
| 336 | + (new_policy_csf_pm_sched_flags | kbdev->pm.backend.csf_pm_sched_flags)); |
---|
325 | 337 | |
---|
326 | 338 | spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); |
---|
327 | 339 | |
---|
328 | | - if (sched_suspend) |
---|
329 | | - kbase_csf_scheduler_pm_suspend(kbdev); |
---|
| 340 | + if (sched_suspend) { |
---|
| 341 | + /* Update the suspend flag to reflect whether the suspend actually happened */ |
---|
| 342 | + sched_suspend = !kbase_csf_scheduler_pm_suspend_no_lock(kbdev); |
---|
| 343 | + /* Set the reset recovery flag if the required suspend failed */ |
---|
| 344 | + reset_gpu = !sched_suspend; |
---|
| 345 | + } |
---|
330 | 346 | |
---|
331 | 347 | spin_lock_irqsave(&kbdev->hwaccess_lock, flags); |
---|
332 | | - /* If the current active policy is always_on, one needs to clamp the |
---|
333 | | - * MCU/L2 for reaching off-state |
---|
334 | | - */ |
---|
335 | | - if (sched_suspend) |
---|
336 | | - kbdev->pm.backend.policy_change_clamp_state_to_off = |
---|
337 | | - CSF_DYNAMIC_PM_CORE_KEEP_ON & kbdev->pm.backend.csf_pm_sched_flags; |
---|
338 | 348 | |
---|
| 349 | + kbdev->pm.backend.policy_change_clamp_state_to_off = sched_suspend; |
---|
339 | 350 | kbase_pm_update_state(kbdev); |
---|
340 | 351 | spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags); |
---|
341 | 352 | |
---|
.. | .. |
---|
394 | 405 | |
---|
395 | 406 | #if MALI_USE_CSF |
---|
396 | 407 | /* Reverse the suspension done */ |
---|
| 408 | + if (sched_suspend) |
---|
| 409 | + kbase_csf_scheduler_pm_resume_no_lock(kbdev); |
---|
| 410 | + mutex_unlock(&scheduler->lock); |
---|
| 411 | + |
---|
| 412 | + if (reset_op_prevented) |
---|
| 413 | + kbase_reset_gpu_allow(kbdev); |
---|
| 414 | + |
---|
397 | 415 | if (reset_gpu) { |
---|
398 | 416 | dev_warn(kbdev->dev, "Resorting to GPU reset for policy change\n"); |
---|
399 | 417 | if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE)) |
---|
400 | 418 | kbase_reset_gpu(kbdev); |
---|
401 | 419 | kbase_reset_gpu_wait(kbdev); |
---|
402 | | - } else if (sched_suspend) |
---|
403 | | - kbase_csf_scheduler_pm_resume(kbdev); |
---|
| 420 | + } |
---|
404 | 421 | |
---|
405 | 422 | mutex_unlock(&kbdev->pm.backend.policy_change_lock); |
---|
406 | 423 | #endif |
---|