```diff
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -32,8 +32,11 @@
 #include <mali_kbase_hwaccess_jm.h>
 #include <backend/gpu/mali_kbase_js_internal.h>
 #include <backend/gpu/mali_kbase_jm_internal.h>
+#else
+#include <linux/pm_runtime.h>
+#include <mali_kbase_reset_gpu.h>
 #endif /* !MALI_USE_CSF */
-#include <mali_kbase_hwcnt_context.h>
+#include <hwcnt/mali_kbase_hwcnt_context.h>
 #include <backend/gpu/mali_kbase_pm_internal.h>
 #include <backend/gpu/mali_kbase_devfreq.h>
 #include <mali_kbase_dummy_job_wa.h>
@@ -69,6 +72,10 @@
                 callbacks->power_runtime_idle_callback;
         kbdev->pm.backend.callback_soft_reset =
                 callbacks->soft_reset_callback;
+        kbdev->pm.backend.callback_power_runtime_gpu_idle =
+                callbacks->power_runtime_gpu_idle_callback;
+        kbdev->pm.backend.callback_power_runtime_gpu_active =
+                callbacks->power_runtime_gpu_active_callback;
 
         if (callbacks->power_runtime_init_callback)
                 return callbacks->power_runtime_init_callback(kbdev);
```
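The two new hooks are lifted from the platform's `kbase_pm_callback_conf` alongside the existing runtime callbacks. A minimal sketch of how a platform integration might supply them, assuming a pm_runtime-backed design (all `example_` names are illustrative, not part of this patch):

```c
#include <linux/pm_runtime.h>

/* Hypothetical platform glue: kbase calls gpu_active/gpu_idle on GPU
 * activity transitions, and the callbacks translate those into runtime-PM
 * reference count changes so the autosuspend machinery drives power-off.
 */
static void example_pm_callback_runtime_gpu_active(struct kbase_device *kbdev)
{
        /* Take an RPM reference; resumes the device if it was suspended */
        WARN_ON(pm_runtime_get_sync(kbdev->dev) < 0);
}

static void example_pm_callback_runtime_gpu_idle(struct kbase_device *kbdev)
{
        /* Drop the RPM reference; the autosuspend timer defers power-off */
        pm_runtime_mark_last_busy(kbdev->dev);
        pm_runtime_put_autosuspend(kbdev->dev);
}

struct kbase_pm_callback_conf example_pm_callbacks = {
        /* ... power_on/power_off and the other mandatory callbacks ... */
        .power_runtime_gpu_active_callback = example_pm_callback_runtime_gpu_active,
        .power_runtime_gpu_idle_callback = example_pm_callback_runtime_gpu_idle,
};
```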
```diff
@@ -86,15 +93,16 @@
         kbdev->pm.backend.callback_power_runtime_off = NULL;
         kbdev->pm.backend.callback_power_runtime_idle = NULL;
         kbdev->pm.backend.callback_soft_reset = NULL;
+        kbdev->pm.backend.callback_power_runtime_gpu_idle = NULL;
+        kbdev->pm.backend.callback_power_runtime_gpu_active = NULL;
 
         return 0;
 }
 
 void kbase_pm_runtime_term(struct kbase_device *kbdev)
 {
-        if (kbdev->pm.callback_power_runtime_term) {
+        if (kbdev->pm.callback_power_runtime_term)
                 kbdev->pm.callback_power_runtime_term(kbdev);
-        }
 }
 
 void kbase_pm_register_access_enable(struct kbase_device *kbdev)
@@ -120,10 +128,10 @@
 
         callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
 
+        kbdev->pm.backend.gpu_powered = false;
+
         if (callbacks)
                 callbacks->power_off_callback(kbdev);
-
-        kbdev->pm.backend.gpu_powered = false;
 }
 
 int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
```
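Moving the `gpu_powered = false` store ahead of `power_off_callback` matters for the new flow: the callback can end up in `kbase_pm_handle_runtime_suspend()` (added at the end of this diff), whose early-exit path relies on `gpu_powered` already reading false. A hedged sketch of such a callback, assuming pm_runtime wiring (illustrative names):

```c
/* Hypothetical power_off_callback: dropping the RPM reference with a
 * _sync variant may invoke the runtime-suspend callback immediately,
 * and from there kbase_pm_handle_runtime_suspend(), which can bail out
 * early because gpu_powered was already cleared before this call.
 */
static void example_pm_callback_power_off(struct kbase_device *kbdev)
{
        pm_runtime_put_sync_suspend(kbdev->dev);
}
```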
```diff
@@ -192,6 +200,14 @@
         INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
                   kbase_pm_hwcnt_disable_worker);
         kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+        kbdev->pm.backend.gpu_sleep_supported =
+                kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_GPU_SLEEP) &&
+                !kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TURSEHW_1997) &&
+                kbdev->pm.backend.callback_power_runtime_gpu_active &&
+                kbdev->pm.backend.callback_power_runtime_gpu_idle;
+#endif
 
         if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
                 kbdev->pm.backend.l2_always_on = false;
@@ -263,6 +279,76 @@
          */
 }
 
+static void pm_handle_power_off(struct kbase_device *kbdev)
+{
+        struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+#if MALI_USE_CSF
+        enum kbase_mcu_state mcu_state;
+#endif
+        unsigned long flags;
+
+        lockdep_assert_held(&kbdev->pm.lock);
+
+        if (backend->poweron_required)
+                return;
+
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+        if (kbdev->pm.backend.gpu_wakeup_override) {
+                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+                return;
+        }
+#endif
+        WARN_ON(backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF ||
+                backend->l2_state != KBASE_L2_OFF);
+#if MALI_USE_CSF
+        mcu_state = backend->mcu_state;
+        WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
+#endif
+        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+        if (backend->callback_power_runtime_gpu_idle) {
+                WARN_ON(backend->gpu_idled);
+                backend->callback_power_runtime_gpu_idle(kbdev);
+                backend->gpu_idled = true;
+                return;
+        }
+#endif
+
+        /* Disable interrupts and turn the clock off */
+        if (!kbase_pm_clock_off(kbdev)) {
+                /*
+                 * Page/bus faults are pending, must drop locks to
+                 * process. Interrupts are disabled so no more faults
+                 * should be generated at this point.
+                 */
+                kbase_pm_unlock(kbdev);
+                kbase_flush_mmu_wqs(kbdev);
+                kbase_pm_lock(kbdev);
+
+#ifdef CONFIG_MALI_ARBITER_SUPPORT
+                /* poweron_required may have changed while the PM lock
+                 * was released.
+                 */
+                if (kbase_pm_is_gpu_lost(kbdev))
+                        backend->poweron_required = false;
+#endif
+
+                /* Turn off the clock now that the faults have been handled.
+                 * We dropped locks, so poweron_required may have changed -
+                 * power back on if this is the case (effectively only the
+                 * re-enabling of the interrupts would be done in this
+                 * case, as the clocks to the GPU were not withdrawn yet).
+                 */
+                if (backend->poweron_required)
+                        kbase_pm_clock_on(kbdev, false);
+                else
+                        WARN_ON(!kbase_pm_clock_off(kbdev));
+        }
+}
+
 static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
 {
         struct kbase_device *kbdev = container_of(data, struct kbase_device,
```
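Note that when the platform provides a gpu_idle callback, `pm_handle_power_off()` stops short of calling `kbase_pm_clock_off()` and instead hands the actual power-off over to runtime PM. A sketch of the runtime-suspend side that would complete that hand-off, assuming hypothetical `dev_pm_ops` wiring (`kbase_pm_handle_runtime_suspend()` itself is added later in this diff):

```c
/* Hypothetical runtime-PM glue (illustrative name). Returning -EBUSY from
 * the callback makes the PM core abort the runtime suspend and leave the
 * device active, which is how the power-off gets deferred whenever the
 * GPU turned busy again in the meantime.
 */
static int example_device_runtime_suspend(struct device *dev)
{
        struct kbase_device *kbdev = dev_get_drvdata(dev);
        int ret = kbase_pm_handle_runtime_suspend(kbdev);

        if (!ret) {
                /* Platform-specific clock/regulator disabling would go here */
        }
        return ret;
}
```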
```diff
@@ -270,6 +356,8 @@
         struct kbase_pm_device_data *pm = &kbdev->pm;
         struct kbase_pm_backend_data *backend = &pm->backend;
         unsigned long flags;
+
+        KBASE_KTRACE_ADD(kbdev, PM_POWEROFF_WAIT_WQ, NULL, 0);
 
 #if !MALI_USE_CSF
         /* Wait for power transitions to complete. We do this with no locks held
@@ -280,51 +368,7 @@
 
         kbase_pm_lock(kbdev);
 
-#ifdef CONFIG_MALI_ARBITER_SUPPORT
-        if (kbase_pm_is_gpu_lost(kbdev))
-                backend->poweron_required = false;
-#endif
-
-        if (!backend->poweron_required) {
-                unsigned long flags;
-
-                spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
-                WARN_ON(backend->shaders_state !=
-                                KBASE_SHADERS_OFF_CORESTACK_OFF ||
-                        backend->l2_state != KBASE_L2_OFF);
-                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
-                /* Disable interrupts and turn the clock off */
-                if (!kbase_pm_clock_off(kbdev)) {
-                        /*
-                         * Page/bus faults are pending, must drop locks to
-                         * process. Interrupts are disabled so no more faults
-                         * should be generated at this point.
-                         */
-                        kbase_pm_unlock(kbdev);
-                        kbase_flush_mmu_wqs(kbdev);
-                        kbase_pm_lock(kbdev);
-
-#ifdef CONFIG_MALI_ARBITER_SUPPORT
-                        /* poweron_required may have changed while pm lock
-                         * was released.
-                         */
-                        if (kbase_pm_is_gpu_lost(kbdev))
-                                backend->poweron_required = false;
-#endif
-
-                        /* Turn off clock now that fault have been handled. We
-                         * dropped locks so poweron_required may have changed -
-                         * power back on if this is the case (effectively only
-                         * re-enabling of the interrupts would be done in this
-                         * case, as the clocks to GPU were not withdrawn yet).
-                         */
-                        if (backend->poweron_required)
-                                kbase_pm_clock_on(kbdev, false);
-                        else
-                                WARN_ON(!kbase_pm_clock_off(kbdev));
-                }
-        }
+        pm_handle_power_off(kbdev);
 
         spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
         backend->poweroff_wait_in_progress = false;
@@ -378,8 +422,7 @@
                 return;
 
         /* Stop the metrics gathering framework */
-        if (kbase_pm_metrics_is_active(kbdev))
-                kbase_pm_metrics_stop(kbdev);
+        kbase_pm_metrics_stop(kbdev);
 
         /* Keep the current freq to restore it upon resume */
         kbdev->previous_frequency = clk_get_rate(clk);
@@ -512,6 +555,78 @@
         spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 }
 
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+/**
+ * kbase_pm_do_poweroff_sync - Do the synchronous power down of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function is called at the time of system suspend or device unload
+ * to power down the GPU synchronously. This is needed because the power down
+ * of the GPU would normally happen from the runtime suspend callback (if the
+ * gpu_active and gpu_idle callbacks are used), and runtime suspend is
+ * disabled while a system suspend is in progress.
+ * The function first waits for the @gpu_poweroff_wait_work to complete, which
+ * could have been enqueued after the last PM reference was released.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+static int kbase_pm_do_poweroff_sync(struct kbase_device *kbdev)
+{
+        struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+        unsigned long flags;
+        int ret = 0;
+
+        WARN_ON(kbdev->pm.active_count);
+
+        kbase_pm_wait_for_poweroff_work_complete(kbdev);
+
+        kbase_pm_lock(kbdev);
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+        WARN_ON(backend->poweroff_wait_in_progress);
+        WARN_ON(backend->gpu_sleep_mode_active);
+        if (backend->gpu_powered) {
+                backend->mcu_desired = false;
+                backend->l2_desired = false;
+                kbase_pm_update_state(kbdev);
+                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+                ret = kbase_pm_wait_for_desired_state(kbdev);
+                if (ret) {
+                        dev_warn(kbdev->dev,
+                                 "Wait for pm state change failed on synchronous power off");
+                        ret = -EBUSY;
+                        goto out;
+                }
+
+                /* Due to the power policy, the GPU could have been kept
+                 * active throughout, so the idle callback needs to be
+                 * invoked before the power down.
+                 */
+                if (backend->callback_power_runtime_gpu_idle &&
+                    !backend->gpu_idled) {
+                        backend->callback_power_runtime_gpu_idle(kbdev);
+                        backend->gpu_idled = true;
+                }
+
+                if (!kbase_pm_clock_off(kbdev)) {
+                        dev_warn(kbdev->dev,
+                                 "Failed to turn off GPU clocks on synchronous power off, MMU faults pending");
+                        ret = -EBUSY;
+                }
+        } else {
+                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+        }
+
+out:
+        kbase_pm_unlock(kbdev);
+        return ret;
+}
+#endif
+
 void kbase_pm_do_poweroff(struct kbase_device *kbdev)
 {
         unsigned long flags;
```
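The synchronous path exists because, broadly speaking, the PM core pins each device's runtime-PM state across a system suspend (it takes a reference via `pm_runtime_get_noresume()` in the prepare phase), so the runtime-suspend callback cannot be relied on to power the GPU down. An illustrative sketch of the system-suspend side under that assumption (hypothetical names; the driver's actual call chain goes through more layers than shown):

```c
/* Hypothetical system-suspend handler: on CSF builds with
 * KBASE_PM_RUNTIME this ends up in kbase_pm_do_poweroff_sync(),
 * since the runtime-suspend callback is blocked for the duration
 * of the system suspend.
 */
static int example_platform_suspend(struct device *dev)
{
        struct kbase_device *kbdev = dev_get_drvdata(dev);

        return kbase_hwaccess_pm_suspend(kbdev);
}
```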
```diff
@@ -561,12 +676,38 @@
         return ret;
 }
 
-void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
+void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev)
 {
         wait_event_killable(kbdev->pm.backend.poweroff_wait,
                             is_poweroff_in_progress(kbdev));
 }
-KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_complete);
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_work_complete);
+
+/**
+ * is_gpu_powered_down - Check whether GPU is powered down
+ *
+ * @kbdev: kbase device
+ *
+ * Return: true if GPU is powered down, false otherwise
+ */
+static bool is_gpu_powered_down(struct kbase_device *kbdev)
+{
+        bool ret;
+        unsigned long flags;
+
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+        ret = !kbdev->pm.backend.gpu_powered;
+        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+        return ret;
+}
+
+void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev)
+{
+        wait_event_killable(kbdev->pm.backend.poweroff_wait,
+                            is_gpu_powered_down(kbdev));
+}
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_gpu_power_down);
 
 int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
                               unsigned int flags)
```
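Both wait helpers follow the same pattern: sample the condition under `hwaccess_lock`, then sleep on `poweroff_wait`, which the power-off paths kick via `wake_up()`. A generic, self-contained sketch of the pattern with hypothetical names:

```c
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_dev {
        spinlock_t lock;            /* serializes writers of 'powered' */
        bool powered;
        wait_queue_head_t off_wait; /* kicked after 'powered' changes */
};

static bool example_is_off(struct example_dev *ed)
{
        unsigned long flags;
        bool off;

        /* Sample the flag under the writers' lock so the read is coherent */
        spin_lock_irqsave(&ed->lock, flags);
        off = !ed->powered;
        spin_unlock_irqrestore(&ed->lock, flags);

        return off;
}

static void example_wait_for_off(struct example_dev *ed)
{
        /* Killable sleep: a fatal signal can still terminate the caller */
        wait_event_killable(ed->off_wait, example_is_off(ed));
}
```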
```diff
@@ -612,6 +753,15 @@
          * cores off
          */
         kbdev->pm.active_count = 1;
+#if MALI_USE_CSF && KBASE_PM_RUNTIME
+        if (kbdev->pm.backend.callback_power_runtime_gpu_active) {
+                /* Take an RPM reference to match the internal PM
+                 * reference count.
+                 */
+                kbdev->pm.backend.callback_power_runtime_gpu_active(kbdev);
+                WARN_ON(kbdev->pm.backend.gpu_idled);
+        }
+#endif
 
         spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
                           irq_flags);
@@ -653,11 +803,15 @@
 {
         KBASE_DEBUG_ASSERT(kbdev != NULL);
 
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+        WARN_ON(kbase_pm_do_poweroff_sync(kbdev));
+#else
         mutex_lock(&kbdev->pm.lock);
         kbase_pm_do_poweroff(kbdev);
         mutex_unlock(&kbdev->pm.lock);
 
-        kbase_pm_wait_for_poweroff_complete(kbdev);
+        kbase_pm_wait_for_poweroff_work_complete(kbdev);
+#endif
 }
 
 KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
@@ -710,7 +864,7 @@
         kbase_pm_update_state(kbdev);
 
 #if !MALI_USE_CSF
-    kbase_backend_slot_update(kbdev);
+        kbase_backend_slot_update(kbdev);
 #endif /* !MALI_USE_CSF */
 
         spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -735,7 +889,7 @@
         lockdep_assert_held(&kbdev->pm.lock);
 
         if (kbase_dummy_job_wa_enabled(kbdev)) {
-                dev_warn(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
+                dev_warn_once(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
                 new_core_mask_js0 = kbdev->pm.debug_core_mask[0];
         }
 
@@ -759,8 +913,15 @@
         kbase_pm_update_active(kbdev);
 }
 
-void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
+int kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
 {
+        int ret = 0;
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+        ret = kbase_pm_do_poweroff_sync(kbdev);
+        if (ret)
+                return ret;
+#else
         /* Force power off the GPU and all cores (regardless of policy), only
          * after the PM active count reaches zero (otherwise, we risk turning it
          * off prematurely)
@@ -775,10 +936,16 @@
 
         kbase_pm_unlock(kbdev);
 
-        kbase_pm_wait_for_poweroff_complete(kbdev);
+        kbase_pm_wait_for_poweroff_work_complete(kbdev);
+#endif
+
+        WARN_ON(kbdev->pm.backend.gpu_powered);
+        WARN_ON(atomic_read(&kbdev->faults_pending));
 
         if (kbdev->pm.backend.callback_power_suspend)
                 kbdev->pm.backend.callback_power_suspend(kbdev);
+
+        return ret;
 }
 
 void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
```
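`kbase_hwaccess_pm_suspend()` now returns an error code so a failed synchronous power-off can abort the system suspend instead of being silently ignored. A hedged sketch of how a caller might propagate it (illustrative only; the driver's actual callers sit in the device-level suspend code):

```c
/* Hypothetical caller sketch: a non-zero return (e.g. -EBUSY) propagates
 * up to the PM core, which then fails the system suspend attempt so it
 * can be retried later.
 */
static int example_kbase_pm_suspend(struct kbase_device *kbdev)
{
        int ret = kbase_hwaccess_pm_suspend(kbdev);

        if (ret)
                dev_warn(kbdev->dev, "GPU power-off failed, aborting suspend");

        return ret;
}
```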
```diff
@@ -807,7 +974,7 @@
 void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
 {
         unsigned long flags;
-        ktime_t end_timestamp = ktime_get();
+        ktime_t end_timestamp = ktime_get_raw();
         struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
 
         if (!kbdev->arb.arb_if)
@@ -844,9 +1011,11 @@
 
                 /* Cancel any pending HWC dumps */
                 spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-                kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
-                kbdev->hwcnt.backend.triggered = 1;
-                wake_up(&kbdev->hwcnt.backend.wait);
+                if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING ||
+                    kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+                        kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+                        kbdev->hwcnt.backend.triggered = 1;
+                        wake_up(&kbdev->hwcnt.backend.wait);
+                }
                 spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
         }
         mutex_unlock(&arb_vm_state->vm_state_lock);
@@ -854,3 +1024,215 @@
 }
 
 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev)
+{
+        unsigned long flags;
+
+        lockdep_assert_held(&kbdev->pm.lock);
+
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+        /* Set the override flag to force the power up of the L2 cache */
+        kbdev->pm.backend.gpu_wakeup_override = true;
+        kbase_pm_update_state(kbdev);
+        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+        return kbase_pm_wait_for_desired_state(kbdev);
+}
+
+static int pm_handle_mcu_sleep_on_runtime_suspend(struct kbase_device *kbdev)
+{
+        unsigned long flags;
+        int ret;
+
+        lockdep_assert_held(&kbdev->csf.scheduler.lock);
+        lockdep_assert_held(&kbdev->pm.lock);
+
+#ifdef CONFIG_MALI_BIFROST_DEBUG
+        /* If there is no active CSG on any slot, powering up the L2 could be
+         * skipped and the GPU suspended directly.
+         * TODO: the firmware has to be reloaded after wake-up, as no halt
+         * command has been sent when the GPU was put into sleep mode.
+         */
+        if (!kbase_csf_scheduler_get_nr_active_csgs(kbdev))
+                dev_info(kbdev->dev,
+                         "No active CSGs. Can skip the power up of L2 and go for suspension directly");
+#endif
+
+        ret = kbase_pm_force_mcu_wakeup_after_sleep(kbdev);
+        if (ret) {
+                spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+                dev_warn(kbdev->dev,
+                         "Waiting for MCU to wake up failed on runtime suspend");
+                kbdev->pm.backend.gpu_wakeup_override = false;
+                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+                return ret;
+        }
+
+        /* Check if a Doorbell mirror interrupt occurred meanwhile */
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+        if (kbdev->pm.backend.gpu_sleep_mode_active &&
+            kbdev->pm.backend.exit_gpu_sleep_mode) {
+                dev_dbg(kbdev->dev,
+                        "DB mirror interrupt occurred during runtime suspend after L2 power up");
+                kbdev->pm.backend.gpu_wakeup_override = false;
+                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+                return -EBUSY;
+        }
+        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+        /* Need to release kbdev->pm.lock to avoid a lock ordering issue
+         * with kctx->reg.lock, which is taken if the sync wait condition is
+         * evaluated after the CSG suspend operation.
+         */
+        kbase_pm_unlock(kbdev);
+        ret = kbase_csf_scheduler_handle_runtime_suspend(kbdev);
+        kbase_pm_lock(kbdev);
+
+        /* Power down the L2 cache */
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+        kbdev->pm.backend.gpu_wakeup_override = false;
+        kbase_pm_update_state(kbdev);
+        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+        /* After re-acquiring kbdev->pm.lock, check if the device became
+         * active (or active then idle) in the meantime.
+         */
+        if (kbdev->pm.active_count ||
+            kbdev->pm.backend.poweroff_wait_in_progress) {
+                dev_dbg(kbdev->dev,
+                        "Device became active on runtime suspend after suspending Scheduler");
+                ret = -EBUSY;
+        }
+
+        if (ret)
+                return ret;
+
+        ret = kbase_pm_wait_for_desired_state(kbdev);
+        if (ret)
+                dev_warn(kbdev->dev, "Wait for power down failed on runtime suspend");
+
+        return ret;
+}
+
+int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev)
+{
+        enum kbase_mcu_state mcu_state;
+        bool exit_early = false;
+        unsigned long flags;
+        int ret = 0;
+
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+        /* This check is needed for the case where kbase invoked the
+         * @power_off_callback directly.
+         */
+        if (!kbdev->pm.backend.gpu_powered) {
+                dev_dbg(kbdev->dev, "GPU already powered down on runtime suspend");
+                exit_early = true;
+        }
+        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+        if (exit_early)
+                goto out;
+
+        ret = kbase_reset_gpu_try_prevent(kbdev);
+        if (ret == -ENOMEM) {
+                dev_dbg(kbdev->dev, "Quit runtime suspend as GPU is in bad state");
+                /* Finish the runtime suspend; there is no point in trying
+                 * again as the GPU is in an irrecoverable bad state.
+                 */
+                goto out;
+        } else if (ret) {
+                dev_dbg(kbdev->dev, "Quit runtime suspend for failing to prevent gpu reset");
+                ret = -EBUSY;
+                goto out;
+        }
+
+        kbase_csf_scheduler_lock(kbdev);
+        kbase_pm_lock(kbdev);
+
+        /*
+         * This handles the case where the GPU device becomes active and
+         * idle very quickly while the runtime suspend callback is
+         * executing. Consider the following sequence of events:
+         * - The GPU goes idle and pm_callback_runtime_gpu_idle() is called.
+         * - The auto-suspend timer expires and
+         *   kbase_device_runtime_suspend() is called.
+         * - The GPU becomes active and pm_callback_runtime_gpu_active()
+         *   calls pm_runtime_get().
+         * - Shortly after that, the GPU becomes idle again.
+         * - kbase_pm_handle_runtime_suspend() gets called.
+         * - pm_callback_runtime_gpu_idle() is called.
+         *
+         * We do not want to power down the GPU immediately after it goes
+         * idle, so if we notice that the GPU had become active while the
+         * runtime suspend was already under way, we abort the runtime
+         * suspend. By aborting the runtime suspend, we defer the power
+         * down of the GPU.
+         *
+         * This check also helps prevent warnings regarding the L2 and MCU
+         * states inside pm_handle_power_off(). The warnings stem from the
+         * fact that pm.lock is released before invoking the Scheduler
+         * function to suspend the CSGs.
+         */
+        if (kbdev->pm.active_count ||
+            kbdev->pm.backend.poweroff_wait_in_progress) {
+                dev_dbg(kbdev->dev, "Device became active on runtime suspend");
+                ret = -EBUSY;
+                goto unlock;
+        }
+
+        spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+        if (kbdev->pm.backend.gpu_sleep_mode_active &&
+            kbdev->pm.backend.exit_gpu_sleep_mode) {
+                dev_dbg(kbdev->dev,
+                        "DB mirror interrupt occurred during runtime suspend before L2 power up");
+                ret = -EBUSY;
+                spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+                goto unlock;
+        }
+
+        mcu_state = kbdev->pm.backend.mcu_state;
+        WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
+        spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+        if (mcu_state == KBASE_MCU_IN_SLEEP) {
+                ret = pm_handle_mcu_sleep_on_runtime_suspend(kbdev);
+                if (ret)
+                        goto unlock;
+        }
+
+        /* Disable interrupts and turn off the GPU clocks */
+        if (!kbase_pm_clock_off(kbdev)) {
+                dev_warn(kbdev->dev,
+                         "Failed to turn off GPU clocks on runtime suspend, MMU faults pending");
+
+                WARN_ON(!kbdev->poweroff_pending);
+                /* The previous call to kbase_pm_clock_off() will have
+                 * disabled the interrupts and synchronized with the
+                 * interrupt handlers, so no more fault work items can be
+                 * enqueued.
+                 *
+                 * We can't wait for the completion of the MMU fault work
+                 * items, as there is a possibility of a deadlock: the fault
+                 * work items perform group termination, which requires the
+                 * Scheduler lock.
+                 */
+                ret = -EBUSY;
+                goto unlock;
+        }
+
+        wake_up(&kbdev->pm.backend.poweroff_wait);
+        WARN_ON(kbdev->pm.backend.gpu_powered);
+        dev_dbg(kbdev->dev, "GPU power down complete");
+
+unlock:
+        kbase_pm_unlock(kbdev);
+        kbase_csf_scheduler_unlock(kbdev);
+        kbase_reset_gpu_allow(kbdev);
+out:
+        if (ret) {
+                ret = -EBUSY;
+                pm_runtime_mark_last_busy(kbdev->dev);
+        }
+
+        return ret;
+}
+#endif
```
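The `pm_runtime_mark_last_busy()` call on the abort path assumes the device uses runtime-PM autosuspend, so that a deferred power-off waits out a fresh idle period rather than being retried immediately. A sketch of the probe-time setup that would pair with it (the delay value is purely illustrative):

```c
#include <linux/pm_runtime.h>

/* Hypothetical probe-time setup: with autosuspend enabled, an aborted
 * runtime suspend that calls pm_runtime_mark_last_busy() pushes the
 * next suspend attempt out by the full autosuspend delay.
 */
static void example_setup_runtime_pm(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 100); /* ms, illustrative */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}
```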