// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>

#include "mali_kbase_config_platform.h"

/* Enable the GPU regulators (if CONFIG_REGULATOR) and then the GPU clocks,
 * skipping any supply or clock that is already enabled.
 */
static void enable_gpu_power_control(struct kbase_device *kbdev)
{
	unsigned int i;

#if defined(CONFIG_REGULATOR)
	for (i = 0; i < kbdev->nr_regulators; i++) {
		if (WARN_ON(kbdev->regulators[i] == NULL))
			;
		else if (!regulator_is_enabled(kbdev->regulators[i]))
			WARN_ON(regulator_enable(kbdev->regulators[i]));
	}
#endif

	for (i = 0; i < kbdev->nr_clocks; i++) {
		if (WARN_ON(kbdev->clocks[i] == NULL))
			;
		else if (!__clk_is_enabled(kbdev->clocks[i]))
			WARN_ON(clk_prepare_enable(kbdev->clocks[i]));
	}
}

/* Disable the GPU clocks and then the regulators, the reverse order of
 * enable_gpu_power_control().
 */
static void disable_gpu_power_control(struct kbase_device *kbdev)
{
	unsigned int i;

	for (i = 0; i < kbdev->nr_clocks; i++) {
		if (WARN_ON(kbdev->clocks[i] == NULL))
			;
		else if (__clk_is_enabled(kbdev->clocks[i])) {
			clk_disable_unprepare(kbdev->clocks[i]);
			WARN_ON(__clk_is_enabled(kbdev->clocks[i]));
		}
	}

#if defined(CONFIG_REGULATOR)
	for (i = 0; i < kbdev->nr_regulators; i++) {
		if (WARN_ON(kbdev->regulators[i] == NULL))
			;
		else if (regulator_is_enabled(kbdev->regulators[i]))
			WARN_ON(regulator_disable(kbdev->regulators[i]));
	}
#endif
}

/* Power-on callback. Returns 1 if the GPU state may have been lost while
 * powered down, or 0 if power was never removed and re-initialization can
 * be skipped.
 */
static int pm_callback_power_on(struct kbase_device *kbdev)
{
	int ret = 1; /* Assume GPU has been powered off */
	int error;
	unsigned long flags;

	dev_dbg(kbdev->dev, "%s %pK\n", __func__, (void *)kbdev->dev->pm_domain);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	WARN_ON(kbdev->pm.backend.gpu_powered);
#if MALI_USE_CSF
	if (likely(kbdev->csf.firmware_inited)) {
		WARN_ON(!kbdev->pm.active_count);
		WARN_ON(kbdev->pm.runtime_active);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	enable_gpu_power_control(kbdev);
	CSTD_UNUSED(error);
#else
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

#ifdef KBASE_PM_RUNTIME
	error = pm_runtime_get_sync(kbdev->dev);
	if (error == 1) {
		/*
		 * Let core know that the chip has not been
		 * powered off, so we can save on re-initialization.
		 */
		ret = 0;
	}

	dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d\n", error);
#else
	enable_gpu_power_control(kbdev);
#endif /* KBASE_PM_RUNTIME */
#endif /* MALI_USE_CSF */

	return ret;
}

static void pm_callback_power_off(struct kbase_device *kbdev)
{
	unsigned long flags;

	dev_dbg(kbdev->dev, "%s\n", __func__);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	WARN_ON(kbdev->pm.backend.gpu_powered);
#if MALI_USE_CSF
	if (likely(kbdev->csf.firmware_inited)) {
#ifdef CONFIG_MALI_BIFROST_DEBUG
		WARN_ON(kbase_csf_scheduler_get_nr_active_csgs(kbdev));
#endif
		WARN_ON(kbdev->pm.backend.mcu_state != KBASE_MCU_OFF);
	}
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* Power down the GPU immediately */
	disable_gpu_power_control(kbdev);
#else /* MALI_USE_CSF */
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

#ifdef KBASE_PM_RUNTIME
	pm_runtime_mark_last_busy(kbdev->dev);
	pm_runtime_put_autosuspend(kbdev->dev);
#else
	/* Power down the GPU immediately as runtime PM is disabled */
	disable_gpu_power_control(kbdev);
#endif
#endif /* MALI_USE_CSF */
}

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
static void pm_callback_runtime_gpu_active(struct kbase_device *kbdev)
{
	unsigned long flags;
	int error;

	lockdep_assert_held(&kbdev->pm.lock);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	WARN_ON(!kbdev->pm.backend.gpu_powered);
	WARN_ON(!kbdev->pm.active_count);
	WARN_ON(kbdev->pm.runtime_active);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (pm_runtime_status_suspended(kbdev->dev)) {
		error = pm_runtime_get_sync(kbdev->dev);
		dev_dbg(kbdev->dev, "pm_runtime_get_sync returned %d", error);
	} else {
		/* Call the async version here, otherwise there could be
		 * a deadlock if the runtime suspend operation is ongoing.
		 * Caller would have taken the kbdev->pm.lock and/or the
		 * scheduler lock, and the runtime suspend callback function
		 * will also try to acquire the same lock(s).
		 */
		error = pm_runtime_get(kbdev->dev);
		dev_dbg(kbdev->dev, "pm_runtime_get returned %d", error);
	}

	kbdev->pm.runtime_active = true;
}

static void pm_callback_runtime_gpu_idle(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->pm.lock);

	dev_dbg(kbdev->dev, "%s", __func__);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	WARN_ON(!kbdev->pm.backend.gpu_powered);
	WARN_ON(kbdev->pm.backend.l2_state != KBASE_L2_OFF);
	WARN_ON(kbdev->pm.active_count);
	WARN_ON(!kbdev->pm.runtime_active);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	pm_runtime_mark_last_busy(kbdev->dev);
	pm_runtime_put_autosuspend(kbdev->dev);
	kbdev->pm.runtime_active = false;
}
#endif

#ifdef KBASE_PM_RUNTIME
static int kbase_device_runtime_init(struct kbase_device *kbdev)
{
	int ret = 0;

	dev_dbg(kbdev->dev, "%s\n", __func__);

	pm_runtime_set_autosuspend_delay(kbdev->dev, AUTO_SUSPEND_DELAY);
	pm_runtime_use_autosuspend(kbdev->dev);

	pm_runtime_set_active(kbdev->dev);
	pm_runtime_enable(kbdev->dev);

	if (!pm_runtime_enabled(kbdev->dev)) {
		dev_warn(kbdev->dev, "pm_runtime not enabled");
		ret = -EINVAL;
	} else if (atomic_read(&kbdev->dev->power.usage_count)) {
		dev_warn(kbdev->dev, "%s: Device runtime usage count unexpectedly non zero %d",
			 __func__, atomic_read(&kbdev->dev->power.usage_count));
		ret = -EINVAL;
	}

	return ret;
}

static void kbase_device_runtime_disable(struct kbase_device *kbdev)
{
	dev_dbg(kbdev->dev, "%s\n", __func__);

	if (atomic_read(&kbdev->dev->power.usage_count))
		dev_warn(kbdev->dev, "%s: Device runtime usage count unexpectedly non zero %d",
			 __func__, atomic_read(&kbdev->dev->power.usage_count));

	pm_runtime_disable(kbdev->dev);
}
#endif /* KBASE_PM_RUNTIME */

static int pm_callback_runtime_on(struct kbase_device *kbdev)
{
	dev_dbg(kbdev->dev, "%s\n", __func__);

#if !MALI_USE_CSF
	enable_gpu_power_control(kbdev);
#endif

	return 0;
}

static void pm_callback_runtime_off(struct kbase_device *kbdev)
{
	dev_dbg(kbdev->dev, "%s\n", __func__);

#if !MALI_USE_CSF
	disable_gpu_power_control(kbdev);
#endif
}

static void pm_callback_resume(struct kbase_device *kbdev)
{
	int ret = pm_callback_runtime_on(kbdev);

	WARN_ON(ret);
}

static void pm_callback_suspend(struct kbase_device *kbdev)
{
	pm_callback_runtime_off(kbdev);
}

struct kbase_pm_callback_conf pm_callbacks = {
	.power_on_callback = pm_callback_power_on,
	.power_off_callback = pm_callback_power_off,
	.power_suspend_callback = pm_callback_suspend,
	.power_resume_callback = pm_callback_resume,
#ifdef KBASE_PM_RUNTIME
	.power_runtime_init_callback = kbase_device_runtime_init,
	.power_runtime_term_callback = kbase_device_runtime_disable,
	.power_runtime_on_callback = pm_callback_runtime_on,
	.power_runtime_off_callback = pm_callback_runtime_off,
#else /* KBASE_PM_RUNTIME */
	.power_runtime_init_callback = NULL,
	.power_runtime_term_callback = NULL,
	.power_runtime_on_callback = NULL,
	.power_runtime_off_callback = NULL,
#endif /* KBASE_PM_RUNTIME */

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	.power_runtime_gpu_idle_callback = pm_callback_runtime_gpu_idle,
	.power_runtime_gpu_active_callback = pm_callback_runtime_gpu_active,
#else
	.power_runtime_gpu_idle_callback = NULL,
	.power_runtime_gpu_active_callback = NULL,
#endif
};