From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt: mali bifrost: add runtime PM and GPU sleep support to the PM backend

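Update the Mali Bifrost GPU driver power management backend:

- Register the new power_runtime_gpu_idle/power_runtime_gpu_active
  callbacks and advertise GPU sleep support for CSF GPUs.
- Factor the power off handling out of kbase_pm_gpu_poweroff_wait_wq()
  into pm_handle_power_off().
- Add kbase_pm_do_poweroff_sync() to power down the GPU synchronously
  on system suspend and device unload.
- Add kbase_pm_handle_runtime_suspend() and the handling of the MCU
  in-sleep state for the runtime suspend path.
- Rename kbase_pm_wait_for_poweroff_complete() to
  kbase_pm_wait_for_poweroff_work_complete() and add
  kbase_pm_wait_for_gpu_power_down().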
---
 kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_backend.c |  510 +++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 446 insertions(+), 64 deletions(-)

diff --git a/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_backend.c b/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_backend.c
index 38baa0d..5c71fdf 100644
--- a/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_backend.c
+++ b/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_backend.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -32,8 +32,11 @@
 #include <mali_kbase_hwaccess_jm.h>
 #include <backend/gpu/mali_kbase_js_internal.h>
 #include <backend/gpu/mali_kbase_jm_internal.h>
+#else
+#include <linux/pm_runtime.h>
+#include <mali_kbase_reset_gpu.h>
 #endif /* !MALI_USE_CSF */
-#include <mali_kbase_hwcnt_context.h>
+#include <hwcnt/mali_kbase_hwcnt_context.h>
 #include <backend/gpu/mali_kbase_pm_internal.h>
 #include <backend/gpu/mali_kbase_devfreq.h>
 #include <mali_kbase_dummy_job_wa.h>
@@ -69,6 +72,10 @@
 					callbacks->power_runtime_idle_callback;
 		kbdev->pm.backend.callback_soft_reset =
 					callbacks->soft_reset_callback;
+		kbdev->pm.backend.callback_power_runtime_gpu_idle =
+					callbacks->power_runtime_gpu_idle_callback;
+		kbdev->pm.backend.callback_power_runtime_gpu_active =
+					callbacks->power_runtime_gpu_active_callback;
 
 		if (callbacks->power_runtime_init_callback)
 			return callbacks->power_runtime_init_callback(kbdev);
@@ -86,15 +93,16 @@
 	kbdev->pm.backend.callback_power_runtime_off = NULL;
 	kbdev->pm.backend.callback_power_runtime_idle = NULL;
 	kbdev->pm.backend.callback_soft_reset = NULL;
+	kbdev->pm.backend.callback_power_runtime_gpu_idle = NULL;
+	kbdev->pm.backend.callback_power_runtime_gpu_active = NULL;
 
 	return 0;
 }
 
 void kbase_pm_runtime_term(struct kbase_device *kbdev)
 {
-	if (kbdev->pm.callback_power_runtime_term) {
+	if (kbdev->pm.callback_power_runtime_term)
 		kbdev->pm.callback_power_runtime_term(kbdev);
-	}
 }
 
 void kbase_pm_register_access_enable(struct kbase_device *kbdev)
@@ -120,10 +128,10 @@
 
 	callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
 
+	kbdev->pm.backend.gpu_powered = false;
+
 	if (callbacks)
 		callbacks->power_off_callback(kbdev);
-
-	kbdev->pm.backend.gpu_powered = false;
 }
 
 int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
@@ -192,6 +200,14 @@
 	INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
 		kbase_pm_hwcnt_disable_worker);
 	kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+	kbdev->pm.backend.gpu_sleep_supported =
+		kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_GPU_SLEEP) &&
+		!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TURSEHW_1997) &&
+		kbdev->pm.backend.callback_power_runtime_gpu_active &&
+		kbdev->pm.backend.callback_power_runtime_gpu_idle;
+#endif
 
 	if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
 		kbdev->pm.backend.l2_always_on = false;
@@ -263,6 +279,76 @@
 	 */
 }
 
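+/**
+ * pm_handle_power_off - Complete the power off of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function is called from the poweroff wait work item to complete the
+ * power down of the GPU. If the gpu_idle callback is registered, it only
+ * invokes that callback and leaves the actual clock off to the runtime
+ * suspend path; otherwise it disables interrupts and turns the clock off,
+ * handling any pending page/bus faults in the process.
+ */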
+static void pm_handle_power_off(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+#if MALI_USE_CSF
+	enum kbase_mcu_state mcu_state;
+#endif
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	if (backend->poweron_required)
+		return;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+	if (kbdev->pm.backend.gpu_wakeup_override) {
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return;
+	}
+#endif
+	WARN_ON(backend->shaders_state !=
+			KBASE_SHADERS_OFF_CORESTACK_OFF ||
+		backend->l2_state != KBASE_L2_OFF);
+#if MALI_USE_CSF
+	mcu_state = backend->mcu_state;
+	WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
+#endif
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+	if (backend->callback_power_runtime_gpu_idle) {
+		WARN_ON(backend->gpu_idled);
+		backend->callback_power_runtime_gpu_idle(kbdev);
+		backend->gpu_idled = true;
+		return;
+	}
+#endif
+
+	/* Disable interrupts and turn the clock off */
+	if (!kbase_pm_clock_off(kbdev)) {
+		/*
+		 * Page/bus faults are pending, must drop locks to
+		 * process.  Interrupts are disabled so no more faults
+		 * should be generated at this point.
+		 */
+		kbase_pm_unlock(kbdev);
+		kbase_flush_mmu_wqs(kbdev);
+		kbase_pm_lock(kbdev);
+
+#ifdef CONFIG_MALI_ARBITER_SUPPORT
+		/* poweron_required may have changed while pm lock
+		 * was released.
+		 */
+		if (kbase_pm_is_gpu_lost(kbdev))
+			backend->poweron_required = false;
+#endif
+
+		/* Turn off clock now that faults have been handled. We
+		 * dropped locks so poweron_required may have changed -
+		 * power back on if this is the case (effectively only
+		 * re-enabling of the interrupts would be done in this
+		 * case, as the clocks to GPU were not withdrawn yet).
+		 */
+		if (backend->poweron_required)
+			kbase_pm_clock_on(kbdev, false);
+		else
+			WARN_ON(!kbase_pm_clock_off(kbdev));
+	}
+}
+
 static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
 {
 	struct kbase_device *kbdev = container_of(data, struct kbase_device,
@@ -270,6 +356,8 @@
 	struct kbase_pm_device_data *pm = &kbdev->pm;
 	struct kbase_pm_backend_data *backend = &pm->backend;
 	unsigned long flags;
+
+	KBASE_KTRACE_ADD(kbdev, PM_POWEROFF_WAIT_WQ, NULL, 0);
 
 #if !MALI_USE_CSF
 	/* Wait for power transitions to complete. We do this with no locks held
@@ -280,51 +368,7 @@
 
 	kbase_pm_lock(kbdev);
 
-#ifdef CONFIG_MALI_ARBITER_SUPPORT
-	if (kbase_pm_is_gpu_lost(kbdev))
-		backend->poweron_required = false;
-#endif
-
-	if (!backend->poweron_required) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
-		WARN_ON(backend->shaders_state !=
-					KBASE_SHADERS_OFF_CORESTACK_OFF ||
-			backend->l2_state != KBASE_L2_OFF);
-		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
-		/* Disable interrupts and turn the clock off */
-		if (!kbase_pm_clock_off(kbdev)) {
-			/*
-			 * Page/bus faults are pending, must drop locks to
-			 * process.  Interrupts are disabled so no more faults
-			 * should be generated at this point.
-			 */
-			kbase_pm_unlock(kbdev);
-			kbase_flush_mmu_wqs(kbdev);
-			kbase_pm_lock(kbdev);
-
-#ifdef CONFIG_MALI_ARBITER_SUPPORT
-			/* poweron_required may have changed while pm lock
-			 * was released.
-			 */
-			if (kbase_pm_is_gpu_lost(kbdev))
-				backend->poweron_required = false;
-#endif
-
-			/* Turn off clock now that fault have been handled. We
-			 * dropped locks so poweron_required may have changed -
-			 * power back on if this is the case (effectively only
-			 * re-enabling of the interrupts would be done in this
-			 * case, as the clocks to GPU were not withdrawn yet).
-			 */
-			if (backend->poweron_required)
-				kbase_pm_clock_on(kbdev, false);
-			else
-				WARN_ON(!kbase_pm_clock_off(kbdev));
-		}
-	}
+	pm_handle_power_off(kbdev);
 
 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 	backend->poweroff_wait_in_progress = false;
@@ -378,8 +422,7 @@
 		return;
 
 	/* Stop the metrics gathering framework */
-	if (kbase_pm_metrics_is_active(kbdev))
-		kbase_pm_metrics_stop(kbdev);
+	kbase_pm_metrics_stop(kbdev);
 
 	/* Keep the current freq to restore it upon resume */
 	kbdev->previous_frequency = clk_get_rate(clk);
@@ -512,6 +555,78 @@
 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 }
 
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+/**
+ * kbase_pm_do_poweroff_sync - Do the synchronous power down of GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function is called at the time of system suspend or device unload
+ * to power down the GPU synchronously. This is needed as the power down of GPU
+ * would usually happen from the runtime suspend callback function (if gpu_active
+ * and gpu_idle callbacks are used) and runtime suspend operation is disabled
+ * when system suspend takes place.
+ * The function first waits for the @gpu_poweroff_wait_work to complete, which
+ * could have been enqueued after the last PM reference was released.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */
+static int kbase_pm_do_poweroff_sync(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+	unsigned long flags;
+	int ret = 0;
+
+	WARN_ON(kbdev->pm.active_count);
+
+	kbase_pm_wait_for_poweroff_work_complete(kbdev);
+
+	kbase_pm_lock(kbdev);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	WARN_ON(backend->poweroff_wait_in_progress);
+	WARN_ON(backend->gpu_sleep_mode_active);
+	if (backend->gpu_powered) {
+		backend->mcu_desired = false;
+		backend->l2_desired = false;
+		kbase_pm_update_state(kbdev);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+		ret = kbase_pm_wait_for_desired_state(kbdev);
+		if (ret) {
+			dev_warn(
+				kbdev->dev,
+				"Wait for pm state change failed on synchronous power off");
+			ret = -EBUSY;
+			goto out;
+		}
+
+		/* Due to the power policy, the GPU could have been kept active
+		 * throughout, so the idle callback needs to be invoked before
+		 * the power down.
+		 */
+		if (backend->callback_power_runtime_gpu_idle &&
+		    !backend->gpu_idled) {
+			backend->callback_power_runtime_gpu_idle(kbdev);
+			backend->gpu_idled = true;
+		}
+
+		if (!kbase_pm_clock_off(kbdev)) {
+			dev_warn(
+				kbdev->dev,
+				"Failed to turn off GPU clocks on synchronous power off, MMU faults pending");
+			ret = -EBUSY;
+		}
+	} else {
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+
+out:
+	kbase_pm_unlock(kbdev);
+	return ret;
+}
+#endif
+
 void kbase_pm_do_poweroff(struct kbase_device *kbdev)
 {
 	unsigned long flags;
@@ -561,12 +676,38 @@
 	return ret;
 }
 
-void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev)
+void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev)
 {
 	wait_event_killable(kbdev->pm.backend.poweroff_wait,
 			is_poweroff_in_progress(kbdev));
 }
-KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_complete);
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_poweroff_work_complete);
+
+/**
+ * is_gpu_powered_down - Check whether GPU is powered down
+ *
+ * @kbdev: kbase device
+ *
+ * Return: true if GPU is powered down, false otherwise
+ */
+static bool is_gpu_powered_down(struct kbase_device *kbdev)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	ret = !kbdev->pm.backend.gpu_powered;
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return ret;
+}
+
+void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev)
+{
+	wait_event_killable(kbdev->pm.backend.poweroff_wait,
+			is_gpu_powered_down(kbdev));
+}
+KBASE_EXPORT_TEST_API(kbase_pm_wait_for_gpu_power_down);
 
 int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
 		unsigned int flags)
@@ -612,6 +753,15 @@
 	 * cores off
 	 */
 	kbdev->pm.active_count = 1;
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+	if (kbdev->pm.backend.callback_power_runtime_gpu_active) {
+		/* Take the RPM reference count to match the internal
+		 * PM reference count.
+		 */
+		kbdev->pm.backend.callback_power_runtime_gpu_active(kbdev);
+		WARN_ON(kbdev->pm.backend.gpu_idled);
+	}
+#endif
 
 	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
 								irq_flags);
@@ -653,11 +803,15 @@
 {
 	KBASE_DEBUG_ASSERT(kbdev != NULL);
 
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+	WARN_ON(kbase_pm_do_poweroff_sync(kbdev));
+#else
 	mutex_lock(&kbdev->pm.lock);
 	kbase_pm_do_poweroff(kbdev);
 	mutex_unlock(&kbdev->pm.lock);
 
-	kbase_pm_wait_for_poweroff_complete(kbdev);
+	kbase_pm_wait_for_poweroff_work_complete(kbdev);
+#endif
 }
 
 KBASE_EXPORT_TEST_API(kbase_hwaccess_pm_halt);
@@ -710,7 +864,7 @@
 	kbase_pm_update_state(kbdev);
 
 #if !MALI_USE_CSF
-		kbase_backend_slot_update(kbdev);
+	kbase_backend_slot_update(kbdev);
 #endif /* !MALI_USE_CSF */
 
 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -735,7 +889,7 @@
 	lockdep_assert_held(&kbdev->pm.lock);
 
 	if (kbase_dummy_job_wa_enabled(kbdev)) {
-		dev_warn(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
+		dev_warn_once(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
 		new_core_mask_js0 = kbdev->pm.debug_core_mask[0];
 	}
 
@@ -759,8 +913,15 @@
 	kbase_pm_update_active(kbdev);
 }
 
-void kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
+int kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
 {
+	int ret = 0;
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+	ret = kbase_pm_do_poweroff_sync(kbdev);
+	if (ret)
+		return ret;
+#else
 	/* Force power off the GPU and all cores (regardless of policy), only
 	 * after the PM active count reaches zero (otherwise, we risk turning it
 	 * off prematurely)
@@ -775,10 +936,16 @@
 
 	kbase_pm_unlock(kbdev);
 
-	kbase_pm_wait_for_poweroff_complete(kbdev);
+	kbase_pm_wait_for_poweroff_work_complete(kbdev);
+#endif
+
+	WARN_ON(kbdev->pm.backend.gpu_powered);
+	WARN_ON(atomic_read(&kbdev->faults_pending));
 
 	if (kbdev->pm.backend.callback_power_suspend)
 		kbdev->pm.backend.callback_power_suspend(kbdev);
+
+	return ret;
 }
 
 void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
@@ -807,7 +974,7 @@
 void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
 {
 	unsigned long flags;
-	ktime_t end_timestamp = ktime_get();
+	ktime_t end_timestamp = ktime_get_raw();
 	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
 
 	if (!kbdev->arb.arb_if)
@@ -844,9 +1011,12 @@
 
 		/* Cancel any pending HWC dumps */
 		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
-		kbdev->hwcnt.backend.triggered = 1;
-		wake_up(&kbdev->hwcnt.backend.wait);
+		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING ||
+				kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+			kbdev->hwcnt.backend.triggered = 1;
+			wake_up(&kbdev->hwcnt.backend.wait);
+		}
 		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 	}
 	mutex_unlock(&arb_vm_state->vm_state_lock);
@@ -854,3 +1024,215 @@
 }
 
 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
+
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
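+/**
+ * kbase_pm_force_mcu_wakeup_after_sleep - Force the wake up of the MCU from sleep
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function sets the gpu_wakeup_override flag to force the power up of
+ * the L2 cache (and with it the MCU), then waits for the power state
+ * transition to complete.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */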
+int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->pm.lock);
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	/* Set the override flag to force the power up of L2 cache */
+	kbdev->pm.backend.gpu_wakeup_override = true;
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	return kbase_pm_wait_for_desired_state(kbdev);
+}
+
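+/**
+ * pm_handle_mcu_sleep_on_runtime_suspend - Handle the MCU in-sleep state on
+ *                                          runtime suspend
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function wakes up the MCU to suspend the active CSGs before the GPU
+ * is powered down on runtime suspend. The kbdev->pm.lock is released whilst
+ * the Scheduler suspends the CSGs, so the function re-checks for the device
+ * becoming active again before completing the power down of the L2 cache.
+ *
+ * Return: 0 on success, negative value otherwise.
+ */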
+static int pm_handle_mcu_sleep_on_runtime_suspend(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+	int ret;
+
+	lockdep_assert_held(&kbdev->csf.scheduler.lock);
+	lockdep_assert_held(&kbdev->pm.lock);
+
+#ifdef CONFIG_MALI_BIFROST_DEBUG
+	/* If there is no active CSG on a slot, powering up the L2 could be
+	 * skipped and the GPU suspended directly.
+	 * ToDo: the firmware would have to be reloaded after wake-up, as no
+	 * halt command was sent when the GPU was put into sleep mode.
+	 */
+	if (!kbase_csf_scheduler_get_nr_active_csgs(kbdev))
+		dev_info(
+			kbdev->dev,
+			"No active CSGs. Can skip the power up of L2 and go for suspension directly");
+#endif
+
+	ret = kbase_pm_force_mcu_wakeup_after_sleep(kbdev);
+	if (ret) {
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		dev_warn(
+			kbdev->dev,
+			"Waiting for MCU to wake up failed on runtime suspend");
+		kbdev->pm.backend.gpu_wakeup_override = false;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return ret;
+	}
+
+	/* Check if a Doorbell mirror interrupt occurred meanwhile */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (kbdev->pm.backend.gpu_sleep_mode_active &&
+	    kbdev->pm.backend.exit_gpu_sleep_mode) {
+		dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend after L2 power up");
+		kbdev->pm.backend.gpu_wakeup_override = false;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	/* Need to release kbdev->pm.lock to avoid a lock ordering issue
+	 * with kctx->reg.lock, which is taken if the sync wait condition is
+	 * evaluated after the CSG suspend operation.
+	 */
+	kbase_pm_unlock(kbdev);
+	ret = kbase_csf_scheduler_handle_runtime_suspend(kbdev);
+	kbase_pm_lock(kbdev);
+
+	/* Power down L2 cache */
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	kbdev->pm.backend.gpu_wakeup_override = false;
+	kbase_pm_update_state(kbdev);
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	/* After re-acquiring the kbdev->pm.lock, check if the device
+	 * became active (or active then idle) meanwhile.
+	 */
+	if (kbdev->pm.active_count ||
+	    kbdev->pm.backend.poweroff_wait_in_progress) {
+		dev_dbg(kbdev->dev,
+			"Device became active on runtime suspend after suspending Scheduler");
+		ret = -EBUSY;
+	}
+
+	if (ret)
+		return ret;
+
+	ret = kbase_pm_wait_for_desired_state(kbdev);
+	if (ret)
+		dev_warn(kbdev->dev, "Wait for power down failed on runtime suspend");
+
+	return ret;
+}
+
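+/**
+ * kbase_pm_handle_runtime_suspend - Handle the runtime suspend of the GPU
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function is called from the platform's runtime suspend callback to
+ * power down the GPU, waking up the MCU first if it was left in sleep mode.
+ *
+ * Return: 0 on success, or -EBUSY if the power down could not be completed
+ * and the runtime suspend needs to be aborted (e.g. the GPU became active
+ * again, a Doorbell mirror interrupt occurred, or MMU faults are pending).
+ */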
+int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev)
+{
+	enum kbase_mcu_state mcu_state;
+	bool exit_early = false;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	/* This check is needed for the case where Kbase had invoked the
+	 * @power_off_callback directly.
+	 */
+	if (!kbdev->pm.backend.gpu_powered) {
+		dev_dbg(kbdev->dev, "GPU already powered down on runtime suspend");
+		exit_early = true;
+	}
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (exit_early)
+		goto out;
+
+	ret = kbase_reset_gpu_try_prevent(kbdev);
+	if (ret == -ENOMEM) {
+		dev_dbg(kbdev->dev, "Quit runtime suspend as GPU is in bad state");
+		/* Finish the runtime suspend, no point in trying again as GPU is
+		 * in irrecoverable bad state.
+		 */
+		goto out;
+	} else if (ret) {
+		dev_dbg(kbdev->dev, "Quit runtime suspend for failing to prevent gpu reset");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	kbase_csf_scheduler_lock(kbdev);
+	kbase_pm_lock(kbdev);
+
+	/*
+	 * This handles the case where the GPU device becomes active and idle
+	 * again very quickly whilst the runtime suspend callback is executing,
+	 * as in the following scenario:
+	 * - GPU goes idle and pm_callback_runtime_gpu_idle() is called.
+	 * - Auto-suspend timer expires and kbase_device_runtime_suspend()
+	 *   is called.
+	 * - GPU becomes active and pm_callback_runtime_gpu_active() calls
+	 *   pm_runtime_get().
+	 * - Shortly after that GPU becomes idle again.
+	 * - kbase_pm_handle_runtime_suspend() gets called.
+	 * - pm_callback_runtime_gpu_idle() is called.
+	 *
+	 * We do not want to power down the GPU immediately after it goes idle.
+	 * So if we notice that GPU had become active when the runtime suspend
+	 * had already kicked in, we abort the runtime suspend.
+	 * By aborting the runtime suspend, we defer the power down of GPU.
+	 *
+	 * This check also helps prevent warnings regarding the L2 and MCU
+	 * states inside the pm_handle_power_off() function. The warnings stem
+	 * from the fact that pm.lock is released before invoking the Scheduler
+	 * function to suspend the CSGs.
+	 */
+	if (kbdev->pm.active_count ||
+	    kbdev->pm.backend.poweroff_wait_in_progress) {
+		dev_dbg(kbdev->dev, "Device became active on runtime suspend");
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+	if (kbdev->pm.backend.gpu_sleep_mode_active &&
+	    kbdev->pm.backend.exit_gpu_sleep_mode) {
+		dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend before L2 power up");
+		ret = -EBUSY;
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		goto unlock;
+	}
+
+	mcu_state = kbdev->pm.backend.mcu_state;
+	WARN_ON(!kbase_pm_is_mcu_inactive(kbdev, mcu_state));
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+	if (mcu_state == KBASE_MCU_IN_SLEEP) {
+		ret = pm_handle_mcu_sleep_on_runtime_suspend(kbdev);
+		if (ret)
+			goto unlock;
+	}
+
+	/* Disable interrupts and turn off the GPU clocks */
+	if (!kbase_pm_clock_off(kbdev)) {
+		dev_warn(kbdev->dev, "Failed to turn off GPU clocks on runtime suspend, MMU faults pending");
+
+		WARN_ON(!kbdev->poweroff_pending);
+		/* Previous call to kbase_pm_clock_off() would have disabled
+		 * the interrupts and also synchronized with the interrupt
+		 * handlers, so more fault work items can't be enqueued.
+		 *
+		 * Can't wait for the completion of MMU fault work items as
+		 * there is a possibility of a deadlock since the fault work
+		 * items would do the group termination which requires the
+		 * Scheduler lock.
+		 */
+		ret = -EBUSY;
+		goto unlock;
+	}
+
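+	/* Wake up any threads waiting for the completion of the GPU power
+	 * down, in kbase_pm_wait_for_gpu_power_down().
+	 */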
+	wake_up(&kbdev->pm.backend.poweroff_wait);
+	WARN_ON(kbdev->pm.backend.gpu_powered);
+	dev_dbg(kbdev->dev, "GPU power down complete");
+
+unlock:
+	kbase_pm_unlock(kbdev);
+	kbase_csf_scheduler_unlock(kbdev);
+	kbase_reset_gpu_allow(kbdev);
+out:
+	if (ret) {
+		ret = -EBUSY;
+		pm_runtime_mark_last_busy(kbdev->dev);
+	}
+
+	return ret;
+}
+#endif

--
Gitblit v1.6.2