From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt

Update mali_kbase_pm_internal.h in the Bifrost GPU driver for the 5.10
(non-RT) kernel: move the @kbdev and @type parameter descriptions ahead
of the function descriptions in the kernel-doc comments, rename
kbase_pm_wait_for_poweroff_complete() to
kbase_pm_wait_for_poweroff_work_complete(), and add new PM helpers (GPU
power down wait, shader core down-scale wait, GPU sleep and doorbell
mirror interrupt control, MCU/L2 desired-state and MMU page migration
checks).

---
 kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_internal.h |  348 +++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 274 insertions(+), 74 deletions(-)
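
Reviewer note (kept above the diff headers, so it is ignored by git am and
not part of the applied change): the new kbase_pm_wait_for_cores_down_scale()
helper added below is meant to be called from platform code before lowering
the GPU voltage. A minimal usage sketch, assuming a hypothetical platform
helper example_set_gpu_voltage() and that the caller has already driven the
MCU state machine for the downscale request via kbase_pm_update_state():

/* Hypothetical platform-side caller; illustration only, not in this patch. */
static int example_lower_gpu_voltage(struct kbase_device *kbdev,
				     unsigned long target_microvolts)
{
	/* Block until the shader core downscale has effectively completed,
	 * as documented for kbase_pm_wait_for_cores_down_scale() below.
	 */
	int ret = kbase_pm_wait_for_cores_down_scale(kbdev);

	/* Per its kernel-doc, a non-zero result is either an error code or
	 * the remaining jiffies on timeout; treat both as a failure here.
	 */
	if (ret)
		return ret < 0 ? ret : -ETIMEDOUT;

	/* Only now is it safe to lower the voltage (platform specific). */
	return example_set_gpu_voltage(kbdev, target_microvolts);
}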

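A second reviewer note (also ignored by git am): the patch renames
kbase_pm_wait_for_poweroff_complete() to
kbase_pm_wait_for_poweroff_work_complete(), which only waits for the
@gpu_poweroff_wait_work item and may return with the GPU still powered,
and adds kbase_pm_wait_for_gpu_power_down(), which waits for the actual
power down. A minimal sketch of a hypothetical platform suspend path that
must not cut the power rail too early (regulator/clock handling is assumed
to live elsewhere):

/* Hypothetical caller; illustration only, not in this patch. */
static void example_platform_prepare_rail_off(struct kbase_device *kbdev)
{
	/* Flush any queued power-off work; the GPU may still be powered
	 * when this returns.
	 */
	kbase_pm_wait_for_poweroff_work_complete(kbdev);

	/* Wait for the GPU to actually power down before the platform code
	 * turns off the power rail.
	 */
	kbase_pm_wait_for_gpu_power_down(kbdev);
}
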
diff --git a/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_internal.h b/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_internal.h
index 9ec5890..e66ce57 100644
--- a/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_internal.h
+++ b/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_pm_internal.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -35,18 +35,18 @@
 /**
  * kbase_pm_dev_idle - The GPU is idle.
  *
- * The OS may choose to turn off idle devices
- *
  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * The OS may choose to turn off idle devices
  */
 void kbase_pm_dev_idle(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_dev_activate - The GPU is active.
  *
- * The OS should avoid opportunistically turning off the GPU while it is active
- *
  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * The OS should avoid opportunistically turning off the GPU while it is active
  */
 void kbase_pm_dev_activate(struct kbase_device *kbdev);
 
@@ -54,13 +54,13 @@
  * kbase_pm_get_present_cores - Get details of the cores that are present in
  *                              the device.
  *
- * This function can be called by the active power policy to return a bitmask of
- * the cores (of a specified type) present in the GPU device and also a count of
- * the number of cores.
- *
  * @kbdev: The kbase device structure for the device (must be a valid
  *         pointer)
  * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
+ * This function can be called by the active power policy to return a bitmask of
+ * the cores (of a specified type) present in the GPU device and also a count of
+ * the number of cores.
  *
  * Return: The bit mask of cores present
  */
@@ -71,12 +71,12 @@
  * kbase_pm_get_active_cores - Get details of the cores that are currently
  *                             active in the device.
  *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
  * This function can be called by the active power policy to return a bitmask of
  * the cores (of a specified type) that are actively processing work (i.e.
  * turned on *and* busy).
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
  *
  * Return: The bit mask of active cores
  */
@@ -87,12 +87,12 @@
  * kbase_pm_get_trans_cores - Get details of the cores that are currently
  *                            transitioning between power states.
  *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
  * This function can be called by the active power policy to return a bitmask of
  * the cores (of a specified type) that are currently transitioning between
  * power states.
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
  *
  * Return: The bit mask of transitioning cores
  */
@@ -103,12 +103,12 @@
  * kbase_pm_get_ready_cores - Get details of the cores that are currently
  *                            powered and ready for jobs.
  *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
+ *
  * This function can be called by the active power policy to return a bitmask of
  * the cores (of a specified type) that are powered and ready for jobs (they may
  * or may not be currently executing jobs).
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- * @type:  The type of core (see the enum kbase_pm_core_type enumeration)
  *
  * Return: The bit mask of ready cores
  */
@@ -119,13 +119,13 @@
  * kbase_pm_clock_on - Turn the clock for the device on, and enable device
  *                     interrupts.
  *
- * This function can be used by a power policy to turn the clock for the GPU on.
- * It should be modified during integration to perform the necessary actions to
- * ensure that the GPU is fully powered and clocked.
- *
  * @kbdev:     The kbase device structure for the device (must be a valid
  *             pointer)
  * @is_resume: true if clock on due to resume after suspend, false otherwise
+ *
+ * This function can be used by a power policy to turn the clock for the GPU on.
+ * It should be modified during integration to perform the necessary actions to
+ * ensure that the GPU is fully powered and clocked.
  */
 void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume);
 
@@ -133,12 +133,16 @@
  * kbase_pm_clock_off - Disable device interrupts, and turn the clock for the
  *                      device off.
  *
+ * @kbdev:      The kbase device structure for the device (must be a valid
+ *              pointer)
+ *
  * This function can be used by a power policy to turn the clock for the GPU
  * off. It should be modified during integration to perform the necessary
  * actions to turn the clock off (if this is possible in the integration).
  *
- * @kbdev:      The kbase device structure for the device (must be a valid
- *              pointer)
+ * If runtime PM is enabled and @power_runtime_gpu_idle_callback is used
+ * then this function would usually be invoked from the runtime suspend
+ * callback function.
  *
  * Return: true  if clock was turned off, or
  *         false if clock can not be turned off due to pending page/bus fault
@@ -149,22 +153,22 @@
 /**
  * kbase_pm_enable_interrupts - Enable interrupts on the device.
  *
- * Interrupts are also enabled after a call to kbase_pm_clock_on().
- *
  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Interrupts are also enabled after a call to kbase_pm_clock_on().
  */
 void kbase_pm_enable_interrupts(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_disable_interrupts - Disable interrupts on the device.
  *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
  * This prevents delivery of Power Management interrupts to the CPU so that
  * kbase_pm_update_state() will not be called from the IRQ handler
  * until kbase_pm_enable_interrupts() or kbase_pm_clock_on() is called.
  *
  * Interrupts are also disabled after a call to kbase_pm_clock_off().
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_disable_interrupts(struct kbase_device *kbdev);
 
@@ -172,9 +176,9 @@
  * kbase_pm_disable_interrupts_nolock - Version of kbase_pm_disable_interrupts()
  *                                      that does not take the hwaccess_lock
  *
- * Caller must hold the hwaccess_lock.
- *
  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Caller must hold the hwaccess_lock.
  */
 void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev);
 
@@ -193,12 +197,11 @@
 
 /**
  * kbase_pm_reset_done - The GPU has been reset successfully.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * This function must be called by the GPU interrupt handler when the
  * RESET_COMPLETED bit is set. It signals to the power management initialization
  * code that the GPU has been successfully reset.
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_reset_done(struct kbase_device *kbdev);
 
@@ -206,6 +209,7 @@
 /**
  * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
  *                                   reached
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Wait for the L2 and MCU state machines to reach the states corresponding
  * to the values of 'kbase_pm_is_l2_desired' and 'kbase_pm_is_mcu_desired'.
@@ -220,8 +224,6 @@
  * power off in progress and kbase_pm_context_active() was called instead of
  * kbase_csf_scheduler_pm_active().
  *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- *
  * Return: 0 on success, error code on error
  */
 int kbase_pm_wait_for_desired_state(struct kbase_device *kbdev);
@@ -229,6 +231,7 @@
 /**
  * kbase_pm_wait_for_desired_state - Wait for the desired power state to be
  *                                   reached
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Wait for the L2 and shader power state machines to reach the states
  * corresponding to the values of 'l2_desired' and 'shaders_desired'.
@@ -242,9 +245,7 @@
  * NOTE: This may not wait until the correct state is reached if there is a
  * power off in progress. To correctly wait for the desired state the caller
  * must ensure that this is not the case by, for example, calling
- * kbase_pm_wait_for_poweroff_complete()
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * kbase_pm_wait_for_poweroff_work_complete()
  *
  * Return: 0 on success, error code on error
  */
@@ -254,6 +255,8 @@
 /**
  * kbase_pm_wait_for_l2_powered - Wait for the L2 cache to be powered on
  *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
  * Wait for the L2 to be powered on, and for the L2 and the state machines of
  * its dependent stack components to stabilise.
  *
@@ -262,23 +265,51 @@
  * Unlike kbase_pm_update_state(), the caller must not hold hwaccess_lock,
  * because this function will take that lock itself.
  *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
- *
  * Return: 0 on success, error code on error
  */
 int kbase_pm_wait_for_l2_powered(struct kbase_device *kbdev);
+
+#if MALI_USE_CSF
+/**
+ * kbase_pm_wait_for_cores_down_scale - Wait for the downscaling of shader cores
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function can be called to ensure that the downscaling of cores is
+ * effectively complete and it is safe to lower the voltage.
+ * The function assumes that the caller has already exercised the MCU state
+ * machine for the downscale request through kbase_pm_update_state().
+ *
+ * The caller must use this function, rather than
+ * kbase_pm_wait_for_desired_state(), to safely wait for the completion of a
+ * downscale request. The downscale request triggers a state change in the
+ * MCU state machine, so once the MCU reaches the stable ON state it can be
+ * inferred that downscaling is complete. However, it has been observed that
+ * the wake up of the waiting thread can be delayed by a few milliseconds,
+ * and by the time the thread wakes up a power down transition may already
+ * have started (after the completion of the downscale request).
+ * On the completion of the power down transition another wake up signal is
+ * sent, but again by the time the thread wakes up a power up transition may
+ * have begun. The power up transition could then get blocked inside the
+ * platform specific callback_power_on() function because of the thread that
+ * called into Kbase (from the platform specific code) to perform the
+ * downscaling and is still waiting for the downscale request to complete.
+ *
+ * Return: 0 on success, error code on error or remaining jiffies on timeout.
+ */
+int kbase_pm_wait_for_cores_down_scale(struct kbase_device *kbdev);
+#endif
 
 /**
  * kbase_pm_update_dynamic_cores_onoff - Update the L2 and shader power state
  *                                       machines after changing shader core
  *                                       availability
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * It can be called in any status, so need to check the l2 and shader core
  * power status in this function or it will break shader/l2 state machine
  *
  * Caller must hold hwaccess_lock
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_update_dynamic_cores_onoff(struct kbase_device *kbdev);
 
@@ -301,6 +332,8 @@
  * kbase_pm_state_machine_init - Initialize the state machines, primarily the
  *                               shader poweroff timer
  * @kbdev: Device pointer
+ *
+ * Return: 0 on success, error code on error
  */
 int kbase_pm_state_machine_init(struct kbase_device *kbdev);
 
@@ -314,22 +347,21 @@
  * kbase_pm_update_cores_state - Update the desired state of shader cores from
  *                               the Power Policy, and begin any power
  *                               transitions.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * This function will update the desired_xx_state members of
  * struct kbase_pm_device_data by calling into the current Power Policy. It will
  * then begin power transitions to make the hardware acheive the desired shader
  * core state.
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_update_cores_state(struct kbase_device *kbdev);
 
 /**
  * kbasep_pm_metrics_init - Initialize the metrics gathering framework.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * This must be called before other metric gathering APIs are called.
  *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Return: 0 on success, error code on error
  */
@@ -337,29 +369,27 @@
 
 /**
  * kbasep_pm_metrics_term - Terminate the metrics gathering framework.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * This must be called when metric gathering is no longer required. It is an
  * error to call any metrics gathering function (other than
  * kbasep_pm_metrics_init()) after calling this function.
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbasep_pm_metrics_term(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_report_vsync - Function to be called by the frame buffer driver to
  *                         update the vsync metric.
+ * @kbdev:          The kbase device structure for the device (must be a
+ *                  valid pointer)
+ * @buffer_updated: True if the buffer has been updated on this VSync,
+ *                  false otherwise
  *
  * This function should be called by the frame buffer driver to update whether
  * the system is hitting the vsync target or not. buffer_updated should be true
  * if the vsync corresponded with a new frame being displayed, otherwise it
  * should be false. This function does not need to be called every vsync, but
  * only when the value of @buffer_updated differs from a previous call.
- *
- * @kbdev:          The kbase device structure for the device (must be a
- *                  valid pointer)
- * @buffer_updated: True if the buffer has been updated on this VSync,
- *                  false otherwise
  */
 void kbase_pm_report_vsync(struct kbase_device *kbdev, int buffer_updated);
 
@@ -377,6 +407,7 @@
 /**
  * kbase_pm_request_gpu_cycle_counter - Mark that the GPU cycle counter is
  *                                      needed
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * If the caller is the first caller then the GPU cycle counters will be enabled
  * along with the l2 cache
@@ -384,13 +415,13 @@
  * The GPU must be powered when calling this function (i.e.
  * kbase_pm_context_active() must have been called).
  *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_request_gpu_cycle_counter(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_request_gpu_cycle_counter_l2_is_on - Mark GPU cycle counter is
  *                                               needed (l2 cache already on)
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * This is a version of the above function
  * (kbase_pm_request_gpu_cycle_counter()) suitable for being called when the
@@ -401,14 +432,13 @@
  * The GPU must be powered when calling this function (i.e.
  * kbase_pm_context_active() must have been called) and the l2 cache must be
  * powered on.
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_release_gpu_cycle_counter - Mark that the GPU cycle counter is no
  *                                      longer in use
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * If the caller is the last caller then the GPU cycle counters will be
  * disabled. A request must have been made before a call to this.
@@ -416,37 +446,48 @@
  * Caller must not hold the hwaccess_lock, as it will be taken in this function.
  * If the caller is already holding this lock then
  * kbase_pm_release_gpu_cycle_counter_nolock() must be used instead.
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_release_gpu_cycle_counter_nolock - Version of kbase_pm_release_gpu_cycle_counter()
  *                                             that does not take hwaccess_lock
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Caller must hold the hwaccess_lock.
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev);
 
 /**
- * kbase_pm_wait_for_poweroff_complete - Wait for the poweroff workqueue to
- *                                       complete
+ * kbase_pm_wait_for_poweroff_work_complete - Wait for the poweroff workqueue to
+ *                                            complete
  *
  * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function effectively just waits for the @gpu_poweroff_wait_work work
+ * item to complete, if it was enqueued. The GPU may not have been powered
+ * down before this function returns.
  */
-void kbase_pm_wait_for_poweroff_complete(struct kbase_device *kbdev);
+void kbase_pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_wait_for_gpu_power_down - Wait for the GPU power down to complete
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This function waits for the actual GPU power down to complete.
+ */
+void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_runtime_init - Initialize runtime-pm for Mali GPU platform device
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Setup the power management callbacks and initialize/enable the runtime-pm
  * for the Mali GPU platform device, using the callback function. This must be
  * called before the kbase_pm_register_access_enable() function.
  *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ * Return: 0 on success, error code on error
  */
 int kbase_pm_runtime_init(struct kbase_device *kbdev);
 
@@ -459,6 +500,7 @@
 
 /**
  * kbase_pm_register_access_enable - Enable access to GPU registers
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Enables access to the GPU registers before power management has powered up
  * the GPU with kbase_pm_powerup().
@@ -469,13 +511,12 @@
  *
  * This should only be used before power management is powered up with
  * kbase_pm_powerup()
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_register_access_enable(struct kbase_device *kbdev);
 
 /**
  * kbase_pm_register_access_disable - Disable early register access
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Disables access to the GPU registers enabled earlier by a call to
  * kbase_pm_register_access_enable().
@@ -486,8 +527,6 @@
  *
  * This should only be used before power management is powered up with
  * kbase_pm_powerup()
- *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  */
 void kbase_pm_register_access_disable(struct kbase_device *kbdev);
 
@@ -498,6 +537,7 @@
 /**
  * kbase_pm_metrics_is_active - Check if the power management metrics
  *                              collection is active.
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
  *
  * Note that this returns if the power management metrics collection was
  * active at the time of calling, it is possible that after the call the metrics
@@ -505,7 +545,6 @@
  *
  * The caller must handle the consequence that the state may have changed.
  *
- * @kbdev: The kbase device structure for the device (must be a valid pointer)
  * Return: true if metrics collection was active else false.
  */
 bool kbase_pm_metrics_is_active(struct kbase_device *kbdev);
@@ -541,12 +580,13 @@
 /**
  * kbase_platform_dvfs_event - Report utilisation to DVFS code for CSF GPU
  *
- * Function provided by platform specific code when DVFS is enabled to allow
- * the power management metrics system to report utilisation.
- *
  * @kbdev:         The kbase device structure for the device (must be a
  *                 valid pointer)
  * @utilisation:   The current calculated utilisation by the metrics system.
+ *
+ * Function provided by platform specific code when DVFS is enabled to allow
+ * the power management metrics system to report utilisation.
+ *
  * Return:         Returns 0 on failure and non zero on success.
  */
 int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation);
@@ -554,15 +594,15 @@
 /**
  * kbase_platform_dvfs_event - Report utilisation to DVFS code for JM GPU
  *
- * Function provided by platform specific code when DVFS is enabled to allow
- * the power management metrics system to report utilisation.
- *
  * @kbdev:         The kbase device structure for the device (must be a
  *                 valid pointer)
  * @utilisation:   The current calculated utilisation by the metrics system.
  * @util_gl_share: The current calculated gl share of utilisation.
  * @util_cl_share: The current calculated cl share of utilisation per core
  *                 group.
+ * Function provided by platform specific code when DVFS is enabled to allow
+ * the power management metrics system to report utilisation.
+ *
  * Return:         Returns 0 on failure and non zero on success.
  */
 int kbase_platform_dvfs_event(struct kbase_device *kbdev, u32 utilisation,
@@ -635,6 +675,7 @@
  */
 void kbase_pm_reset_complete(struct kbase_device *kbdev);
 
+#if !MALI_USE_CSF
 /**
  * kbase_pm_protected_override_enable - Enable the protected mode override
  * @kbdev: Device pointer
@@ -707,6 +748,7 @@
  * to enter protected mode.
  */
 void kbase_pm_protected_entry_override_disable(struct kbase_device *kbdev);
+#endif
 
 /* If true, the driver should explicitly control corestack power management,
  * instead of relying on the Power Domain Controller.
@@ -735,6 +777,21 @@
  * Return: true if MCU needs to be enabled.
  */
 bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev);
+
+/**
+ * kbase_pm_is_mcu_inactive - Check if the MCU is inactive (i.e. either
+ *                            it is disabled or it is in sleep)
+ *
+ * @kbdev: kbase device
+ * @state: state of the MCU state machine.
+ *
+ * This function must be called with hwaccess_lock held.
+ * L2 cache can be turned off if this function returns true.
+ *
+ * Return: true if MCU is inactive
+ */
+bool kbase_pm_is_mcu_inactive(struct kbase_device *kbdev,
+			      enum kbase_mcu_state state);
 
 /**
  * kbase_pm_idle_groups_sched_suspendable - Check whether the scheduler can be
@@ -774,7 +831,7 @@
 
 /**
  * kbase_pm_no_mcu_core_pwroff - Check whether the PM is required to keep the
- *                               MCU core powered in accordance to the active
+ *                               MCU shader core powered in accordance with the active
  *                               power management policy
  *
  * @kbdev: Device pointer
@@ -788,7 +845,48 @@
 	return kbdev->pm.backend.csf_pm_sched_flags &
 		CSF_DYNAMIC_PM_CORE_KEEP_ON;
 }
+
+/**
+ * kbase_pm_mcu_is_in_desired_state - Check if MCU is in stable ON/OFF state.
+ *
+ * @kbdev: Device pointer
+ *
+ * Return: true if MCU is in stable ON/OFF state.
+ */
+static inline bool kbase_pm_mcu_is_in_desired_state(struct kbase_device *kbdev)
+{
+	bool in_desired_state = true;
+
+	if (kbase_pm_is_mcu_desired(kbdev) && kbdev->pm.backend.mcu_state != KBASE_MCU_ON)
+		in_desired_state = false;
+	else if (!kbase_pm_is_mcu_desired(kbdev) &&
+		 (kbdev->pm.backend.mcu_state != KBASE_MCU_OFF) &&
+		 (kbdev->pm.backend.mcu_state != KBASE_MCU_IN_SLEEP))
+		in_desired_state = false;
+
+	return in_desired_state;
+}
+
 #endif
+
+/**
+ * kbase_pm_l2_is_in_desired_state - Check if L2 is in stable ON/OFF state.
+ *
+ * @kbdev: Device pointer
+ *
+ * Return: true if L2 is in stable ON/OFF state.
+ */
+static inline bool kbase_pm_l2_is_in_desired_state(struct kbase_device *kbdev)
+{
+	bool in_desired_state = true;
+
+	if (kbase_pm_is_l2_desired(kbdev) && kbdev->pm.backend.l2_state != KBASE_L2_ON)
+		in_desired_state = false;
+	else if (!kbase_pm_is_l2_desired(kbdev) && kbdev->pm.backend.l2_state != KBASE_L2_OFF)
+		in_desired_state = false;
+
+	return in_desired_state;
+}
 
 /**
  * kbase_pm_lock - Lock all necessary mutexes to perform PM actions
@@ -818,4 +916,106 @@
 #endif /* !MALI_USE_CSF */
 }
 
+#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
+/**
+ * kbase_pm_gpu_sleep_allowed - Check if the GPU is allowed to be put in sleep
+ *
+ * @kbdev: Device pointer
+ *
+ * This function is called on GPU idle notification and if it returns false then
+ * GPU power down will be triggered by suspending the CSGs and halting the MCU.
+ *
+ * Return: true if the GPU is allowed to be in the sleep state.
+ */
+static inline bool kbase_pm_gpu_sleep_allowed(struct kbase_device *kbdev)
+{
+	/* If the autosuspend_delay has been set to 0 then it doesn't make
+	 * sense to first put the GPU into the sleep state and then power it
+	 * down; it would be better to power it down right away.
+	 * The same applies when autosuspend_delay is set to a negative
+	 * value, which implies that runtime PM is effectively disabled by
+	 * the kernel.
+	 * A high positive value of autosuspend_delay can be used to keep the
+	 * GPU in the sleep state for a long time.
+	 */
+	if (unlikely(!kbdev->dev->power.autosuspend_delay ||
+		     (kbdev->dev->power.autosuspend_delay < 0)))
+		return false;
+
+	return kbdev->pm.backend.gpu_sleep_supported;
+}
+
+/**
+ * kbase_pm_enable_db_mirror_interrupt - Enable the doorbell mirror interrupt to
+ *                                       detect the User doorbell rings.
+ *
+ * @kbdev: Device pointer
+ *
+ * This function is called just before sending the sleep request to MCU firmware
+ * so that User doorbell rings can be detected whilst GPU remains in the sleep
+ * state.
+ *
+ */
+static inline void kbase_pm_enable_db_mirror_interrupt(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (!kbdev->pm.backend.db_mirror_interrupt_enabled) {
+		u32 irq_mask = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(GPU_IRQ_MASK));
+
+		WARN_ON(irq_mask & DOORBELL_MIRROR);
+
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask | DOORBELL_MIRROR);
+		kbdev->pm.backend.db_mirror_interrupt_enabled = true;
+	}
+}
+
+/**
+ * kbase_pm_disable_db_mirror_interrupt - Disable the doorbell mirror interrupt.
+ *
+ * @kbdev: Device pointer
+ *
+ * This function is called when doorbell mirror interrupt is received or MCU
+ * needs to be reactivated by enabling the doorbell notification.
+ */
+static inline void kbase_pm_disable_db_mirror_interrupt(struct kbase_device *kbdev)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	if (kbdev->pm.backend.db_mirror_interrupt_enabled) {
+		u32 irq_mask = kbase_reg_read(kbdev,
+				GPU_CONTROL_REG(GPU_IRQ_MASK));
+
+		kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+				irq_mask & ~DOORBELL_MIRROR);
+		kbdev->pm.backend.db_mirror_interrupt_enabled = false;
+	}
+}
+#endif
+
+/**
+ * kbase_pm_l2_allow_mmu_page_migration - L2 state allows MMU page migration or not
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Check whether the L2 state is in power transition phase or not. If it is, the MMU
+ * page migration should be deferred. The caller must hold hwaccess_lock, and, if MMU
+ * page migration is intended, immediately start the MMU migration action without
+ * dropping the lock. When page migration begins, a flag is set in kbdev that would
+ * prevent the L2 state machine traversing into power transition phases, until
+ * the MMU migration action ends.
+ *
+ * Return: true if MMU page migration is allowed
+ */
+static inline bool kbase_pm_l2_allow_mmu_page_migration(struct kbase_device *kbdev)
+{
+	struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	return (backend->l2_state != KBASE_L2_PEND_ON && backend->l2_state != KBASE_L2_PEND_OFF);
+}
+
 #endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */

--
Gitblit v1.6.2