From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB

---
 kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_scheduler.h |  217 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 202 insertions(+), 15 deletions(-)

diff --git a/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_scheduler.h b/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_scheduler.h
index 428ecbe..d22d7c8 100644
--- a/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_scheduler.h
+++ b/kernel/drivers/gpu/arm/bifrost/csf/mali_kbase_csf_scheduler.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -23,6 +23,7 @@
 #define _KBASE_CSF_SCHEDULER_H_
 
 #include "mali_kbase_csf.h"
+#include "mali_kbase_csf_event.h"
 
 /**
  * kbase_csf_scheduler_queue_start() - Enable the running of GPU command queue
@@ -35,7 +36,9 @@
  * If the CSG is already scheduled and resident, the CSI will be started
  * right away, otherwise once the group is made resident.
  *
- * Return: 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure. -EBUSY is returned to
+ * indicate that the queue could not be enabled due to the Scheduler's
+ * state and that the caller can retry enabling the queue after some time.
  */
 int kbase_csf_scheduler_queue_start(struct kbase_queue *queue);
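
A minimal sketch of the -EBUSY contract documented above; the caller name,
retry count and delay are illustrative assumptions, not part of this patch
(msleep() needs <linux/delay.h>):

/*
 * Illustrative caller only: retry queue start while the Scheduler is busy.
 */
static int example_start_queue_with_retry(struct kbase_queue *queue)
{
	int retries = 5;
	int err;

	do {
		err = kbase_csf_scheduler_queue_start(queue);
		if (err != -EBUSY)
			return err;
		msleep(10); /* give the Scheduler time to change state */
	} while (--retries);

	return -EBUSY;
}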
 
@@ -250,13 +253,13 @@
  * kbase_csf_scheduler_group_copy_suspend_buf - Suspend a queue
  *		group and copy suspend buffer.
  *
- * This function is called to suspend a queue group and copy the suspend_buffer
- * contents to the input buffer provided.
- *
  * @group:	Pointer to the queue group to be suspended.
  * @sus_buf:	Pointer to the structure which contains details of the
  *		user buffer and its kernel pinned pages to which we need to copy
  *		the group suspend buffer.
+ *
+ * This function is called to suspend a queue group and copy the suspend_buffer
+ * contents to the input buffer provided.
  *
  * Return:	0 on success, or negative on failure.
  */
@@ -374,7 +377,11 @@
  * kbase_csf_scheduler_pm_active - Perform scheduler power active operation
  *
  * Note: This function will increase the scheduler's internal pm_active_count
- * value, ensuring that both GPU and MCU are powered for access.
+ * value, ensuring that both GPU and MCU are powered for access. The MCU may
+ * not have actually become active when this function returns, so the caller
+ * needs to call kbase_csf_scheduler_wait_mcu_active() for that.
+ *
+ * This function should not be called with the global Scheduler lock held.
  *
  * @kbdev: Instance of a GPU platform device that implements a CSF interface.
  */
@@ -384,11 +391,36 @@
  * kbase_csf_scheduler_pm_idle - Perform the scheduler power idle operation
  *
  * Note: This function will decrease the scheduler's internal pm_active_count
- * value. On reaching 0, the MCU and GPU could be powered off.
+ * value. On reaching 0, the MCU and GPU could be powered off. This function
+ * should not be called with the global Scheduler lock held.
  *
  * @kbdev: Instance of a GPU platform device that implements a CSF interface.
  */
 void kbase_csf_scheduler_pm_idle(struct kbase_device *kbdev);
+
+/**
+ * kbase_csf_scheduler_wait_mcu_active - Wait for the MCU to actually become active
+ *
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
+ *
+ * This function will wait for the MCU to actually become active. It is meant
+ * to be called after kbase_csf_scheduler_pm_active(). It is needed as
+ * kbase_csf_scheduler_pm_active() may not make the MCU active right away.
+ *
+ * Return: 0 if the MCU was successfully activated, otherwise an error code.
+ */
+int kbase_csf_scheduler_wait_mcu_active(struct kbase_device *kbdev);
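
A minimal sketch of the call sequence implied by the notes above:
kbase_csf_scheduler_pm_active() takes the reference,
kbase_csf_scheduler_wait_mcu_active() waits for the MCU, and
kbase_csf_scheduler_pm_idle() drops the reference. The caller function itself
is a hypothetical example:

/*
 * Illustrative caller only: power up the MCU for a short access window.
 * Per the notes above, neither call is made with the Scheduler lock held.
 */
static int example_access_with_mcu_active(struct kbase_device *kbdev)
{
	int err;

	kbase_csf_scheduler_pm_active(kbdev);	/* take PM reference */

	err = kbase_csf_scheduler_wait_mcu_active(kbdev);
	if (!err) {
		/* ... GPU/MCU register access is safe here ... */
	}

	kbase_csf_scheduler_pm_idle(kbdev);	/* drop PM reference */
	return err;
}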
+
+/**
+ * kbase_csf_scheduler_pm_resume_no_lock - Reactivate the scheduler on system resume
+ *
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
+ *
+ * This function will make the scheduler resume the scheduling of queue groups
+ * and take the power management reference, if there are any runnable groups.
+ * The caller must have acquired the global Scheduler lock.
+ */
+void kbase_csf_scheduler_pm_resume_no_lock(struct kbase_device *kbdev);
 
 /**
  * kbase_csf_scheduler_pm_resume - Reactivate the scheduler on system resume
@@ -401,14 +433,29 @@
 void kbase_csf_scheduler_pm_resume(struct kbase_device *kbdev);
 
 /**
+ * kbase_csf_scheduler_pm_suspend_no_lock - Idle the scheduler on system suspend
+ *
+ * @kbdev: Instance of a GPU platform device that implements a CSF interface.
+ *
+ * This function will make the scheduler suspend all the running queue groups
+ * and drop its power management reference.
+ * The caller must have acquired the global Scheduler lock.
+ *
+ * Return: 0 on success.
+ */
+int kbase_csf_scheduler_pm_suspend_no_lock(struct kbase_device *kbdev);
+
+/**
  * kbase_csf_scheduler_pm_suspend - Idle the scheduler on system suspend
  *
  * @kbdev: Instance of a GPU platform device that implements a CSF interface.
  *
  * This function will make the scheduler suspend all the running queue groups
 * and drop its power management reference.
+ *
+ * Return: 0 on success.
  */
-void kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);
+int kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev);
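
Because kbase_csf_scheduler_pm_suspend() now returns an error code, a suspend
path is expected to check it. The helper below is a hedged sketch under an
assumed name, not the driver's actual suspend callback:

/*
 * Hypothetical system-suspend helper: propagate the Scheduler's result so
 * that a failed CSG suspend aborts the system suspend.
 */
static int example_system_suspend(struct kbase_device *kbdev)
{
	int err = kbase_csf_scheduler_pm_suspend(kbdev);

	if (err)
		return err;	/* CSGs could not be suspended; abort */

	/* ... continue with the rest of the suspend sequence ... */
	return 0;
}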
 
 /**
  * kbase_csf_scheduler_all_csgs_idle() - Check if the scheduler internal
@@ -427,7 +474,7 @@
 }
 
 /**
- * kbase_csf_scheduler_advance_tick_nolock() - Advance the scheduling tick
+ * kbase_csf_scheduler_tick_advance_nolock() - Advance the scheduling tick
  *
  * @kbdev: Pointer to the device
  *
@@ -437,23 +484,23 @@
  * The caller must hold the interrupt lock.
  */
 static inline void
-kbase_csf_scheduler_advance_tick_nolock(struct kbase_device *kbdev)
+kbase_csf_scheduler_tick_advance_nolock(struct kbase_device *kbdev)
 {
 	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
 
 	lockdep_assert_held(&scheduler->interrupt_lock);
 
 	if (scheduler->tick_timer_active) {
-		KBASE_KTRACE_ADD(kbdev, SCHEDULER_ADVANCE_TICK, NULL, 0u);
+		KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_ADVANCE, NULL, 0u);
 		scheduler->tick_timer_active = false;
 		queue_work(scheduler->wq, &scheduler->tick_work);
 	} else {
-		KBASE_KTRACE_ADD(kbdev, SCHEDULER_NOADVANCE_TICK, NULL, 0u);
+		KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_NOADVANCE, NULL, 0u);
 	}
 }
 
 /**
- * kbase_csf_scheduler_advance_tick() - Advance the scheduling tick
+ * kbase_csf_scheduler_tick_advance() - Advance the scheduling tick
  *
  * @kbdev: Pointer to the device
  *
@@ -461,14 +508,52 @@
  * immediate execution, but only if the tick hrtimer is active. If the timer
  * is inactive then the tick work item is already in flight.
  */
-static inline void kbase_csf_scheduler_advance_tick(struct kbase_device *kbdev)
+static inline void kbase_csf_scheduler_tick_advance(struct kbase_device *kbdev)
 {
 	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
 	unsigned long flags;
 
 	spin_lock_irqsave(&scheduler->interrupt_lock, flags);
-	kbase_csf_scheduler_advance_tick_nolock(kbdev);
+	kbase_csf_scheduler_tick_advance_nolock(kbdev);
 	spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
+}
+
+/**
+ * kbase_csf_scheduler_invoke_tick() - Invoke the scheduling tick
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function will queue the scheduling tick work item for immediate
+ * execution if the tick timer is not active. This can be called from interrupt
+ * context to resume scheduling after the GPU was put to sleep.
+ */
+static inline void kbase_csf_scheduler_invoke_tick(struct kbase_device *kbdev)
+{
+	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
+	unsigned long flags;
+
+	KBASE_KTRACE_ADD(kbdev, SCHEDULER_TICK_INVOKE, NULL, 0u);
+	spin_lock_irqsave(&scheduler->interrupt_lock, flags);
+	if (!scheduler->tick_timer_active)
+		queue_work(scheduler->wq, &scheduler->tick_work);
+	spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);
+}
+
+/**
+ * kbase_csf_scheduler_invoke_tock() - Invoke the scheduling tock
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function will queue the scheduling tock work item for immediate
+ * execution.
+ */
+static inline void kbase_csf_scheduler_invoke_tock(struct kbase_device *kbdev)
+{
+	struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
+
+	KBASE_KTRACE_ADD(kbdev, SCHEDULER_TOCK_INVOKE, NULL, 0u);
+	if (atomic_cmpxchg(&scheduler->pending_tock_work, false, true) == false)
+		mod_delayed_work(scheduler->wq, &scheduler->tock_work, 0);
 }
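
The atomic_cmpxchg() on pending_tock_work above acts as a test-and-set guard,
so concurrent callers queue the tock work at most once. A generic sketch of
that pattern follows; the names are illustrative and not taken from the
driver:

/* Generic "queue the work at most once" pattern. */
static atomic_t example_pending = ATOMIC_INIT(0);

static void example_kick(struct workqueue_struct *wq,
			 struct delayed_work *dwork)
{
	/* Only the caller that flips the flag from false to true queues it. */
	if (atomic_cmpxchg(&example_pending, false, true) == false)
		mod_delayed_work(wq, dwork, 0);
}

/* The work handler is expected to clear the flag (atomic_set(..., false))
 * so that a later invocation can queue the work again.
 */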
 
 /**
@@ -491,4 +576,106 @@
 	return (queue->trace_buffer_size && queue->trace_buffer_base);
 }
 
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_csf_scheduler_reval_idleness_post_sleep() - Check GPU's idleness after
+ *                                                   putting MCU to sleep state
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function re-evaluates the idleness of on-slot queue groups after the
+ * MCU was put into the sleep state and invokes the scheduling tick if any of
+ * the on-slot queue groups became non-idle.
+ * The CSG_OUTPUT_BLOCK.CSG_STATUS_STATE.IDLE bit, which the MCU firmware
+ * updates when handling the sleep request, is checked to determine idleness.
+ *
+ * This function is needed to detect whether more work was flushed in the
+ * window between the GPU idle notification and the enabling of the Doorbell
+ * mirror interrupt (from the MCU state machine). Once the Doorbell mirror
+ * interrupt is enabled, the Host can receive notifications of User doorbell
+ * rings.
+ */
+void kbase_csf_scheduler_reval_idleness_post_sleep(struct kbase_device *kbdev);
+
+/**
+ * kbase_csf_scheduler_handle_runtime_suspend() - Handle runtime suspend by
+ *                                                suspending CSGs.
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called from the runtime suspend callback function for
+ * suspending all the on-slot queue groups. If any of the groups is found to
+ * be non-idle after the completion of the CSG suspend operation, or the CSG
+ * suspend operation times out, then the scheduling tick is invoked and an
+ * error is returned so that the GPU power down can be aborted.
+ *
+ * Return: 0 if all the CSGs were suspended, otherwise an error code.
+ */
+int kbase_csf_scheduler_handle_runtime_suspend(struct kbase_device *kbdev);
+#endif
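
Taken together with the idleness re-check above, a runtime-suspend path would
presumably look something like the sketch below; the callback name and
surrounding structure are assumptions for illustration only:

/*
 * Hypothetical runtime_suspend callback body: abort the power-down if any
 * on-slot CSG turns out to be non-idle or the suspend operation times out.
 */
static int example_runtime_suspend(struct kbase_device *kbdev)
{
	int err = kbase_csf_scheduler_handle_runtime_suspend(kbdev);

	if (err)
		return err;	/* scheduling tick already invoked; stay powered */

	/* ... proceed with powering down the GPU ... */
	return 0;
}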
+
+/**
+ * kbase_csf_scheduler_process_gpu_idle_event() - Process GPU idle IRQ
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called when a GPU idle IRQ has been raised.
+ */
+void kbase_csf_scheduler_process_gpu_idle_event(struct kbase_device *kbdev);
+
+/**
+ * kbase_csf_scheduler_get_nr_active_csgs() - Get the number of active CSGs
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function calculates the number of CSG slots that have a queue group
+ * resident on them.
+ *
+ * Note: This function should not be used if the interrupt_lock is held. Use
+ * kbase_csf_scheduler_get_nr_active_csgs_locked() instead.
+ *
+ * Return: number of active CSGs.
+ */
+u32 kbase_csf_scheduler_get_nr_active_csgs(struct kbase_device *kbdev);
+
+/**
+ * kbase_csf_scheduler_get_nr_active_csgs_locked() - Get the number of active
+ *                                                   CSGs
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function calculates the number of CSG slots that have a queue group
+ * resident on them.
+ *
+ * Note: This function should be called with interrupt_lock held.
+ *
+ * Return: number of active CSGs.
+ */
+u32 kbase_csf_scheduler_get_nr_active_csgs_locked(struct kbase_device *kbdev);
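
A hedged sketch of how the two variants documented above would be chosen
based on locking context; the wrapper function is hypothetical:

/* Illustrative only: pick the variant that matches the locking context. */
static u32 example_count_active_csgs(struct kbase_device *kbdev)
{
	struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
	unsigned long flags;
	u32 nr;

	/* Without interrupt_lock held, use the plain variant. */
	nr = kbase_csf_scheduler_get_nr_active_csgs(kbdev);

	/* In a section that already holds interrupt_lock, use the _locked one. */
	spin_lock_irqsave(&scheduler->interrupt_lock, flags);
	nr = kbase_csf_scheduler_get_nr_active_csgs_locked(kbdev);
	spin_unlock_irqrestore(&scheduler->interrupt_lock, flags);

	return nr;
}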
+
+/**
+ * kbase_csf_scheduler_force_wakeup() - Forcefully resume the scheduling of CSGs
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called to forcefully resume the scheduling of CSGs, even
+ * when no work has been submitted to them.
+ * This function is only used for testing purposes.
+ */
+void kbase_csf_scheduler_force_wakeup(struct kbase_device *kbdev);
+
+#ifdef KBASE_PM_RUNTIME
+/**
+ * kbase_csf_scheduler_force_sleep() - Forcefully put the Scheduler to sleeping
+ *                                     state.
+ *
+ * @kbdev: Pointer to the device
+ *
+ * This function is called to forcefully put the Scheduler into the sleeping
+ * state and trigger the sleep of the MCU. If the CSGs are not idle, then the
+ * Scheduler will be reactivated again immediately.
+ * This function is only used for testing purposes.
+ */
+void kbase_csf_scheduler_force_sleep(struct kbase_device *kbdev);
+#endif
+
 #endif /* _KBASE_CSF_SCHEDULER_H_ */

--
Gitblit v1.6.2