From cde9070d9970eef1f7ec2360586c802a16230ad8 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:43:50 +0000
Subject: [PATCH] mali_kbase_instr_backend: poll PRFCNT_ACTIVE before register writes and handle GPU removal / unrecoverable error state

---
 kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_instr_backend.c |  194 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 153 insertions(+), 41 deletions(-)

diff --git a/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_instr_backend.c b/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_instr_backend.c
index 0c5fe9d..53578de 100644
--- a/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_instr_backend.c
+++ b/kernel/drivers/gpu/arm/bifrost/backend/gpu/mali_kbase_instr_backend.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -29,6 +29,20 @@
 #include <device/mali_kbase_device.h>
 #include <backend/gpu/mali_kbase_instr_internal.h>
 
+static int wait_prfcnt_ready(struct kbase_device *kbdev)
+{
+	u32 loops;
+
+	for (loops = 0; loops < KBASE_PRFCNT_ACTIVE_MAX_LOOPS; loops++) {
+		const u32 prfcnt_active = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)) &
+								  GPU_STATUS_PRFCNT_ACTIVE;
+		if (!prfcnt_active)
+			return 0;
+	}
+
+	dev_err(kbdev->dev, "PRFCNT_ACTIVE bit stuck\n");
+	return -EBUSY;
+}
 
 int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
 					struct kbase_context *kctx,
@@ -43,14 +57,20 @@
 
 	/* alignment failure */
 	if ((enable->dump_buffer == 0ULL) || (enable->dump_buffer & (2048 - 1)))
-		goto out_err;
+		return err;
 
 	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
 	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED) {
 		/* Instrumentation is already enabled */
 		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-		goto out_err;
+		return err;
+	}
+
+	if (kbase_is_gpu_removed(kbdev)) {
+		/* GPU has been removed by Arbiter */
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		return err;
 	}
 
 	/* Enable interrupt */
@@ -75,8 +95,18 @@
 	prfcnt_config |= enable->counter_set << PRFCNT_CONFIG_SETSELECT_SHIFT;
 #endif
 
+	/* Wait until the prfcnt config register can be written. kbdev->hwcnt.lock
+	 * (taken above) is still held here and is only released at the end of the
+	 * configure section, so it must be dropped before an early return —
+	 * returning with the spinlock held (IRQs off) would deadlock the system.
+	 */
+	if (wait_prfcnt_ready(kbdev)) {
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		return -EBUSY;
+	}
+
 	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
 			prfcnt_config | PRFCNT_CONFIG_MODE_OFF);
+	/* Wait until prfcnt is disabled before writing the configuration
+	 * registers; again drop the lock on the error path.
+	 */
+	if (wait_prfcnt_ready(kbdev)) {
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		return -EBUSY;
+	}
 
 	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
 					enable->dump_buffer & 0xFFFFFFFF);
@@ -105,37 +135,68 @@
 
 	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 
-	err = 0;
-
 	dev_dbg(kbdev->dev, "HW counters dumping set-up for context %pK", kctx);
-	return err;
- out_err:
-	return err;
+	return 0;
+}
+
+static void kbasep_instr_hwc_disable_hw_prfcnt(struct kbase_device *kbdev)
+{
+	u32 irq_mask;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&kbdev->hwcnt.lock);
+
+	if (kbase_is_gpu_removed(kbdev))
+		/* GPU has been removed by Arbiter */
+		return;
+
+	/* Disable interrupt */
+	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask & ~PRFCNT_SAMPLE_COMPLETED);
+
+	/* Wait until prfcnt config register can be written, then disable the counters.
+	 * Return value is ignored as we are disabling anyway.
+	 */
+	wait_prfcnt_ready(kbdev);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0);
+
+	kbdev->hwcnt.kctx = NULL;
+	kbdev->hwcnt.addr = 0ULL;
+	kbdev->hwcnt.addr_bytes = 0ULL;
 }
 
 int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
 {
 	unsigned long flags, pm_flags;
-	int err = -EINVAL;
-	u32 irq_mask;
 	struct kbase_device *kbdev = kctx->kbdev;
 
 	while (1) {
 		spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
 		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
+		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_UNRECOVERABLE_ERROR) {
+			/* Instrumentation is in unrecoverable error state,
+			 * there is nothing for us to do.
+			 */
+			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
+			/* Already disabled, return no error. */
+			return 0;
+		}
+
 		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DISABLED) {
 			/* Instrumentation is not enabled */
 			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
-			goto out;
+			return -EINVAL;
 		}
 
 		if (kbdev->hwcnt.kctx != kctx) {
 			/* Instrumentation has been setup for another context */
 			spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
-			goto out;
+			return -EINVAL;
 		}
 
 		if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_IDLE)
@@ -152,17 +213,7 @@
 	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
 	kbdev->hwcnt.backend.triggered = 0;
 
-	/* Disable interrupt */
-	irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
-	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
-				irq_mask & ~PRFCNT_SAMPLE_COMPLETED);
-
-	/* Disable the counters */
-	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0);
-
-	kbdev->hwcnt.kctx = NULL;
-	kbdev->hwcnt.addr = 0ULL;
-	kbdev->hwcnt.addr_bytes = 0ULL;
+	kbasep_instr_hwc_disable_hw_prfcnt(kbdev);
 
 	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
@@ -170,9 +221,7 @@
 	dev_dbg(kbdev->dev, "HW counters dumping disabled for context %pK",
 									kctx);
 
-	err = 0;
- out:
-	return err;
+	return 0;
 }
 
 int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
@@ -190,8 +239,13 @@
 
 	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_IDLE) {
 		/* HW counters are disabled or another dump is ongoing, or we're
-		 * resetting
+		 * resetting, or we are in unrecoverable error state.
 		 */
+		goto unlock;
+	}
+
+	if (kbase_is_gpu_removed(kbdev)) {
+		/* GPU has been removed by Arbiter */
 		goto unlock;
 	}
 
@@ -200,6 +254,11 @@
 	/* Mark that we're dumping - the PF handler can signal that we faulted
 	 */
 	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DUMPING;
+
+	/* Wait until prfcnt is ready to request dump */
+	err = wait_prfcnt_ready(kbdev);
+	if (err)
+		goto unlock;
 
 	/* Reconfigure the dump address */
 	kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
@@ -216,11 +275,8 @@
 
 	dev_dbg(kbdev->dev, "HW counters dumping done for context %pK", kctx);
 
-	err = 0;
-
  unlock:
 	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
-
 	return err;
 }
 KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_request_dump);
@@ -255,6 +311,10 @@
 
 	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
+	/* If the state is in unrecoverable error, we already wake_up the waiter
+	 * and don't need to do any action when sample is done.
+	 */
+
 	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
 		kbdev->hwcnt.backend.triggered = 1;
 		wake_up(&kbdev->hwcnt.backend.wait);
@@ -283,6 +343,8 @@
 	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
 		err = -EINVAL;
 		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+	} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_UNRECOVERABLE_ERROR) {
+		err = -EIO;
 	} else {
 		/* Dump done */
 		KBASE_DEBUG_ASSERT(kbdev->hwcnt.backend.state ==
@@ -303,25 +365,75 @@
 
 	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 
-	/* Check it's the context previously set up and we're not already
-	 * dumping
+	/* Check it's the context previously set up and we're not in IDLE
+	 * state.
 	 */
 	if (kbdev->hwcnt.kctx != kctx || kbdev->hwcnt.backend.state !=
 							KBASE_INSTR_STATE_IDLE)
-		goto out;
+		goto unlock;
+
+	if (kbase_is_gpu_removed(kbdev)) {
+		/* GPU has been removed by Arbiter */
+		goto unlock;
+	}
+
+	/* Wait until prfcnt is ready to clear */
+	err = wait_prfcnt_ready(kbdev);
+	if (err)
+		goto unlock;
 
 	/* Clear the counters */
 	KBASE_KTRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, 0);
 	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
 						GPU_COMMAND_PRFCNT_CLEAR);
 
-	err = 0;
-
-out:
+unlock:
 	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 	return err;
 }
 KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_clear);
+
+/* Put the instrumentation backend into the unrecoverable error state,
+ * disabling the HW counters if a session was active, and wake any waiters.
+ * Caller must hold kbdev->hwaccess_lock. Only a reset
+ * (kbase_instr_hwcnt_on_before_reset) leaves this state.
+ */
+void kbase_instr_hwcnt_on_unrecoverable_error(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	/* If we are already in the unrecoverable error state, early return. */
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_UNRECOVERABLE_ERROR) {
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+		return;
+	}
+
+	/* Disable HW if not disabled yet; this must be checked BEFORE the state
+	 * is overwritten below, otherwise the condition is always true.
+	 */
+	if (kbdev->hwcnt.backend.state != KBASE_INSTR_STATE_DISABLED)
+		kbasep_instr_hwc_disable_hw_prfcnt(kbdev);
+
+	kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_UNRECOVERABLE_ERROR;
+
+	/* Wake up any waiters. */
+	kbdev->hwcnt.backend.triggered = 1;
+	wake_up(&kbdev->hwcnt.backend.wait);
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_on_unrecoverable_error);
+
+void kbase_instr_hwcnt_on_before_reset(struct kbase_device *kbdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+
+	/* A reset is the only way to exit the unrecoverable error state */
+	if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_UNRECOVERABLE_ERROR)
+		kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_DISABLED;
+
+	spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+}
+KBASE_EXPORT_SYMBOL(kbase_instr_hwcnt_on_before_reset);
 
 int kbase_instr_backend_init(struct kbase_device *kbdev)
 {
@@ -336,12 +448,12 @@
 #ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
 /* Use the build time option for the override default. */
 #if defined(CONFIG_MALI_BIFROST_PRFCNT_SET_SECONDARY)
-	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_SET_SECONDARY;
+	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_PHYSICAL_SET_SECONDARY;
 #elif defined(CONFIG_MALI_PRFCNT_SET_TERTIARY)
-	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_SET_TERTIARY;
+	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_PHYSICAL_SET_TERTIARY;
 #else
 	/* Default to primary */
-	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_SET_PRIMARY;
+	kbdev->hwcnt.backend.override_counter_set = KBASE_HWCNT_PHYSICAL_SET_PRIMARY;
 #endif
 #endif
 	return 0;
@@ -361,8 +473,8 @@
 	 *
 	 * Valid inputs are the values accepted bythe SET_SELECT bits of the
 	 * PRFCNT_CONFIG register as defined in the architecture specification.
-	*/
-	debugfs_create_u8("hwcnt_set_select", S_IRUGO | S_IWUSR,
+	 */
+	debugfs_create_u8("hwcnt_set_select", 0644,
 			  kbdev->mali_debugfs_directory,
 			  (u8 *)&kbdev->hwcnt.backend.override_counter_set);
 }

--
Gitblit v1.6.2