From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] mali bifrost: rework GPU cache flush handling with explicit flush_op and busy-wait helpers
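
Rework the cache maintenance helpers in mali_kbase_device_hw.c:

- kbase_reg_write()/kbase_reg_read() are removed from this file.
- A new busy_wait_on_irq() helper polls GPU_IRQ_RAWSTAT for a given bit
  and triggers a GPU reset on timeout.
- kbase_gpu_cache_flush_and_busy_wait() issues a flush command and
  busy-waits for CLEAN_CACHES_COMPLETED, merging any already-queued
  flush via GPU_COMMAND_FLUSH_CACHE_MERGE().
- On CSF GPUs, kbase_gpu_cache_flush_pa_range_and_busy_wait() flushes a
  physical address range via the FLUSH_PA_RANGE command.
- kbase_gpu_start_cache_clean{,_nolock}() now take an explicit flush_op,
  and cache_clean_queued accumulates merged flush operations instead of
  acting as a boolean flag.

Callers are expected to pass the desired flush operation explicitly,
along the lines of the following sketch (illustrative only; the exact
flush_op constant name depends on the regmap headers in this tree):

  /* Assumed flush_op constant; see the GPU regmap headers. */
  spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
  err = kbase_gpu_cache_flush_and_busy_wait(kbdev,
                                            GPU_COMMAND_CACHE_CLN_INV_L2);
  spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);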
---
kernel/drivers/gpu/arm/bifrost/device/mali_kbase_device_hw.c | 229 ++++++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 169 insertions(+), 60 deletions(-)
diff --git a/kernel/drivers/gpu/arm/bifrost/device/mali_kbase_device_hw.c b/kernel/drivers/gpu/arm/bifrost/device/mali_kbase_device_hw.c
index 7624831..d554950 100644
--- a/kernel/drivers/gpu/arm/bifrost/device/mali_kbase_device_hw.c
+++ b/kernel/drivers/gpu/arm/bifrost/device/mali_kbase_device_hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2014-2016, 2018-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016, 2018-2022 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,44 +28,6 @@
#include <mmu/mali_kbase_mmu.h>
#if !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
-void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
-{
- KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
- KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
-
- writel(value, kbdev->reg + offset);
-
-#if IS_ENABLED(CONFIG_DEBUG_FS)
- if (unlikely(kbdev->io_history.enabled))
- kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
- value, 1);
-#endif /* CONFIG_DEBUG_FS */
- dev_dbg(kbdev->dev, "w: reg %08x val %08x", offset, value);
-}
-
-KBASE_EXPORT_TEST_API(kbase_reg_write);
-
-u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
-{
- u32 val;
-
- KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
- KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
-
- val = readl(kbdev->reg + offset);
-
-#if IS_ENABLED(CONFIG_DEBUG_FS)
- if (unlikely(kbdev->io_history.enabled))
- kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
- val, 0);
-#endif /* CONFIG_DEBUG_FS */
- dev_dbg(kbdev->dev, "r: reg %08x val %08x", offset, val);
-
- return val;
-}
-
-KBASE_EXPORT_TEST_API(kbase_reg_read);
-
bool kbase_is_gpu_removed(struct kbase_device *kbdev)
{
u32 val;
@@ -76,7 +38,145 @@
}
#endif /* !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */
-void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev)
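+/**
+ * busy_wait_on_irq - Busy-wait for an IRQ RAWSTAT bit and then clear it.
+ *
+ * @kbdev:   Kbase device.
+ * @irq_bit: Bit to poll in GPU_IRQ_RAWSTAT, e.g. CLEAN_CACHES_COMPLETED.
+ *
+ * If the bit is not raised within KBASE_AS_INACTIVE_MAX_LOOPS iterations,
+ * a GPU reset is triggered.
+ *
+ * Return: 0 on success, -EBUSY on timeout.
+ */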
+static int busy_wait_on_irq(struct kbase_device *kbdev, u32 irq_bit)
+{
+ char *irq_flag_name;
+ /* Previously the MMU-AS command was used for the L2 cache flush on
+ * page-table updates. The same max-loops count is reused for the GPU
+ * command, because the L2 cache flush overhead is the same in both cases.
+ */
+ unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
+
+ /* Wait for the GPU cache clean operation to complete */
+ while (--max_loops &&
+ !(kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) & irq_bit)) {
+ ;
+ }
+
+ /* Reset the GPU if a timeout occurred. */
+ if (max_loops == 0) {
+ switch (irq_bit) {
+ case CLEAN_CACHES_COMPLETED:
+ irq_flag_name = "CLEAN_CACHES_COMPLETED";
+ break;
+ case FLUSH_PA_RANGE_COMPLETED:
+ irq_flag_name = "FLUSH_PA_RANGE_COMPLETED";
+ break;
+ default:
+ irq_flag_name = "UNKNOWN";
+ break;
+ }
+
+ dev_err(kbdev->dev,
+ "Stuck waiting on %s bit, might be caused by slow/unstable GPU clock or possible faulty FPGA connector\n",
+ irq_flag_name);
+
+ if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
+ kbase_reset_gpu_locked(kbdev);
+ return -EBUSY;
+ }
+
+ /* Clear the interrupt bit. */
+ KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, irq_bit);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), irq_bit);
+
+ return 0;
+}
+
+#if MALI_USE_CSF
+#define U64_LO_MASK ((1ULL << 32) - 1)
+#define U64_HI_MASK (~U64_LO_MASK)
+
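+/**
+ * kbase_gpu_cache_flush_pa_range_and_busy_wait - Flush a physical address
+ * range and busy-wait for completion.
+ *
+ * @kbdev:    Kbase device.
+ * @phys:     Starting physical address of the range to flush.
+ * @nr_bytes: Size of the range, in bytes.
+ * @flush_op: FLUSH_PA_RANGE operation to issue on GPU_COMMAND.
+ *
+ * The caller must hold kbdev->hwaccess_lock.
+ *
+ * Return: 0 on success, -EBUSY on timeout.
+ */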
+int kbase_gpu_cache_flush_pa_range_and_busy_wait(struct kbase_device *kbdev, phys_addr_t phys,
+ size_t nr_bytes, u32 flush_op)
+{
+ u64 start_pa, end_pa;
+ int ret = 0;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* 1. Clear the interrupt FLUSH_PA_RANGE_COMPLETED bit. */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), FLUSH_PA_RANGE_COMPLETED);
+
+ /* 2. Issue GPU_CONTROL.COMMAND.FLUSH_PA_RANGE operation. */
+ start_pa = phys;
+ end_pa = start_pa + nr_bytes - 1;
+
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND_ARG0_LO), start_pa & U64_LO_MASK);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND_ARG0_HI),
+ (start_pa & U64_HI_MASK) >> 32);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND_ARG1_LO), end_pa & U64_LO_MASK);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND_ARG1_HI), (end_pa & U64_HI_MASK) >> 32);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), flush_op);
+
+ /* 3. Busy-wait for the IRQ status bit to be set. */
+ ret = busy_wait_on_irq(kbdev, (u32)FLUSH_PA_RANGE_COMPLETED);
+
+ return ret;
+}
+#endif /* MALI_USE_CSF */
+
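+/**
+ * kbase_gpu_cache_flush_and_busy_wait - Issue a cache flush command and
+ * busy-wait for its completion.
+ *
+ * @kbdev:    Kbase device.
+ * @flush_op: Flush operation to issue on GPU_COMMAND; merged with any
+ *            already-queued flush operation.
+ *
+ * The caller must hold kbdev->hwaccess_lock.
+ *
+ * Return: 0 on success, -EBUSY on timeout.
+ */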
+int kbase_gpu_cache_flush_and_busy_wait(struct kbase_device *kbdev,
+ u32 flush_op)
+{
+ int need_to_wake_up = 0;
+ int ret = 0;
+
+ /* hwaccess_lock must be held to avoid any sync issue with
+ * kbase_gpu_start_cache_clean() / kbase_clean_caches_done()
+ */
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* 1. Check if kbdev->cache_clean_in_progress is set.
+ * If it is set, it means there are threads waiting for the
+ * CLEAN_CACHES_COMPLETED IRQ to be raised and that the
+ * corresponding IRQ mask bit is set.
+ * We'll clear the IRQ mask bit and busy-wait for the cache
+ * clean operation to complete before submitting the cache
+ * clean command required after the GPU page table update.
+ * Any pending flush command is merged into the requested one.
+ */
+ if (kbdev->cache_clean_in_progress) {
+ /* disable irq first */
+ u32 irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask & ~CLEAN_CACHES_COMPLETED);
+
+ /* Busy-wait for the in-progress cache clean to complete. */
+ ret = busy_wait_on_irq(kbdev, (u32)CLEAN_CACHES_COMPLETED);
+ if (ret)
+ return ret;
+
+ /* Merge the pending flush command, if any. */
+ flush_op = GPU_COMMAND_FLUSH_CACHE_MERGE(
+ kbdev->cache_clean_queued, flush_op);
+
+ /* Remember to wake up the waiting threads afterwards. */
+ need_to_wake_up = 1;
+ } else {
+ /* Clear the interrupt CLEAN_CACHES_COMPLETED bit. */
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR),
+ CLEAN_CACHES_COMPLETED);
+ }
+
+ /* 2. Issue GPU_CONTROL.COMMAND.FLUSH_CACHE operation. */
+ KBASE_KTRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, flush_op);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), flush_op);
+
+ /* 3. Busy-wait for the IRQ status bit to be set. */
+ ret = busy_wait_on_irq(kbdev, (u32)CLEAN_CACHES_COMPLETED);
+ if (ret)
+ return ret;
+
+ /* 4. Wake up blocked threads, if any. */
+ if (need_to_wake_up)
+ kbase_gpu_cache_clean_wait_complete(kbdev);
+
+ return ret;
+}
+
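+/**
+ * kbase_gpu_start_cache_clean_nolock - Start an asynchronous cache clean,
+ * or queue a merged one if a clean is already in progress.
+ *
+ * @kbdev:    Kbase device.
+ * @flush_op: Flush operation to issue on GPU_COMMAND.
+ *
+ * The caller must hold kbdev->hwaccess_lock.
+ */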
+void kbase_gpu_start_cache_clean_nolock(struct kbase_device *kbdev,
+ u32 flush_op)
{
u32 irq_mask;
@@ -85,10 +185,11 @@
if (kbdev->cache_clean_in_progress) {
/* If this is called while another clean is in progress, we
* can't rely on the current one to flush any new changes in
- * the cache. Instead, trigger another cache clean immediately
- * after this one finishes.
+ * the cache. Instead, accumulate all cache clean operations
+ * and trigger a merged one immediately after this one finishes.
*/
- kbdev->cache_clean_queued = true;
+ kbdev->cache_clean_queued = GPU_COMMAND_FLUSH_CACHE_MERGE(
+ kbdev->cache_clean_queued, flush_op);
return;
}
@@ -97,19 +198,18 @@
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
irq_mask | CLEAN_CACHES_COMPLETED);
- KBASE_KTRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, 0);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAN_INV_CACHES);
+ KBASE_KTRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, flush_op);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), flush_op);
kbdev->cache_clean_in_progress = true;
}
-void kbase_gpu_start_cache_clean(struct kbase_device *kbdev)
+void kbase_gpu_start_cache_clean(struct kbase_device *kbdev, u32 flush_op)
{
unsigned long flags;
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_gpu_start_cache_clean_nolock(kbdev);
+ kbase_gpu_start_cache_clean_nolock(kbdev, flush_op);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
@@ -117,7 +217,7 @@
{
lockdep_assert_held(&kbdev->hwaccess_lock);
- kbdev->cache_clean_queued = false;
+ kbdev->cache_clean_queued = 0;
kbdev->cache_clean_in_progress = false;
wake_up(&kbdev->cache_clean_wait);
}
@@ -129,19 +229,28 @@
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- if (kbdev->cache_clean_queued) {
- kbdev->cache_clean_queued = false;
+ if (kbdev->cache_clean_in_progress) {
+ /* Clear the CLEAN_CACHES_COMPLETED interrupt bit if set.
+ * It may already have been cleared by kbase_gpu_cache_flush_and_busy_wait.
+ */
+ KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, CLEAN_CACHES_COMPLETED);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), CLEAN_CACHES_COMPLETED);
- KBASE_KTRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, 0);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAN_INV_CACHES);
- } else {
- /* Disable interrupt */
- irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
- irq_mask & ~CLEAN_CACHES_COMPLETED);
+ if (kbdev->cache_clean_queued) {
+ u32 pending_flush_op = kbdev->cache_clean_queued;
- kbase_gpu_cache_clean_wait_complete(kbdev);
+ kbdev->cache_clean_queued = 0;
+
+ KBASE_KTRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, pending_flush_op);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), pending_flush_op);
+ } else {
+ /* Disable interrupt */
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
+ irq_mask & ~CLEAN_CACHES_COMPLETED);
+
+ kbase_gpu_cache_clean_wait_complete(kbdev);
+ }
}
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
--
Gitblit v1.6.2