From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB

---
 kernel/drivers/gpu/arm/bifrost/mmu/mali_kbase_mmu_hw_direct.c |  590 +++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 511 insertions(+), 79 deletions(-)

diff --git a/kernel/drivers/gpu/arm/bifrost/mmu/mali_kbase_mmu_hw_direct.c b/kernel/drivers/gpu/arm/bifrost/mmu/mali_kbase_mmu_hw_direct.c
index a99b988..3f6da35 100644
--- a/kernel/drivers/gpu/arm/bifrost/mmu/mali_kbase_mmu_hw_direct.c
+++ b/kernel/drivers/gpu/arm/bifrost/mmu/mali_kbase_mmu_hw_direct.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2014-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -19,110 +19,303 @@
  *
  */
 
+#include <device/mali_kbase_device.h>
 #include <linux/bitops.h>
 #include <mali_kbase.h>
+#include <mali_kbase_ctx_sched.h>
 #include <mali_kbase_mem.h>
+#include <mali_kbase_reset_gpu.h>
 #include <mmu/mali_kbase_mmu_hw.h>
 #include <tl/mali_kbase_tracepoints.h>
-#include <device/mali_kbase_device.h>
+#include <linux/delay.h>
+
+#if MALI_USE_CSF
+/**
+ * mmu_has_flush_skip_pgd_levels() - Check if the GPU has the feature
+ *                                   AS_LOCKADDR_FLUSH_SKIP_LEVELS
+ *
+ * @gpu_props:  GPU properties for the GPU instance.
+ *
+ * This function returns whether a cache flush can apply the skip flags of
+ * AS_LOCKADDR_FLUSH_SKIP_LEVELS.
+ *
+ * Return: True if cache flush has the said feature.
+ */
+static bool mmu_has_flush_skip_pgd_levels(struct kbase_gpu_props const *gpu_props)
+{
+	u32 const signature =
+		gpu_props->props.raw_props.gpu_id & (GPU_ID2_ARCH_MAJOR | GPU_ID2_ARCH_REV);
+
+	return signature >= (u32)GPU_ID2_PRODUCT_MAKE(12, 0, 4, 0);
+}
+#endif
 
 /**
  * lock_region() - Generate lockaddr to lock memory region in MMU
- * @pfn:       Starting page frame number of the region to lock
- * @num_pages: Number of pages to lock. It must be greater than 0.
- * @lockaddr:  Address and size of memory region to lock
+ *
+ * @gpu_props: GPU properties for finding the MMU lock region size.
+ * @lockaddr:  Address and size of memory region to lock.
+ * @op_param:  Pointer to a struct containing the starting page frame number of
+ *             the region to lock, the number of pages to lock and page table
+ *             levels to skip when flushing (if supported).
  *
  * The lockaddr value is a combination of the starting address and
  * the size of the region that encompasses all the memory pages to lock.
  *
- * The size is expressed as a logarithm: it is represented in a way
- * that is compatible with the HW specification and it also determines
- * how many of the lowest bits of the address are cleared.
+ * Bits 5:0 are used to represent the size, which must be a power of 2.
+ * The smallest amount of memory to be locked corresponds to 32 kB,
+ * i.e. 8 memory pages, because an MMU cache line is made of 64 bytes
+ * and every page table entry is 8 bytes. Therefore it is not possible
+ * to lock less than 8 memory pages at a time.
+ *
+ * The size is expressed as a logarithm minus one:
+ * - A value of 14 is thus interpreted as log(32 kB) = 15, where 32 kB
+ *   is the smallest possible size.
+ * - Likewise, a value of 47 is interpreted as log(256 TB) = 48, where 256 TB
+ *   is the largest possible size (implementation defined value according
+ *   to the HW spec).
+ *
+ * Bits 11:6 are reserved.
+ *
+ * Bits 63:12 are used to represent the base address of the region to lock.
+ * Only the upper bits of the address are used; lowest bits are cleared
+ * to avoid confusion.
+ *
+ * The address is aligned to a multiple of the region size. This has profound
+ * implications on the region size itself: often the MMU will lock a region
+ * larger than the given number of pages, because the lock region cannot start
+ * from any arbitrary address.
  *
  * Return: 0 if success, or an error code on failure.
  */
-static int lock_region(u64 pfn, u32 num_pages, u64 *lockaddr)
+static int lock_region(struct kbase_gpu_props const *gpu_props, u64 *lockaddr,
+		       const struct kbase_mmu_hw_op_param *op_param)
 {
-	const u64 lockaddr_base = pfn << PAGE_SHIFT;
-	u64 lockaddr_size_log2, region_frame_number_start,
-		region_frame_number_end;
+	const u64 lockaddr_base = op_param->vpfn << PAGE_SHIFT;
+	const u64 lockaddr_end = ((op_param->vpfn + op_param->nr) << PAGE_SHIFT) - 1;
+	u64 lockaddr_size_log2;
 
-	if (num_pages == 0)
+	if (op_param->nr == 0)
 		return -EINVAL;
 
-	/* The size is expressed as a logarithm and should take into account
-	 * the possibility that some pages might spill into the next region.
+	/* The MMU lock region is a self-aligned region whose size
+	 * is a power of 2 and that contains both start and end
+	 * of the address range determined by pfn and num_pages.
+	 * The size of the MMU lock region can be defined as the
+	 * largest divisor that yields the same result when both
+	 * start and end addresses are divided by it.
+	 *
+	 * For instance: pfn=0x4F000 num_pages=2 describe the
+	 * address range between 0x4F000 and 0x50FFF. It is only
+	 * 2 memory pages. However there isn't a single lock region
+	 * of 8 kB that encompasses both addresses because 0x4F000
+	 * would fall into the [0x4E000, 0x4FFFF] region while
+	 * 0x50000 would fall into the [0x50000, 0x51FFF] region.
+	 * The minimum lock region size that includes the entire
+	 * address range is 128 kB, and the region would be
+	 * [0x40000, 0x5FFFF].
+	 *
+	 * The region size can be found by comparing the desired
+	 * start and end addresses and finding the highest bit
+	 * that differs. The smallest naturally aligned region
+	 * must include this bit change, hence the desired region
+	 * starts with this bit (and subsequent bits) set to 0
+	 * and ends with the bit (and subsequent bits) set to 1.
+	 *
+	 * In the example above: 0x4F000 ^ 0x50FFF = 0x1FFFF
+	 * therefore the highest bit that differs is bit #16
+	 * and the region size (as a logarithm) is 16 + 1 = 17, i.e. 128 kB.
 	 */
-	lockaddr_size_log2 = fls(num_pages) + PAGE_SHIFT - 1;
+	lockaddr_size_log2 = fls64(lockaddr_base ^ lockaddr_end);
 
-	/* Round up if the number of pages is not a power of 2. */
-	if (num_pages != ((u32)1 << (lockaddr_size_log2 - PAGE_SHIFT)))
-		lockaddr_size_log2 += 1;
-
-	/* Round up if some memory pages spill into the next region. */
-	region_frame_number_start = pfn >> (lockaddr_size_log2 - PAGE_SHIFT);
-	region_frame_number_end =
-	    (pfn + num_pages - 1) >> (lockaddr_size_log2 - PAGE_SHIFT);
-
-	if (region_frame_number_start < region_frame_number_end)
-		lockaddr_size_log2 += 1;
-
-	/* Represent the size according to the HW specification. */
-	lockaddr_size_log2 = MAX(lockaddr_size_log2,
-		KBASE_LOCK_REGION_MIN_SIZE_LOG2);
-
+	/* Cap the size against minimum and maximum values allowed. */
 	if (lockaddr_size_log2 > KBASE_LOCK_REGION_MAX_SIZE_LOG2)
 		return -EINVAL;
 
-	/* The lowest bits are cleared and then set to size - 1 to represent
-	 * the size in a way that is compatible with the HW specification.
+	lockaddr_size_log2 =
+		MAX(lockaddr_size_log2, kbase_get_lock_region_min_size_log2(gpu_props));
+
+	/* Represent the result in a way that is compatible with HW spec.
+	 *
+	 * Upper bits are used for the base address, whose lower bits
+	 * are cleared to avoid confusion because they are going to be ignored
+	 * by the MMU anyway, since lock regions shall be aligned with
+	 * a multiple of their size and cannot start from any address.
+	 *
+	 * Lower bits are used for the size, which is represented as
+	 * logarithm minus one of the actual size.
 	 */
 	*lockaddr = lockaddr_base & ~((1ull << lockaddr_size_log2) - 1);
 	*lockaddr |= lockaddr_size_log2 - 1;
 
+#if MALI_USE_CSF
+	if (mmu_has_flush_skip_pgd_levels(gpu_props))
+		*lockaddr =
+			AS_LOCKADDR_FLUSH_SKIP_LEVELS_SET(*lockaddr, op_param->flush_skip_levels);
+#endif
+
 	return 0;
 }
 
-static int wait_ready(struct kbase_device *kbdev,
-		unsigned int as_nr)
+/**
+ * wait_ready() - Wait for previously issued MMU command to complete.
+ *
+ * @kbdev:        Kbase device to wait for a MMU command to complete.
+ * @as_nr:        Address space to wait for a MMU command to complete.
+ *
+ * Reset GPU if the wait for previously issued command fails.
+ *
+ * Return: 0 on successful completion. negative error on failure.
+ */
+static int wait_ready(struct kbase_device *kbdev, unsigned int as_nr)
 {
-	unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
-	u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+	const ktime_t wait_loop_start = ktime_get_raw();
+	const u32 mmu_as_inactive_wait_time_ms = kbdev->mmu_as_inactive_wait_time_ms;
+	s64 diff;
 
-	/* Wait for the MMU status to indicate there is no active command, in
-	 * case one is pending. Do not log remaining register accesses.
-	 */
-	while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
-		val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+	if (unlikely(kbdev->as[as_nr].is_unresponsive))
+		return -EBUSY;
 
-	if (max_loops == 0) {
-		dev_err(kbdev->dev, "AS_ACTIVE bit stuck, might be caused by slow/unstable GPU clock or possible faulty FPGA connector\n");
-		return -1;
-	}
+	do {
+		unsigned int i;
 
-	/* If waiting in loop was performed, log last read value. */
-	if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
-		kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
+		for (i = 0; i < 1000; i++) {
+			/* Wait for the MMU status to indicate there is no active command */
+			if (!(kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS)) &
+			      AS_STATUS_AS_ACTIVE))
+				return 0;
+		}
 
-	return 0;
+		diff = ktime_to_ms(ktime_sub(ktime_get_raw(), wait_loop_start));
+	} while (diff < mmu_as_inactive_wait_time_ms);
+
+	dev_err(kbdev->dev,
+		"AS_ACTIVE bit stuck for as %u. Might be caused by unstable GPU clk/pwr or faulty system",
+		as_nr);
+	kbdev->as[as_nr].is_unresponsive = true;
+	if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
+		kbase_reset_gpu_locked(kbdev);
+
+	return -ETIMEDOUT;
 }
 
 static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
 {
-	int status;
-
 	/* write AS_COMMAND when MMU is ready to accept another command */
-	status = wait_ready(kbdev, as_nr);
-	if (status == 0)
+	const int status = wait_ready(kbdev, as_nr);
+
+	if (likely(status == 0))
 		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd);
+	else if (status == -EBUSY) {
+		dev_dbg(kbdev->dev,
+			"Skipped the wait for AS_ACTIVE bit for as %u, before sending MMU command %u",
+			as_nr, cmd);
+	} else {
+		dev_err(kbdev->dev,
+			"Wait for AS_ACTIVE bit failed for as %u, before sending MMU command %u",
+			as_nr, cmd);
+	}
 
 	return status;
 }
+
+#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
+static int wait_cores_power_trans_complete(struct kbase_device *kbdev)
+{
+#define WAIT_TIMEOUT 1000 /* 1ms timeout */
+#define DELAY_TIME_IN_US 1
+	const int max_iterations = WAIT_TIMEOUT;
+	int loop;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	for (loop = 0; loop < max_iterations; loop++) {
+		u32 lo =
+		    kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_LO));
+		u32 hi =
+		    kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_HI));
+
+		if (!lo && !hi)
+			break;
+
+		udelay(DELAY_TIME_IN_US);
+	}
+
+	if (loop == max_iterations) {
+		dev_warn(kbdev->dev, "SHADER_PWRTRANS set for too long");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/**
+ * apply_hw_issue_GPU2019_3901_wa - Apply WA for the HW issue GPU2019_3901
+ *
+ * @kbdev:             Kbase device to issue the MMU operation on.
+ * @mmu_cmd:           Pointer to the variable contain the value of MMU command
+ *                     that needs to be sent to flush the L2 cache and do an
+ *                     implicit unlock.
+ * @as_nr:             Address space number for which MMU command needs to be
+ *                     sent.
+ *
+ * This function ensures that the flush of LSC is not missed for the pages that
+ * were unmapped from the GPU, due to the power down transition of shader cores.
+ *
+ * Return: 0 if the WA was successfully applied, non-zero otherwise.
+ */
+static int apply_hw_issue_GPU2019_3901_wa(struct kbase_device *kbdev, u32 *mmu_cmd,
+					  unsigned int as_nr)
+{
+	int ret = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	/* Check if L2 is OFF. The cores also must be OFF if L2 is not up, so
+	 * the workaround can be safely skipped.
+	 */
+	if (kbdev->pm.backend.l2_state != KBASE_L2_OFF) {
+		if (*mmu_cmd != AS_COMMAND_FLUSH_MEM) {
+			dev_warn(kbdev->dev,
+				 "Unexpected mmu command received");
+			return -EINVAL;
+		}
+
+		/* Wait for the LOCK MMU command to complete, issued by the caller */
+		ret = wait_ready(kbdev, as_nr);
+		if (unlikely(ret))
+			return ret;
+
+		ret = kbase_gpu_cache_flush_and_busy_wait(kbdev,
+				GPU_COMMAND_CACHE_CLN_INV_LSC);
+		if (unlikely(ret))
+			return ret;
+
+		ret = wait_cores_power_trans_complete(kbdev);
+		if (unlikely(ret)) {
+			if (kbase_prepare_to_reset_gpu_locked(kbdev,
+							      RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
+				kbase_reset_gpu_locked(kbdev);
+			return ret;
+		}
+
+		/* As LSC is guaranteed to have been flushed we can use FLUSH_PT
+		 * MMU command to only flush the L2.
+		 */
+		*mmu_cmd = AS_COMMAND_FLUSH_PT;
+	}
+
+	return ret;
+}
+#endif
 
 void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
 {
 	struct kbase_mmu_setup *current_setup = &as->current_setup;
 	u64 transcfg = 0;
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
 
 	transcfg = current_setup->transcfg;
 
@@ -167,45 +360,284 @@
 			transcfg);
 
 	write_cmd(kbdev, as->number, AS_COMMAND_UPDATE);
+#if MALI_USE_CSF
+	/* Wait for UPDATE command to complete */
+	wait_ready(kbdev, as->number);
+#endif
 }
 
-int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
-		u64 vpfn, u32 nr, u32 op,
-		unsigned int handling_irq)
+/**
+ * mmu_command_instr - Record an MMU command for instrumentation purposes.
+ *
+ * @kbdev:          Kbase device used to issue MMU operation on.
+ * @kctx_id:        Kernel context ID for MMU command tracepoint.
+ * @cmd:            Command issued to the MMU.
+ * @lock_addr:      Address of memory region locked for the operation.
+ * @mmu_sync_info:  Indicates whether this call is synchronous wrt MMU ops.
+ */
+static void mmu_command_instr(struct kbase_device *kbdev, u32 kctx_id, u32 cmd, u64 lock_addr,
+				    enum kbase_caller_mmu_sync_info mmu_sync_info)
+{
+	u64 lock_addr_base = AS_LOCKADDR_LOCKADDR_BASE_GET(lock_addr);
+	u32 lock_addr_size = AS_LOCKADDR_LOCKADDR_SIZE_GET(lock_addr);
+
+	bool is_mmu_synchronous = (mmu_sync_info == CALLER_MMU_SYNC);
+
+	KBASE_TLSTREAM_AUX_MMU_COMMAND(kbdev, kctx_id, cmd, is_mmu_synchronous, lock_addr_base,
+				       lock_addr_size);
+}
+
+/* Helper function to program the LOCKADDR register before LOCK/UNLOCK command
+ * is issued.
+ */
+static int mmu_hw_set_lock_addr(struct kbase_device *kbdev, int as_nr, u64 *lock_addr,
+				const struct kbase_mmu_hw_op_param *op_param)
 {
 	int ret;
 
-	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+	ret = lock_region(&kbdev->gpu_props, lock_addr, op_param);
 
-	if (op == AS_COMMAND_UNLOCK) {
-		/* Unlock doesn't require a lock first */
-		ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
-	} else {
-		u64 lock_addr;
+	if (!ret) {
+		/* Set the region that needs to be updated */
+		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_LOCKADDR_LO),
+				*lock_addr & 0xFFFFFFFFUL);
+		kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_LOCKADDR_HI),
+				(*lock_addr >> 32) & 0xFFFFFFFFUL);
+	}
+	return ret;
+}
 
-		ret = lock_region(vpfn, nr, &lock_addr);
+/**
+ * mmu_hw_do_lock_no_wait - Issue LOCK command to the MMU and return without
+ *                          waiting for its completion.
+ *
+ * @kbdev:      Kbase device to issue the MMU operation on.
+ * @as:         Address space to issue the MMU operation on.
+ * @lock_addr:  Address of memory region locked for this operation.
+ * @op_param:   Pointer to a struct containing information about the MMU operation.
+ *
+ * Return: 0 if issuing the command was successful, otherwise an error code.
+ */
+static int mmu_hw_do_lock_no_wait(struct kbase_device *kbdev, struct kbase_as *as, u64 *lock_addr,
+				  const struct kbase_mmu_hw_op_param *op_param)
+{
+	int ret;
 
-		if (!ret) {
-			/* Lock the region that needs to be updated */
-			kbase_reg_write(kbdev,
-				MMU_AS_REG(as->number, AS_LOCKADDR_LO),
-				lock_addr & 0xFFFFFFFFUL);
-			kbase_reg_write(kbdev,
-				MMU_AS_REG(as->number, AS_LOCKADDR_HI),
-				(lock_addr >> 32) & 0xFFFFFFFFUL);
-			write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
+	ret = mmu_hw_set_lock_addr(kbdev, as->number, lock_addr, op_param);
 
-			/* Run the MMU operation */
-			write_cmd(kbdev, as->number, op);
+	if (likely(!ret))
+		ret = write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
 
-			/* Wait for the flush to complete */
-			ret = wait_ready(kbdev, as->number);
-		}
+	return ret;
+}
+
+/**
+ * mmu_hw_do_lock - Issue LOCK command to the MMU and wait for its completion.
+ *
+ * @kbdev:      Kbase device to issue the MMU operation on.
+ * @as:         Address space to issue the MMU operation on.
+ * @op_param:   Pointer to a struct containing information about the MMU operation.
+ *
+ * Return: 0 if issuing the LOCK command was successful, otherwise an error code.
+ */
+static int mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+			  const struct kbase_mmu_hw_op_param *op_param)
+{
+	int ret;
+	u64 lock_addr = 0x0;
+
+	if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+		return -EINVAL;
+
+	ret = mmu_hw_do_lock_no_wait(kbdev, as, &lock_addr, op_param);
+
+	if (!ret)
+		ret = wait_ready(kbdev, as->number);
+
+	if (!ret)
+		mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_LOCK, lock_addr,
+				  op_param->mmu_sync_info);
+
+	return ret;
+}
+
+int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+			 const struct kbase_mmu_hw_op_param *op_param)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	return mmu_hw_do_lock(kbdev, as, op_param);
+}
+
+int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *as,
+				   const struct kbase_mmu_hw_op_param *op_param)
+{
+	int ret = 0;
+
+	if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+		return -EINVAL;
+
+	ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+
+	/* Wait for UNLOCK command to complete */
+	if (likely(!ret))
+		ret = wait_ready(kbdev, as->number);
+
+	if (likely(!ret)) {
+		u64 lock_addr = 0x0;
+		/* read MMU_AS_CONTROL.LOCKADDR register */
+		lock_addr |= (u64)kbase_reg_read(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI))
+			     << 32;
+		lock_addr |= (u64)kbase_reg_read(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO));
+
+		mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_UNLOCK,
+				  lock_addr, op_param->mmu_sync_info);
 	}
 
 	return ret;
 }
 
+int kbase_mmu_hw_do_unlock(struct kbase_device *kbdev, struct kbase_as *as,
+			   const struct kbase_mmu_hw_op_param *op_param)
+{
+	int ret = 0;
+	u64 lock_addr = 0x0;
+
+	if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+		return -EINVAL;
+
+	ret = mmu_hw_set_lock_addr(kbdev, as->number, &lock_addr, op_param);
+
+	if (!ret)
+		ret = kbase_mmu_hw_do_unlock_no_addr(kbdev, as,
+						     op_param);
+
+	return ret;
+}
+
+/**
+ * mmu_hw_do_flush - Flush MMU and wait for its completion.
+ *
+ * @kbdev:           Kbase device to issue the MMU operation on.
+ * @as:              Address space to issue the MMU operation on.
+ * @op_param:        Pointer to a struct containing information about the MMU operation.
+ * @hwaccess_locked: Flag to indicate if the lock has been held.
+ *
+ * Return: 0 if flushing MMU was successful, otherwise an error code.
+ */
+static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
+	const struct kbase_mmu_hw_op_param *op_param, bool hwaccess_locked)
+{
+	int ret;
+	u64 lock_addr = 0x0;
+	u32 mmu_cmd = AS_COMMAND_FLUSH_MEM;
+
+	if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+		return -EINVAL;
+
+	/* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
+	 * this point would be unexpected.
+	 */
+	if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
+	    op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
+		dev_err(kbdev->dev, "Unexpected flush operation received");
+		return -EINVAL;
+	}
+
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+	if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
+		mmu_cmd = AS_COMMAND_FLUSH_PT;
+
+	/* Lock the region that needs to be updated */
+	ret = mmu_hw_do_lock_no_wait(kbdev, as, &lock_addr, op_param);
+	if (ret)
+		return ret;
+
+#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
+	/* WA for the BASE_HW_ISSUE_GPU2019_3901. */
+	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3901) &&
+	    mmu_cmd == AS_COMMAND_FLUSH_MEM) {
+		if (!hwaccess_locked) {
+			unsigned long flags = 0;
+
+			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+			ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+		} else {
+			ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
+		}
+
+		if (ret)
+			return ret;
+	}
+#endif
+
+	ret = write_cmd(kbdev, as->number, mmu_cmd);
+
+	/* Wait for the command to complete */
+	if (likely(!ret))
+		ret = wait_ready(kbdev, as->number);
+
+	if (likely(!ret))
+		mmu_command_instr(kbdev, op_param->kctx_id, mmu_cmd, lock_addr,
+				  op_param->mmu_sync_info);
+
+	return ret;
+}
+
+int kbase_mmu_hw_do_flush_locked(struct kbase_device *kbdev, struct kbase_as *as,
+				 const struct kbase_mmu_hw_op_param *op_param)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	return mmu_hw_do_flush(kbdev, as, op_param, true);
+}
+
+int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
+			  const struct kbase_mmu_hw_op_param *op_param)
+{
+	return mmu_hw_do_flush(kbdev, as, op_param, false);
+}
+
+int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_as *as,
+				      const struct kbase_mmu_hw_op_param *op_param)
+{
+	int ret, ret2;
+	u32 gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2_LSC;
+
+	if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
+		return -EINVAL;
+
+	/* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
+	 * this point would be unexpected.
+	 */
+	if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
+	    op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
+		dev_err(kbdev->dev, "Unexpected flush operation received");
+		return -EINVAL;
+	}
+
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+	lockdep_assert_held(&kbdev->mmu_hw_mutex);
+
+	if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
+		gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2;
+
+	/* 1. Issue MMU_AS_CONTROL.COMMAND.LOCK operation. */
+	ret = mmu_hw_do_lock(kbdev, as, op_param);
+	if (ret)
+		return ret;
+
+	/* 2. Issue GPU_CONTROL.COMMAND.FLUSH_CACHES operation */
+	ret = kbase_gpu_cache_flush_and_busy_wait(kbdev, gpu_cmd);
+
+	/* 3. Issue MMU_AS_CONTROL.COMMAND.UNLOCK operation. */
+	ret2 = kbase_mmu_hw_do_unlock_no_addr(kbdev, as, op_param);
+
+	return ret ?: ret2;
+}
+
 void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
 		enum kbase_mmu_fault_type type)
 {

--
Gitblit v1.6.2