From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] mali_kbase_dummy_job_wa: use DEVICE_ATTR_RO and pass mmu_sync_info to kbase_mem_alloc
---
kernel/drivers/gpu/arm/bifrost/mali_kbase_dummy_job_wa.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/kernel/drivers/gpu/arm/bifrost/mali_kbase_dummy_job_wa.c b/kernel/drivers/gpu/arm/bifrost/mali_kbase_dummy_job_wa.c
index 1e91ba0..0e0dab9 100644
--- a/kernel/drivers/gpu/arm/bifrost/mali_kbase_dummy_job_wa.c
+++ b/kernel/drivers/gpu/arm/bifrost/mali_kbase_dummy_job_wa.c
@@ -239,7 +239,7 @@
return failed ? -EFAULT : 0;
}
-static ssize_t show_dummy_job_wa_info(struct device * const dev,
+static ssize_t dummy_job_wa_info_show(struct device * const dev,
struct device_attribute * const attr, char * const buf)
{
struct kbase_device *const kbdev = dev_get_drvdata(dev);
@@ -254,7 +254,7 @@
return err;
}
-static DEVICE_ATTR(dummy_job_wa_info, 0444, show_dummy_job_wa_info, NULL);
+static DEVICE_ATTR_RO(dummy_job_wa_info);
static bool wa_blob_load_needed(struct kbase_device *kbdev)
{
@@ -280,6 +280,11 @@
u32 blob_offset;
int err;
struct kbase_context *kctx;
+
+ /* Calls to this function are inherently asynchronous, with respect to
+ * MMU operations.
+ */
+ const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
lockdep_assert_held(&kbdev->fw_load_lock);
@@ -375,8 +380,8 @@
nr_pages = PFN_UP(blob->size);
flags = blob->map_flags | BASE_MEM_FLAG_MAP_FIXED;
- va_region = kbase_mem_alloc(kctx, nr_pages, nr_pages,
- 0, &flags, &gpu_va);
+ va_region = kbase_mem_alloc(kctx, nr_pages, nr_pages, 0, &flags,
+ &gpu_va, mmu_sync_info);
if (!va_region) {
dev_err(kbdev->dev, "Failed to allocate for blob\n");
--
Gitblit v1.6.2