From e636c8d336489bf3eed5878299e6cc045bbad077 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:17:29 +0000
Subject: [PATCH] mali bifrost: update kbase context init/term (task/mm refcounting, locking)
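
Bring the kbase context init/term paths in line with newer upstream
kbase sources:

- use the namespace-aware task_tgid_vnr()/task_pid_vnr() instead of
  current->tgid/current->pid
- for userspace-created contexts (kctx->filp set), resolve the caller's
  task_struct under RCU, pin it with get_task_struct() and pin the mm
  via kbase_mem_mmgrab(); drop both references on kctx-insert failure
  and in kbase_context_common_term()
- take kbase_gpu_vm_lock() around the cookies bitmap copy and
  gpu_mem_usage_lock around the total_gpu_pages WARN_ON()
- add the CSF num_fixable_allocs/num_fixed_allocs counters
- drop the ctx-sched removal (under mmu_hw_mutex/hwaccess_lock) from
  kbase_context_common_term(), along with the now-unused mm_update_lock,
  refcount and legacy_hwcnt_lock initialisation
- pull in linux/sched/task.h on kernels >= 4.11 and refresh the
  copyright years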
---
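Notes (kept below the '---' cut line, so `git am` ignores them):

The core addition is resolving the caller's tgid to a pinned
task_struct during context init. A minimal standalone sketch of that
lookup-and-pin pattern, assuming kernel context (the helper name
grab_task_for_tgid is illustrative, not part of the driver):

    #include <linux/pid.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>
    #include <linux/sched/task.h>

    static struct task_struct *grab_task_for_tgid(pid_t tgid)
    {
            struct pid *pid_struct;
            struct task_struct *task = NULL;

            rcu_read_lock();
            pid_struct = find_get_pid(tgid);   /* takes a ref on struct pid */
            if (pid_struct) {
                    task = pid_task(pid_struct, PIDTYPE_PID);
                    if (task)
                            /* pin the task beyond the RCU section */
                            get_task_struct(task);
                    put_pid(pid_struct);       /* struct pid ref no longer needed */
            }
            rcu_read_unlock();

            return task; /* caller must put_task_struct() when done */
    }
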
kernel/drivers/gpu/arm/bifrost/context/mali_kbase_context.c | 101 +++++++++++++++++++++++++++++++++++++-------------
1 file changed, 74 insertions(+), 27 deletions(-)
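
The task reference is paired with an mm reference: kbase_mem_mmgrab()
is assumed to wrap mmgrab(current->mm) (a raw mm_count increment on
pre-4.11 kernels), pinning the mm_struct itself rather than its address
space. The two pins and their releases line up as:

    /* init, userspace contexts only (kctx->filp != NULL) */
    get_task_struct(task);            /* -> kctx->task       */
    kbase_mem_mmgrab();               /* -> kctx->process_mm */

    /* kctx-insert error path and kbase_context_common_term() */
    mmdrop(kctx->process_mm);
    put_task_struct(kctx->task);
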
diff --git a/kernel/drivers/gpu/arm/bifrost/context/mali_kbase_context.c b/kernel/drivers/gpu/arm/bifrost/context/mali_kbase_context.c
index b2e7025..88be6c2 100644
--- a/kernel/drivers/gpu/arm/bifrost/context/mali_kbase_context.c
+++ b/kernel/drivers/gpu/arm/bifrost/context/mali_kbase_context.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -22,6 +22,12 @@
/*
* Base kernel context APIs
*/
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/task.h>
+#else
+#include <linux/sched.h>
+#endif
#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
@@ -129,16 +135,50 @@
/* creating a context is considered a disjoint event */
kbase_disjoint_event(kctx->kbdev);
- kctx->as_nr = KBASEP_AS_NR_INVALID;
-
- atomic_set(&kctx->refcount, 0);
-
- spin_lock_init(&kctx->mm_update_lock);
kctx->process_mm = NULL;
+ kctx->task = NULL;
atomic_set(&kctx->nonmapped_pages, 0);
atomic_set(&kctx->permanent_mapped_pages, 0);
- kctx->tgid = current->tgid;
- kctx->pid = current->pid;
+ kctx->tgid = task_tgid_vnr(current);
+ kctx->pid = task_pid_vnr(current);
+
+ /* Check if this is a Userspace created context */
+ if (likely(kctx->filp)) {
+ struct pid *pid_struct;
+
+ rcu_read_lock();
+ pid_struct = find_get_pid(kctx->tgid);
+ if (likely(pid_struct)) {
+ struct task_struct *task = pid_task(pid_struct, PIDTYPE_PID);
+
+ if (likely(task)) {
+ /* Take a reference on the task to avoid slow lookup
+ * later on from the page allocation loop.
+ */
+ get_task_struct(task);
+ kctx->task = task;
+ } else {
+ dev_err(kctx->kbdev->dev,
+ "Failed to get task pointer for %s/%d",
+ current->comm, kctx->pid);
+ err = -ESRCH;
+ }
+
+ put_pid(pid_struct);
+ } else {
+ dev_err(kctx->kbdev->dev,
+ "Failed to get pid pointer for %s/%d",
+ current->comm, kctx->pid);
+ err = -ESRCH;
+ }
+ rcu_read_unlock();
+
+ if (unlikely(err))
+ return err;
+
+ kbase_mem_mmgrab();
+ kctx->process_mm = current->mm;
+ }
atomic_set(&kctx->used_pages, 0);
@@ -152,6 +192,7 @@
init_waitqueue_head(&kctx->event_queue);
atomic_set(&kctx->event_count, 0);
+
#if !MALI_USE_CSF
atomic_set(&kctx->event_closed, false);
#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
@@ -159,20 +200,28 @@
#endif
#endif
+#if MALI_USE_CSF
+ atomic64_set(&kctx->num_fixable_allocs, 0);
+ atomic64_set(&kctx->num_fixed_allocs, 0);
+#endif
+
+ kbase_gpu_vm_lock(kctx);
bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
+ kbase_gpu_vm_unlock(kctx);
kctx->id = atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
- mutex_init(&kctx->legacy_hwcnt_lock);
-
mutex_lock(&kctx->kbdev->kctx_list_lock);
-
err = kbase_insert_kctx_to_process(kctx);
- if (err)
- dev_err(kctx->kbdev->dev,
- "(err:%d) failed to insert kctx to kbase_process\n", err);
-
mutex_unlock(&kctx->kbdev->kctx_list_lock);
+ if (err) {
+ dev_err(kctx->kbdev->dev,
+ "(err:%d) failed to insert kctx to kbase_process", err);
+ if (likely(kctx->filp)) {
+ mmdrop(kctx->process_mm);
+ put_task_struct(kctx->task);
+ }
+ }
return err;
}
@@ -235,7 +284,9 @@
	/* Add checks so that the terminating process should not
* hold any gpu_memory.
*/
+ spin_lock(&kctx->kbdev->gpu_mem_usage_lock);
WARN_ON(kprcs->total_gpu_pages);
+ spin_unlock(&kctx->kbdev->gpu_mem_usage_lock);
WARN_ON(!RB_EMPTY_ROOT(&kprcs->dma_buf_root));
kfree(kprcs);
}
@@ -243,14 +294,7 @@
void kbase_context_common_term(struct kbase_context *kctx)
{
- unsigned long flags;
int pages;
-
- mutex_lock(&kctx->kbdev->mmu_hw_mutex);
- spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
- kbase_ctx_sched_remove_ctx(kctx);
- spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
- mutex_unlock(&kctx->kbdev->mmu_hw_mutex);
pages = atomic_read(&kctx->used_pages);
if (pages != 0)
@@ -263,15 +307,18 @@
kbase_remove_kctx_from_process(kctx);
mutex_unlock(&kctx->kbdev->kctx_list_lock);
+ if (likely(kctx->filp)) {
+ mmdrop(kctx->process_mm);
+ put_task_struct(kctx->task);
+ }
+
KBASE_KTRACE_ADD(kctx->kbdev, CORE_CTX_DESTROY, kctx, 0u);
}
int kbase_context_mem_pool_group_init(struct kbase_context *kctx)
{
- return kbase_mem_pool_group_init(&kctx->mem_pools,
- kctx->kbdev,
- &kctx->kbdev->mem_pool_defaults,
- &kctx->kbdev->mem_pools);
+ return kbase_mem_pool_group_init(&kctx->mem_pools, kctx->kbdev,
+ &kctx->kbdev->mem_pool_defaults, &kctx->kbdev->mem_pools);
}
void kbase_context_mem_pool_group_term(struct kbase_context *kctx)
@@ -283,7 +330,7 @@
{
return kbase_mmu_init(
kctx->kbdev, &kctx->mmu, kctx,
- base_context_mmu_group_id_get(kctx->create_flags));
+ kbase_context_mmu_group_id_get(kctx->create_flags));
}
void kbase_context_mmu_term(struct kbase_context *kctx)
--