2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/arm/bifrost/context/mali_kbase_context.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -22,6 +22,12 @@
 /*
  * Base kernel context APIs
  */
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/task.h>
+#else
+#include <linux/sched.h>
+#endif
 
 #include <mali_kbase.h>
 #include <gpu/mali_kbase_gpu_regmap.h>
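
Background on the new include guard: kernel 4.11 split <linux/sched.h>, moving the task lifetime helpers used later in this file (get_task_struct(), put_task_struct()) into <linux/sched/task.h>. A minimal annotated sketch of the same compat pattern:

	#include <linux/version.h>

	#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
	/* 4.11+: task lifetime helpers live in their own header */
	#include <linux/sched/task.h>
	#else
	/* older kernels: the declarations are still in <linux/sched.h> */
	#include <linux/sched.h>
	#endif
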
@@ -129,16 +135,50 @@
 	/* creating a context is considered a disjoint event */
 	kbase_disjoint_event(kctx->kbdev);
 
-	kctx->as_nr = KBASEP_AS_NR_INVALID;
-
-	atomic_set(&kctx->refcount, 0);
-
-	spin_lock_init(&kctx->mm_update_lock);
 	kctx->process_mm = NULL;
+	kctx->task = NULL;
 	atomic_set(&kctx->nonmapped_pages, 0);
 	atomic_set(&kctx->permanent_mapped_pages, 0);
-	kctx->tgid = current->tgid;
-	kctx->pid = current->pid;
+	kctx->tgid = task_tgid_vnr(current);
+	kctx->pid = task_pid_vnr(current);
+
+	/* Check if this is a Userspace created context */
+	if (likely(kctx->filp)) {
+		struct pid *pid_struct;
+
+		rcu_read_lock();
+		pid_struct = find_get_pid(kctx->tgid);
+		if (likely(pid_struct)) {
+			struct task_struct *task = pid_task(pid_struct, PIDTYPE_PID);
+
+			if (likely(task)) {
+				/* Take a reference on the task to avoid slow lookup
+				 * later on from the page allocation loop.
+				 */
+				get_task_struct(task);
+				kctx->task = task;
+			} else {
+				dev_err(kctx->kbdev->dev,
+					"Failed to get task pointer for %s/%d",
+					current->comm, kctx->pid);
+				err = -ESRCH;
+			}
+
+			put_pid(pid_struct);
+		} else {
+			dev_err(kctx->kbdev->dev,
+				"Failed to get pid pointer for %s/%d",
+				current->comm, kctx->pid);
+			err = -ESRCH;
+		}
+		rcu_read_unlock();
+
+		if (unlikely(err))
+			return err;
+
+		kbase_mem_mmgrab();
+		kctx->process_mm = current->mm;
+	}
 
 	atomic_set(&kctx->used_pages, 0);
 
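
Two things in the hunk above are worth noting. First, kctx->tgid and kctx->pid now use task_tgid_vnr()/task_pid_vnr(), which report the ids as seen from the caller's pid namespace rather than the raw global values in current->tgid/current->pid. Second, for userspace-created contexts (kctx->filp set) the init path now pins both the creating task and its mm so later page-allocation paths can use them without a lookup. A self-contained sketch of the lookup-and-pin pattern (the helper name lookup_and_pin_task is illustrative, not part of the driver):

	#include <linux/pid.h>
	#include <linux/rcupdate.h>
	#include <linux/sched/task.h>

	static struct task_struct *lookup_and_pin_task(pid_t tgid)
	{
		struct task_struct *task = NULL;
		struct pid *pid_struct;

		rcu_read_lock();
		pid_struct = find_get_pid(tgid);	/* takes a ref on struct pid */
		if (pid_struct) {
			task = pid_task(pid_struct, PIDTYPE_PID);
			if (task)
				get_task_struct(task);	/* pin task past the RCU section */
			put_pid(pid_struct);		/* struct pid ref no longer needed */
		}
		rcu_read_unlock();

		return task;	/* caller must eventually call put_task_struct() */
	}
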
@@ -152,6 +192,7 @@
 
 	init_waitqueue_head(&kctx->event_queue);
 	atomic_set(&kctx->event_count, 0);
+
 #if !MALI_USE_CSF
 	atomic_set(&kctx->event_closed, false);
 #if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
@@ -159,20 +200,28 @@
 #endif
 #endif
 
+#if MALI_USE_CSF
+	atomic64_set(&kctx->num_fixable_allocs, 0);
+	atomic64_set(&kctx->num_fixed_allocs, 0);
+#endif
+
+	kbase_gpu_vm_lock(kctx);
 	bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
+	kbase_gpu_vm_unlock(kctx);
 
 	kctx->id = atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
 
-	mutex_init(&kctx->legacy_hwcnt_lock);
-
 	mutex_lock(&kctx->kbdev->kctx_list_lock);
-
 	err = kbase_insert_kctx_to_process(kctx);
-	if (err)
-		dev_err(kctx->kbdev->dev,
-			"(err:%d) failed to insert kctx to kbase_process\n", err);
-
 	mutex_unlock(&kctx->kbdev->kctx_list_lock);
+	if (err) {
+		dev_err(kctx->kbdev->dev,
+			"(err:%d) failed to insert kctx to kbase_process", err);
+		if (likely(kctx->filp)) {
+			mmdrop(kctx->process_mm);
+			put_task_struct(kctx->task);
+		}
+	}
 
 	return err;
 }
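
Two points about this hunk: taking kbase_gpu_vm_lock() around bitmap_copy() serializes cookie initialization against other users of the cookies bitmap, and the reworked failure path now releases the references taken earlier in the same function before returning, with the dev_err() moved outside the kctx_list_lock critical section to keep the lock hold time short. The pairing, for userspace contexts only:

	/*
	 * Acquire (init)                      Release (init failure / term)
	 * kbase_mem_mmgrab() on current->mm   mmdrop(kctx->process_mm)
	 * get_task_struct(task)               put_task_struct(kctx->task)
	 */
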
@@ -235,7 +284,9 @@
 	/* Add checks, so that the terminating process Should not
 	 * hold any gpu_memory.
 	 */
+	spin_lock(&kctx->kbdev->gpu_mem_usage_lock);
 	WARN_ON(kprcs->total_gpu_pages);
+	spin_unlock(&kctx->kbdev->gpu_mem_usage_lock);
 	WARN_ON(!RB_EMPTY_ROOT(&kprcs->dma_buf_root));
 	kfree(kprcs);
 }
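
The counter check now runs under gpu_mem_usage_lock, so teardown reads a stable value of total_gpu_pages. Presumably the accounting sites elsewhere in the driver update the counter under the same spinlock, along the lines of this hypothetical update site:

	spin_lock(&kbdev->gpu_mem_usage_lock);
	kprcs->total_gpu_pages += nr_pages;	/* illustrative only */
	spin_unlock(&kbdev->gpu_mem_usage_lock);
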
@@ -243,14 +294,7 @@
 
 void kbase_context_common_term(struct kbase_context *kctx)
 {
-	unsigned long flags;
 	int pages;
-
-	mutex_lock(&kctx->kbdev->mmu_hw_mutex);
-	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
-	kbase_ctx_sched_remove_ctx(kctx);
-	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
-	mutex_unlock(&kctx->kbdev->mmu_hw_mutex);
 
 	pages = atomic_read(&kctx->used_pages);
 	if (pages != 0)
@@ -263,15 +307,18 @@
 	kbase_remove_kctx_from_process(kctx);
 	mutex_unlock(&kctx->kbdev->kctx_list_lock);
 
+	if (likely(kctx->filp)) {
+		mmdrop(kctx->process_mm);
+		put_task_struct(kctx->task);
+	}
+
 	KBASE_KTRACE_ADD(kctx->kbdev, CORE_CTX_DESTROY, kctx, 0u);
 }
 
 int kbase_context_mem_pool_group_init(struct kbase_context *kctx)
 {
-	return kbase_mem_pool_group_init(&kctx->mem_pools,
-					 kctx->kbdev,
-					 &kctx->kbdev->mem_pool_defaults,
-					 &kctx->kbdev->mem_pools);
+	return kbase_mem_pool_group_init(&kctx->mem_pools, kctx->kbdev,
+					 &kctx->kbdev->mem_pool_defaults, &kctx->kbdev->mem_pools);
 }
 
 void kbase_context_mem_pool_group_term(struct kbase_context *kctx)
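
This completes the lifetime pairing started in kbase_context_common_init(): each userspace context holds one mm reference and one task reference from creation until kbase_context_common_term(). Note that mmgrab()/mmdrop() pin only the mm_struct itself, not the address space behind it (that would be mmget()/mmput()), which is enough to keep kctx->process_mm safe to dereference for accounting even after the owning process has exited.
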
@@ -283,7 +330,7 @@
 {
 	return kbase_mmu_init(
 		kctx->kbdev, &kctx->mmu, kctx,
-		base_context_mmu_group_id_get(kctx->create_flags));
+		kbase_context_mmu_group_id_get(kctx->create_flags));
 }
 
 void kbase_context_mmu_term(struct kbase_context *kctx)