@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -22,6 +22,12 @@
 /*
  * Base kernel context APIs
  */
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/task.h>
+#else
+#include <linux/sched.h>
+#endif
 
 #include <mali_kbase.h>
 #include <gpu/mali_kbase_gpu_regmap.h>
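Note: the version gate above tracks the v4.11 scheduler header split, in which task lifetime helpers such as get_task_struct()/put_task_struct() moved from <linux/sched.h> into the new <linux/sched/task.h>; on older kernels the same declarations still come from <linux/sched.h>.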
@@ -129,16 +135,50 @@
 	/* creating a context is considered a disjoint event */
 	kbase_disjoint_event(kctx->kbdev);
 
-	kctx->as_nr = KBASEP_AS_NR_INVALID;
-
-	atomic_set(&kctx->refcount, 0);
-
-	spin_lock_init(&kctx->mm_update_lock);
 	kctx->process_mm = NULL;
+	kctx->task = NULL;
 	atomic_set(&kctx->nonmapped_pages, 0);
 	atomic_set(&kctx->permanent_mapped_pages, 0);
-	kctx->tgid = current->tgid;
-	kctx->pid = current->pid;
+	kctx->tgid = task_tgid_vnr(current);
+	kctx->pid = task_pid_vnr(current);
+
+	/* Check if this is a Userspace created context */
+	if (likely(kctx->filp)) {
+		struct pid *pid_struct;
+
+		rcu_read_lock();
+		pid_struct = find_get_pid(kctx->tgid);
+		if (likely(pid_struct)) {
+			struct task_struct *task = pid_task(pid_struct, PIDTYPE_PID);
+
+			if (likely(task)) {
+				/* Take a reference on the task to avoid slow lookup
+				 * later on from the page allocation loop.
+				 */
+				get_task_struct(task);
+				kctx->task = task;
+			} else {
+				dev_err(kctx->kbdev->dev,
+					"Failed to get task pointer for %s/%d",
+					current->comm, kctx->pid);
+				err = -ESRCH;
+			}
+
+			put_pid(pid_struct);
+		} else {
+			dev_err(kctx->kbdev->dev,
+				"Failed to get pid pointer for %s/%d",
+				current->comm, kctx->pid);
+			err = -ESRCH;
+		}
+		rcu_read_unlock();
+
+		if (unlikely(err))
+			return err;
+
+		kbase_mem_mmgrab();
+		kctx->process_mm = current->mm;
+	}
 
 	atomic_set(&kctx->used_pages, 0);
 
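The new userspace-context branch pins two resources for the lifetime of the context: the creating task (get_task_struct()) and its address space (kbase_mem_mmgrab(), which pairs with the mmdrop() calls on the error and teardown paths). A minimal, self-contained sketch of the pid-to-task half of that pattern; the helper name example_pin_task is hypothetical, not part of the driver:

/*
 * Look up and pin the task_struct behind a numeric tgid so that later
 * lookups (e.g. from an allocation loop) are a plain pointer access.
 */
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static struct task_struct *example_pin_task(pid_t tgid)
{
	struct task_struct *task = NULL;
	struct pid *pid_struct;

	rcu_read_lock();
	pid_struct = find_get_pid(tgid);	/* ref on struct pid, or NULL */
	if (pid_struct) {
		task = pid_task(pid_struct, PIDTYPE_PID);
		if (task)
			get_task_struct(task);	/* pin; drop with put_task_struct() */
		put_pid(pid_struct);	/* struct pid ref no longer needed */
	}
	rcu_read_unlock();

	return task;	/* NULL if the process exited meanwhile */
}

Both NULL results here are ordinary races with process exit rather than driver bugs, which is why the hunk above reports them as -ESRCH.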
@@ -152,6 +192,7 @@
 
 	init_waitqueue_head(&kctx->event_queue);
 	atomic_set(&kctx->event_count, 0);
+
 #if !MALI_USE_CSF
 	atomic_set(&kctx->event_closed, false);
 #if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
@@ -159,20 +200,28 @@
 #endif
 #endif
 
+#if MALI_USE_CSF
+	atomic64_set(&kctx->num_fixable_allocs, 0);
+	atomic64_set(&kctx->num_fixed_allocs, 0);
+#endif
+
+	kbase_gpu_vm_lock(kctx);
 	bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);
+	kbase_gpu_vm_unlock(kctx);
 
 	kctx->id = atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;
 
-	mutex_init(&kctx->legacy_hwcnt_lock);
-
 	mutex_lock(&kctx->kbdev->kctx_list_lock);
-
 	err = kbase_insert_kctx_to_process(kctx);
-	if (err)
-		dev_err(kctx->kbdev->dev,
-			"(err:%d) failed to insert kctx to kbase_process\n", err);
-
 	mutex_unlock(&kctx->kbdev->kctx_list_lock);
+	if (err) {
+		dev_err(kctx->kbdev->dev,
+			"(err:%d) failed to insert kctx to kbase_process", err);
+		if (likely(kctx->filp)) {
+			mmdrop(kctx->process_mm);
+			put_task_struct(kctx->task);
+		}
+	}
 
 	return err;
 }
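If inserting the context into the process list fails, the error path releases exactly the references taken earlier in the same function, keeping init self-unwinding: kbase_context_common_term() will not run for a context whose init failed. The release pairing, again as a sketch with a hypothetical helper name:

/*
 * Counterpart to example_pin_task(); mirrors the release pairs used on
 * both the error path above and the teardown path below.
 */
#include <linux/sched/mm.h>	/* mmdrop() */
#include <linux/sched/task.h>	/* put_task_struct() */

static void example_unpin(struct task_struct *task, struct mm_struct *mm)
{
	if (mm)
		mmdrop(mm);		/* pairs with mmgrab() */
	if (task)
		put_task_struct(task);	/* pairs with get_task_struct() */
}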
@@ -235,7 +284,9 @@
 	/* Add checks, so that the terminating process Should not
 	 * hold any gpu_memory.
 	 */
+	spin_lock(&kctx->kbdev->gpu_mem_usage_lock);
 	WARN_ON(kprcs->total_gpu_pages);
+	spin_unlock(&kctx->kbdev->gpu_mem_usage_lock);
 	WARN_ON(!RB_EMPTY_ROOT(&kprcs->dma_buf_root));
 	kfree(kprcs);
 }
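Taking gpu_mem_usage_lock around the total_gpu_pages check means the WARN_ON samples a value that is not racing concurrent updates of the per-process page accounting, which that lock appears to guard.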
@@ -243,14 +294,7 @@
 
 void kbase_context_common_term(struct kbase_context *kctx)
 {
-	unsigned long flags;
 	int pages;
-
-	mutex_lock(&kctx->kbdev->mmu_hw_mutex);
-	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
-	kbase_ctx_sched_remove_ctx(kctx);
-	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
-	mutex_unlock(&kctx->kbdev->mmu_hw_mutex);
 
 	pages = atomic_read(&kctx->used_pages);
 	if (pages != 0)
@@ -263,15 +307,18 @@
 	kbase_remove_kctx_from_process(kctx);
 	mutex_unlock(&kctx->kbdev->kctx_list_lock);
 
+	if (likely(kctx->filp)) {
+		mmdrop(kctx->process_mm);
+		put_task_struct(kctx->task);
+	}
+
 	KBASE_KTRACE_ADD(kctx->kbdev, CORE_CTX_DESTROY, kctx, 0u);
 }
 
 int kbase_context_mem_pool_group_init(struct kbase_context *kctx)
 {
-	return kbase_mem_pool_group_init(&kctx->mem_pools,
-					 kctx->kbdev,
-					 &kctx->kbdev->mem_pool_defaults,
-					 &kctx->kbdev->mem_pools);
+	return kbase_mem_pool_group_init(&kctx->mem_pools, kctx->kbdev,
+		&kctx->kbdev->mem_pool_defaults, &kctx->kbdev->mem_pools);
 }
 
 void kbase_context_mem_pool_group_term(struct kbase_context *kctx)
@@ -283,7 +330,7 @@
 {
 	return kbase_mmu_init(
 		kctx->kbdev, &kctx->mmu, kctx,
-		base_context_mmu_group_id_get(kctx->create_flags));
+		kbase_context_mmu_group_id_get(kctx->create_flags));
 }
 
 void kbase_context_mmu_term(struct kbase_context *kctx)
---|