@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2017-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2023 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -23,6 +23,11 @@
 #include <mali_kbase_defs.h>
 #include "mali_kbase_ctx_sched.h"
 #include "tl/mali_kbase_tracepoints.h"
+#if MALI_USE_CSF
+#include "mali_kbase_reset_gpu.h"
+#else
+#include <mali_kbase_hwaccess_jm.h>
+#endif
 
 /* Helper for ktrace */
 #if KBASE_KTRACE_ENABLE
@@ -62,6 +67,12 @@
 		WARN_ON(kbdev->as_to_kctx[i] != NULL);
 		WARN_ON(!(kbdev->as_free & (1u << i)));
 	}
+}
+
+void kbase_ctx_sched_init_ctx(struct kbase_context *kctx)
+{
+	kctx->as_nr = KBASEP_AS_NR_INVALID;
+	atomic_set(&kctx->refcount, 0);
 }
 
 /* kbasep_ctx_sched_find_as_for_ctx - Find a free address space
@@ -108,7 +119,7 @@
 	if (atomic_inc_return(&kctx->refcount) == 1) {
 		int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);
 
-		if (free_as != KBASEP_AS_NR_INVALID) {
+		if (free_as >= 0) {
 			kbdev->as_free &= ~(1u << free_as);
 			/* Only program the MMU if the context has not been
 			 * assigned the same address space before.
@@ -124,7 +135,6 @@
 						kbdev, prev_kctx->id);
 				prev_kctx->as_nr = KBASEP_AS_NR_INVALID;
 			}
-
 			kctx->as_nr = free_as;
 			kbdev->as_to_kctx[free_as] = kctx;
 			KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS(
@@ -150,9 +160,23 @@
 	struct kbase_device *const kbdev = kctx->kbdev;
 
 	lockdep_assert_held(&kbdev->hwaccess_lock);
-	WARN_ON(atomic_read(&kctx->refcount) == 0);
-	WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID);
-	WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
+#if MALI_USE_CSF
+	/* We expect the context to be active when this function is called,
+	 * except for the case where a page fault is reported for it during
+	 * the GPU reset sequence, in which case we can expect the refcount
+	 * to be 0.
+	 */
+	WARN_ON(!atomic_read(&kctx->refcount) && !kbase_reset_gpu_is_active(kbdev));
+#else
+	/* We expect the context to be active (and thus refcount should be non-zero)
+	 * when this function is called
+	 */
+	WARN_ON(!atomic_read(&kctx->refcount));
+#endif
+	if (likely((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS)))
+		WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
+	else
+		WARN(true, "Invalid as_nr(%d)", kctx->as_nr);
 
 	atomic_inc(&kctx->refcount);
 }
@@ -166,13 +190,17 @@
 
 	new_ref_count = atomic_dec_return(&kctx->refcount);
 	if (new_ref_count == 0) {
-		kbdev->as_free |= (1u << kctx->as_nr);
-		if (kbase_ctx_flag(kctx, KCTX_AS_DISABLED_ON_FAULT)) {
-			KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(
-				kbdev, kctx->id);
-			kbdev->as_to_kctx[kctx->as_nr] = NULL;
-			kctx->as_nr = KBASEP_AS_NR_INVALID;
-			kbase_ctx_flag_clear(kctx, KCTX_AS_DISABLED_ON_FAULT);
+		if (likely((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS))) {
+			kbdev->as_free |= (1u << kctx->as_nr);
+			if (kbase_ctx_flag(kctx, KCTX_AS_DISABLED_ON_FAULT)) {
+				KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(kbdev, kctx->id);
+				kbdev->as_to_kctx[kctx->as_nr] = NULL;
+				kctx->as_nr = KBASEP_AS_NR_INVALID;
+				kbase_ctx_flag_clear(kctx, KCTX_AS_DISABLED_ON_FAULT);
+#if !MALI_USE_CSF
+				kbase_backend_slot_kctx_purge_locked(kbdev, kctx);
+#endif
+			}
 		}
 	}
 
@@ -182,13 +210,14 @@
 void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
 {
 	struct kbase_device *const kbdev = kctx->kbdev;
+	unsigned long flags;
 
-	lockdep_assert_held(&kbdev->mmu_hw_mutex);
-	lockdep_assert_held(&kbdev->hwaccess_lock);
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 
 	WARN_ON(atomic_read(&kctx->refcount) != 0);
 
-	if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
+	if ((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS)) {
 		if (kbdev->pm.backend.gpu_powered)
 			kbase_mmu_disable(kctx);
 
@@ -196,6 +225,9 @@
 		kbdev->as_to_kctx[kctx->as_nr] = NULL;
 		kctx->as_nr = KBASEP_AS_NR_INVALID;
 	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
 }
 
 void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
@@ -210,6 +242,7 @@
 	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
 		struct kbase_context *kctx;
 
+		kbdev->as[i].is_unresponsive = false;
 #if MALI_USE_CSF
 		if ((i == MCU_AS_NR) && kbdev->csf.firmware_inited) {
 			kbase_mmu_update(kbdev, &kbdev->csf.mcu_mmu,
@@ -308,16 +341,14 @@
 bool kbase_ctx_sched_inc_refcount_nolock(struct kbase_context *kctx)
 {
 	bool result = false;
-	int as_nr;
 
 	if (WARN_ON(kctx == NULL))
 		return result;
 
 	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
 
-	as_nr = kctx->as_nr;
 	if (atomic_read(&kctx->refcount) > 0) {
-		KBASE_DEBUG_ASSERT(as_nr >= 0);
+		KBASE_DEBUG_ASSERT(kctx->as_nr >= 0);
 
 		kbase_ctx_sched_retain_ctx_refcount(kctx);
 		KBASE_KTRACE_ADD(kctx->kbdev, SCHED_RETAIN_CTX_NOLOCK, kctx,