@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2020-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -24,6 +24,7 @@
 #include <backend/gpu/mali_kbase_instr_internal.h>
 #include <backend/gpu/mali_kbase_pm_internal.h>
 #include <device/mali_kbase_device.h>
+#include <device/mali_kbase_device_internal.h>
 #include <mali_kbase_reset_gpu.h>
 #include <mmu/mali_kbase_mmu.h>
 #include <mali_kbase_ctx_sched.h>
@@ -80,6 +81,7 @@
 		}
 	} else
 		kbase_report_gpu_fault(kbdev, status, as_nr, as_valid);
+
 }
 
 void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
@@ -114,6 +116,9 @@
 									GPU_EXCEPTION_TYPE_SW_FAULT_0,
 							} } };
 
+			kbase_debug_csf_fault_notify(kbdev, scheduler->active_protm_grp->kctx,
+						     DF_GPU_PROTECTED_FAULT);
+
 			scheduler->active_protm_grp->faulted = true;
 			kbase_csf_add_group_fatal_error(
 				scheduler->active_protm_grp, &err_payload);
@@ -124,13 +129,33 @@
 		if (kbase_prepare_to_reset_gpu(
 			    kbdev, RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
 			kbase_reset_gpu(kbdev);
+
+		/* Defer the clearing to the GPU reset sequence */
+		val &= ~GPU_PROTECTED_FAULT;
 	}
 
 	if (val & RESET_COMPLETED)
 		kbase_pm_reset_done(kbdev);
 
-	KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, val);
-	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
+	/* Defer clearing CLEAN_CACHES_COMPLETED to kbase_clean_caches_done.
+	 * We need to acquire hwaccess_lock to avoid a race condition with
+	 * kbase_gpu_cache_flush_and_busy_wait
+	 */
+	KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, val & ~CLEAN_CACHES_COMPLETED);
+	kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val & ~CLEAN_CACHES_COMPLETED);
+
+#ifdef KBASE_PM_RUNTIME
+	if (val & DOORBELL_MIRROR) {
+		unsigned long flags;
+
+		dev_dbg(kbdev->dev, "Doorbell mirror interrupt received");
+		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+		kbase_pm_disable_db_mirror_interrupt(kbdev);
+		kbdev->pm.backend.exit_gpu_sleep_mode = true;
+		kbase_csf_scheduler_invoke_tick(kbdev);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	}
+#endif
 
 	/* kbase_pm_check_transitions (called by kbase_pm_power_changed) must
 	 * be called after the IRQ has been cleared. This is because it might
@@ -160,3 +185,68 @@
 
 	KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ_DONE, NULL, val);
 }
+
+#if !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
+bool kbase_is_register_accessible(u32 offset)
+{
+#ifdef CONFIG_MALI_BIFROST_DEBUG
+	if (((offset >= MCU_SUBSYSTEM_BASE) && (offset < IPA_CONTROL_BASE)) ||
+	    ((offset >= GPU_CONTROL_MCU_BASE) && (offset < USER_BASE))) {
+		WARN(1, "Invalid register offset 0x%x", offset);
+		return false;
+	}
+#endif
+
+	return true;
+}
+#endif /* !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) */
+
+#if IS_ENABLED(CONFIG_MALI_REAL_HW)
+void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
+{
+	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
+		return;
+
+	if (WARN_ON(kbdev->dev == NULL))
+		return;
+
+	if (!kbase_is_register_accessible(offset))
+		return;
+
+	writel(value, kbdev->reg + offset);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	if (unlikely(kbdev->io_history.enabled))
+		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+				     value, 1);
+#endif /* CONFIG_DEBUG_FS */
+	dev_dbg(kbdev->dev, "w: reg %08x val %08x", offset, value);
+}
+KBASE_EXPORT_TEST_API(kbase_reg_write);
+
+u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
+{
+	u32 val;
+
+	if (WARN_ON(!kbdev->pm.backend.gpu_powered))
+		return 0;
+
+	if (WARN_ON(kbdev->dev == NULL))
+		return 0;
+
+	if (!kbase_is_register_accessible(offset))
+		return 0;
+
+	val = readl(kbdev->reg + offset);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	if (unlikely(kbdev->io_history.enabled))
+		kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
+				     val, 0);
+#endif /* CONFIG_DEBUG_FS */
+	dev_dbg(kbdev->dev, "r: reg %08x val %08x", offset, val);
+
+	return val;
+}
+KBASE_EXPORT_TEST_API(kbase_reg_read);
+#endif /* IS_ENABLED(CONFIG_MALI_REAL_HW) */