```diff
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Debug and Guest Debug support
  *
  * Copyright (C) 2015 - Linaro Ltd
  * Author: Alex Bennée <alex.bennee@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kvm_host.h>
```
```diff
 
 void kvm_arm_init_debug(void)
 {
-        __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+        __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
 }
 
 /**
```
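The only functional change here is the switch from `kvm_call_hyp()` to `kvm_call_hyp_ret()`: `__kvm_get_mdcr_el2` returns a value, and the `_ret` variant of the wrapper is the one that captures and propagates the hyp call's result (calling the function directly on VHE systems, going through a hypercall on nVHE). A simplified sketch of the idea, not the kernel's exact macro definition:

```c
/*
 * Simplified sketch, not the kernel's exact definition: on VHE the
 * "hyp" function is just called directly (the host already runs at
 * EL2); on nVHE it is reached through a hypercall.  Unlike
 * kvm_call_hyp(), which is for void functions, the _ret variant
 * captures and returns the callee's result.
 */
#define kvm_call_hyp_ret(f, ...)                                        \
        ({                                                              \
                typeof(f(__VA_ARGS__)) ret;                             \
                                                                        \
                if (has_vhe())                                          \
                        ret = f(__VA_ARGS__);                           \
                else                                                    \
                        ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);      \
                                                                        \
                ret;                                                    \
        })
```

The per-CPU `mdcr_el2` value cached here is later used as the starting point for the guest's trap configuration.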
```diff
  * @vcpu: the vcpu pointer
  *
  * This is called before each entry into the hypervisor to setup any
- * debug related registers.
+ * debug related registers. Currently this just ensures we will trap
+ * access to:
+ * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ * - Debug ROM Address (MDCR_EL2_TDRA)
+ * - OS related registers (MDCR_EL2_TDOSA)
+ * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ * - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 *
  * Additionally, KVM only traps guest accesses to the debug registers if
  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
```
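The expanded comment spells out which register groups stay trapped on every guest entry. As a rough illustration of how such a trap configuration is composed (the helper name below is illustrative, not the exact function in this file): the host's cached `mdcr_el2` is masked down to the HPMN field and the trap bits named in the comment are ORed in.

```c
/*
 * Illustrative sketch only: keep the host's HPMN setting and enable the
 * traps enumerated in the comment above.  Any guest access to a trapped
 * register group then exits to EL2 where KVM can emulate it.
 */
static void setup_mdcr_el2_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
        vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |          /* performance monitors */
                                MDCR_EL2_TPMS |         /* statistical profiling */
                                MDCR_EL2_TTRF |         /* trace filter control */
                                MDCR_EL2_TPMCR |        /* PMCR_EL0 */
                                MDCR_EL2_TDRA |         /* debug ROM address */
                                MDCR_EL2_TDOSA);        /* OS lock registers */
}
```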
```diff
         }
 }
 
-
-/*
- * After successfully emulating an instruction, we might want to
- * return to user space with a KVM_EXIT_DEBUG. We can only do this
- * once the emulation is complete, though, so for userspace emulations
- * we have to wait until we have re-entered KVM before calling this
- * helper.
- *
- * Return true (and set exit_reason) to return to userspace or false
- * if no further action is required.
- */
-bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
 {
-        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-                run->exit_reason = KVM_EXIT_DEBUG;
-                run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
-                return true;
-        }
-        return false;
+        u64 dfr0;
+
+        /* For VHE, there is nothing to do */
+        if (has_vhe())
+                return;
+
+        dfr0 = read_sysreg(id_aa64dfr0_el1);
+        /*
+         * If SPE is present on this CPU and is available at current EL,
+         * we may need to check if the host state needs to be saved.
+         */
+        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
+            !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
+                vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;
+
+        /* Check if we have TRBE implemented and available at the host */
+        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
+            !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
+                vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
+}
+
+void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
+                              KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
 }
```
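The two new hooks run at vcpu_load/vcpu_put time and only matter for nVHE: they record whether the host actually owns an SPE profiling buffer or a TRBE trace buffer at the current exception level, so the hyp world-switch code can skip draining and saving that state when there is nothing to preserve. The checks read as: a non-zero ID_AA64DFR0_EL1 feature field means the unit is implemented, and a clear P bit in PMBIDR_EL1/TRBIDR_EL1 means the buffer is programmable at this EL (a set P bit means it belongs to a higher exception level). A sketch of the SPE half with the field extraction open-coded, using a hypothetical helper name:

```c
/*
 * Illustrative sketch of the SPE ownership check above (helper name is
 * hypothetical).  PMSVer != 0 means SPE is implemented; PMBIDR_EL1.P set
 * means the profiling buffer is owned by a higher exception level, so the
 * host cannot program it and there is no host state to save.
 */
static bool host_owns_spe(void)
{
        u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
        unsigned int pmsver = (dfr0 >> ID_AA64DFR0_PMSVER_SHIFT) & 0xf;

        return pmsver &&
               !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT));
}
```

The TRBE check is symmetric, with TRBIDR_EL1.P playing the same role as PMBIDR_EL1.P. Clearing both flags on vcpu_put keeps the state consistent if the vcpu is next loaded on a CPU with different SPE/TRBE availability.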