@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,18 +6,6 @@
  * Derived from arch/arm/include/kvm_emulate.h
  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #ifndef __ARM64_KVM_EMULATE_H__
@@ -24,30 +13,35 @@
 
 #include <linux/kvm_host.h>
 
+#include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
-#include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
 #include <asm/virt.h>
 
-unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
-unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
-void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
+
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
 
 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_undef32(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
@@ -63,8 +57,18 @@
 		/* trap error record accesses */
 		vcpu->arch.hcr_el2 |= HCR_TERR;
 	}
-	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+
+	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
 		vcpu->arch.hcr_el2 |= HCR_FWB;
+	} else {
+		/*
+		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
+		 * get set in SCTLR_EL1 such that we can detect when the guest
+		 * MMU gets turned on and do the necessary cache maintenance
+		 * then.
+		 */
+		vcpu->arch.hcr_el2 |= HCR_TVM;
+	}
 
 	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
@@ -76,6 +80,10 @@
 	 */
 	if (!vcpu_el1_is_32bit(vcpu))
 		vcpu->arch.hcr_el2 |= HCR_TID3;
+
+	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
+	    vcpu_el1_is_32bit(vcpu))
+		vcpu->arch.hcr_el2 |= HCR_TID2;
 }
 
 static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -83,14 +91,30 @@
 	return (unsigned long *)&vcpu->arch.hcr_el2;
 }
 
-static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~HCR_TWE;
+	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
+	    vcpu->kvm->arch.vgic.nassgireq)
+		vcpu->arch.hcr_el2 &= ~HCR_TWI;
+	else
+		vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
-static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 |= HCR_TWE;
+	vcpu->arch.hcr_el2 |= HCR_TWI;
+}
+
+static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
+static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }
 
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
@@ -103,56 +127,27 @@
 	vcpu->arch.vsesr_el2 = vsesr;
 }
 
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }
 
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }
 
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(elr);
-	else
-		return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, elr);
-	else
-		*__vcpu_elr_el1(vcpu) = v;
-}
-
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
-}
-
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return kvm_condition_valid32(vcpu);
 
 	return true;
-}
-
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
-	if (vcpu_mode_is_32bit(vcpu))
-		kvm_skip_instr32(vcpu, is_wide_instr);
-	else
-		*vcpu_pc(vcpu) += 4;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -165,41 +160,17 @@
  * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
  * AArch32 with banked registers.
  */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 					 u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }
 
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 				unsigned long val)
 {
 	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
-}
-
-static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_read_spsr32(vcpu);
-
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(spsr);
-	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
-}
-
-static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu_mode_is_32bit(vcpu)) {
-		vcpu_write_spsr32(vcpu, v);
-		return;
-	}
-
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, spsr);
-	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }
 
 /*
@@ -248,14 +219,14 @@
 	return mode != PSR_MODE_EL0t;
 }
 
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -263,12 +234,12 @@
 	return -1;
 }
 
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.far_el2;
 }
 
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
@@ -280,59 +251,64 @@
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
+}
+
+static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+/* Always check for S1PTW *before* using this. */
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
-		kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
-static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -345,17 +321,22 @@
 	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
 }
 
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+}
+
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -374,10 +355,21 @@
 	}
 }
 
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
-	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	return ESR_ELx_SYS64_ISS_RT(esr);
+}
+
+static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+{
+	if (kvm_vcpu_abt_iss1tw(vcpu))
+		return true;
+
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		return false;
+
+	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
@@ -466,4 +458,14 @@
 	return data;	/* Leave LE untouched */
 }
 
+static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
+}
+
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+	return test_bit(feature, vcpu->arch.features);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
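
As a quick illustration of how the reworked helpers compose, here is a minimal exit-handler sketch built only from functions visible in this patch. It is not part of the patch: handle_dabt_example() is a made-up name, the MMIO hand-off is elided, and the return convention (1 = handled, resume guest) follows the usual KVM style. Note two points the patch itself documents: kvm_is_write_fault() tests S1PTW before kvm_vcpu_dabt_iswrite(), and kvm_incr_pc() only queues the PC increment via KVM_ARM64_INCREMENT_PC instead of writing *vcpu_pc() directly.

	/* Illustrative sketch only; not part of this patch. */
	static int handle_dabt_example(struct kvm_vcpu *vcpu)
	{
		/* Without valid syndrome info there is nothing to decode. */
		if (!kvm_vcpu_dabt_isvalid(vcpu))
			return -ENOSYS;

		if (kvm_is_write_fault(vcpu)) {
			int rd = kvm_vcpu_dabt_get_rd(vcpu);
			unsigned int len = kvm_vcpu_dabt_get_as(vcpu);	/* 1, 2, 4 or 8 bytes */
			unsigned long data = vcpu_get_reg(vcpu, rd);

			/* ... hand data/len to the MMIO emulation ... */
		}

		/* Queue the PC increment; it is applied before the next guest entry. */
		kvm_incr_pc(vcpu);
		return 1;
	}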