@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2015 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 
 #ifndef __ARM64_KVM_HYP_H__
@@ -20,14 +9,17 @@
 
 #include <linux/compiler.h>
 #include <linux/kvm_host.h>
+#include <asm/alternative.h>
 #include <asm/sysreg.h>
 
-#define __hyp_text __section(.hyp.text) notrace
+DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
+DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);
+DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 #define read_sysreg_elx(r,nvh,vh)					\
 	({								\
 		u64 reg;						\
-		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
+		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
@@ -37,7 +29,7 @@
 #define write_sysreg_elx(v,r,nvh,vh)					\
 	do {								\
 		u64 __val = (u64)(v);					\
-		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
+		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));				\
@@ -46,29 +38,13 @@
 /*
  * Unified accessors for registers that have a different encoding
  * between VHE and non-VHE. They must be specified without their "ELx"
- * encoding.
+ * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
  */
-#define read_sysreg_el2(r)						\
-	({								\
-		u64 reg;						\
-		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
-					 "mrs %0, " __stringify(r##_EL1),\
-					 ARM64_HAS_VIRT_HOST_EXTN)	\
-			     : "=r" (reg));				\
-		reg;							\
-	})
-
-#define write_sysreg_el2(v,r)						\
-	do {								\
-		u64 __val = (u64)(v);					\
-		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
-					 "msr " __stringify(r##_EL1) ", %x0",\
-					 ARM64_HAS_VIRT_HOST_EXTN)	\
-			     : : "rZ" (__val));				\
-	} while (0)
 
 #define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
 #define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
 #define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
 #define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
+#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
+#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
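The unified EL2 accessors are now built on the same read_sysreg_elx()/write_sysreg_elx() machinery as the EL0/EL1 ones instead of being open-coded. A usage sketch, not part of the patch (SYS_ESR and SYS_SCTLR are the existing sysreg.h spellings):

	/*
	 * read_sysreg_el2(SYS_ESR) expands to an alternative: a non-VHE
	 * host executes "mrs %0, esr_el2", while a VHE host is patched
	 * to "mrs %0, esr_el1", which the HCR_EL2.E2H register
	 * redirection turns into an ESR_EL2 access at EL2 anyway.
	 */
	u64 esr = read_sysreg_el2(SYS_ESR);

	/* Guest EL1 state uses the _EL12 encodings when VHE is present. */
	u64 sctlr = read_sysreg_el1(SYS_SCTLR);
	write_sysreg_el1(sctlr, SYS_SCTLR);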
@@ -75,48 +51,7 @@
-/* The VHE specific system registers and their encoding */
-#define sctlr_EL12		sys_reg(3, 5, 1, 0, 0)
-#define cpacr_EL12		sys_reg(3, 5, 1, 0, 2)
-#define ttbr0_EL12		sys_reg(3, 5, 2, 0, 0)
-#define ttbr1_EL12		sys_reg(3, 5, 2, 0, 1)
-#define tcr_EL12		sys_reg(3, 5, 2, 0, 2)
-#define afsr0_EL12		sys_reg(3, 5, 5, 1, 0)
-#define afsr1_EL12		sys_reg(3, 5, 5, 1, 1)
-#define esr_EL12		sys_reg(3, 5, 5, 2, 0)
-#define far_EL12		sys_reg(3, 5, 6, 0, 0)
-#define mair_EL12		sys_reg(3, 5, 10, 2, 0)
-#define amair_EL12		sys_reg(3, 5, 10, 3, 0)
-#define vbar_EL12		sys_reg(3, 5, 12, 0, 0)
-#define contextidr_EL12		sys_reg(3, 5, 13, 0, 1)
-#define cntkctl_EL12		sys_reg(3, 5, 14, 1, 0)
-#define cntp_tval_EL02		sys_reg(3, 5, 14, 2, 0)
-#define cntp_ctl_EL02		sys_reg(3, 5, 14, 2, 1)
-#define cntp_cval_EL02		sys_reg(3, 5, 14, 2, 2)
-#define cntv_tval_EL02		sys_reg(3, 5, 14, 3, 0)
-#define cntv_ctl_EL02		sys_reg(3, 5, 14, 3, 1)
-#define cntv_cval_EL02		sys_reg(3, 5, 14, 3, 2)
-#define spsr_EL12		sys_reg(3, 5, 4, 0, 0)
-#define elr_EL12		sys_reg(3, 5, 4, 0, 1)
-
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version as it's always inlined.
  */
-#define hyp_alternate_select(fname, orig, alt, cond)			\
-typeof(orig) * __hyp_text fname(void)					\
-{									\
-	typeof(alt) *val = orig;					\
-	asm volatile(ALTERNATIVE("nop		\n",			\
-				 "mov	%0, %1	\n",			\
-				 cond)					\
-		     : "+r" (val) : "r" (alt));				\
-	return val;							\
-}
+#define __kvm_swab32(x) ___constant_swab32(x)
 
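The comment above motivates keeping the byte-swap as a macro: hyp code must not branch to out-of-line kernel helpers. An illustrative use (the constant is arbitrary; big-endian GICv2 guest accesses are where KVM actually byte-swaps):

	/*
	 * ___constant_swab32() is pure shifts and masks, so this compiles
	 * to inline arithmetic with no call to a kernel symbol that would
	 * be unmapped at EL2.
	 */
	u32 host_val = __kvm_swab32(0x12345678);	/* yields 0x78563412 */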
@@ -123,10 +58,10 @@
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
+void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
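Retyping the vGICv3 helpers from struct kvm_vcpu * to struct vgic_v3_cpu_if * means hyp only touches the CPU-interface state it actually needs. A plausible call site, assuming the usual vgic_cpu layout in struct kvm_vcpu_arch:

	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	__vgic_v3_save_state(cpu_if);		/* hardware LRs back into cpu_if */
	__vgic_v3_deactivate_traps(cpu_if);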
@@ -133,12 +68,15 @@
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __timer_enable_traps(struct kvm_vcpu *vcpu);
 void __timer_disable_traps(struct kvm_vcpu *vcpu);
+#endif
 
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
+#else
 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
-void __sysreg32_save_state(struct kvm_vcpu *vcpu);
-void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+#endif
 
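The new __KVM_NVHE_HYPERVISOR__ guards rely on this header being compiled into two separate images. A sketch of the effect; the wrapper function here is hypothetical:

	/*
	 * Each image sees, and can therefore only link against, its own
	 * flavour of the helpers; calling across worlds becomes a
	 * compile-time error rather than a runtime crash.
	 */
	static inline void hyp_save_host_sysregs(struct kvm_cpu_context *host_ctxt)
	{
	#ifdef __KVM_NVHE_HYPERVISOR__
		__sysreg_save_state_nvhe(host_ctxt);	/* nVHE image */
	#else
		sysreg_save_host_state_vhe(host_ctxt);	/* VHE/kernel build */
	#endif
	}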
@@ -145,13 +83,18 @@
 void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);
+
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
 void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
-
+#endif
 
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-bool __fpsimd_enabled(void);
+void __sve_save_state(void *sve_pffr, u32 *fpsr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr);
 
+#ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
 void deactivate_traps_vhe_put(void);
+#endif
 
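bool __fpsimd_enabled() gives way to SVE save/restore primitives that mirror the FPSIMD pair. A hedged example of a caller; vcpu_sve_pffr() and the fp_regs field are assumed from the surrounding KVM tree rather than shown in this patch:

	/*
	 * Restore the guest's SVE state: the first argument is the pointer
	 * the low-level assembly uses to locate the SVE register file, the
	 * second points at the saved FPSR.
	 */
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);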
@@ -158,5 +101,22 @@
-u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
-void __noreturn __hyp_do_panic(unsigned long, ...);
+u64 __guest_enter(struct kvm_vcpu *vcpu);
+
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
+
+void __noreturn hyp_panic(void);
+#ifdef __KVM_NVHE_HYPERVISOR__
+void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+			       u64 elr, u64 par);
+#endif
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
+			    phys_addr_t pgd, void *sp, void *cont_fn);
+int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
+		unsigned long *per_cpu_base, u32 hyp_va_bits);
+void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
+#endif
+
+extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
 
 #endif /* __ARM64_KVM_HYP_H__ */
-
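The closing kvm_nvhe_sym() externs let the kernel name variables that live inside the nVHE image, whose symbols carry a distinguishing prefix. An illustrative boot-time initialisation (read_sanitised_ftr_reg() is the existing cpufeature accessor):

	/*
	 * Fill in the hyp-private copy of the sanitised ID register before
	 * the nVHE image takes control of EL2.
	 */
	kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) =
		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);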