@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,92 +6,183 @@
  * Derived from arch/arm/kvm/reset.c
  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/errno.h>
+#include <linux/kernel.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
 
 #include <kvm/arm_arch_timer.h>
 
+#include <asm/cpufeature.h>
 #include <asm/cputype.h>
+#include <asm/fpsimd.h>
 #include <asm/ptrace.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
+#include <asm/virt.h>
+
+/* Maximum phys_shift supported for any VM on this host */
+static u32 kvm_ipa_limit;
 
 /*
  * ARMv8 Reset Values
  */
-static const struct kvm_regs default_regs_reset = {
-	.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
-			PSR_F_BIT | PSR_D_BIT),
-};
+#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
+				 PSR_F_BIT | PSR_D_BIT)
 
-static const struct kvm_regs default_regs_reset32 = {
-	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
-			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
-};
+#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
+				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
 
-static bool cpu_has_32bit_el1(void)
+unsigned int kvm_sve_max_vl;
+
+int kvm_arm_init_sve(void)
 {
-	u64 pfr0;
+	if (system_supports_sve()) {
+		kvm_sve_max_vl = sve_max_virtualisable_vl;
 
-	pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
-	return !!(pfr0 & 0x20);
-}
+		/*
+		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
+		 * to be extended with multiple register slice support in
+		 * order to support vector lengths greater than
+		 * SVE_VL_ARCH_MAX:
+		 */
+		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
+			kvm_sve_max_vl = SVE_VL_ARCH_MAX;
 
-/**
- * kvm_arch_dev_ioctl_check_extension
- *
- * We currently assume that the number of HW registers is uniform
- * across all CPUs (see cpuinfo_sanity_check).
- */
-int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
-{
-	int r;
-
-	switch (ext) {
-	case KVM_CAP_ARM_EL1_32BIT:
-		r = cpu_has_32bit_el1();
-		break;
-	case KVM_CAP_GUEST_DEBUG_HW_BPS:
-		r = get_num_brps();
-		break;
-	case KVM_CAP_GUEST_DEBUG_HW_WPS:
-		r = get_num_wrps();
-		break;
-	case KVM_CAP_ARM_PMU_V3:
-		r = kvm_arm_support_pmu_v3();
-		break;
-	case KVM_CAP_ARM_INJECT_SERROR_ESR:
-		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
-		break;
-	case KVM_CAP_SET_GUEST_DEBUG:
-	case KVM_CAP_VCPU_ATTRIBUTES:
-	case KVM_CAP_VCPU_EVENTS:
-		r = 1;
-		break;
-	default:
-		r = 0;
+		/*
+		 * Don't even try to make use of vector lengths that
+		 * aren't available on all CPUs, for now:
+		 */
+		if (kvm_sve_max_vl < sve_max_vl)
+			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
+				kvm_sve_max_vl);
 	}
 
-	return r;
+	return 0;
+}
+
+static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
+{
+	if (!system_supports_sve())
+		return -EINVAL;
+
+	vcpu->arch.sve_max_vl = kvm_sve_max_vl;
+
+	/*
+	 * Userspace can still customize the vector lengths by writing
+	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
+	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
+	 */
+	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
+
+	return 0;
+}
+
+/*
+ * Finalize vcpu's maximum SVE vector length, allocating
+ * vcpu->arch.sve_state as necessary.
+ */
+static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
+{
+	void *buf;
+	unsigned int vl;
+
+	vl = vcpu->arch.sve_max_vl;
+
+	/*
+	 * Responsibility for these properties is shared between
+	 * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
+	 * set_sve_vls(). Double-check here just to be sure:
+	 */
+	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
+		    vl > SVE_VL_ARCH_MAX))
+		return -EIO;
+
+	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	vcpu->arch.sve_state = buf;
+	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
+	return 0;
+}
+
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
+{
+	switch (feature) {
+	case KVM_ARM_VCPU_SVE:
+		if (!vcpu_has_sve(vcpu))
+			return -EINVAL;
+
+		if (kvm_arm_vcpu_sve_finalized(vcpu))
+			return -EPERM;
+
+		return kvm_vcpu_finalize_sve(vcpu);
+	}
+
+	return -EINVAL;
+}
+
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
+		return false;
+
+	return true;
+}
+
+void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	kfree(vcpu->arch.sve_state);
+}
+
+static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
+{
+	if (vcpu_has_sve(vcpu))
+		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
+}
+
+static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * For now make sure that both address/generic pointer authentication
+	 * features are requested by the userspace together and the system
+	 * supports these capabilities.
+	 */
+	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
+	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
+	    !system_has_full_ptr_auth())
+		return -EINVAL;
+
+	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
+	return 0;
+}
+
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu *tmp;
+	bool is32bit;
+	int i;
+
+	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+		return false;
+
+	/* Check that the vcpus are either all 32bit or all 64bit */
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+			return false;
+	}
+
+	return true;
 }
 
 /**
@@ -99,13 +191,14 @@
  *
  * This function finds the right table above and sets the registers on
  * the virtual CPU struct to their architecturally defined reset
- * values.
+ * values, except for registers whose reset is deferred until
+ * kvm_arm_vcpu_finalize().
  *
  * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
  * ioctl or as part of handling a request issued by another VCPU in the PSCI
  * handling code. In the first case, the VCPU will not be loaded, and in the
  * second case the VCPU will be loaded. Because this function operates purely
- * on the memory-backed valus of system registers, we want to do a full put if
+ * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
@@ -113,9 +206,15 @@
 */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-	const struct kvm_regs *cpu_reset;
-	int ret = -EINVAL;
+	struct vcpu_reset_state reset_state;
+	int ret;
 	bool loaded;
+	u32 pstate;
+
+	mutex_lock(&vcpu->kvm->lock);
+	reset_state = vcpu->arch.reset_state;
+	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+	mutex_unlock(&vcpu->kvm->lock);
 
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);
@@ -125,21 +224,52 @@
 	if (loaded)
 		kvm_arch_vcpu_put(vcpu);
 
+	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
+		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
+			ret = kvm_vcpu_enable_sve(vcpu);
+			if (ret)
+				goto out;
+		}
+	} else {
+		kvm_vcpu_reset_sve(vcpu);
+	}
+
+	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
+	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
+		if (kvm_vcpu_enable_ptrauth(vcpu)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (!vcpu_allowed_register_width(vcpu)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	switch (vcpu->arch.target) {
 	default:
 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-			if (!cpu_has_32bit_el1())
-				goto out;
-			cpu_reset = &default_regs_reset32;
+			pstate = VCPU_RESET_PSTATE_SVC;
 		} else {
-			cpu_reset = &default_regs_reset;
+			pstate = VCPU_RESET_PSTATE_EL1;
 		}
 
+		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
+			ret = -EINVAL;
+			goto out;
+		}
 		break;
 	}
 
 	/* Reset core registers */
-	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
+	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
+	vcpu->arch.ctxt.spsr_abt = 0;
+	vcpu->arch.ctxt.spsr_und = 0;
+	vcpu->arch.ctxt.spsr_irq = 0;
+	vcpu->arch.ctxt.spsr_fiq = 0;
+	vcpu_gp_regs(vcpu)->pstate = pstate;
 
 	/* Reset system registers */
 	kvm_reset_sys_regs(vcpu);
@@ -148,8 +278,8 @@
 	 * Additional reset state handling that PSCI may have imposed on us.
 	 * Must be done after all the sys_reg reset.
 	 */
-	if (vcpu->arch.reset_state.reset) {
-		unsigned long target_pc = vcpu->arch.reset_state.pc;
+	if (reset_state.reset) {
+		unsigned long target_pc = reset_state.pc;
 
 		/* Gracefully handle Thumb2 entry point */
 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
@@ -158,18 +288,12 @@
 		}
 
 		/* Propagate caller endianness */
-		if (vcpu->arch.reset_state.be)
+		if (reset_state.be)
 			kvm_vcpu_set_be(vcpu);
 
 		*vcpu_pc(vcpu) = target_pc;
-		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
-
-		vcpu->arch.reset_state.reset = false;
+		vcpu_set_reg(vcpu, 0, reset_state.r0);
 	}
-
-	/* Default workaround setup is enabled (if supported) */
-	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
 
 	/* Reset timer */
 	ret = kvm_timer_vcpu_reset(vcpu);
@@ -179,3 +303,94 @@
 	preempt_enable();
 	return ret;
 }
+
+u32 get_kvm_ipa_limit(void)
+{
+	return kvm_ipa_limit;
+}
+
+int kvm_set_ipa_limit(void)
+{
+	unsigned int parange, tgran_2;
+	u64 mmfr0;
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	parange = cpuid_feature_extract_unsigned_field(mmfr0,
+				ID_AA64MMFR0_PARANGE_SHIFT);
+	/*
+	 * IPA size beyond 48 bits could not be supported
+	 * on either 4K or 16K page size. Hence let's cap
+	 * it to 48 bits, in case it's reported as larger
+	 * on the system.
+	 */
+	if (PAGE_SIZE != SZ_64K)
+		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+
+	/*
+	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
+	 * Stage-2. If not, things will stop very quickly.
+	 */
+	switch (PAGE_SIZE) {
+	default:
+	case SZ_4K:
+		tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
+		break;
+	case SZ_16K:
+		tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
+		break;
+	case SZ_64K:
+		tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
+		break;
+	}
+
+	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
+		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
+		return -EINVAL;
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
+		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
+		break;
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
+		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
+		break;
+	default:
+		kvm_err("Unsupported value for TGRAN_2, giving up\n");
+		return -EINVAL;
+	}
+
+	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
+	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
+		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
+		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));
+
+	return 0;
+}
+
+int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
+{
+	u64 mmfr0, mmfr1;
+	u32 phys_shift;
+
+	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+		return -EINVAL;
+
+	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
+	if (phys_shift) {
+		if (phys_shift > kvm_ipa_limit ||
+		    phys_shift < 32)
+			return -EINVAL;
+	} else {
+		phys_shift = KVM_PHYS_SHIFT;
+		if (phys_shift > kvm_ipa_limit) {
+			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+				     current->comm);
+			return -EINVAL;
+		}
+	}
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
+
+	return 0;
+}
---|