From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] enable docker ppp
---
kernel/arch/arm64/kvm/guest.c | 591 ++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 520 insertions(+), 71 deletions(-)
diff --git a/kernel/arch/arm64/kvm/guest.c b/kernel/arch/arm64/kvm/guest.c
index 870e594..27b783a 100644
--- a/kernel/arch/arm64/kvm/guest.c
+++ b/kernel/arch/arm64/kvm/guest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,51 +6,48 @@
* Derived from arch/arm/kvm/guest.c:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
+#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
+#include <asm/sigcontext.h>
#include "trace.h"
-#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
-#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
-
struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT(hvc_exit_stat),
- VCPU_STAT(wfe_exit_stat),
- VCPU_STAT(wfi_exit_stat),
- VCPU_STAT(mmio_exit_user),
- VCPU_STAT(mmio_exit_kernel),
- VCPU_STAT(exits),
+ VCPU_STAT("halt_successful_poll", halt_successful_poll),
+ VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+ VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+ VCPU_STAT("halt_wakeup", halt_wakeup),
+ VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
+ VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
+ VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
+ VCPU_STAT("mmio_exit_user", mmio_exit_user),
+ VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
+ VCPU_STAT("exits", exits),
+ VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+ VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
{ NULL }
};
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+static bool core_reg_offset_is_vreg(u64 off)
{
- return 0;
+ return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+ off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}
static u64 core_reg_offset_from_id(u64 id)
@@ -57,9 +55,8 @@
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
- u64 off = core_reg_offset_from_id(reg->id);
int size;
switch (off) {
@@ -89,11 +86,83 @@
return -EINVAL;
}
- if (KVM_REG_SIZE(reg->id) == size &&
- IS_ALIGNED(off, size / sizeof(__u32)))
- return 0;
+ if (!IS_ALIGNED(off, size / sizeof(__u32)))
+ return -EINVAL;
- return -EINVAL;
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+ return -EINVAL;
+
+ return size;
+}
+
+static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ u64 off = core_reg_offset_from_id(reg->id);
+ int size = core_reg_size_from_offset(vcpu, off);
+
+ if (size < 0)
+ return NULL;
+
+ if (KVM_REG_SIZE(reg->id) != size)
+ return NULL;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
+ off /= 2;
+ return &vcpu->arch.ctxt.regs.regs[off];
+
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ return &vcpu->arch.ctxt.regs.sp;
+
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ return &vcpu->arch.ctxt.regs.pc;
+
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ return &vcpu->arch.ctxt.regs.pstate;
+
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
+
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
+ return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
+ return &vcpu->arch.ctxt.spsr_abt;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
+ return &vcpu->arch.ctxt.spsr_und;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
+ return &vcpu->arch.ctxt.spsr_irq;
+
+ case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
+ return &vcpu->arch.ctxt.spsr_fiq;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
+ off /= 4;
+ return &vcpu->arch.ctxt.fp_regs.vregs[off];
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ return &vcpu->arch.ctxt.fp_regs.fpsr;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ return &vcpu->arch.ctxt.fp_regs.fpcr;
+
+ default:
+ return NULL;
+ }
}
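
A note on the off /= 2 and off /= 4 steps in core_reg_addr(): core-register offsets are measured in 32-bit words (see core_reg_offset_from_id()), while regs.regs[] entries are 64-bit and fp_regs.vregs[] entries are 128-bit, so the word offset has to be rescaled into an array index. A minimal standalone sketch of the same arithmetic (plain C; the demo_ names are ours, not the kernel's):

    #include <stdint.h>

    struct demo_ctxt {
            uint64_t regs[31];         /* stands in for ctxt.regs.regs[]     */
            __uint128_t vregs[32];     /* stands in for ctxt.fp_regs.vregs[] */
    };

    /* off32 is an offset in 32-bit words, as encoded in the reg ID. */
    static uint64_t *demo_gpr_addr(struct demo_ctxt *c, uint64_t off32)
    {
            return &c->regs[off32 / 2];      /* 64-bit GPRs: 2 words each   */
    }

    static __uint128_t *demo_vreg_addr(struct demo_ctxt *c, uint64_t off32)
    {
            return &c->vregs[off32 / 4];     /* 128-bit vregs: 4 words each */
    }
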
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -105,8 +174,8 @@
* off the index in the "array".
*/
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
- struct kvm_regs *regs = vcpu_gp_regs(vcpu);
- int nr_regs = sizeof(*regs) / sizeof(__u32);
+ int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+ void *addr;
u32 off;
/* Our ID is an index into the kvm_regs struct. */
@@ -115,10 +184,11 @@
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ addr = core_reg_addr(vcpu, reg);
+ if (!addr)
return -EINVAL;
- if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+ if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
@@ -127,10 +197,9 @@
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
- struct kvm_regs *regs = vcpu_gp_regs(vcpu);
- int nr_regs = sizeof(*regs) / sizeof(__u32);
+ int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
__uint128_t tmp;
- void *valp = &tmp;
+ void *valp = &tmp, *addr;
u64 off;
int err = 0;
@@ -140,7 +209,8 @@
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ addr = core_reg_addr(vcpu, reg);
+ if (!addr)
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -155,7 +225,7 @@
u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
- if (!system_supports_32bit_el0())
+ if (!kvm_supports_32bit_el0())
return -EINVAL;
break;
case PSR_AA32_MODE_FIQ:
@@ -178,16 +248,265 @@
}
}
- memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+ memcpy(addr, valp, KVM_REG_SIZE(reg->id));
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
- int i;
+ int i, nr_reg;
- for (i = 0; i < 16; i++)
- *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+ switch (*vcpu_cpsr(vcpu)) {
+ /*
+ * Either we are dealing with user mode, and only the
+ * first 15 registers (+ PC) must be narrowed to 32bit.
+ * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
+ */
+ case PSR_AA32_MODE_USR:
+ case PSR_AA32_MODE_SYS:
+ nr_reg = 15;
+ break;
+
+ /*
+	 * Otherwise, this is a privileged mode, and *all* the
+ * registers must be narrowed to 32bit.
+ */
+ default:
+ nr_reg = 31;
+ break;
+ }
+
+ for (i = 0; i < nr_reg; i++)
+ vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));
+
+ *vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
}
out:
return err;
+}
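
For clarity, the narrowing above is a plain truncation of each 64-bit GPR to its low 32 bits; the two switch arms only differ in how many registers it is applied to. A one-liner sketch (hypothetical demo_ name):

    /* Truncate the first nr_reg GPRs to their low 32 bits. */
    static void demo_narrow_regs(uint64_t *regs, int nr_reg)
    {
            int i;

            for (i = 0; i < nr_reg; i++)
                    regs[i] = (uint32_t)regs[i];
    }
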
+
+#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
+#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
+#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
+
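
The three macros above encode the set of supported vector lengths as a bitmap, one bit per vector quadword count (vq) biased by SVE_VQ_MIN, packed into u64 words. A standalone sketch of the same encoding, assuming SVE_VQ_MIN == 1 as in the arm64 UAPI headers (demo_ names are ours):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SVE_VQ_MIN 1    /* assumption: mirrors SVE_VQ_MIN */

    static int demo_vq_present(const uint64_t *vqs, unsigned int vq)
    {
            unsigned int bit = vq - DEMO_SVE_VQ_MIN;

            return !!(vqs[bit / 64] & ((uint64_t)1 << (bit % 64)));
    }

    int main(void)
    {
            uint64_t vqs[8] = { 0 };

            /* Mark vq = 2, i.e. a 256-bit vector length (vq * 128 bits). */
            vqs[0] |= (uint64_t)1 << (2 - DEMO_SVE_VQ_MIN);
            printf("vq 2 present: %d\n", demo_vq_present(vqs, 2));
            return 0;
    }
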
+static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
+ return -EINVAL;
+
+ memset(vqs, 0, sizeof(vqs));
+
+ max_vq = vcpu_sve_max_vq(vcpu);
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (sve_vq_available(vq))
+ vqs[vq_word(vq)] |= vq_mask(vq);
+
+ if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM; /* too late! */
+
+ if (WARN_ON(vcpu->arch.sve_state))
+ return -EINVAL;
+
+ if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
+ return -EFAULT;
+
+ max_vq = 0;
+ for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
+ if (vq_present(vqs, vq))
+ max_vq = vq;
+
+ if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
+ return -EINVAL;
+
+ /*
+ * Vector lengths supported by the host can't currently be
+ * hidden from the guest individually: instead we can only set a
+ * maximum via ZCR_EL2.LEN. So, make sure the available vector
+ * lengths match the set requested exactly up to the requested
+ * maximum:
+ */
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (vq_present(vqs, vq) != sve_vq_available(vq))
+ return -EINVAL;
+
+ /* Can't run with no vector lengths at all: */
+ if (max_vq < SVE_VQ_MIN)
+ return -EINVAL;
+
+ /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
+ vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
+
+ return 0;
+}
+
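
For context, userspace reaches set_sve_vls() through KVM_SET_ONE_REG on the KVM_REG_ARM64_SVE_VLS pseudo-register, and must do so before finalizing the vcpu (hence the -EPERM above). A hedged userspace sketch, assuming a vcpu created with the KVM_ARM_VCPU_SVE feature; demo_ name is ours and error handling is elided:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int demo_configure_sve(int vcpu_fd,
                                  const uint64_t vqs[KVM_ARM64_SVE_VLS_WORDS])
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM64_SVE_VLS,
                    .addr = (uint64_t)(unsigned long)vqs,
            };
            int feature = KVM_ARM_VCPU_SVE;

            if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
                    return -1;

            /* After this point, set_sve_vls() returns -EPERM ("too late!"). */
            return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
    }
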
+#define SVE_REG_SLICE_SHIFT 0
+#define SVE_REG_SLICE_BITS 5
+#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
+#define SVE_REG_ID_BITS 5
+
+#define SVE_REG_SLICE_MASK \
+ GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \
+ SVE_REG_SLICE_SHIFT)
+#define SVE_REG_ID_MASK \
+ GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)
+
+#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
+
+#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
+#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
+
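
In other words, the low ten bits of an SVE reg ID carry a 5-bit slice index (bits 4:0) and a 5-bit register number (bits 9:5). A tiny decode sketch using the masks above (hypothetical helper name):

    /* Split an SVE reg ID into register number and slice (illustrative). */
    static void demo_sve_id_decode(uint64_t id, unsigned int *reg_num,
                                   unsigned int *slice)
    {
            *slice   = (id & SVE_REG_SLICE_MASK) >> SVE_REG_SLICE_SHIFT;
            *reg_num = (id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
    }
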
+/*
+ * Number of register slices required to cover each whole SVE register.
+ * NOTE: Only the first slice ever exists, for now.
+ * If you are tempted to modify this, you must also rework sve_reg_to_region()
+ * to match:
+ */
+#define vcpu_sve_slices(vcpu) 1
+
+/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
+struct sve_state_reg_region {
+ unsigned int koffset; /* offset into sve_state in kernel memory */
+ unsigned int klen; /* length in kernel memory */
+ unsigned int upad; /* extra trailing padding in user memory */
+};
+
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
+static int sve_reg_to_region(struct sve_state_reg_region *region,
+ struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ /* reg ID ranges for Z- registers */
+ const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
+ const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* reg ID ranges for P- registers and FFR (which are contiguous) */
+ const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
+ const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);
+
+ unsigned int vq;
+ unsigned int reg_num;
+
+ unsigned int reqoffset, reqlen; /* User-requested offset and length */
+ unsigned int maxlen; /* Maximum permitted length */
+
+ size_t sve_state_size;
+
+ const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* Verify that the P-regs and FFR really do have contiguous IDs: */
+ BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);
+
+ /* Verify that we match the UAPI header: */
+ BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
+
+ reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
+
+ if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = vcpu_sve_max_vq(vcpu);
+
+ reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_ZREG_SIZE;
+ maxlen = SVE_SIG_ZREG_SIZE(vq);
+ } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = vcpu_sve_max_vq(vcpu);
+
+ reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_PREG_SIZE;
+ maxlen = SVE_SIG_PREG_SIZE(vq);
+ } else {
+ return -EINVAL;
+ }
+
+ sve_state_size = vcpu_sve_state_size(vcpu);
+ if (WARN_ON(!sve_state_size))
+ return -EINVAL;
+
+ region->koffset = array_index_nospec(reqoffset, sve_state_size);
+ region->klen = min(maxlen, reqlen);
+ region->upad = reqlen - region->klen;
+
+ return 0;
+}
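
To make the region fields concrete: reqlen is the fixed UAPI register size (sized for the 2048-bit architectural maximum), maxlen is the live size for this vcpu's vector length, klen is the part actually backed by sve_state, and upad is the tail that reads must zero-fill. A worked example, assuming a 256-bit vector length (vq = 2):

    /*
     * Z-register, vq = 2:
     *   reqlen = KVM_SVE_ZREG_SIZE    = 256 bytes (2048-bit UAPI slot)
     *   maxlen = SVE_SIG_ZREG_SIZE(2) = 2 * 16 = 32 bytes
     *   klen   = min(maxlen, reqlen)  = 32 bytes copied from sve_state
     *   upad   = reqlen - klen        = 224 bytes zero-filled on read
     */
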
+
+static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ char __user *uptr = (char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return get_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
+ region.klen) ||
+ clear_user(uptr + region.klen, region.upad))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ const char __user *uptr = (const char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return set_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
+ region.klen))
+ return -EFAULT;
+
+ return 0;
}
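
As with the SVE registers, userspace drives get_core_reg()/set_core_reg() through the generic one-reg ioctls. A hedged sketch of reading x0 via KVM_GET_ONE_REG, mirroring how KVM_REG_ARM_CORE_REG() derives the index from a struct kvm_regs offset (demo_ name is ours; error handling elided):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int demo_get_x0(int vcpu_fd, uint64_t *val)
    {
            struct kvm_one_reg reg = {
                    /* Index = offset into struct kvm_regs, in 32-bit words. */
                    .id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
                            (offsetof(struct kvm_regs, regs.regs[0]) /
                             sizeof(uint32_t)),
                    .addr = (uint64_t)(unsigned long)val,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }
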
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -200,9 +519,52 @@
return -EINVAL;
}
-static unsigned long num_core_regs(void)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
{
- return sizeof(struct kvm_regs) / sizeof(__u32);
+ unsigned int i;
+ int n = 0;
+
+ for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+ u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+ int size = core_reg_size_from_offset(vcpu, i);
+
+ if (size < 0)
+ continue;
+
+ switch (size) {
+ case sizeof(__u32):
+ reg |= KVM_REG_SIZE_U32;
+ break;
+
+ case sizeof(__u64):
+ reg |= KVM_REG_SIZE_U64;
+ break;
+
+ case sizeof(__uint128_t):
+ reg |= KVM_REG_SIZE_U128;
+ break;
+
+ default:
+ WARN_ON(1);
+ continue;
+ }
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
+{
+ return copy_core_reg_indices(vcpu, NULL);
}
/**
@@ -258,6 +620,67 @@
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
+static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
+ + 1; /* KVM_REG_ARM64_SVE_VLS */
+}
+
+static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+ u64 reg;
+ unsigned int i, n;
+ int num_regs = 0;
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ /*
+ * Enumerate this first, so that userspace can save/restore in
+ * the order reported by KVM_GET_REG_LIST:
+ */
+ reg = KVM_REG_ARM64_SVE_VLS;
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ ++num_regs;
+
+ for (i = 0; i < slices; i++) {
+ for (n = 0; n < SVE_NUM_ZREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_ZREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ for (n = 0; n < SVE_NUM_PREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_PREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ reg = KVM_REG_ARM64_SVE_FFR(i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ return num_regs;
+}
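
copy_sve_reg_indices() feeds KVM_GET_REG_LIST, whose usual userspace pattern is a two-call probe: the first call fails with E2BIG and reports the required count in n. A hedged sketch (demo_ name is ours):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static struct kvm_reg_list *demo_get_reg_list(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 };
            struct kvm_reg_list *list;

            /* Expected to fail with E2BIG, filling in probe.n. */
            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

            list = malloc(sizeof(*list) + probe.n * sizeof(uint64_t));
            if (!list)
                    return NULL;

            list->n = probe.n;
            if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
                    free(list);
                    return NULL;
            }
            return list;
    }
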
+
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
@@ -265,8 +688,15 @@
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
- + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
+ unsigned long res = 0;
+
+ res += num_core_regs(vcpu);
+ res += num_sve_regs(vcpu);
+ res += kvm_arm_num_sys_reg_descs(vcpu);
+ res += kvm_arm_get_fw_num_regs(vcpu);
+ res += NUM_TIMER_REGS;
+
+ return res;
}
/**
@@ -276,23 +706,25 @@
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- unsigned int i;
- const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
int ret;
- for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
- if (put_user(core_reg | i, uindices))
- return -EFAULT;
- uindices++;
- }
+ ret = copy_core_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_sve_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += NUM_TIMER_REGS;
@@ -305,12 +737,11 @@
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we want a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return get_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_get_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -324,12 +755,11 @@
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we set a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return set_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_set_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
@@ -358,6 +788,12 @@
if (events->exception.serror_pending && events->exception.serror_has_esr)
events->exception.serror_esr = vcpu_get_vsesr(vcpu);
+ /*
+ * We never return a pending ext_dabt here because we deliver it to
+ * the virtual CPU directly when setting the event and it's no longer
+ * 'pending' at this point.
+ */
+
return 0;
}
@@ -366,6 +802,7 @@
{
bool serror_pending = events->exception.serror_pending;
bool has_esr = events->exception.serror_has_esr;
+ bool ext_dabt_pending = events->exception.ext_dabt_pending;
if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
@@ -378,6 +815,9 @@
} else if (serror_pending) {
kvm_inject_vabt(vcpu);
}
+
+ if (ext_dabt_pending)
+ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 0;
}
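
The new ext_dabt_pending path is driven from userspace via KVM_SET_VCPU_EVENTS. A minimal sketch, assuming the host advertises KVM_CAP_ARM_INJECT_EXT_DABT (demo_ name is ours):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int demo_inject_ext_dabt(int vcpu_fd)
    {
            struct kvm_vcpu_events events;

            memset(&events, 0, sizeof(events));
            events.exception.ext_dabt_pending = 1;

            return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
    }
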
@@ -398,15 +838,15 @@
return KVM_ARM_TARGET_CORTEX_A53;
case ARM_CPU_PART_CORTEX_A57:
return KVM_ARM_TARGET_CORTEX_A57;
- };
+ }
break;
case ARM_CPU_IMP_APM:
switch (part_number) {
case APM_CPU_PART_POTENZA:
return KVM_ARM_TARGET_XGENE_POTENZA;
- };
+ }
break;
- };
+ }
/* Return a default generic target */
return KVM_ARM_TARGET_GENERIC_V8;
@@ -504,6 +944,9 @@
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVTIME_CTRL:
+ ret = kvm_arm_pvtime_set_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -524,6 +967,9 @@
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_get_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVTIME_CTRL:
+ ret = kvm_arm_pvtime_get_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -544,6 +990,9 @@
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_has_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_PVTIME_CTRL:
+ ret = kvm_arm_pvtime_has_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
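
The three KVM_ARM_VCPU_PVTIME_CTRL hooks added above are exercised through the vcpu device-attribute ioctls. A hedged sketch of pointing the stolen-time structure at a guest IPA (demo_ name is ours; the IPA must be 64-byte aligned and backed by a memslot):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int demo_enable_pvtime(int vcpu_fd, uint64_t ipa)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VCPU_PVTIME_CTRL,
                    .attr  = KVM_ARM_VCPU_PVTIME_IPA,
                    .addr  = (uint64_t)(unsigned long)&ipa,
            };

            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
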
--
Gitblit v1.6.2