forked from ~ljy/RK356X_SDK_RELEASE

hc · 2024-05-10 · commit 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/arm64/kvm/guest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,51 +6,48 @@
  * Derived from arch/arm/kvm/guest.c:
  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bits.h>
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/nospec.h>
 #include <linux/kvm_host.h>
 #include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <linux/uaccess.h>
+#include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
+#include <asm/sigcontext.h>
 
 #include "trace.h"
 
-#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
-#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
-
 struct kvm_stats_debugfs_item debugfs_entries[] = {
-	VCPU_STAT(hvc_exit_stat),
-	VCPU_STAT(wfe_exit_stat),
-	VCPU_STAT(wfi_exit_stat),
-	VCPU_STAT(mmio_exit_user),
-	VCPU_STAT(mmio_exit_kernel),
-	VCPU_STAT(exits),
+	VCPU_STAT("halt_successful_poll", halt_successful_poll),
+	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+	VCPU_STAT("halt_wakeup", halt_wakeup),
+	VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
+	VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
+	VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
+	VCPU_STAT("mmio_exit_user", mmio_exit_user),
+	VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
+	VCPU_STAT("exits", exits),
+	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
 	{ NULL }
 };
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+static bool core_reg_offset_is_vreg(u64 off)
 {
-	return 0;
+	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
 }
 
 static u64 core_reg_offset_from_id(u64 id)
@@ -57,9 +55,8 @@
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
 {
-	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
 
 	switch (off) {
@@ -89,11 +86,83 @@
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) == size &&
-	    IS_ALIGNED(off, size / sizeof(__u32)))
-		return 0;
+	if (!IS_ALIGNED(off, size / sizeof(__u32)))
+		return -EINVAL;
 
-	return -EINVAL;
+	/*
+	 * The KVM_REG_ARM64_SVE regs must be used instead of
+	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+	 * SVE-enabled vcpus:
+	 */
+	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+		return -EINVAL;
+
+	return size;
+}
+
+static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size = core_reg_size_from_offset(vcpu, off);
+
+	if (size < 0)
+		return NULL;
+
+	if (KVM_REG_SIZE(reg->id) != size)
+		return NULL;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
+		off /= 2;
+		return &vcpu->arch.ctxt.regs.regs[off];
+
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+		return &vcpu->arch.ctxt.regs.sp;
+
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+		return &vcpu->arch.ctxt.regs.pc;
+
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+		return &vcpu->arch.ctxt.regs.pstate;
+
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
+
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
+		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
+		return &vcpu->arch.ctxt.spsr_abt;
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
+		return &vcpu->arch.ctxt.spsr_und;
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
+		return &vcpu->arch.ctxt.spsr_irq;
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
+		return &vcpu->arch.ctxt.spsr_fiq;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
+		off /= 4;
+		return &vcpu->arch.ctxt.fp_regs.vregs[off];
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+		return &vcpu->arch.ctxt.fp_regs.fpsr;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		return &vcpu->arch.ctxt.fp_regs.fpcr;
+
+	default:
+		return NULL;
+	}
 }
 
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -105,8 +174,8 @@
 	 * off the index in the "array".
 	 */
 	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-	int nr_regs = sizeof(*regs) / sizeof(__u32);
+	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+	void *addr;
 	u32 off;
 
 	/* Our ID is an index into the kvm_regs struct. */
@@ -115,10 +184,11 @@
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	addr = core_reg_addr(vcpu, reg);
+	if (!addr)
 		return -EINVAL;
 
-	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
 	return 0;
@@ -127,10 +197,9 @@
 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-	int nr_regs = sizeof(*regs) / sizeof(__u32);
+	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
 	__uint128_t tmp;
-	void *valp = &tmp;
+	void *valp = &tmp, *addr;
 	u64 off;
 	int err = 0;
 
@@ -140,7 +209,8 @@
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
-	if (validate_core_offset(reg))
+	addr = core_reg_addr(vcpu, reg);
+	if (!addr)
 		return -EINVAL;
 
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -155,7 +225,7 @@
 		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
 		switch (mode) {
 		case PSR_AA32_MODE_USR:
-			if (!system_supports_32bit_el0())
+			if (!kvm_supports_32bit_el0())
 				return -EINVAL;
 			break;
 		case PSR_AA32_MODE_FIQ:
@@ -178,16 +248,265 @@
 		}
 	}
 
-	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+	memcpy(addr, valp, KVM_REG_SIZE(reg->id));
 
 	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
-		int i;
+		int i, nr_reg;
 
-		for (i = 0; i < 16; i++)
-			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+		switch (*vcpu_cpsr(vcpu)) {
+		/*
+		 * Either we are dealing with user mode, and only the
+		 * first 15 registers (+ PC) must be narrowed to 32bit.
+		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
+		 */
+		case PSR_AA32_MODE_USR:
+		case PSR_AA32_MODE_SYS:
+			nr_reg = 15;
+			break;
+
+		/*
+		 * Otherwise, this is a privileged mode, and *all* the
+		 * registers must be narrowed to 32bit.
+		 */
+		default:
+			nr_reg = 31;
+			break;
+		}
+
+		for (i = 0; i < nr_reg; i++)
+			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));
+
+		*vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
 	}
 out:
 	return err;
+}
+
+#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
+#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
+#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
+
+static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	unsigned int max_vq, vq;
+	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
+	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
+		return -EINVAL;
+
+	memset(vqs, 0, sizeof(vqs));
+
+	max_vq = vcpu_sve_max_vq(vcpu);
+	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+		if (sve_vq_available(vq))
+			vqs[vq_word(vq)] |= vq_mask(vq);
+
+	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	unsigned int max_vq, vq;
+	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+	if (!vcpu_has_sve(vcpu))
+		return -ENOENT;
+
+	if (kvm_arm_vcpu_sve_finalized(vcpu))
+		return -EPERM; /* too late! */
+
+	if (WARN_ON(vcpu->arch.sve_state))
+		return -EINVAL;
+
+	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
+		return -EFAULT;
+
+	max_vq = 0;
+	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
+		if (vq_present(vqs, vq))
+			max_vq = vq;
+
+	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
+		return -EINVAL;
+
+	/*
+	 * Vector lengths supported by the host can't currently be
+	 * hidden from the guest individually: instead we can only set a
+	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
+	 * lengths match the set requested exactly up to the requested
+	 * maximum:
+	 */
+	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+		if (vq_present(vqs, vq) != sve_vq_available(vq))
+			return -EINVAL;
+
+	/* Can't run with no vector lengths at all: */
+	if (max_vq < SVE_VQ_MIN)
+		return -EINVAL;
+
+	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
+	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
+
+	return 0;
+}
+
+#define SVE_REG_SLICE_SHIFT	0
+#define SVE_REG_SLICE_BITS	5
+#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
+#define SVE_REG_ID_BITS		5
+
+#define SVE_REG_SLICE_MASK					\
+	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
+		SVE_REG_SLICE_SHIFT)
+#define SVE_REG_ID_MASK							\
+	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)
+
+#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
+
+#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
+#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
+
+/*
+ * Number of register slices required to cover each whole SVE register.
+ * NOTE: Only the first slice ever exists, for now.
+ * If you are tempted to modify this, you must also rework sve_reg_to_region()
+ * to match:
+ */
+#define vcpu_sve_slices(vcpu) 1
+
+/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
+struct sve_state_reg_region {
+	unsigned int koffset;	/* offset into sve_state in kernel memory */
+	unsigned int klen;	/* length in kernel memory */
+	unsigned int upad;	/* extra trailing padding in user memory */
+};
+
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
+static int sve_reg_to_region(struct sve_state_reg_region *region,
+			     struct kvm_vcpu *vcpu,
+			     const struct kvm_one_reg *reg)
+{
+	/* reg ID ranges for Z- registers */
+	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
+	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
+						       SVE_NUM_SLICES - 1);
+
+	/* reg ID ranges for P- registers and FFR (which are contiguous) */
+	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
+	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);
+
+	unsigned int vq;
+	unsigned int reg_num;
+
+	unsigned int reqoffset, reqlen;	/* User-requested offset and length */
+	unsigned int maxlen;		/* Maximum permitted length */
+
+	size_t sve_state_size;
+
+	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
+							SVE_NUM_SLICES - 1);
+
+	/* Verify that the P-regs and FFR really do have contiguous IDs: */
+	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);
+
+	/* Verify that we match the UAPI header: */
+	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
+
+	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
+
+	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+			return -ENOENT;
+
+		vq = vcpu_sve_max_vq(vcpu);
+
+		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
+				SVE_SIG_REGS_OFFSET;
+		reqlen = KVM_SVE_ZREG_SIZE;
+		maxlen = SVE_SIG_ZREG_SIZE(vq);
+	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+			return -ENOENT;
+
+		vq = vcpu_sve_max_vq(vcpu);
+
+		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
+				SVE_SIG_REGS_OFFSET;
+		reqlen = KVM_SVE_PREG_SIZE;
+		maxlen = SVE_SIG_PREG_SIZE(vq);
+	} else {
+		return -EINVAL;
+	}
+
+	sve_state_size = vcpu_sve_state_size(vcpu);
+	if (WARN_ON(!sve_state_size))
+		return -EINVAL;
+
+	region->koffset = array_index_nospec(reqoffset, sve_state_size);
+	region->klen = min(maxlen, reqlen);
+	region->upad = reqlen - region->klen;
+
+	return 0;
+}
+
+static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	int ret;
+	struct sve_state_reg_region region;
+	char __user *uptr = (char __user *)reg->addr;
+
+	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+	if (reg->id == KVM_REG_ARM64_SVE_VLS)
+		return get_sve_vls(vcpu, reg);
+
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
+
+	if (!kvm_arm_vcpu_sve_finalized(vcpu))
+		return -EPERM;
+
+	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
+			 region.klen) ||
+	    clear_user(uptr + region.klen, region.upad))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+	int ret;
+	struct sve_state_reg_region region;
+	const char __user *uptr = (const char __user *)reg->addr;
+
+	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+	if (reg->id == KVM_REG_ARM64_SVE_VLS)
+		return set_sve_vls(vcpu, reg);
+
+	/* Try to interpret reg ID as an architectural SVE register... */
+	ret = sve_reg_to_region(&region, vcpu, reg);
+	if (ret)
+		return ret;
+
+	if (!kvm_arm_vcpu_sve_finalized(vcpu))
+		return -EPERM;
+
+	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
+			   region.klen))
+		return -EFAULT;
+
+	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -200,9 +519,52 @@
 	return -EINVAL;
 }
 
-static unsigned long num_core_regs(void)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+				 u64 __user *uindices)
 {
-	return sizeof(struct kvm_regs) / sizeof(__u32);
+	unsigned int i;
+	int n = 0;
+
+	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+		int size = core_reg_size_from_offset(vcpu, i);
+
+		if (size < 0)
+			continue;
+
+		switch (size) {
+		case sizeof(__u32):
+			reg |= KVM_REG_SIZE_U32;
+			break;
+
+		case sizeof(__u64):
+			reg |= KVM_REG_SIZE_U64;
+			break;
+
+		case sizeof(__uint128_t):
+			reg |= KVM_REG_SIZE_U128;
+			break;
+
+		default:
+			WARN_ON(1);
+			continue;
+		}
+
+		if (uindices) {
+			if (put_user(reg, uindices))
+				return -EFAULT;
+			uindices++;
+		}
+
+		n++;
+	}
+
+	return n;
+}
+
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
+{
+	return copy_core_reg_indices(vcpu, NULL);
 }
 
 /**
@@ -258,6 +620,67 @@
 	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 }
 
+static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
+{
+	const unsigned int slices = vcpu_sve_slices(vcpu);
+
+	if (!vcpu_has_sve(vcpu))
+		return 0;
+
+	/* Policed by KVM_GET_REG_LIST: */
+	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
+		+ 1; /* KVM_REG_ARM64_SVE_VLS */
+}
+
+static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
+				u64 __user *uindices)
+{
+	const unsigned int slices = vcpu_sve_slices(vcpu);
+	u64 reg;
+	unsigned int i, n;
+	int num_regs = 0;
+
+	if (!vcpu_has_sve(vcpu))
+		return 0;
+
+	/* Policed by KVM_GET_REG_LIST: */
+	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+	/*
+	 * Enumerate this first, so that userspace can save/restore in
+	 * the order reported by KVM_GET_REG_LIST:
+	 */
+	reg = KVM_REG_ARM64_SVE_VLS;
+	if (put_user(reg, uindices++))
+		return -EFAULT;
+	++num_regs;
+
+	for (i = 0; i < slices; i++) {
+		for (n = 0; n < SVE_NUM_ZREGS; n++) {
+			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
+			if (put_user(reg, uindices++))
+				return -EFAULT;
+			num_regs++;
+		}
+
+		for (n = 0; n < SVE_NUM_PREGS; n++) {
+			reg = KVM_REG_ARM64_SVE_PREG(n, i);
+			if (put_user(reg, uindices++))
+				return -EFAULT;
+			num_regs++;
+		}
+
+		reg = KVM_REG_ARM64_SVE_FFR(i);
+		if (put_user(reg, uindices++))
+			return -EFAULT;
+		num_regs++;
+	}
+
+	return num_regs;
+}
+
 /**
  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
  *
@@ -265,8 +688,15 @@
  */
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
-	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
-		+ kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
+	unsigned long res = 0;
+
+	res += num_core_regs(vcpu);
+	res += num_sve_regs(vcpu);
+	res += kvm_arm_num_sys_reg_descs(vcpu);
+	res += kvm_arm_get_fw_num_regs(vcpu);
+	res += NUM_TIMER_REGS;
+
+	return res;
 }
 
 /**
@@ -276,23 +706,25 @@
  */
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 {
-	unsigned int i;
-	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 	int ret;
 
-	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
-		if (put_user(core_reg | i, uindices))
-			return -EFAULT;
-		uindices++;
-	}
+	ret = copy_core_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
+
+	ret = copy_sve_reg_indices(vcpu, uindices);
+	if (ret < 0)
+		return ret;
+	uindices += ret;
 
 	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
-	if (ret)
+	if (ret < 0)
 		return ret;
 	uindices += kvm_arm_get_fw_num_regs(vcpu);
 
 	ret = copy_timer_indices(vcpu, uindices);
-	if (ret)
+	if (ret < 0)
 		return ret;
 	uindices += NUM_TIMER_REGS;
 
@@ -305,12 +737,11 @@
 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
 		return -EINVAL;
 
-	/* Register group 16 means we want a core register. */
-	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
-		return get_core_reg(vcpu, reg);
-
-	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
-		return kvm_arm_get_fw_reg(vcpu, reg);
+	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
+	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
+	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
+	}
 
 	if (is_timer_reg(reg->id))
 		return get_timer_reg(vcpu, reg);
@@ -324,12 +755,11 @@
 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
 		return -EINVAL;
 
-	/* Register group 16 means we set a core register. */
-	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
-		return set_core_reg(vcpu, reg);
-
-	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
-		return kvm_arm_set_fw_reg(vcpu, reg);
+	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
+	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
+	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
+	}
 
 	if (is_timer_reg(reg->id))
 		return set_timer_reg(vcpu, reg);
@@ -358,6 +788,12 @@
 	if (events->exception.serror_pending && events->exception.serror_has_esr)
 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
 
+	/*
+	 * We never return a pending ext_dabt here because we deliver it to
+	 * the virtual CPU directly when setting the event and it's no longer
+	 * 'pending' at this point.
+	 */
+
 	return 0;
 }
 
@@ -366,6 +802,7 @@
 {
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
+	bool ext_dabt_pending = events->exception.ext_dabt_pending;
 
 	if (serror_pending && has_esr) {
 		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
@@ -378,6 +815,9 @@
 	} else if (serror_pending) {
 		kvm_inject_vabt(vcpu);
 	}
+
+	if (ext_dabt_pending)
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
 	return 0;
 }
@@ -398,15 +838,15 @@
 			return KVM_ARM_TARGET_CORTEX_A53;
 		case ARM_CPU_PART_CORTEX_A57:
 			return KVM_ARM_TARGET_CORTEX_A57;
-		};
+		}
 		break;
 	case ARM_CPU_IMP_APM:
 		switch (part_number) {
 		case APM_CPU_PART_POTENZA:
 			return KVM_ARM_TARGET_XGENE_POTENZA;
-		};
+		}
 		break;
-	};
+	}
 
 	/* Return a default generic target */
 	return KVM_ARM_TARGET_GENERIC_V8;
@@ -504,6 +944,9 @@
 	case KVM_ARM_VCPU_TIMER_CTRL:
 		ret = kvm_arm_timer_set_attr(vcpu, attr);
 		break;
+	case KVM_ARM_VCPU_PVTIME_CTRL:
+		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;
@@ -524,6 +967,9 @@
 	case KVM_ARM_VCPU_TIMER_CTRL:
 		ret = kvm_arm_timer_get_attr(vcpu, attr);
 		break;
+	case KVM_ARM_VCPU_PVTIME_CTRL:
+		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;
@@ -544,6 +990,9 @@
 	case KVM_ARM_VCPU_TIMER_CTRL:
 		ret = kvm_arm_timer_has_attr(vcpu, attr);
 		break;
+	case KVM_ARM_VCPU_PVTIME_CTRL:
+		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;