2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/include/asm/svm.h
@@ -3,10 +3,54 @@
 #define __SVM_H
 
 #include <uapi/asm/svm.h>
+#include <uapi/asm/kvm.h>
 
+/*
+ * 32-bit intercept words in the VMCB Control Area, starting
+ * at Byte offset 000h.
+ */
+
+enum intercept_words {
+	INTERCEPT_CR = 0,
+	INTERCEPT_DR,
+	INTERCEPT_EXCEPTION,
+	INTERCEPT_WORD3,
+	INTERCEPT_WORD4,
+	INTERCEPT_WORD5,
+	MAX_INTERCEPT,
+};
 
 enum {
-	INTERCEPT_INTR,
+	/* Byte offset 000h (word 0) */
+	INTERCEPT_CR0_READ = 0,
+	INTERCEPT_CR3_READ = 3,
+	INTERCEPT_CR4_READ = 4,
+	INTERCEPT_CR8_READ = 8,
+	INTERCEPT_CR0_WRITE = 16,
+	INTERCEPT_CR3_WRITE = 16 + 3,
+	INTERCEPT_CR4_WRITE = 16 + 4,
+	INTERCEPT_CR8_WRITE = 16 + 8,
+	/* Byte offset 004h (word 1) */
+	INTERCEPT_DR0_READ = 32,
+	INTERCEPT_DR1_READ,
+	INTERCEPT_DR2_READ,
+	INTERCEPT_DR3_READ,
+	INTERCEPT_DR4_READ,
+	INTERCEPT_DR5_READ,
+	INTERCEPT_DR6_READ,
+	INTERCEPT_DR7_READ,
+	INTERCEPT_DR0_WRITE = 48,
+	INTERCEPT_DR1_WRITE,
+	INTERCEPT_DR2_WRITE,
+	INTERCEPT_DR3_WRITE,
+	INTERCEPT_DR4_WRITE,
+	INTERCEPT_DR5_WRITE,
+	INTERCEPT_DR6_WRITE,
+	INTERCEPT_DR7_WRITE,
+	/* Byte offset 008h (word 2) */
+	INTERCEPT_EXCEPTION_OFFSET = 64,
+	/* Byte offset 00Ch (word 3) */
+	INTERCEPT_INTR = 96,
 	INTERCEPT_NMI,
 	INTERCEPT_SMI,
 	INTERCEPT_INIT,
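
Note on the renumbering above: each enumerator now encodes both its intercept word and its bit within that word (value = word * 32 + bit), which is why INTERCEPT_INTR jumps from 0 to 96: it is bit 0 of word 3, the word that used to sit at byte offset 00Ch as the low half of the old u64 intercept field. An illustrative check, not part of the patch, assuming static_assert from <linux/build_bug.h>:

/* Illustrative only: the flat bit number decomposes as word * 32 + bit-in-word. */
static_assert(INTERCEPT_CR3_WRITE == 32 * INTERCEPT_CR + 19);    /* word 0, bit 19 */
static_assert(INTERCEPT_DR7_WRITE == 32 * INTERCEPT_DR + 23);    /* word 1, bit 23 */
static_assert(INTERCEPT_INTR      == 32 * INTERCEPT_WORD3 + 0);  /* word 3, bit 0  */
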
@@ -38,7 +82,8 @@
 	INTERCEPT_TASK_SWITCH,
 	INTERCEPT_FERR_FREEZE,
 	INTERCEPT_SHUTDOWN,
-	INTERCEPT_VMRUN,
+	/* Byte offset 010h (word 4) */
+	INTERCEPT_VMRUN = 128,
 	INTERCEPT_VMMCALL,
 	INTERCEPT_VMLOAD,
 	INTERCEPT_VMSAVE,
@@ -52,15 +97,19 @@
 	INTERCEPT_MWAIT,
 	INTERCEPT_MWAIT_COND,
 	INTERCEPT_XSETBV,
+	INTERCEPT_RDPRU,
+	/* Byte offset 014h (word 5) */
+	INTERCEPT_INVLPGB = 160,
+	INTERCEPT_INVLPGB_ILLEGAL,
+	INTERCEPT_INVPCID,
+	INTERCEPT_MCOMMIT,
+	INTERCEPT_TLBSYNC,
 };
 
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
-	u32 intercept_cr;
-	u32 intercept_dr;
-	u32 intercept_exceptions;
-	u64 intercept;
-	u8 reserved_1[40];
+	u32 intercepts[MAX_INTERCEPT];
+	u32 reserved_1[15 - MAX_INTERCEPT];
 	u16 pause_filter_thresh;
 	u16 pause_filter_count;
 	u64 iopm_base_pa;
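
With the control area's four separate intercept fields folded into intercepts[MAX_INTERCEPT], a single bit number from the enum above is enough to address any intercept. A minimal sketch of the kind of helpers a consumer would wrap around this array (helper names and the WARN policy are illustrative, not defined in this header; assumes <linux/bitops.h>). Treating the contiguous u32 array as one bitmap is safe here because x86 is little-endian:

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
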
@@ -95,7 +144,6 @@
 	u8 reserved_6[8];	/* Offset 0xe8 */
 	u64 avic_logical_id;	/* Offset 0xf0 */
 	u64 avic_physical_id;	/* Offset 0xf8 */
-	u8 reserved_7[768];
 };
 
 
@@ -152,14 +200,14 @@
 #define SVM_NESTED_CTL_NP_ENABLE	BIT(0)
 #define SVM_NESTED_CTL_SEV_ENABLE	BIT(1)
 
-struct __attribute__ ((__packed__)) vmcb_seg {
+struct vmcb_seg {
 	u16 selector;
 	u16 attrib;
 	u32 limit;
 	u64 base;
-};
+} __packed;
 
-struct __attribute__ ((__packed__)) vmcb_save_area {
+struct vmcb_save_area {
 	struct vmcb_seg es;
 	struct vmcb_seg cs;
 	struct vmcb_seg ss;
@@ -202,12 +250,67 @@
 	u64 br_to;
 	u64 last_excp_from;
 	u64 last_excp_to;
-};
 
-struct __attribute__ ((__packed__)) vmcb {
-	struct vmcb_control_area control;
+	/*
+	 * The following part of the save area is valid only for
+	 * SEV-ES guests when referenced through the GHCB.
+	 */
+	u8 reserved_7[104];
+	u64 reserved_8;		/* rax already available at 0x01f8 */
+	u64 rcx;
+	u64 rdx;
+	u64 rbx;
+	u64 reserved_9;		/* rsp already available at 0x01d8 */
+	u64 rbp;
+	u64 rsi;
+	u64 rdi;
+	u64 r8;
+	u64 r9;
+	u64 r10;
+	u64 r11;
+	u64 r12;
+	u64 r13;
+	u64 r14;
+	u64 r15;
+	u8 reserved_10[16];
+	u64 sw_exit_code;
+	u64 sw_exit_info_1;
+	u64 sw_exit_info_2;
+	u64 sw_scratch;
+	u8 reserved_11[56];
+	u64 xcr0;
+	u8 valid_bitmap[16];
+	u64 x87_state_gpa;
+} __packed;
+
+struct ghcb {
 	struct vmcb_save_area save;
-};
+	u8 reserved_save[2048 - sizeof(struct vmcb_save_area)];
+
+	u8 shared_buffer[2032];
+
+	u8 reserved_1[10];
+	u16 protocol_version;	/* negotiated SEV-ES/GHCB protocol version */
+	u32 ghcb_usage;
+} __packed;
+
+
+#define EXPECTED_VMCB_SAVE_AREA_SIZE	1032
+#define EXPECTED_VMCB_CONTROL_AREA_SIZE	256
+#define EXPECTED_GHCB_SIZE		PAGE_SIZE
+
+static inline void __unused_size_checks(void)
+{
+	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE);
+	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != EXPECTED_VMCB_CONTROL_AREA_SIZE);
+	BUILD_BUG_ON(sizeof(struct ghcb) != EXPECTED_GHCB_SIZE);
+}
+
+struct vmcb {
+	struct vmcb_control_area control;
+	u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
+	struct vmcb_save_area save;
+} __packed;
 
 #define SVM_CPUID_FUNC 0x8000000a
 
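
The EXPECTED_* constants pin the ABI down at build time: the save area must stay 1032 bytes, the control area 256 bytes, and the whole GHCB exactly one page. Working the struct ghcb layout out from the fields above, the save area plus its padding occupy the first 2048 bytes, the 2032-byte shared buffer follows, and the 10 reserved bytes, the u16 and the u32 bring the total to 4096 with the protocol fields at the very end of the page. An illustrative check, not part of the patch, assuming static_assert and offsetof are available:

static_assert(offsetof(struct ghcb, shared_buffer)    == 0x800);
static_assert(offsetof(struct ghcb, protocol_version) == 0xffa);
static_assert(offsetof(struct ghcb, ghcb_usage)       == 0xffc);
static_assert(sizeof(struct ghcb)                     == 4096); /* EXPECTED_GHCB_SIZE on 4 KiB pages */
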
@@ -233,32 +336,6 @@
 #define SVM_SELECTOR_WRITE_MASK (1 << 1)
 #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 #define SVM_SELECTOR_CODE_MASK (1 << 3)
-
-#define INTERCEPT_CR0_READ	0
-#define INTERCEPT_CR3_READ	3
-#define INTERCEPT_CR4_READ	4
-#define INTERCEPT_CR8_READ	8
-#define INTERCEPT_CR0_WRITE	(16 + 0)
-#define INTERCEPT_CR3_WRITE	(16 + 3)
-#define INTERCEPT_CR4_WRITE	(16 + 4)
-#define INTERCEPT_CR8_WRITE	(16 + 8)
-
-#define INTERCEPT_DR0_READ	0
-#define INTERCEPT_DR1_READ	1
-#define INTERCEPT_DR2_READ	2
-#define INTERCEPT_DR3_READ	3
-#define INTERCEPT_DR4_READ	4
-#define INTERCEPT_DR5_READ	5
-#define INTERCEPT_DR6_READ	6
-#define INTERCEPT_DR7_READ	7
-#define INTERCEPT_DR0_WRITE	(16 + 0)
-#define INTERCEPT_DR1_WRITE	(16 + 1)
-#define INTERCEPT_DR2_WRITE	(16 + 2)
-#define INTERCEPT_DR3_WRITE	(16 + 3)
-#define INTERCEPT_DR4_WRITE	(16 + 4)
-#define INTERCEPT_DR5_WRITE	(16 + 5)
-#define INTERCEPT_DR6_WRITE	(16 + 6)
-#define INTERCEPT_DR7_WRITE	(16 + 7)
 
 #define SVM_EVTINJ_VEC_MASK 0xff
 
@@ -292,11 +369,47 @@
 
 #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
 
-#define SVM_VMLOAD	".byte 0x0f, 0x01, 0xda"
-#define SVM_VMRUN	".byte 0x0f, 0x01, 0xd8"
-#define SVM_VMSAVE	".byte 0x0f, 0x01, 0xdb"
-#define SVM_CLGI	".byte 0x0f, 0x01, 0xdd"
-#define SVM_STGI	".byte 0x0f, 0x01, 0xdc"
-#define SVM_INVLPGA	".byte 0x0f, 0x01, 0xdf"
+/* GHCB Accessor functions */
+
+#define GHCB_BITMAP_IDX(field)							\
+	(offsetof(struct vmcb_save_area, field) / sizeof(u64))
+
+#define DEFINE_GHCB_ACCESSORS(field)						\
+	static inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb)	\
+	{									\
+		return test_bit(GHCB_BITMAP_IDX(field),				\
+				(unsigned long *)&ghcb->save.valid_bitmap);	\
+	}									\
+										\
+	static inline void ghcb_set_##field(struct ghcb *ghcb, u64 value)	\
+	{									\
+		__set_bit(GHCB_BITMAP_IDX(field),				\
+			  (unsigned long *)&ghcb->save.valid_bitmap);		\
+		ghcb->save.field = value;					\
+	}
+
+DEFINE_GHCB_ACCESSORS(cpl)
+DEFINE_GHCB_ACCESSORS(rip)
+DEFINE_GHCB_ACCESSORS(rsp)
+DEFINE_GHCB_ACCESSORS(rax)
+DEFINE_GHCB_ACCESSORS(rcx)
+DEFINE_GHCB_ACCESSORS(rdx)
+DEFINE_GHCB_ACCESSORS(rbx)
+DEFINE_GHCB_ACCESSORS(rbp)
+DEFINE_GHCB_ACCESSORS(rsi)
+DEFINE_GHCB_ACCESSORS(rdi)
+DEFINE_GHCB_ACCESSORS(r8)
+DEFINE_GHCB_ACCESSORS(r9)
+DEFINE_GHCB_ACCESSORS(r10)
+DEFINE_GHCB_ACCESSORS(r11)
+DEFINE_GHCB_ACCESSORS(r12)
+DEFINE_GHCB_ACCESSORS(r13)
+DEFINE_GHCB_ACCESSORS(r14)
+DEFINE_GHCB_ACCESSORS(r15)
+DEFINE_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_GHCB_ACCESSORS(sw_scratch)
+DEFINE_GHCB_ACCESSORS(xcr0)
 
 #endif
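
For reference, each DEFINE_GHCB_ACCESSORS(field) above expands into a ghcb_<field>_is_valid() predicate and a ghcb_set_<field>() setter, so a caller both writes the field and records it in valid_bitmap in one step. A hypothetical caller is sketched below; the function name is made up for illustration, SVM_EXIT_MSR comes from <uapi/asm/svm.h>, and exit_info_1 = 0 is assumed to select RDMSR per the GHCB protocol:

static u64 example_ghcb_rdmsr(struct ghcb *ghcb, u64 msr)
{
	ghcb_set_rcx(ghcb, msr);			/* MSR index goes in RCX */
	ghcb_set_sw_exit_code(ghcb, SVM_EXIT_MSR);
	ghcb_set_sw_exit_info_1(ghcb, 0);		/* 0 = RDMSR, 1 = WRMSR (assumed) */

	/* ... a VMGEXIT to the hypervisor would happen here ... */

	if (!ghcb_rax_is_valid(ghcb) || !ghcb_rdx_is_valid(ghcb))
		return 0;				/* hypervisor did not provide a result */

	return (ghcb->save.rdx << 32) | (u32)ghcb->save.rax;
}
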