2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/tools/testing/selftests/kvm/dirty_log_test.c
@@ -5,6 +5,8 @@
  * Copyright (C) 2018, Red Hat, Inc.
  */
 
+#define _GNU_SOURCE /* for program_invocation_name */
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
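
The only functional change in this first hunk is the _GNU_SOURCE definition: glibc exposes program_invocation_name (the path the binary was invoked as, declared in <errno.h>) only when that macro is defined, and the new create_vm() below feeds it to kvm_vm_elf_load() so the test can load its own ELF image into the guest. A minimal standalone sketch of the mechanism:

#define _GNU_SOURCE /* glibc guards program_invocation_name behind this */
#include <errno.h>  /* declares program_invocation_name */
#include <stdio.h>

int main(void)
{
	/* prints the path this binary was started with, i.e. argv[0] */
	printf("self: %s\n", program_invocation_name);
	return 0;
}
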
@@ -15,76 +17,188 @@
 
 #include "test_util.h"
 #include "kvm_util.h"
+#include "processor.h"
 
-#define DEBUG printf
+#define VCPU_ID				1
 
-#define VCPU_ID 1
 /* The memory slot index to track dirty pages */
-#define TEST_MEM_SLOT_INDEX 1
-/*
- * GPA offset of the testing memory slot. Must be bigger than the
- * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES.
- */
-#define TEST_MEM_OFFSET (1ULL << 30) /* 1G */
-/* Size of the testing memory slot */
-#define TEST_MEM_PAGES (1ULL << 18) /* 1G for 4K pages */
+#define TEST_MEM_SLOT_INDEX		1
+
+/* Default guest test virtual memory offset */
+#define DEFAULT_GUEST_TEST_MEM		0xc0000000
+
 /* How many pages to dirty for each guest loop */
-#define TEST_PAGES_PER_LOOP 1024
+#define TEST_PAGES_PER_LOOP		1024
+
 /* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
-#define TEST_HOST_LOOP_N 32UL
+#define TEST_HOST_LOOP_N		32UL
+
 /* Interval for each host loop (ms) */
-#define TEST_HOST_LOOP_INTERVAL 10UL
+#define TEST_HOST_LOOP_INTERVAL		10UL
+
+/* Dirty bitmaps are always little endian, so we need to swap on big endian */
+#if defined(__s390x__)
+# define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
+# define test_bit_le(nr, addr) \
+	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define set_bit_le(nr, addr) \
+	set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define clear_bit_le(nr, addr) \
+	clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define test_and_set_bit_le(nr, addr) \
+	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define test_and_clear_bit_le(nr, addr) \
+	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+#else
+# define test_bit_le		test_bit
+# define set_bit_le		set_bit
+# define clear_bit_le		clear_bit
+# define test_and_set_bit_le	test_and_set_bit
+# define test_and_clear_bit_le	test_and_clear_bit
+#endif
 
 /*
- * Guest variables. We use these variables to share data between host
- * and guest. There are two copies of the variables, one in host memory
- * (which is unused) and one in guest memory. When the host wants to
- * access these variables, it needs to call addr_gva2hva() to access the
- * guest copy.
+ * Guest/Host shared variables. Ensure addr_gva2hva() and/or
+ * sync_global_to/from_guest() are used when accessing from
+ * the host. READ/WRITE_ONCE() should also be used with anything
+ * that may change.
  */
-uint64_t guest_random_array[TEST_PAGES_PER_LOOP];
-uint64_t guest_iteration;
-uint64_t guest_page_size;
+static uint64_t host_page_size;
+static uint64_t guest_page_size;
+static uint64_t guest_num_pages;
+static uint64_t random_array[TEST_PAGES_PER_LOOP];
+static uint64_t iteration;
 
 /*
- * Writes to the first byte of a random page within the testing memory
- * region continuously.
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
  */
-void guest_code(void)
+static uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously write to the first 8 bytes of random pages within
+ * the testing memory region.
+ */
+static void guest_code(void)
 {
-	int i = 0;
-	uint64_t volatile *array = guest_random_array;
-	uint64_t volatile *guest_addr;
+	uint64_t addr;
+	int i;
+
+	/*
+	 * On s390x, all pages of a 1M segment are initially marked as dirty
+	 * when a page of the segment is written to for the very first time.
+	 * To compensate for this peculiarity in this test, we need to touch
+	 * all pages during the first iteration.
+	 */
+	for (i = 0; i < guest_num_pages; i++) {
+		addr = guest_test_virt_mem + i * guest_page_size;
+		*(uint64_t *)addr = READ_ONCE(iteration);
+	}
 
 	while (true) {
 		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
-			/*
-			 * Write to the first 8 bytes of a random page
-			 * on the testing memory region.
-			 */
-			guest_addr = (uint64_t *)
-			    (TEST_MEM_OFFSET +
-			     (array[i] % TEST_MEM_PAGES) * guest_page_size);
-			*guest_addr = guest_iteration;
+			addr = guest_test_virt_mem;
+			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
+				* guest_page_size;
+			addr &= ~(host_page_size - 1);
+			*(uint64_t *)addr = READ_ONCE(iteration);
 		}
+
 		/* Tell the host that we need more random numbers */
 		GUEST_SYNC(1);
 	}
 }
 
-/*
- * Host variables. These variables should only be used by the host
- * rather than the guest.
- */
-bool host_quit;
+/* Host variables */
+static bool host_quit;
 
 /* Points to the test VM memory region on which we track dirty logs */
-void *host_test_mem;
+static void *host_test_mem;
+static uint64_t host_num_pages;
 
 /* For statistics only */
-uint64_t host_dirty_count;
-uint64_t host_clear_count;
-uint64_t host_track_next_count;
+static uint64_t host_dirty_count;
+static uint64_t host_clear_count;
+static uint64_t host_track_next_count;
+
+enum log_mode_t {
+	/* Only use KVM_GET_DIRTY_LOG for logging */
+	LOG_MODE_DIRTY_LOG = 0,
+
+	/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
+	LOG_MODE_CLEAR_LOG = 1,
+
+	LOG_MODE_NUM,
+
+	/* Run all supported modes */
+	LOG_MODE_ALL = LOG_MODE_NUM,
+};
+
+/* Mode of logging to test. Default is to run all supported modes */
+static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
+/* Logging mode for current run */
+static enum log_mode_t host_log_mode;
+
+static bool clear_log_supported(void)
+{
+	return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+}
+
+static void clear_log_create_vm_done(struct kvm_vm *vm)
+{
+	struct kvm_enable_cap cap = {};
+	u64 manual_caps;
+
+	manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+	TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
+	manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+			KVM_DIRTY_LOG_INITIALLY_SET);
+	cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
+	cap.args[0] = manual_caps;
+	vm_enable_cap(vm, &cap);
+}
+
+static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+					  void *bitmap, uint32_t num_pages)
+{
+	kvm_vm_get_dirty_log(vm, slot, bitmap);
+}
+
+static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+					  void *bitmap, uint32_t num_pages)
+{
+	kvm_vm_get_dirty_log(vm, slot, bitmap);
+	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
+}
+
+struct log_mode {
+	const char *name;
+	/* Return true if this mode is supported, otherwise false */
+	bool (*supported)(void);
+	/* Hook when the vm creation is done (before vcpu creation) */
+	void (*create_vm_done)(struct kvm_vm *vm);
+	/* Hook to collect the dirty pages into the bitmap provided */
+	void (*collect_dirty_pages)(struct kvm_vm *vm, int slot,
+				    void *bitmap, uint32_t num_pages);
+} log_modes[LOG_MODE_NUM] = {
+	{
+		.name = "dirty-log",
+		.collect_dirty_pages = dirty_log_collect_dirty_pages,
+	},
+	{
+		.name = "clear-log",
+		.supported = clear_log_supported,
+		.create_vm_done = clear_log_create_vm_done,
+		.collect_dirty_pages = clear_log_collect_dirty_pages,
+	},
+};
 
 /*
  * We use this bitmap to track some pages that should have its dirty
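
The byte-swizzling macros added above are worth unpacking: KVM dirty bitmaps are arrays of longs laid out little endian, so on a 64-bit big-endian host such as s390x an LE bit index must be remapped before the generic bit helpers can use it. XOR-ing the index with (BITS_PER_LONG-1) & ~0x7 (56 for 64-bit longs) flips which byte of the long is addressed while leaving the bit-within-byte position alone. A self-contained sketch, assuming a GCC/Clang compiler for the __BYTE_ORDER__ predefine and using a naive stand-in for the selftest's test_bit():

#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)

/* naive value-order test_bit(), standing in for the selftest helper */
static int test_bit(unsigned long nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define test_bit_le(nr, addr)	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le		test_bit
#endif

int main(void)
{
	unsigned long bitmap = 0;

	/* LE bit 0 lives in the lowest-addressed byte of the long */
	((unsigned char *)&bitmap)[0] = 0x01;
	printf("LE bit 0 set: %d\n", test_bit_le(0, &bitmap)); /* 1 on any host */
	return 0;
}
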
@@ -93,73 +207,105 @@
  * page bit is cleared in the latest bitmap, then the system must
  * report that write in the next get dirty log call.
  */
-unsigned long *host_bmap_track;
+static unsigned long *host_bmap_track;
 
-void generate_random_array(uint64_t *guest_array, uint64_t size)
+static void log_modes_dump(void)
+{
+	int i;
+
+	printf("all");
+	for (i = 0; i < LOG_MODE_NUM; i++)
+		printf(", %s", log_modes[i].name);
+	printf("\n");
+}
+
+static bool log_mode_supported(void)
+{
+	struct log_mode *mode = &log_modes[host_log_mode];
+
+	if (mode->supported)
+		return mode->supported();
+
+	return true;
+}
+
+static void log_mode_create_vm_done(struct kvm_vm *vm)
+{
+	struct log_mode *mode = &log_modes[host_log_mode];
+
+	if (mode->create_vm_done)
+		mode->create_vm_done(vm);
+}
+
+static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
+					 void *bitmap, uint32_t num_pages)
+{
+	struct log_mode *mode = &log_modes[host_log_mode];
+
+	TEST_ASSERT(mode->collect_dirty_pages != NULL,
+		    "collect_dirty_pages() is required for any log mode!");
+	mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
+}
+
+static void generate_random_array(uint64_t *guest_array, uint64_t size)
 {
 	uint64_t i;
 
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < size; i++)
 		guest_array[i] = random();
-	}
 }
 
-void *vcpu_worker(void *data)
+static void *vcpu_worker(void *data)
 {
 	int ret;
-	uint64_t loops, *guest_array, pages_count = 0;
 	struct kvm_vm *vm = data;
+	uint64_t *guest_array;
+	uint64_t pages_count = 0;
 	struct kvm_run *run;
-	struct guest_args args;
 
 	run = vcpu_state(vm, VCPU_ID);
 
-	/* Retrieve the guest random array pointer and cache it */
-	guest_array = addr_gva2hva(vm, (vm_vaddr_t)guest_random_array);
-
-	DEBUG("VCPU starts\n");
-
+	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
 	generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
 
 	while (!READ_ONCE(host_quit)) {
-		/* Let the guest to dirty these random pages */
+		/* Let the guest dirty the random pages */
 		ret = _vcpu_run(vm, VCPU_ID);
-		guest_args_read(vm, VCPU_ID, &args);
-		if (run->exit_reason == KVM_EXIT_IO &&
-		    args.port == GUEST_PORT_SYNC) {
+		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
+		if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
 			pages_count += TEST_PAGES_PER_LOOP;
 			generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
 		} else {
-			TEST_ASSERT(false,
-				    "Invalid guest sync status: "
-				    "exit_reason=%s\n",
-				    exit_reason_str(run->exit_reason));
+			TEST_FAIL("Invalid guest sync status: "
+				  "exit_reason=%s\n",
+				  exit_reason_str(run->exit_reason));
 		}
 	}
 
-	DEBUG("VCPU exits, dirtied %"PRIu64" pages\n", pages_count);
+	pr_info("Dirtied %"PRIu64" pages\n", pages_count);
 
 	return NULL;
 }
 
-void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
+static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
 {
+	uint64_t step = vm_num_host_pages(mode, 1);
 	uint64_t page;
-	uint64_t volatile *value_ptr;
+	uint64_t *value_ptr;
 
-	for (page = 0; page < TEST_MEM_PAGES; page++) {
-		value_ptr = host_test_mem + page * getpagesize();
+	for (page = 0; page < host_num_pages; page += step) {
+		value_ptr = host_test_mem + page * host_page_size;
 
 		/* If this is a special page that we were tracking... */
-		if (test_and_clear_bit(page, host_bmap_track)) {
+		if (test_and_clear_bit_le(page, host_bmap_track)) {
 			host_track_next_count++;
-			TEST_ASSERT(test_bit(page, bmap),
+			TEST_ASSERT(test_bit_le(page, bmap),
 				    "Page %"PRIu64" should have its dirty bit "
 				    "set in this iteration but it is missing",
 				    page);
 		}
 
-		if (test_bit(page, bmap)) {
+		if (test_and_clear_bit_le(page, bmap)) {
 			host_dirty_count++;
 			/*
 			 * If the bit is set, the value written onto
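
The log_mode_*() wrappers added in this hunk form a small ops-table dispatch: each logging mode fills in a struct of hooks, supported() and create_vm_done() are optional, and a missing supported() hook means "always supported". A self-contained sketch of the pattern (names and behavior here are illustrative, not the selftest's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct log_mode {
	const char *name;
	bool (*supported)(void);	/* optional hook */
	void (*collect)(void);		/* required hook */
};

static bool clear_supported(void) { return false; }	/* pretend: no cap */
static void dirty_collect(void)   { puts("GET_DIRTY_LOG"); }
static void clear_collect(void)   { puts("GET+CLEAR_DIRTY_LOG"); }

static struct log_mode modes[] = {
	{ .name = "dirty-log", .collect = dirty_collect },
	{ .name = "clear-log", .supported = clear_supported,
	  .collect = clear_collect },
};

/* a NULL ->supported hook defaults to true, as in the patch */
static bool mode_supported(const struct log_mode *m)
{
	return m->supported ? m->supported() : true;
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (!mode_supported(&modes[i])) {
			printf("skipping '%s'\n", modes[i].name);
			continue;
		}
		modes[i].collect();
	}
	return 0;
}
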
@@ -202,41 +348,257 @@
 			 * should report its dirtyness in the
 			 * next run
 			 */
-			set_bit(page, host_bmap_track);
+			set_bit_le(page, host_bmap_track);
 		}
 	}
 }
 
-void help(char *name)
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
+				uint64_t extra_mem_pages, void *guest_code)
 {
+	struct kvm_vm *vm;
+	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
+
+	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
+	vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+#ifdef __x86_64__
+	vm_create_irqchip(vm);
+#endif
+	log_mode_create_vm_done(vm);
+	vm_vcpu_add_default(vm, vcpuid, guest_code);
+	return vm;
+}
+
+#define DIRTY_MEM_BITS	30 /* 1G */
+#define PAGE_SHIFT_4K	12
+
+static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+		     unsigned long interval, uint64_t phys_offset)
+{
+	pthread_t vcpu_thread;
+	struct kvm_vm *vm;
+	unsigned long *bmap;
+
+	if (!log_mode_supported()) {
+		print_skip("Log mode '%s' not supported",
+			   log_modes[host_log_mode].name);
+		return;
+	}
+
+	/*
+	 * We reserve page tables for twice the extra dirty memory, which
+	 * will definitely cover the original (1G+) test range.  The
+	 * calculation is done with the 4K page size, the smallest, so the
+	 * page count is enough for all archs (e.g., a 64K page size guest
+	 * will need even less memory for page tables).
+	 */
+	vm = create_vm(mode, VCPU_ID,
+		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
+		       guest_code);
+
+	guest_page_size = vm_get_page_size(vm);
+	/*
+	 * A little more than 1G of guest-page-sized pages.  Covers the
+	 * case where the size is not aligned to 64 pages.
+	 */
+	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
+				   vm_get_page_shift(vm))) + 3;
+	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
+
+	host_page_size = getpagesize();
+	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
+
+	if (!phys_offset) {
+		guest_test_phys_mem = (vm_get_max_gfn(vm) -
+				       guest_num_pages) * guest_page_size;
+		guest_test_phys_mem &= ~(host_page_size - 1);
+	} else {
+		guest_test_phys_mem = phys_offset;
+	}
+
+#ifdef __s390x__
+	/* Align to 1M (segment size) */
+	guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+
+	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+
+	bmap = bitmap_alloc(host_num_pages);
+	host_bmap_track = bitmap_alloc(host_num_pages);
+
+	/* Add an extra memory slot for testing dirty logging */
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+				    guest_test_phys_mem,
+				    TEST_MEM_SLOT_INDEX,
+				    guest_num_pages,
+				    KVM_MEM_LOG_DIRTY_PAGES);
+
+	/* Do mapping for the dirty track memory slot */
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+	/* Cache the HVA pointer of the region */
+	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+
+#ifdef __x86_64__
+	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+#endif
+	ucall_init(vm, NULL);
+
+	/* Export the shared variables to the guest */
+	sync_global_to_guest(vm, host_page_size);
+	sync_global_to_guest(vm, guest_page_size);
+	sync_global_to_guest(vm, guest_test_virt_mem);
+	sync_global_to_guest(vm, guest_num_pages);
+
+	/* Start the iterations */
+	iteration = 1;
+	sync_global_to_guest(vm, iteration);
+	host_quit = false;
+	host_dirty_count = 0;
+	host_clear_count = 0;
+	host_track_next_count = 0;
+
+	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
+
+	while (iteration < iterations) {
+		/* Give the vcpu thread some time to dirty some pages */
+		usleep(interval * 1000);
+		log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
+					     bmap, host_num_pages);
+		vm_dirty_log_verify(mode, bmap);
+		iteration++;
+		sync_global_to_guest(vm, iteration);
+	}
+
+	/* Tell the vcpu thread to quit */
+	host_quit = true;
+	pthread_join(vcpu_thread, NULL);
+
+	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
+		"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
+		host_track_next_count);
+
+	free(bmap);
+	free(host_bmap_track);
+	ucall_uninit(vm);
+	kvm_vm_free(vm);
+}
+
+struct guest_mode {
+	bool supported;
+	bool enabled;
+};
+static struct guest_mode guest_modes[NUM_VM_MODES];
+
+#define guest_mode_init(mode, supported, enabled) ({ \
+	guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
+})
+
+static void help(char *name)
+{
+	int i;
+
 	puts("");
-	printf("usage: %s [-i iterations] [-I interval] [-h]\n", name);
+	printf("usage: %s [-h] [-i iterations] [-I interval] "
+	       "[-p offset] [-m mode]\n", name);
 	puts("");
 	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
 	       TEST_HOST_LOOP_N);
 	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
 	       TEST_HOST_LOOP_INTERVAL);
+	printf(" -p: specify guest physical test memory offset\n"
+	       "     Warning: a low offset can conflict with the loaded test code.\n");
+	printf(" -M: specify the host logging mode "
+	       "(default: run all log modes).  Supported modes:\n\t");
+	log_modes_dump();
+	printf(" -m: specify the guest mode ID to test "
+	       "(default: test all supported modes)\n"
+	       "     This option may be used multiple times.\n"
+	       "     Guest mode IDs:\n");
+	for (i = 0; i < NUM_VM_MODES; ++i) {
+		printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
+		       guest_modes[i].supported ? " (supported)" : "");
+	}
 	puts("");
 	exit(0);
 }
 
 int main(int argc, char *argv[])
 {
-	pthread_t vcpu_thread;
-	struct kvm_vm *vm;
-	uint64_t volatile *psize, *iteration;
-	unsigned long *bmap, iterations = TEST_HOST_LOOP_N,
-		      interval = TEST_HOST_LOOP_INTERVAL;
-	int opt;
+	unsigned long iterations = TEST_HOST_LOOP_N;
+	unsigned long interval = TEST_HOST_LOOP_INTERVAL;
+	bool mode_selected = false;
+	uint64_t phys_offset = 0;
+	unsigned int mode;
+	int opt, i, j;
 
-	while ((opt = getopt(argc, argv, "hi:I:")) != -1) {
+#ifdef __x86_64__
+	guest_mode_init(VM_MODE_PXXV48_4K, true, true);
+#endif
+#ifdef __aarch64__
+	guest_mode_init(VM_MODE_P40V48_4K, true, true);
+	guest_mode_init(VM_MODE_P40V48_64K, true, true);
+
+	{
+		unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+
+		if (limit >= 52)
+			guest_mode_init(VM_MODE_P52V48_64K, true, true);
+		if (limit >= 48) {
+			guest_mode_init(VM_MODE_P48V48_4K, true, true);
+			guest_mode_init(VM_MODE_P48V48_64K, true, true);
+		}
+	}
+#endif
+#ifdef __s390x__
+	guest_mode_init(VM_MODE_P40V48_4K, true, true);
+#endif
+
+	while ((opt = getopt(argc, argv, "hi:I:p:m:M:")) != -1) {
 		switch (opt) {
 		case 'i':
 			iterations = strtol(optarg, NULL, 10);
 			break;
 		case 'I':
 			interval = strtol(optarg, NULL, 10);
+			break;
+		case 'p':
+			phys_offset = strtoull(optarg, NULL, 0);
+			break;
+		case 'm':
+			if (!mode_selected) {
+				for (i = 0; i < NUM_VM_MODES; ++i)
+					guest_modes[i].enabled = false;
+				mode_selected = true;
+			}
+			mode = strtoul(optarg, NULL, 10);
+			TEST_ASSERT(mode < NUM_VM_MODES,
+				    "Guest mode ID %d too big", mode);
+			guest_modes[mode].enabled = true;
+			break;
+		case 'M':
+			if (!strcmp(optarg, "all")) {
+				host_log_mode_option = LOG_MODE_ALL;
+				break;
+			}
+			for (i = 0; i < LOG_MODE_NUM; i++) {
+				if (!strcmp(optarg, log_modes[i].name)) {
+					pr_info("Setting log mode to: '%s'\n",
+						optarg);
+					host_log_mode_option = i;
+					break;
+				}
+			}
+			if (i == LOG_MODE_NUM) {
+				printf("Log mode '%s' invalid. Please choose "
+				       "from: ", optarg);
+				log_modes_dump();
+				exit(1);
+			}
 			break;
 		case 'h':
 		default:
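
One construct in the hunk above that can look odd: guest_mode_init() wraps its body in ({ ... }), a GNU C statement expression, so the macro behaves as a single expression even though it performs a compound-literal struct assignment. A tiny sketch of the idiom (requires GCC or Clang):

#include <stdbool.h>
#include <stdio.h>

struct guest_mode { bool supported; bool enabled; };
static struct guest_mode guest_modes[4];

/* ({ ... }) turns the assignment into an expression, usable anywhere */
#define guest_mode_init(mode, supported, enabled) ({ \
	guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
})

int main(void)
{
	guest_mode_init(0, true, true);
	printf("mode 0: supported=%d enabled=%d\n",
	       guest_modes[0].supported, guest_modes[0].enabled);
	return 0;
}
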
@@ -245,64 +607,33 @@
 		}
 	}
 
-	TEST_ASSERT(iterations > 2, "Iteration must be bigger than zero\n");
-	TEST_ASSERT(interval > 0, "Interval must be bigger than zero");
+	TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
+	TEST_ASSERT(interval > 0, "Interval must be greater than zero");
 
-	DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
-	      iterations, interval);
+	pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
+		iterations, interval);
 
 	srandom(time(0));
 
-	bmap = bitmap_alloc(TEST_MEM_PAGES);
-	host_bmap_track = bitmap_alloc(TEST_MEM_PAGES);
-
-	vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code);
-
-	/* Add an extra memory slot for testing dirty logging */
-	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-				    TEST_MEM_OFFSET,
-				    TEST_MEM_SLOT_INDEX,
-				    TEST_MEM_PAGES,
-				    KVM_MEM_LOG_DIRTY_PAGES);
-	/* Cache the HVA pointer of the region */
-	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET);
-
-	/* Do 1:1 mapping for the dirty track memory slot */
-	virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET,
-		 TEST_MEM_PAGES * getpagesize(), 0);
-
-	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
-	/* Tell the guest about the page size on the system */
-	psize = addr_gva2hva(vm, (vm_vaddr_t)&guest_page_size);
-	*psize = getpagesize();
-
-	/* Start the iterations */
-	iteration = addr_gva2hva(vm, (vm_vaddr_t)&guest_iteration);
-	*iteration = 1;
-
-	/* Start dirtying pages */
-	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
-
-	while (*iteration < iterations) {
-		/* Give the vcpu thread some time to dirty some pages */
-		usleep(interval * 1000);
-		kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
-		vm_dirty_log_verify(bmap, *iteration);
-		(*iteration)++;
+	for (i = 0; i < NUM_VM_MODES; ++i) {
+		if (!guest_modes[i].enabled)
+			continue;
+		TEST_ASSERT(guest_modes[i].supported,
+			    "Guest mode ID %d (%s) not supported.",
+			    i, vm_guest_mode_string(i));
+		if (host_log_mode_option == LOG_MODE_ALL) {
+			/* Run each log mode */
+			for (j = 0; j < LOG_MODE_NUM; j++) {
+				pr_info("Testing Log Mode '%s'\n",
+					log_modes[j].name);
+				host_log_mode = j;
+				run_test(i, iterations, interval, phys_offset);
+			}
+		} else {
+			host_log_mode = host_log_mode_option;
+			run_test(i, iterations, interval, phys_offset);
+		}
 	}
-
-	/* Tell the vcpu thread to quit */
-	host_quit = true;
-	pthread_join(vcpu_thread, NULL);
-
-	DEBUG("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
-	      "track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
-	      host_track_next_count);
-
-	free(bmap);
-	free(host_bmap_track);
-	kvm_vm_free(vm);
 
 	return 0;
 }
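
A note on the page accounting that runs through run_test() and vm_dirty_log_verify(): the dirty bitmap is consumed in host-page units, but the guest dirties whole guest pages, so the verify loop advances by step = vm_num_host_pages(mode, 1) host pages at a time. Assuming vm_num_host_pages() simply scales a guest page count by the page-size ratio (the stand-in below is hypothetical; this diff does not show its definition), the arithmetic looks like this:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the selftest's vm_num_host_pages() */
static uint64_t num_host_pages(uint64_t guest_page_size,
			       uint64_t host_page_size,
			       uint64_t guest_num_pages)
{
	return guest_num_pages * guest_page_size / host_page_size;
}

int main(void)
{
	uint64_t guest_page_size = 64 * 1024;	/* e.g. a 64K-page guest */
	uint64_t host_page_size = 4096;		/* on a 4K-page host */

	/* one guest page spans 16 host pages, so the verify step is 16 */
	printf("verify step: %llu host pages\n",
	       (unsigned long long)num_host_pages(guest_page_size,
						  host_page_size, 1));
	return 0;
}
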