// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"
#ifdef __aarch64__
#include "aarch64/vgic.h"

#define GICD_BASE_GPA                   0x8000000ULL
#define GICR_BASE_GPA                   0x80A0000ULL
#endif

/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
#define TEST_HOST_LOOP_N                2UL

static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

/* Host variables */
static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

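/*
 * Per-vCPU worker: repeatedly run the vCPU until it reports UCALL_SYNC,
 * record how long each pass over its memory region took, then spin until
 * the main thread bumps the global iteration count (or asks us to quit).
 */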
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
        int ret;
        struct kvm_vm *vm = perf_test_args.vm;
        uint64_t pages_count = 0;
        struct kvm_run *run;
        struct timespec start;
        struct timespec ts_diff;
        struct timespec total = (struct timespec){0};
        struct timespec avg;
        int vcpu_id = vcpu_args->vcpu_id;

        run = vcpu_state(vm, vcpu_id);

        while (!READ_ONCE(host_quit)) {
                int current_iteration = READ_ONCE(iteration);

                clock_gettime(CLOCK_MONOTONIC, &start);
                ret = _vcpu_run(vm, vcpu_id);
                ts_diff = timespec_elapsed(start);

                TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
                TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
                            "Invalid guest sync status: exit_reason=%s\n",
                            exit_reason_str(run->exit_reason));

                pr_debug("Got sync event from vCPU %d\n", vcpu_id);
                vcpu_last_completed_iteration[vcpu_id] = current_iteration;
                pr_debug("vCPU %d updated last completed iteration to %d\n",
                         vcpu_id, vcpu_last_completed_iteration[vcpu_id]);

                if (current_iteration) {
                        pages_count += vcpu_args->pages;
                        total = timespec_add(total, ts_diff);
                        pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
                                vcpu_id, current_iteration, ts_diff.tv_sec,
                                ts_diff.tv_nsec);
                } else {
                        pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
                                vcpu_id, current_iteration, ts_diff.tv_sec,
                                ts_diff.tv_nsec);
                }

                while (current_iteration == READ_ONCE(iteration) &&
                       !READ_ONCE(host_quit)) {}
        }

        avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
        pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
                vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
                total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}

struct test_params {
        unsigned long iterations;
        uint64_t phys_offset;
        int wr_fract;
        bool partition_vcpu_memory_access;
        enum vm_mem_backing_src_type backing_src;
        int slots;
};

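/*
 * Set or clear KVM_MEM_LOG_DIRTY_PAGES on every memslot backing the guest
 * test memory.
 */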
static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
        int i;

        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;
                int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

                vm_mem_region_set_flags(vm, slot, flags);
        }
}

static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
{
        toggle_dirty_logging(vm, slots, true);
}

static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
{
        toggle_dirty_logging(vm, slots, false);
}

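/* Fetch the dirty bitmap of each test memslot with KVM_GET_DIRTY_LOG. */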
static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
        int i;

        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;

                kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
        }
}

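/*
 * Clear the dirty state of all pages in each test memslot. Only used when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled, i.e. when KVM_GET_DIRTY_LOG
 * does not clear the log itself.
 */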
static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
                            int slots, uint64_t pages_per_slot)
{
        int i;

        for (i = 0; i < slots; i++) {
                int slot = PERF_TEST_MEM_SLOT_INDEX + i;

                kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
        }
}

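/*
 * Allocate one zeroed dirty bitmap per memslot, each large enough to cover
 * pages_per_slot pages.
 */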
static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
        unsigned long **bitmaps;
        int i;

        bitmaps = malloc(slots * sizeof(bitmaps[0]));
        TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

        for (i = 0; i < slots; i++) {
                bitmaps[i] = bitmap_zalloc(pages_per_slot);
                TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
        }

        return bitmaps;
}

static void free_bitmaps(unsigned long *bitmaps[], int slots)
{
        int i;

        for (i = 0; i < slots; i++)
                free(bitmaps[i]);

        free(bitmaps);
}

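/*
 * Main test body for a single guest mode: create the VM, let the vCPUs
 * populate their memory, enable dirty logging, then time each iteration of
 * dirtying memory, harvesting the dirty log and (optionally) clearing it.
 */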
static void run_test(enum vm_guest_mode mode, void *arg)
{
        struct test_params *p = arg;
        struct kvm_vm *vm;
        unsigned long **bitmaps;
        uint64_t guest_num_pages;
        uint64_t host_num_pages;
        uint64_t pages_per_slot;
        int vcpu_id;
        struct timespec start;
        struct timespec ts_diff;
        struct timespec get_dirty_log_total = (struct timespec){0};
        struct timespec vcpu_dirty_total = (struct timespec){0};
        struct timespec avg;
        struct kvm_enable_cap cap = {};
        struct timespec clear_dirty_log_total = (struct timespec){0};

        vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
                                 p->slots, p->backing_src,
                                 p->partition_vcpu_memory_access);

        perf_test_set_wr_fract(vm, p->wr_fract);

        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
        pages_per_slot = host_num_pages / p->slots;

        bitmaps = alloc_bitmaps(p->slots, pages_per_slot);

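        /*
         * If the host supports manual dirty log protect, enable it so that
         * KVM_GET_DIRTY_LOG stops clearing the log and the separate
         * KVM_CLEAR_DIRTY_LOG pass below can be timed on its own.
         */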
        if (dirty_log_manual_caps) {
                cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
                cap.args[0] = dirty_log_manual_caps;
                vm_enable_cap(vm, &cap);
        }

#ifdef __aarch64__
        vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
#endif

        /* Start the iterations */
        iteration = 0;
        host_quit = false;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
                vcpu_last_completed_iteration[vcpu_id] = -1;

        perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);

        /* Allow the vCPUs to populate memory */
        pr_debug("Starting iteration %d - Populating\n", iteration);
        for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
                       iteration)
                        ;
        }

        ts_diff = timespec_elapsed(start);
        pr_info("Populate memory time: %ld.%.9lds\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);

        /* Enable dirty logging */
        clock_gettime(CLOCK_MONOTONIC, &start);
        enable_dirty_logging(vm, p->slots);
        ts_diff = timespec_elapsed(start);
        pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);

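        /*
         * Timed iterations: bumping the global iteration count releases the
         * vCPU workers to dirty their memory again; once they have all caught
         * up, time KVM_GET_DIRTY_LOG (and KVM_CLEAR_DIRTY_LOG when manual
         * protect is in use) across every memslot.
         */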
        while (iteration < p->iterations) {
                /*
                 * Incrementing the iteration number will start the vCPUs
                 * dirtying memory again.
                 */
                clock_gettime(CLOCK_MONOTONIC, &start);
                iteration++;

                pr_debug("Starting iteration %d\n", iteration);
                for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
                        while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
                               != iteration)
                                ;
                }

                ts_diff = timespec_elapsed(start);
                vcpu_dirty_total = timespec_add(vcpu_dirty_total, ts_diff);
                pr_info("Iteration %d dirty memory time: %ld.%.9lds\n",
                        iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

                clock_gettime(CLOCK_MONOTONIC, &start);
                get_dirty_log(vm, bitmaps, p->slots);
                ts_diff = timespec_elapsed(start);
                get_dirty_log_total = timespec_add(get_dirty_log_total,
                                                   ts_diff);
                pr_info("Iteration %d get dirty log time: %ld.%.9lds\n",
                        iteration, ts_diff.tv_sec, ts_diff.tv_nsec);

                if (dirty_log_manual_caps) {
                        clock_gettime(CLOCK_MONOTONIC, &start);
                        clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
                        ts_diff = timespec_elapsed(start);
                        clear_dirty_log_total = timespec_add(clear_dirty_log_total,
                                                             ts_diff);
                        pr_info("Iteration %d clear dirty log time: %ld.%.9lds\n",
                                iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
                }
        }

        /* Disable dirty logging */
        clock_gettime(CLOCK_MONOTONIC, &start);
        disable_dirty_logging(vm, p->slots);
        ts_diff = timespec_elapsed(start);
        pr_info("Disabling dirty logging time: %ld.%.9lds\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);

        /* Tell the vCPU threads to quit */
        host_quit = true;
        perf_test_join_vcpu_threads(nr_vcpus);

        avg = timespec_div(get_dirty_log_total, p->iterations);
        pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
                p->iterations, get_dirty_log_total.tv_sec,
                get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

        if (dirty_log_manual_caps) {
                avg = timespec_div(clear_dirty_log_total, p->iterations);
                pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
                        p->iterations, clear_dirty_log_total.tv_sec,
                        clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
        }

        free_bitmaps(bitmaps, p->slots);
        perf_test_destroy_vm(vm);
}

static void help(char *name)
{
        puts("");
        printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
               "[-m mode] [-b vcpu bytes] [-f wr_fract] [-v vcpus] [-o] "
               "[-s mem type] [-x memslots]\n", name);
        puts("");
        printf(" -i: specify iteration counts (default: %"PRIu64")\n",
               TEST_HOST_LOOP_N);
        printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
               "     makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
               "     KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
               "     and writes will be tracked as soon as dirty logging is\n"
               "     enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
               "     is not enabled).\n");
        printf(" -p: specify guest physical test memory offset\n"
               "     Warning: a low offset can conflict with the loaded test code.\n");
        guest_modes_help();
        printf(" -b: specify the size of the memory region which should be\n"
               "     dirtied by each vCPU. e.g. 10M or 3G.\n"
               "     (default: 1G)\n");
        printf(" -f: specify the fraction of pages which should be written to\n"
               "     as opposed to simply read, in the form\n"
               "     1/<fraction of pages to write>.\n"
               "     (default: 1 i.e. all pages are written to.)\n");
        printf(" -v: specify the number of vCPUs to run.\n");
        printf(" -o: Overlap guest memory accesses instead of partitioning\n"
               "     them into a separate region of memory for each vCPU.\n");
        backing_src_help("-s");
        printf(" -x: Split the memory region into this number of memslots.\n"
               "     (default: 1)\n");
        puts("");
        exit(0);
}

int main(int argc, char *argv[])
{
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
        struct test_params p = {
                .iterations = TEST_HOST_LOOP_N,
                .wr_fract = 1,
                .partition_vcpu_memory_access = true,
                .backing_src = DEFAULT_VM_MEM_SRC,
                .slots = 1,
        };
        int opt;

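        /*
         * Query which manual dirty log protect flags the host supports; -g
         * below clears this so the test falls back to KVM_GET_DIRTY_LOG
         * clearing the log itself.
         */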
        dirty_log_manual_caps =
                kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);

        guest_modes_append_default();

        while ((opt = getopt(argc, argv, "ghi:p:m:b:f:v:os:x:")) != -1) {
                switch (opt) {
                case 'g':
                        dirty_log_manual_caps = 0;
                        break;
                case 'i':
                        p.iterations = atoi(optarg);
                        break;
                case 'p':
                        p.phys_offset = strtoull(optarg, NULL, 0);
                        break;
                case 'm':
                        guest_modes_cmdline(optarg);
                        break;
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
                case 'f':
                        p.wr_fract = atoi(optarg);
                        TEST_ASSERT(p.wr_fract >= 1,
                                    "Write fraction cannot be less than one");
                        break;
                case 'v':
                        nr_vcpus = atoi(optarg);
                        TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
                                    "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
                        break;
                case 'o':
                        p.partition_vcpu_memory_access = false;
                        break;
                case 's':
                        p.backing_src = parse_backing_src_type(optarg);
                        break;
                case 'x':
                        p.slots = atoi(optarg);
                        break;
                case 'h':
                default:
                        help(argv[0]);
                        break;
                }
        }

        TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

        pr_info("Test iterations: %"PRIu64"\n", p.iterations);

        for_each_guest_mode(run_test, &p);

        return 0;
}