// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 */
#include <inttypes.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"

struct memstress_args memstress_args;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

struct vcpu_thread {
	/* The index of the vCPU. */
	int vcpu_idx;
	/* The pthread backing the vCPU. */
	pthread_t thread;
	/* Set to true once the vCPU thread is up and running. */
	bool running;
};

/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];

/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);

/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
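
/* The vCPUs created by memstress_create_vm(), indexed by vCPU id. */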
static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

/*
 * Continuously write to the first 8 bytes of each page in the
 * specified region.
 */
void memstress_guest_code(uint32_t vcpu_idx)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
	struct guest_random_state rand_state;
	uint64_t gva, pages, addr, page;
	int i;

	rand_state = new_guest_random_state(args->random_seed + vcpu_idx);
	gva = vcpu_args->gva;
	pages = vcpu_args->pages;

	/* Make sure vCPU args data structure is not corrupt. */
	GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);

	while (true) {
		/* Touch each page backing the shared args structure. */
		for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
			(void) *((volatile char *)args + i);

		for (i = 0; i < pages; i++) {
			if (args->random_access)
				page = guest_random_u32(&rand_state) % pages;
			else
				page = i;
			addr = gva + (page * args->guest_page_size);

			if (guest_random_u32(&rand_state) % 100 < args->write_percent)
				*(uint64_t *)addr = 0x0123456789ABCDEF;
			else
				READ_ONCE(*(uint64_t *)addr);
		}
		GUEST_SYNC(1);
	}
}

void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
			   struct kvm_vcpu *vcpus[],
			   uint64_t vcpu_memory_bytes,
			   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args;
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		vcpu_args = &args->vcpu_args[i];

		vcpu_args->vcpu = vcpus[i];
		vcpu_args->vcpu_idx = i;

		if (partition_vcpu_memory_access) {
			vcpu_args->gva = guest_test_virt_mem +
					 (i * vcpu_memory_bytes);
			vcpu_args->pages = vcpu_memory_bytes /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);
		} else {
			vcpu_args->gva = guest_test_virt_mem;
			vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa;
		}

		vcpu_args_set(vcpus[i], 1, i);

		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
			 i, vcpu_args->gpa, vcpu_args->gpa +
			 (vcpu_args->pages * args->guest_page_size));
	}
}
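
/*
 * Example (illustrative numbers, not from this file): with 4 vCPUs,
 * 1 GiB per vCPU, and partitioned access, vCPU 2 is assigned the third
 * 1 GiB chunk of the region (gva = guest_test_virt_mem + 2 GiB) and
 * covers 1 GiB / guest_page_size pages; with shared access, every vCPU
 * covers the full 4 GiB region starting at guest_test_virt_mem.
 */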

struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
				   uint64_t vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct kvm_vm *vm;
	uint64_t guest_num_pages, slot0_pages = 0;
	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
	uint64_t region_end_gfn;
	int i;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	/* By default vCPUs will write to memory. */
	args->write_percent = 100;

	/*
	 * Snapshot the non-huge page size.  This is used by the guest code to
	 * access/dirty pages at the logging granularity.
	 */
	args->guest_page_size = vm_guest_mode_params[mode].page_size;

	guest_num_pages = vm_adjust_num_guest_pages(mode,
				(nr_vcpus * vcpu_memory_bytes) / args->guest_page_size);

	TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
		    "Guest memory size is not host page size aligned.");
	TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0,
		    "Guest memory size is not guest page size aligned.");
	TEST_ASSERT(guest_num_pages % slots == 0,
		    "Guest memory cannot be evenly divided into %d slots.",
		    slots);

	/*
	 * If using nested, allocate extra pages for the nested page tables and
	 * in-memory data structures.
	 */
	if (args->nested)
		slot0_pages += memstress_nested_pages(nr_vcpus);

	/*
	 * Pass guest_num_pages to populate the page tables for test memory.
	 * The memory is also added to memslot 0, but that's a benign side
	 * effect as KVM allows aliasing HVAs in memslots.
	 */
	vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
				    slot0_pages + guest_num_pages,
				    memstress_guest_code, vcpus);

	args->vm = vm;

	/* Put the test region at the top of guest physical memory. */
	region_end_gfn = vm->max_gfn + 1;

#ifdef __x86_64__
	/*
	 * When running vCPUs in L2, restrict the test region to 48 bits to
	 * avoid needing 5-level page tables to identity map L2.
	 */
	if (args->nested)
		region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif

	/*
	 * If there should be more memory in the guest test region than there
	 * can be pages in the guest, it will definitely cause problems.
	 */
	TEST_ASSERT(guest_num_pages < region_end_gfn,
		    "Requested more guest memory than address space allows.\n"
		    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
		    " nr_vcpus: %d wss: %" PRIx64 "\n",
		    guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);

	args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
	args->gpa = align_down(args->gpa, backing_src_pagesz);
#ifdef __s390x__
	/* Align to 1M (segment size) */
	args->gpa = align_down(args->gpa, 1 << 20);
#endif

	args->size = guest_num_pages * args->guest_page_size;
	pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
		args->gpa, args->gpa + args->size);

	/* Add extra memory slots for testing */
	for (i = 0; i < slots; i++) {
		uint64_t region_pages = guest_num_pages / slots;
		vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;

		vm_userspace_mem_region_add(vm, backing_src, region_start,
					    MEMSTRESS_MEM_SLOT_INDEX + i,
					    region_pages, 0);
	}

	/* Do mapping for the demand paging memory slot */
	virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);

	memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
			      partition_vcpu_memory_access);

	if (args->nested) {
		pr_info("Configuring vCPUs to run in L2 (nested).\n");
		memstress_setup_nested(vm, nr_vcpus, vcpus);
	}

	/* Export the shared variables to the guest. */
	sync_global_to_guest(vm, memstress_args);

	return vm;
}
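
/*
 * Typical call sequence from a test, as a rough sketch (vcpu_worker and
 * the argument values are hypothetical, not taken from this file):
 *
 *	vm = memstress_create_vm(mode, nr_vcpus, vcpu_memory_bytes, 1,
 *				 VM_MEM_SRC_ANONYMOUS, true);
 *	memstress_set_write_percent(vm, 50);
 *	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
 *	... run the measured portion of the test ...
 *	memstress_join_vcpu_threads(nr_vcpus);
 *	memstress_destroy_vm(vm);
 */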

void memstress_destroy_vm(struct kvm_vm *vm)
{
	kvm_vm_free(vm);
}

void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{
	memstress_args.write_percent = write_percent;
	sync_global_to_guest(vm, memstress_args.write_percent);
}

void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
{
	memstress_args.random_seed = random_seed;
	sync_global_to_guest(vm, memstress_args.random_seed);
}

void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
	memstress_args.random_access = random_access;
	sync_global_to_guest(vm, memstress_args.random_access);
}
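
/*
 * Note: each setter above pushes the new value into guest memory via
 * sync_global_to_guest(), so running vCPUs observe it the next time the
 * guest code reads the shared args.
 */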

uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
	return 0;
}

void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
	pr_info("%s() not supported on this architecture, skipping.\n", __func__);
	exit(KSFT_SKIP);
}

static void *vcpu_thread_main(void *data)
{
	struct vcpu_thread *vcpu = data;
	int vcpu_idx = vcpu->vcpu_idx;

	if (memstress_args.pin_vcpus)
		kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);

	WRITE_ONCE(vcpu->running, true);

	/*
	 * Wait for all vCPU threads to be up and running before calling the
	 * test-provided vCPU thread function. This prevents thread creation
	 * (which requires taking the mmap_sem in write mode) from interfering
	 * with the guest faulting in its memory.
	 */
	while (!READ_ONCE(all_vcpu_threads_running))
		;

	vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);

	return NULL;
}

void memstress_start_vcpu_threads(int nr_vcpus,
				  void (*vcpu_fn)(struct memstress_vcpu_args *))
{
	int i;

	vcpu_thread_fn = vcpu_fn;
	WRITE_ONCE(all_vcpu_threads_running, false);
	WRITE_ONCE(memstress_args.stop_vcpus, false);

	for (i = 0; i < nr_vcpus; i++) {
		struct vcpu_thread *vcpu = &vcpu_threads[i];

		vcpu->vcpu_idx = i;
		WRITE_ONCE(vcpu->running, false);

		pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
	}

	for (i = 0; i < nr_vcpus; i++) {
		while (!READ_ONCE(vcpu_threads[i].running))
			;
	}

	WRITE_ONCE(all_vcpu_threads_running, true);
}

void memstress_join_vcpu_threads(int nr_vcpus)
{
	int i;

	WRITE_ONCE(memstress_args.stop_vcpus, true);

	for (i = 0; i < nr_vcpus; i++)
		pthread_join(vcpu_threads[i].thread, NULL);
}
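
/*
 * Setting stop_vcpus only requests an exit: the test-provided vcpu_fn is
 * expected to poll memstress_args.stop_vcpus (e.g. via READ_ONCE()) and
 * return once it is set, at which point the pthread_join() above reaps
 * the thread.
 */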

static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			       int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

void memstress_free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}
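
/*
 * Sketch of the dirty-logging helpers used together (pages_per_slot and
 * the iteration structure are illustrative, not mandated by this file):
 *
 *	bitmaps = memstress_alloc_bitmaps(slots, pages_per_slot);
 *	memstress_enable_dirty_logging(vm, slots);
 *	for each iteration:
 *		memstress_get_dirty_log(vm, bitmaps, slots);
 *		memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);
 *	memstress_disable_dirty_logging(vm, slots);
 *	memstress_free_bitmaps(bitmaps, slots);
 */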