// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 */
#define _GNU_SOURCE

#include <inttypes.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "memstress.h"
#include "processor.h"

struct memstress_args memstress_args;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

struct vcpu_thread {
	/* The index of the vCPU. */
	int vcpu_idx;

	/* The pthread backing the vCPU. */
	pthread_t thread;

	/* Set to true once the vCPU thread is up and running. */
	bool running;
};

/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];

/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);

/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;

static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

/*
 * Continuously read from or write to the first 8 bytes of each page in the
 * specified region, either sequentially or at random, per the configured
 * write percentage and access pattern.
 */
void memstress_guest_code(uint32_t vcpu_idx)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
	struct guest_random_state rand_state;
	uint64_t gva;
	uint64_t pages;
	uint64_t addr;
	uint64_t page;
	int i;

	/* Seed the PRNG uniquely per vCPU so that access patterns differ. */
	rand_state = new_guest_random_state(args->random_seed + vcpu_idx);

	gva = vcpu_args->gva;
	pages = vcpu_args->pages;

	/* Make sure vCPU args data structure is not corrupt. */
	GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);

	while (true) {
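		/*
		 * Read one byte of every page backing memstress_args.  The
		 * volatile accesses force real loads on each iteration,
		 * presumably so that the guest observes host-side updates
		 * synced in while the vCPUs run (e.g. a new write_percent).
		 */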
		for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
			(void) *((volatile char *)args + i);

		for (i = 0; i < pages; i++) {
			if (args->random_access)
				page = guest_random_u32(&rand_state) % pages;
			else
				page = i;

			addr = gva + (page * args->guest_page_size);

			if (guest_random_u32(&rand_state) % 100 < args->write_percent)
				*(uint64_t *)addr = 0x0123456789ABCDEF;
			else
				READ_ONCE(*(uint64_t *)addr);
		}

		GUEST_SYNC(1);
	}
}

void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
			   struct kvm_vcpu *vcpus[],
			   uint64_t vcpu_memory_bytes,
			   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args;
	int i;

	for (i = 0; i < nr_vcpus; i++) {
		vcpu_args = &args->vcpu_args[i];

		vcpu_args->vcpu = vcpus[i];
		vcpu_args->vcpu_idx = i;

		if (partition_vcpu_memory_access) {
			vcpu_args->gva = guest_test_virt_mem +
					 (i * vcpu_memory_bytes);
			vcpu_args->pages = vcpu_memory_bytes /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes);
		} else {
			vcpu_args->gva = guest_test_virt_mem;
			vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
					   args->guest_page_size;
			vcpu_args->gpa = args->gpa;
		}

		/* The one and only arg passed to the guest is the vCPU index. */
		vcpu_args_set(vcpus[i], 1, i);

		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
			 i, vcpu_args->gpa, vcpu_args->gpa +
			 (vcpu_args->pages * args->guest_page_size));
	}
}

struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
				   uint64_t vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access)
{
	struct memstress_args *args = &memstress_args;
	struct kvm_vm *vm;
	uint64_t guest_num_pages, slot0_pages = 0;
	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
	uint64_t region_end_gfn;
	int i;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	/* By default vCPUs will write to memory. */
	args->write_percent = 100;

	/*
	 * Snapshot the non-huge page size.  This is used by the guest code to
	 * access/dirty pages at the logging granularity.
	 */
	args->guest_page_size = vm_guest_mode_params[mode].page_size;

	guest_num_pages = vm_adjust_num_guest_pages(mode,
				(nr_vcpus * vcpu_memory_bytes) / args->guest_page_size);

	TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
		    "Guest memory size is not host page size aligned.");
	TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0,
		    "Guest memory size is not guest page size aligned.");
	TEST_ASSERT(guest_num_pages % slots == 0,
		    "Guest memory cannot be evenly divided into %d slots.",
		    slots);

	/*
	 * If using nested, allocate extra pages for the nested page tables and
	 * in-memory data structures.
	 */
	if (args->nested)
		slot0_pages += memstress_nested_pages(nr_vcpus);

	/*
	 * Pass guest_num_pages to populate the page tables for test memory.
	 * The memory is also added to memslot 0, but that's a benign side
	 * effect as KVM allows aliasing HVAs in memslots.
	 */
	vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
				    memstress_guest_code, vcpus);

	args->vm = vm;

	/* Put the test region at the top of guest physical memory. */
	region_end_gfn = vm->max_gfn + 1;

#ifdef __x86_64__
	/*
	 * When running vCPUs in L2, restrict the test region to 48 bits to
	 * avoid needing 5-level page tables to identity map L2.
	 */
	if (args->nested)
		region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif
	/*
	 * Requesting more memory for the guest test region than the guest's
	 * address space can hold is guaranteed to cause problems, so fail fast.
	 */
	TEST_ASSERT(guest_num_pages < region_end_gfn,
		    "Requested more guest memory than address space allows.\n"
		    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
		    " nr_vcpus: %d wss: %" PRIx64 "\n",
		    guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);

	args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
	args->gpa = align_down(args->gpa, backing_src_pagesz);
#ifdef __s390x__
	/* Align to 1M (segment size) */
	args->gpa = align_down(args->gpa, 1 << 20);
#endif
	args->size = guest_num_pages * args->guest_page_size;
	pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
		args->gpa, args->gpa + args->size);

	/* Add extra memory slots for testing */
	for (i = 0; i < slots; i++) {
		uint64_t region_pages = guest_num_pages / slots;
		vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;

		vm_userspace_mem_region_add(vm, backing_src, region_start,
					    MEMSTRESS_MEM_SLOT_INDEX + i,
					    region_pages, 0);
	}

	/* Do mapping for the demand paging memory slot */
	virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);

	memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
			      partition_vcpu_memory_access);

	if (args->nested) {
		pr_info("Configuring vCPUs to run in L2 (nested).\n");
		memstress_setup_nested(vm, nr_vcpus, vcpus);
	}

	/* Export the shared variables to the guest. */
	sync_global_to_guest(vm, memstress_args);

	return vm;
}
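
/*
 * Illustrative sketch (not compiled): one plausible way a test might drive
 * the helpers above.  example_test() and its vcpu_worker() callback are
 * hypothetical; real tests supply their own worker, which typically handles
 * the ucalls raised by GUEST_SYNC() rather than blindly re-entering the
 * guest.
 */
#if 0
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	/* Re-enter the guest until the test asks the vCPUs to stop. */
	while (!READ_ONCE(memstress_args.stop_vcpus))
		vcpu_run(vcpu_args->vcpu);
}

static void example_test(void)
{
	int nr_vcpus = 4;
	struct kvm_vm *vm;

	/* 1 GiB per vCPU, one test memslot, anonymous backing memory. */
	vm = memstress_create_vm(VM_MODE_DEFAULT, nr_vcpus, 1ul << 30, 1,
				 VM_MEM_SRC_ANONYMOUS, true);

	/* 50% writes / 50% reads, in random page order. */
	memstress_set_write_percent(vm, 50);
	memstress_set_random_access(vm, true);

	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
	memstress_join_vcpu_threads(nr_vcpus);
	memstress_destroy_vm(vm);
}
#endif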

void memstress_destroy_vm(struct kvm_vm *vm)
{
	kvm_vm_free(vm);
}

void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{
	memstress_args.write_percent = write_percent;
	sync_global_to_guest(vm, memstress_args.write_percent);
}

void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
{
	memstress_args.random_seed = random_seed;
	sync_global_to_guest(vm, memstress_args.random_seed);
}

void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
	memstress_args.random_access = random_access;
	sync_global_to_guest(vm, memstress_args.random_access);
}

uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
	return 0;
}

void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
	pr_info("%s() is not supported on this architecture, skipping.\n", __func__);
	exit(KSFT_SKIP);
}

static void *vcpu_thread_main(void *data)
{
	struct vcpu_thread *vcpu = data;
	int vcpu_idx = vcpu->vcpu_idx;

	if (memstress_args.pin_vcpus)
		kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);

	WRITE_ONCE(vcpu->running, true);

	/*
	 * Wait for all vCPU threads to be up and running before calling the test-
	 * provided vCPU thread function. This prevents thread creation (which
	 * requires taking the mmap_sem in write mode) from interfering with the
	 * guest faulting in its memory.
	 */
	while (!READ_ONCE(all_vcpu_threads_running))
		;

	vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);

	return NULL;
}

void memstress_start_vcpu_threads(int nr_vcpus,
				  void (*vcpu_fn)(struct memstress_vcpu_args *))
{
	int i;

	vcpu_thread_fn = vcpu_fn;
	WRITE_ONCE(all_vcpu_threads_running, false);
	WRITE_ONCE(memstress_args.stop_vcpus, false);

	for (i = 0; i < nr_vcpus; i++) {
		struct vcpu_thread *vcpu = &vcpu_threads[i];

		vcpu->vcpu_idx = i;
		WRITE_ONCE(vcpu->running, false);

		pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
	}

	/* Wait for every vCPU thread to report that it is up and running. */
	for (i = 0; i < nr_vcpus; i++) {
		while (!READ_ONCE(vcpu_threads[i].running))
			;
	}

	WRITE_ONCE(all_vcpu_threads_running, true);
}

void memstress_join_vcpu_threads(int nr_vcpus)
{
	int i;

	WRITE_ONCE(memstress_args.stop_vcpus, true);

	for (i = 0; i < nr_vcpus; i++)
		pthread_join(vcpu_threads[i].thread, NULL);
}

static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
	}
}

void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, true);
}

void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots)
{
	toggle_dirty_logging(vm, slots, false);
}

void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
}

void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
			       int slots, uint64_t pages_per_slot)
{
	int i;

	for (i = 0; i < slots; i++) {
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
}

unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
{
	unsigned long **bitmaps;
	int i;

	bitmaps = malloc(slots * sizeof(bitmaps[0]));
	TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");

	for (i = 0; i < slots; i++) {
		bitmaps[i] = bitmap_zalloc(pages_per_slot);
		TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
	}

	return bitmaps;
}

void memstress_free_bitmaps(unsigned long *bitmaps[], int slots)
{
	int i;

	for (i = 0; i < slots; i++)
		free(bitmaps[i]);

	free(bitmaps);
}
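
/*
 * Illustrative sketch (not compiled): a hypothetical dirty-logging pass
 * using the helpers above.  Note that kvm_vm_clear_dirty_log() relies on
 * KVM's manual dirty-log-protect capability being available.
 */
#if 0
static void example_dirty_log_pass(struct kvm_vm *vm, int slots,
				   uint64_t pages_per_slot)
{
	unsigned long **bitmaps = memstress_alloc_bitmaps(slots, pages_per_slot);

	memstress_enable_dirty_logging(vm, slots);

	/* ... run vCPUs here so the guest dirties its test memory ... */

	memstress_get_dirty_log(vm, bitmaps, slots);
	memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);

	memstress_disable_dirty_logging(vm, slots);
	memstress_free_bitmaps(bitmaps, slots);
}
#endif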