// SPDX-License-Identifier: GPL-2.0
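/*
 * Stress test for guests with a maximal amount of memory: back as much guest
 * memory as possible/requested with memslots that all alias a single host
 * allocation, have every vCPU write every page in its slice of guest memory,
 * force an MMU context reset and run again, then delete and unmap regions to
 * verify KVM drops its references.  Deliberately exits without deleting the
 * remaining memslots to exercise cleanup via mmu_notifier.release.
 */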
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

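/*
 * Guest main: write every page in [start_gpa, end_gpa), using the GPA itself
 * as the value, then signal completion to the host via ucall.
 */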
static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
	uint64_t gpa;

	for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
		*((volatile uint64_t *)gpa) = gpa;

	GUEST_DONE();
}

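/* Per-vCPU arguments handed to each worker thread. */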
struct vcpu_info {
	struct kvm_vcpu *vcpu;
	uint64_t start_gpa;
	uint64_t end_gpa;
};

static int nr_vcpus;
static atomic_t rendezvous;

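/*
 * Rendezvous protocol: the boss primes the counter to +/-(nr_vcpus + 1).
 * Each vCPU moves the count one step toward zero and then spins until the
 * boss observes a magnitude of 1 (all vCPUs checked in) and flips the sign,
 * which releases the vCPUs and re-arms the counter for the next rendezvous.
 */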
static void rendezvous_with_boss(void)
{
	int orig = atomic_read(&rendezvous);

	if (orig > 0) {
		atomic_dec_and_test(&rendezvous);
		while (atomic_read(&rendezvous) > 0)
			cpu_relax();
	} else {
		atomic_inc(&rendezvous);
		while (atomic_read(&rendezvous) < 0)
			cpu_relax();
	}
}

static void run_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
}

static void *vcpu_worker(void *data)
{
	struct vcpu_info *info = data;
	struct kvm_vcpu *vcpu = info->vcpu;
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_sregs sregs;
	struct kvm_regs regs;

	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);

	/* Snapshot regs before the first run. */
	vcpu_regs_get(vcpu, &regs);
	rendezvous_with_boss();

	run_vcpu(vcpu);
	rendezvous_with_boss();
	vcpu_regs_set(vcpu, &regs);
	vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
	/* Toggle CR0.WP to trigger an MMU context reset. */
	sregs.cr0 ^= X86_CR0_WP;
#endif
	vcpu_sregs_set(vcpu, &sregs);
	rendezvous_with_boss();

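	/* Run again; on x86 this re-populates memory with a fresh MMU context. */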
	run_vcpu(vcpu);
	rendezvous_with_boss();

	return NULL;
}

static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
				uint64_t start_gpa, uint64_t end_gpa)
{
	struct vcpu_info *info;
	uint64_t gpa, nr_bytes;
	pthread_t *threads;
	int i;

	threads = malloc(nr_vcpus * sizeof(*threads));
	TEST_ASSERT(threads, "Failed to allocate vCPU threads");

	info = malloc(nr_vcpus * sizeof(*info));
	TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");

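	/*
	 * Split the GPA range evenly across vCPUs, truncating each slice down
	 * to a page-aligned size (any remainder at the tail goes unused).
	 */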
	nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
			~((uint64_t)vm->page_size - 1);
	TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);

	for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
		info[i].vcpu = vcpus[i];
		info[i].start_gpa = gpa;
		info[i].end_gpa = gpa + nr_bytes;
		pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
	}
	return threads;
}

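/*
 * Boss-side rendezvous: poll until all vCPUs have checked in (counter
 * magnitude of 1), timestamp the completed phase, then flip the counter's
 * sign to release the vCPUs into the next phase.
 */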
static void rendezvous_with_vcpus(struct timespec *time, const char *name)
{
	int i, rendezvoused;

	pr_info("Waiting for vCPUs to finish %s...\n", name);

	rendezvoused = atomic_read(&rendezvous);
	for (i = 0; abs(rendezvoused) != 1; i++) {
		usleep(100);
		if (!(i & 0x3f))
			pr_info("\r%d vCPUs haven't rendezvoused...",
				abs(rendezvoused) - 1);
		rendezvoused = atomic_read(&rendezvous);
	}

	clock_gettime(CLOCK_MONOTONIC, time);

	/* Release the vCPUs after getting the time of the previous action. */
	pr_info("\rAll vCPUs finished %s, releasing...\n", name);
	if (rendezvoused > 0)
		atomic_set(&rendezvous, -nr_vcpus - 1);
	else
		atomic_set(&rendezvous, nr_vcpus + 1);
}

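/*
 * Default to running on 3/4 of the available CPUs, which leaves some
 * headroom for the boss thread and other host tasks.
 */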
static void calc_default_nr_vcpus(void)
{
	cpu_set_t possible_mask;
	int r;

	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
		    errno, strerror(errno));

	nr_vcpus = CPU_COUNT(&possible_mask) * 3/4;
	TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
}

int main(int argc, char *argv[])
{
	/*
	 * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
	 * the guest's code, stack, and page tables.  Because selftests creates
	 * an IRQCHIP, a.k.a. a local APIC, KVM creates an internal memslot
	 * just below the 4gb boundary.  This test could create memory at
	 * 1gb-3gb, but it's simpler to skip straight to 4gb.
	 */
	const uint64_t size_1gb = (1 << 30);
	const uint64_t start_gpa = (4ull * size_1gb);
	const int first_slot = 1;

	struct timespec time_start, time_run1, time_reset, time_run2;
	uint64_t max_gpa, gpa, slot_size, max_mem, i;
	int max_slots, slot, opt, fd;
	bool hugepages = false;
	struct kvm_vcpu **vcpus;
	pthread_t *threads;
	struct kvm_vm *vm;
	void *mem;

	/*
	 * Default to 2gb so that maxing out systems with MAXPHYADDR=46, which
	 * are quite common for x86, requires changing only max_mem (KVM allows
	 * 32k memslots, 32k * 2gb == ~64tb of guest memory).
	 */
	slot_size = 2 * size_1gb;

	max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	TEST_ASSERT(max_slots > first_slot, "KVM is broken");

	/* All KVM MMUs should be able to survive a 128gb guest. */
	max_mem = 128 * size_1gb;

	calc_default_nr_vcpus();

	while ((opt = getopt(argc, argv, "c:hm:s:H")) != -1) {
		switch (opt) {
		case 'c':
			nr_vcpus = atoi(optarg);
			TEST_ASSERT(nr_vcpus > 0, "number of vcpus must be >0");
			break;
		case 'm':
			max_mem = atoi(optarg) * size_1gb;
			TEST_ASSERT(max_mem > 0, "memory size must be >0");
			break;
		case 's':
			slot_size = atoi(optarg) * size_1gb;
			TEST_ASSERT(slot_size > 0, "slot size must be >0");
			break;
		case 'H':
			hugepages = true;
			break;
		case 'h':
		default:
			printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]);
			exit(1);
		}
	}

	vcpus = malloc(nr_vcpus * sizeof(*vcpus));
	TEST_ASSERT(vcpus, "Failed to allocate vCPU array");

	vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);

	max_gpa = vm->max_gfn << vm->page_shift;
	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb");

	fd = kvm_memfd_alloc(slot_size, hugepages);
	mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");

	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");

	/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
	for (i = 0; i < slot_size; i += vm->page_size)
		((uint8_t *)mem)[i] = 0xaa;

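	/*
	 * Carve the GPA space into memslots, all backed by the same host
	 * mapping, until KVM's memslot limit, the guest's max GPA, or the
	 * requested amount of memory is hit, whichever comes first.
	 */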
	gpa = 0;
	for (slot = first_slot; slot < max_slots; slot++) {
		gpa = start_gpa + ((slot - first_slot) * slot_size);
		if (gpa + slot_size > max_gpa)
			break;

		if ((gpa - start_gpa) >= max_mem)
			break;

		vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem);

#ifdef __x86_64__
		/* Identity map memory in the guest using 1gb pages. */
		for (i = 0; i < slot_size; i += size_1gb)
			__virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
		for (i = 0; i < slot_size; i += vm->page_size)
			virt_pg_map(vm, gpa + i, gpa + i);
#endif
	}

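	/* Prime the rendezvous counter before the first worker can check in. */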
	atomic_set(&rendezvous, nr_vcpus + 1);
	threads = spawn_workers(vm, vcpus, start_gpa, gpa);

	free(vcpus);
	vcpus = NULL;

	pr_info("Running with %lugb of guest memory and %u vCPUs\n",
		(gpa - start_gpa) / size_1gb, nr_vcpus);

	rendezvous_with_vcpus(&time_start, "spawning");
	rendezvous_with_vcpus(&time_run1, "run 1");
	rendezvous_with_vcpus(&time_reset, "reset");
	rendezvous_with_vcpus(&time_run2, "run 2");

	time_run2  = timespec_sub(time_run2,  time_reset);
	time_reset = timespec_sub(time_reset, time_run1);
	time_run1  = timespec_sub(time_run1,  time_start);

	pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds\n",
		time_run1.tv_sec, time_run1.tv_nsec,
		time_reset.tv_sec, time_reset.tv_nsec,
		time_run2.tv_sec, time_run2.tv_nsec);

	/*
	 * Delete even numbered slots (arbitrary) and unmap the first half of
	 * the backing (also arbitrary) to verify KVM correctly drops all
	 * references to the removed regions.
	 */
	for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
		vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);

	munmap(mem, slot_size / 2);

	/* Sanity check that the vCPUs actually ran. */
	for (i = 0; i < nr_vcpus; i++)
		pthread_join(threads[i], NULL);

	/*
	 * Deliberately exit without deleting the remaining memslots or closing
	 * kvm_fd to test cleanup via mmu_notifier.release.
	 */
}