1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * tools/testing/selftests/kvm/lib/x86_64/processor.c
4  *
5  * Copyright (C) 2018, Google LLC.
6  */
7 
8 #include "test_util.h"
9 #include "kvm_util.h"
10 #include "processor.h"
11 
12 #ifndef NUM_INTERRUPTS
13 #define NUM_INTERRUPTS 256
14 #endif
15 
16 #define DEFAULT_CODE_SELECTOR 0x8
17 #define DEFAULT_DATA_SELECTOR 0x10
18 
19 #define MAX_NR_CPUID_ENTRIES 100
20 
21 vm_vaddr_t exception_handlers;
22 
23 static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
24 {
25 	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
26 		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
27 		indent, "",
28 		regs->rax, regs->rbx, regs->rcx, regs->rdx);
29 	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
30 		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
31 		indent, "",
32 		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
33 	fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
34 		"r10: 0x%.16llx r11: 0x%.16llx\n",
35 		indent, "",
36 		regs->r8, regs->r9, regs->r10, regs->r11);
37 	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
38 		"r14: 0x%.16llx r15: 0x%.16llx\n",
39 		indent, "",
40 		regs->r12, regs->r13, regs->r14, regs->r15);
41 	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
42 		indent, "",
43 		regs->rip, regs->rflags);
44 }
45 
46 static void segment_dump(FILE *stream, struct kvm_segment *segment,
47 			 uint8_t indent)
48 {
49 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
50 		"selector: 0x%.4x type: 0x%.2x\n",
51 		indent, "", segment->base, segment->limit,
52 		segment->selector, segment->type);
53 	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
54 		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
55 		indent, "", segment->present, segment->dpl,
56 		segment->db, segment->s, segment->l);
57 	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
58 		"unusable: 0x%.2x padding: 0x%.2x\n",
59 		indent, "", segment->g, segment->avl,
60 		segment->unusable, segment->padding);
61 }
62 
63 static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
64 			uint8_t indent)
65 {
66 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
67 		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
68 		indent, "", dtable->base, dtable->limit,
69 		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
70 }
71 
72 static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
73 {
74 	unsigned int i;
75 
76 	fprintf(stream, "%*scs:\n", indent, "");
77 	segment_dump(stream, &sregs->cs, indent + 2);
78 	fprintf(stream, "%*sds:\n", indent, "");
79 	segment_dump(stream, &sregs->ds, indent + 2);
80 	fprintf(stream, "%*ses:\n", indent, "");
81 	segment_dump(stream, &sregs->es, indent + 2);
82 	fprintf(stream, "%*sfs:\n", indent, "");
83 	segment_dump(stream, &sregs->fs, indent + 2);
84 	fprintf(stream, "%*sgs:\n", indent, "");
85 	segment_dump(stream, &sregs->gs, indent + 2);
86 	fprintf(stream, "%*sss:\n", indent, "");
87 	segment_dump(stream, &sregs->ss, indent + 2);
88 	fprintf(stream, "%*str:\n", indent, "");
89 	segment_dump(stream, &sregs->tr, indent + 2);
90 	fprintf(stream, "%*sldt:\n", indent, "");
91 	segment_dump(stream, &sregs->ldt, indent + 2);
92 
93 	fprintf(stream, "%*sgdt:\n", indent, "");
94 	dtable_dump(stream, &sregs->gdt, indent + 2);
95 	fprintf(stream, "%*sidt:\n", indent, "");
96 	dtable_dump(stream, &sregs->idt, indent + 2);
97 
98 	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
99 		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
100 		indent, "",
101 		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
102 	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
103 		"apic_base: 0x%.16llx\n",
104 		indent, "",
105 		sregs->cr8, sregs->efer, sregs->apic_base);
106 
107 	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
108 	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
109 		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
110 			sregs->interrupt_bitmap[i]);
111 	}
112 }
113 
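/*
 * Returns true if KVM is using two-dimensional paging, i.e. if the relevant
 * vendor module was loaded with EPT (Intel) or NPT (AMD) enabled.
 */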
114 bool kvm_is_tdp_enabled(void)
115 {
116 	if (is_intel_cpu())
117 		return get_kvm_intel_param_bool("ept");
118 	else
119 		return get_kvm_amd_param_bool("npt");
120 }
121 
122 void virt_arch_pgd_alloc(struct kvm_vm *vm)
123 {
124 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
125 		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
126 
127 	/* If needed, create page map l4 table. */
128 	if (!vm->pgd_created) {
129 		vm->pgd = vm_alloc_page_table(vm);
130 		vm->pgd_created = true;
131 	}
132 }
133 
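/*
 * Return a host pointer to the page table entry that translates @vaddr at
 * @level within the page table located at physical frame @pt_pfn.
 */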
134 static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
135 			  int level)
136 {
137 	uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
138 	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
139 
140 	return &page_table[index];
141 }
142 
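/*
 * Install an upper level page table entry for @vaddr: create a hugepage
 * mapping to @paddr if @current_level is the @target_level, otherwise
 * allocate the next level page table if the entry isn't already present.
 */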
143 static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
144 				       uint64_t pt_pfn,
145 				       uint64_t vaddr,
146 				       uint64_t paddr,
147 				       int current_level,
148 				       int target_level)
149 {
150 	uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);
151 
152 	if (!(*pte & PTE_PRESENT_MASK)) {
153 		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
154 		if (current_level == target_level)
155 			*pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
156 		else
157 			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
158 	} else {
159 		/*
160 		 * Entry already present.  Assert that the caller doesn't want
161 		 * a hugepage at this level, and that there isn't a hugepage at
162 		 * this level.
163 		 */
		TEST_ASSERT(current_level != target_level,
			    "Cannot create hugepage at level: %u, vaddr: 0x%lx",
			    current_level, vaddr);
		TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
			    "Cannot create page table at level: %u, vaddr: 0x%lx",
			    current_level, vaddr);
170 	}
171 	return pte;
172 }
173 
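/*
 * Map @vaddr to @paddr with a page of PG_LEVEL_SIZE(@level) bytes, creating
 * any missing upper level page tables along the way.
 */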
174 void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
175 {
176 	const uint64_t pg_size = PG_LEVEL_SIZE(level);
177 	uint64_t *pml4e, *pdpe, *pde;
178 	uint64_t *pte;
179 
180 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
181 		    "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
182 
	TEST_ASSERT((vaddr % pg_size) == 0,
		    "Virtual address not aligned,\n"
		    "  vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
186 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
187 		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
188 	TEST_ASSERT((paddr % pg_size) == 0,
189 		    "Physical address not aligned,\n"
190 		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
191 	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
192 		    "Physical address beyond maximum supported,\n"
193 		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
194 		    paddr, vm->max_gfn, vm->page_size);
195 
196 	/*
197 	 * Allocate upper level page tables, if not already present.  Return
198 	 * early if a hugepage was created.
199 	 */
200 	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
201 				      vaddr, paddr, PG_LEVEL_512G, level);
202 	if (*pml4e & PTE_LARGE_MASK)
203 		return;
204 
205 	pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
206 	if (*pdpe & PTE_LARGE_MASK)
207 		return;
208 
209 	pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
210 	if (*pde & PTE_LARGE_MASK)
211 		return;
212 
213 	/* Fill in page table entry. */
214 	pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K);
	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
		    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
217 	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
218 }
219 
220 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
221 {
222 	__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
223 }
224 
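/* Map @nr_bytes starting at @vaddr/@paddr using pages of the given @level. */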
225 void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
226 		    uint64_t nr_bytes, int level)
227 {
228 	uint64_t pg_size = PG_LEVEL_SIZE(level);
229 	uint64_t nr_pages = nr_bytes / pg_size;
230 	int i;
231 
232 	TEST_ASSERT(nr_bytes % pg_size == 0,
233 		    "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
234 		    nr_bytes, pg_size);
235 
236 	for (i = 0; i < nr_pages; i++) {
237 		__virt_pg_map(vm, vaddr, paddr, level);
238 
239 		vaddr += pg_size;
240 		paddr += pg_size;
241 	}
242 }
243 
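/*
 * Walk the guest's page tables and return a host pointer to the final 4K PTE
 * that maps @vaddr, asserting that every level is present, maps a page table
 * rather than a hugepage, and has no reserved bits set.
 */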
244 static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
245 					  struct kvm_vcpu *vcpu,
246 					  uint64_t vaddr)
247 {
248 	uint16_t index[4];
249 	uint64_t *pml4e, *pdpe, *pde;
250 	uint64_t *pte;
251 	struct kvm_sregs sregs;
252 	uint64_t rsvd_mask = 0;
253 
254 	/* Set the high bits in the reserved mask. */
255 	if (vm->pa_bits < 52)
256 		rsvd_mask = GENMASK_ULL(51, vm->pa_bits);
257 
258 	/*
259 	 * SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
260 	 * with 4-Level Paging and 5-Level Paging".
261 	 * If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1,
262 	 * the XD flag (bit 63) is reserved.
263 	 */
264 	vcpu_sregs_get(vcpu, &sregs);
265 	if ((sregs.efer & EFER_NX) == 0) {
266 		rsvd_mask |= PTE_NX_MASK;
267 	}
268 
269 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
270 		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
271 	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
272 		(vaddr >> vm->page_shift)),
273 		"Invalid virtual address, vaddr: 0x%lx",
274 		vaddr);
	/*
	 * Based on the mode check above there are 48 bits in the vaddr, so
	 * shift left and then right by 16 to sign extend the last bit (bit 47).
	 */
279 	TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
280 		"Canonical check failed.  The virtual address is invalid.");
281 
282 	index[0] = (vaddr >> 12) & 0x1ffu;
283 	index[1] = (vaddr >> 21) & 0x1ffu;
284 	index[2] = (vaddr >> 30) & 0x1ffu;
285 	index[3] = (vaddr >> 39) & 0x1ffu;
286 
287 	pml4e = addr_gpa2hva(vm, vm->pgd);
288 	TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
289 		"Expected pml4e to be present for gva: 0x%08lx", vaddr);
290 	TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
291 		"Unexpected reserved bits set.");
292 
293 	pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
294 	TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
295 		"Expected pdpe to be present for gva: 0x%08lx", vaddr);
296 	TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
297 		"Expected pdpe to map a pde not a 1-GByte page.");
298 	TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
299 		"Unexpected reserved bits set.");
300 
301 	pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
302 	TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
303 		"Expected pde to be present for gva: 0x%08lx", vaddr);
304 	TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
305 		"Expected pde to map a pte not a 2-MByte page.");
306 	TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
307 		"Unexpected reserved bits set.");
308 
309 	pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
310 	TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
311 		"Expected pte to be present for gva: 0x%08lx", vaddr);
312 
313 	return &pte[index[0]];
314 }
315 
316 uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
317 				 uint64_t vaddr)
318 {
319 	uint64_t *pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
320 
321 	return *(uint64_t *)pte;
322 }
323 
324 void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
325 			     uint64_t vaddr, uint64_t pte)
326 {
327 	uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
328 
329 	*(uint64_t *)new_pte = pte;
330 }
331 
332 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
333 {
334 	uint64_t *pml4e, *pml4e_start;
335 	uint64_t *pdpe, *pdpe_start;
336 	uint64_t *pde, *pde_start;
337 	uint64_t *pte, *pte_start;
338 
339 	if (!vm->pgd_created)
340 		return;
341 
342 	fprintf(stream, "%*s                                          "
343 		"                no\n", indent, "");
344 	fprintf(stream, "%*s      index hvaddr         gpaddr         "
345 		"addr         w exec dirty\n",
346 		indent, "");
347 	pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
348 	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
349 		pml4e = &pml4e_start[n1];
350 		if (!(*pml4e & PTE_PRESENT_MASK))
351 			continue;
352 		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
353 			" %u\n",
354 			indent, "",
355 			pml4e - pml4e_start, pml4e,
356 			addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
357 			!!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
358 
359 		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
360 		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
361 			pdpe = &pdpe_start[n2];
362 			if (!(*pdpe & PTE_PRESENT_MASK))
363 				continue;
364 			fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10llx "
365 				"%u  %u\n",
366 				indent, "",
367 				pdpe - pdpe_start, pdpe,
368 				addr_hva2gpa(vm, pdpe),
369 				PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
370 				!!(*pdpe & PTE_NX_MASK));
371 
372 			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
373 			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
374 				pde = &pde_start[n3];
375 				if (!(*pde & PTE_PRESENT_MASK))
376 					continue;
377 				fprintf(stream, "%*spde   0x%-3zx %p "
378 					"0x%-12lx 0x%-10llx %u  %u\n",
379 					indent, "", pde - pde_start, pde,
380 					addr_hva2gpa(vm, pde),
381 					PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
382 					!!(*pde & PTE_NX_MASK));
383 
384 				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
385 				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
386 					pte = &pte_start[n4];
387 					if (!(*pte & PTE_PRESENT_MASK))
388 						continue;
389 					fprintf(stream, "%*spte   0x%-3zx %p "
390 						"0x%-12lx 0x%-10llx %u  %u "
391 						"    %u    0x%-10lx\n",
392 						indent, "",
393 						pte - pte_start, pte,
394 						addr_hva2gpa(vm, pte),
395 						PTE_GET_PFN(*pte),
396 						!!(*pte & PTE_WRITABLE_MASK),
397 						!!(*pte & PTE_NX_MASK),
398 						!!(*pte & PTE_DIRTY_MASK),
399 						((uint64_t) n1 << 27)
400 							| ((uint64_t) n2 << 18)
401 							| ((uint64_t) n3 << 9)
402 							| ((uint64_t) n4));
403 				}
404 			}
405 		}
406 	}
407 }
408 
409 /*
410  * Set Unusable Segment
411  *
412  * Input Args: None
413  *
414  * Output Args:
415  *   segp - Pointer to segment register
416  *
417  * Return: None
418  *
419  * Sets the segment register pointed to by @segp to an unusable state.
420  */
421 static void kvm_seg_set_unusable(struct kvm_segment *segp)
422 {
423 	memset(segp, 0, sizeof(*segp));
424 	segp->unusable = true;
425 }
426 
427 static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
428 {
429 	void *gdt = addr_gva2hva(vm, vm->gdt);
430 	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;
431 
432 	desc->limit0 = segp->limit & 0xFFFF;
433 	desc->base0 = segp->base & 0xFFFF;
434 	desc->base1 = segp->base >> 16;
435 	desc->type = segp->type;
436 	desc->s = segp->s;
437 	desc->dpl = segp->dpl;
438 	desc->p = segp->present;
439 	desc->limit1 = segp->limit >> 16;
440 	desc->avl = segp->avl;
441 	desc->l = segp->l;
442 	desc->db = segp->db;
443 	desc->g = segp->g;
444 	desc->base2 = segp->base >> 24;
445 	if (!segp->s)
446 		desc->base3 = segp->base >> 32;
447 }
448 
449 
450 /*
451  * Set Long Mode Flat Kernel Code Segment
452  *
453  * Input Args:
454  *   vm - VM whose GDT is being filled, or NULL to only write segp
455  *   selector - selector value
456  *
457  * Output Args:
458  *   segp - Pointer to KVM segment
459  *
460  * Return: None
461  *
462  * Sets up the KVM segment pointed to by @segp, to be a code segment
463  * with the selector value given by @selector.
464  */
465 static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
466 	struct kvm_segment *segp)
467 {
468 	memset(segp, 0, sizeof(*segp));
469 	segp->selector = selector;
470 	segp->limit = 0xFFFFFFFFu;
471 	segp->s = 0x1; /* kTypeCodeData */
472 	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
473 					  * | kFlagCodeReadable
474 					  */
475 	segp->g = true;
476 	segp->l = true;
477 	segp->present = 1;
478 	if (vm)
479 		kvm_seg_fill_gdt_64bit(vm, segp);
480 }
481 
482 /*
483  * Set Long Mode Flat Kernel Data Segment
484  *
485  * Input Args:
486  *   vm - VM whose GDT is being filled, or NULL to only write segp
487  *   selector - selector value
488  *
489  * Output Args:
490  *   segp - Pointer to KVM segment
491  *
492  * Return: None
493  *
494  * Sets up the KVM segment pointed to by @segp, to be a data segment
495  * with the selector value given by @selector.
496  */
497 static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
498 	struct kvm_segment *segp)
499 {
500 	memset(segp, 0, sizeof(*segp));
501 	segp->selector = selector;
502 	segp->limit = 0xFFFFFFFFu;
503 	segp->s = 0x1; /* kTypeCodeData */
504 	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
505 					  * | kFlagDataWritable
506 					  */
507 	segp->g = true;
508 	segp->present = true;
509 	if (vm)
510 		kvm_seg_fill_gdt_64bit(vm, segp);
511 }
512 
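/*
 * Translate the guest virtual address @gva to a guest physical address by
 * walking the VM's page tables; the test fails if @gva is not mapped.
 */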
513 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
514 {
515 	uint16_t index[4];
516 	uint64_t *pml4e, *pdpe, *pde;
517 	uint64_t *pte;
518 
519 	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
520 		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
521 
522 	index[0] = (gva >> 12) & 0x1ffu;
523 	index[1] = (gva >> 21) & 0x1ffu;
524 	index[2] = (gva >> 30) & 0x1ffu;
525 	index[3] = (gva >> 39) & 0x1ffu;
526 
527 	if (!vm->pgd_created)
528 		goto unmapped_gva;
529 	pml4e = addr_gpa2hva(vm, vm->pgd);
530 	if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
531 		goto unmapped_gva;
532 
533 	pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
534 	if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
535 		goto unmapped_gva;
536 
537 	pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
538 	if (!(pde[index[1]] & PTE_PRESENT_MASK))
539 		goto unmapped_gva;
540 
541 	pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
542 	if (!(pte[index[0]] & PTE_PRESENT_MASK))
543 		goto unmapped_gva;
544 
545 	return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);
546 
547 unmapped_gva:
548 	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
549 	exit(EXIT_FAILURE);
550 }
551 
552 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
553 {
554 	if (!vm->gdt)
555 		vm->gdt = vm_vaddr_alloc_page(vm);
556 
557 	dt->base = vm->gdt;
558 	dt->limit = getpagesize();
559 }
560 
561 static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
562 				int selector)
563 {
564 	if (!vm->tss)
565 		vm->tss = vm_vaddr_alloc_page(vm);
566 
567 	memset(segp, 0, sizeof(*segp));
568 	segp->base = vm->tss;
569 	segp->limit = 0x67;
570 	segp->selector = selector;
571 	segp->type = 0xb;
572 	segp->present = 1;
573 	kvm_seg_fill_gdt_64bit(vm, segp);
574 }
575 
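/*
 * Set up the vCPU's sregs for 64-bit mode: flat kernel code/data segments, a
 * minimal GDT and TSS, long mode enabled in EFER, and CR3 pointing at the
 * VM's page tables.
 */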
576 static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
577 {
578 	struct kvm_sregs sregs;
579 
580 	/* Set mode specific system register values. */
581 	vcpu_sregs_get(vcpu, &sregs);
582 
583 	sregs.idt.limit = 0;
584 
585 	kvm_setup_gdt(vm, &sregs.gdt);
586 
587 	switch (vm->mode) {
588 	case VM_MODE_PXXV48_4K:
589 		sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
590 		sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
591 		sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
592 
593 		kvm_seg_set_unusable(&sregs.ldt);
594 		kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
595 		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
596 		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
597 		kvm_setup_tss_64bit(vm, &sregs.tr, 0x18);
598 		break;
599 
600 	default:
601 		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
602 	}
603 
604 	sregs.cr3 = vm->pgd;
605 	vcpu_sregs_set(vcpu, &sregs);
606 }
607 
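/*
 * Skip the test unless the XSAVE feature in @bit is supported for guests per
 * KVM_X86_XCOMP_GUEST_SUPP, then request permission to use it via
 * arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM) and verify the request took effect.
 */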
608 void __vm_xsave_require_permission(int bit, const char *name)
609 {
610 	int kvm_fd;
611 	u64 bitmask;
612 	long rc;
613 	struct kvm_device_attr attr = {
614 		.group = 0,
615 		.attr = KVM_X86_XCOMP_GUEST_SUPP,
616 		.addr = (unsigned long) &bitmask
617 	};
618 
619 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
620 
621 	kvm_fd = open_kvm_dev_path_or_exit();
622 	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
623 	close(kvm_fd);
624 
625 	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
626 		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
627 
628 	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
629 
630 	__TEST_REQUIRE(bitmask & (1ULL << bit),
631 		       "Required XSAVE feature '%s' not supported", name);
632 
633 	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit));
634 
635 	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
636 	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
637 	TEST_ASSERT(bitmask & (1ULL << bit),
638 		    "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx",
639 		    bitmask);
640 }
641 
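/*
 * Add a vCPU to @vm with KVM's supported CPUID, a freshly allocated stack,
 * RIP pointing at @guest_code, and MP state set to runnable.
 */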
642 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
643 				  void *guest_code)
644 {
645 	struct kvm_mp_state mp_state;
646 	struct kvm_regs regs;
647 	vm_vaddr_t stack_vaddr;
648 	struct kvm_vcpu *vcpu;
649 
650 	stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
651 				     DEFAULT_GUEST_STACK_VADDR_MIN);
652 
653 	vcpu = __vm_vcpu_add(vm, vcpu_id);
654 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
655 	vcpu_setup(vm, vcpu);
656 
657 	/* Setup guest general purpose registers */
658 	vcpu_regs_get(vcpu, &regs);
659 	regs.rflags = regs.rflags | 0x2;
660 	regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
661 	regs.rip = (unsigned long) guest_code;
662 	vcpu_regs_set(vcpu, &regs);
663 
664 	/* Setup the MP state */
665 	mp_state.mp_state = 0;
666 	vcpu_mp_state_set(vcpu, &mp_state);
667 
668 	return vcpu;
669 }
670 
671 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
672 {
673 	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
674 
675 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
676 
677 	return vcpu;
678 }
679 
680 void vcpu_arch_free(struct kvm_vcpu *vcpu)
681 {
682 	if (vcpu->cpuid)
683 		free(vcpu->cpuid);
684 }
685 
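/*
 * Return KVM's supported CPUID (KVM_GET_SUPPORTED_CPUID).  The result is
 * cached after the first call as the information is system-wide and static.
 */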
686 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
687 {
688 	static struct kvm_cpuid2 *cpuid;
689 	int kvm_fd;
690 
691 	if (cpuid)
692 		return cpuid;
693 
694 	cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
695 	kvm_fd = open_kvm_dev_path_or_exit();
696 
697 	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
698 
699 	close(kvm_fd);
700 	return cpuid;
701 }
702 
703 bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
704 		   struct kvm_x86_cpu_feature feature)
705 {
706 	const struct kvm_cpuid_entry2 *entry;
707 	int i;
708 
709 	for (i = 0; i < cpuid->nent; i++) {
710 		entry = &cpuid->entries[i];
711 
712 		/*
713 		 * The output registers in kvm_cpuid_entry2 are in alphabetical
714 		 * order, but kvm_x86_cpu_feature matches that mess, so yay
715 		 * pointer shenanigans!
716 		 */
717 		if (entry->function == feature.function &&
718 		    entry->index == feature.index)
719 			return (&entry->eax)[feature.reg] & BIT(feature.bit);
720 	}
721 
722 	return false;
723 }
724 
725 uint64_t kvm_get_feature_msr(uint64_t msr_index)
726 {
727 	struct {
728 		struct kvm_msrs header;
729 		struct kvm_msr_entry entry;
730 	} buffer = {};
731 	int r, kvm_fd;
732 
733 	buffer.header.nmsrs = 1;
734 	buffer.entry.index = msr_index;
735 	kvm_fd = open_kvm_dev_path_or_exit();
736 
737 	r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
738 	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));
739 
740 	close(kvm_fd);
741 	return buffer.entry.data;
742 }
743 
744 void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
745 {
746 	TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
747 
748 	/* Allow overriding the default CPUID. */
749 	if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
750 		free(vcpu->cpuid);
751 		vcpu->cpuid = NULL;
752 	}
753 
754 	if (!vcpu->cpuid)
755 		vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);
756 
757 	memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
758 	vcpu_set_cpuid(vcpu);
759 }
760 
761 void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr)
762 {
763 	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, 0x80000008);
764 
765 	entry->eax = (entry->eax & ~0xff) | maxphyaddr;
766 	vcpu_set_cpuid(vcpu);
767 }
768 
769 void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
770 {
771 	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
772 
773 	entry->eax = 0;
774 	entry->ebx = 0;
775 	entry->ecx = 0;
776 	entry->edx = 0;
777 	vcpu_set_cpuid(vcpu);
778 }
779 
780 void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
781 				     struct kvm_x86_cpu_feature feature,
782 				     bool set)
783 {
784 	struct kvm_cpuid_entry2 *entry;
785 	u32 *reg;
786 
787 	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
788 	reg = (&entry->eax) + feature.reg;
789 
790 	if (set)
791 		*reg |= BIT(feature.bit);
792 	else
793 		*reg &= ~BIT(feature.bit);
794 
795 	vcpu_set_cpuid(vcpu);
796 }
797 
798 uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
799 {
800 	struct {
801 		struct kvm_msrs header;
802 		struct kvm_msr_entry entry;
803 	} buffer = {};
804 
805 	buffer.header.nmsrs = 1;
806 	buffer.entry.index = msr_index;
807 
808 	vcpu_msrs_get(vcpu, &buffer.header);
809 
810 	return buffer.entry.data;
811 }
812 
813 int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
814 {
815 	struct {
816 		struct kvm_msrs header;
817 		struct kvm_msr_entry entry;
818 	} buffer = {};
819 
820 	memset(&buffer, 0, sizeof(buffer));
821 	buffer.header.nmsrs = 1;
822 	buffer.entry.index = msr_index;
823 	buffer.entry.data = msr_value;
824 
825 	return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
826 }
827 
828 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
829 {
830 	va_list ap;
831 	struct kvm_regs regs;
832 
	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
		    "  num: %u",
		    num);
836 
837 	va_start(ap, num);
838 	vcpu_regs_get(vcpu, &regs);
839 
840 	if (num >= 1)
841 		regs.rdi = va_arg(ap, uint64_t);
842 
843 	if (num >= 2)
844 		regs.rsi = va_arg(ap, uint64_t);
845 
846 	if (num >= 3)
847 		regs.rdx = va_arg(ap, uint64_t);
848 
849 	if (num >= 4)
850 		regs.rcx = va_arg(ap, uint64_t);
851 
852 	if (num >= 5)
853 		regs.r8 = va_arg(ap, uint64_t);
854 
855 	if (num >= 6)
856 		regs.r9 = va_arg(ap, uint64_t);
857 
858 	vcpu_regs_set(vcpu, &regs);
859 	va_end(ap);
860 }
861 
862 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
863 {
864 	struct kvm_regs regs;
865 	struct kvm_sregs sregs;
866 
867 	fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);
868 
869 	fprintf(stream, "%*sregs:\n", indent + 2, "");
870 	vcpu_regs_get(vcpu, &regs);
871 	regs_dump(stream, &regs, indent + 4);
872 
873 	fprintf(stream, "%*ssregs:\n", indent + 2, "");
874 	vcpu_sregs_get(vcpu, &sregs);
875 	sregs_dump(stream, &sregs, indent + 4);
876 }
877 
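/*
 * Query the number of MSRs in the requested list (save/restore or feature
 * MSRs), allocate a buffer of that size, and retrieve the full list.
 */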
878 static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
879 {
880 	struct kvm_msr_list *list;
881 	struct kvm_msr_list nmsrs;
882 	int kvm_fd, r;
883 
884 	kvm_fd = open_kvm_dev_path_or_exit();
885 
886 	nmsrs.nmsrs = 0;
887 	if (!feature_msrs)
888 		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
889 	else
890 		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);
891 
892 	TEST_ASSERT(r == -1 && errno == E2BIG,
893 		    "Expected -E2BIG, got rc: %i errno: %i (%s)",
894 		    r, errno, strerror(errno));
895 
896 	list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
897 	TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
898 	list->nmsrs = nmsrs.nmsrs;
899 
900 	if (!feature_msrs)
901 		kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
902 	else
903 		kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
904 	close(kvm_fd);
905 
906 	TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
907 		    "Number of MSRs in list changed, was %d, now %d",
908 		    nmsrs.nmsrs, list->nmsrs);
909 	return list;
910 }
911 
912 const struct kvm_msr_list *kvm_get_msr_index_list(void)
913 {
914 	static const struct kvm_msr_list *list;
915 
916 	if (!list)
917 		list = __kvm_get_msr_index_list(false);
918 	return list;
919 }
920 
921 
922 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
923 {
924 	static const struct kvm_msr_list *list;
925 
926 	if (!list)
927 		list = __kvm_get_msr_index_list(true);
928 	return list;
929 }
930 
931 bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
932 {
933 	const struct kvm_msr_list *list = kvm_get_msr_index_list();
934 	int i;
935 
936 	for (i = 0; i < list->nmsrs; ++i) {
937 		if (list->indices[i] == msr_index)
938 			return true;
939 	}
940 
941 	return false;
942 }
943 
944 static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
945 				  struct kvm_x86_state *state)
946 {
947 	int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
948 
949 	if (size) {
950 		state->xsave = malloc(size);
951 		vcpu_xsave2_get(vcpu, state->xsave);
952 	} else {
953 		state->xsave = malloc(sizeof(struct kvm_xsave));
954 		vcpu_xsave_get(vcpu, state->xsave);
955 	}
956 }
957 
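/*
 * Save a snapshot of the vCPU's state (events, MP state, registers, XSAVE,
 * XCRs, sregs, nested state, MSRs and debug registers) so that it can later
 * be restored via vcpu_load_state(), e.g. into a different VM.
 */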
958 struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
959 {
960 	const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
961 	struct kvm_x86_state *state;
962 	int i;
963 
964 	static int nested_size = -1;
965 
966 	if (nested_size == -1) {
967 		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
		TEST_ASSERT(nested_size <= sizeof(state->nested_),
			    "Nested state size too big, %i > %zu",
			    nested_size, sizeof(state->nested_));
971 	}
972 
973 	/*
974 	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
975 	 * guest state is consistent only after userspace re-enters the
976 	 * kernel with KVM_RUN.  Complete IO prior to migrating state
977 	 * to a new VM.
978 	 */
979 	vcpu_run_complete_io(vcpu);
980 
981 	state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
982 
983 	vcpu_events_get(vcpu, &state->events);
984 	vcpu_mp_state_get(vcpu, &state->mp_state);
985 	vcpu_regs_get(vcpu, &state->regs);
986 	vcpu_save_xsave_state(vcpu, state);
987 
988 	if (kvm_has_cap(KVM_CAP_XCRS))
989 		vcpu_xcrs_get(vcpu, &state->xcrs);
990 
991 	vcpu_sregs_get(vcpu, &state->sregs);
992 
993 	if (nested_size) {
994 		state->nested.size = sizeof(state->nested_);
995 
996 		vcpu_nested_state_get(vcpu, &state->nested);
997 		TEST_ASSERT(state->nested.size <= nested_size,
998 			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
999 			    state->nested.size, nested_size);
1000 	} else {
1001 		state->nested.size = 0;
1002 	}
1003 
1004 	state->msrs.nmsrs = msr_list->nmsrs;
1005 	for (i = 0; i < msr_list->nmsrs; i++)
1006 		state->msrs.entries[i].index = msr_list->indices[i];
1007 	vcpu_msrs_get(vcpu, &state->msrs);
1008 
1009 	vcpu_debugregs_get(vcpu, &state->debugregs);
1010 
1011 	return state;
1012 }
1013 
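/* Restore vCPU state previously captured by vcpu_save_state(). */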
1014 void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
1015 {
1016 	vcpu_sregs_set(vcpu, &state->sregs);
1017 	vcpu_msrs_set(vcpu, &state->msrs);
1018 
1019 	if (kvm_has_cap(KVM_CAP_XCRS))
1020 		vcpu_xcrs_set(vcpu, &state->xcrs);
1021 
1022 	vcpu_xsave_set(vcpu,  state->xsave);
1023 	vcpu_events_set(vcpu, &state->events);
1024 	vcpu_mp_state_set(vcpu, &state->mp_state);
1025 	vcpu_debugregs_set(vcpu, &state->debugregs);
1026 	vcpu_regs_set(vcpu, &state->regs);
1027 
1028 	if (state->nested.size)
1029 		vcpu_nested_state_set(vcpu, &state->nested);
1030 }
1031 
1032 void kvm_x86_state_cleanup(struct kvm_x86_state *state)
1033 {
1034 	free(state->xsave);
1035 	free(state);
1036 }
1037 
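/* Compare the 12-byte CPUID.0 vendor string (EBX, EDX, ECX) against @vendor. */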
1038 static bool cpu_vendor_string_is(const char *vendor)
1039 {
1040 	const uint32_t *chunk = (const uint32_t *)vendor;
1041 	uint32_t eax, ebx, ecx, edx;
1042 
1043 	cpuid(0, &eax, &ebx, &ecx, &edx);
1044 	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
1045 }
1046 
1047 bool is_intel_cpu(void)
1048 {
1049 	return cpu_vendor_string_is("GenuineIntel");
1050 }
1051 
1052 /*
1053  * Exclude early K5 samples with a vendor string of "AMDisbetter!"
1054  */
1055 bool is_amd_cpu(void)
1056 {
1057 	return cpu_vendor_string_is("AuthenticAMD");
1058 }
1059 
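/*
 * Return the physical and virtual address widths from CPUID.0x80000008[EAX],
 * falling back to 32 virtual bits and 36 (with PAE) or 32 physical bits when
 * the extended leaf isn't available.
 */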
1060 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
1061 {
1062 	const struct kvm_cpuid_entry2 *entry;
1063 	bool pae;
1064 
1065 	/* SDM 4.1.4 */
1066 	if (kvm_get_cpuid_max_extended() < 0x80000008) {
1067 		pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
1068 		*pa_bits = pae ? 36 : 32;
1069 		*va_bits = 32;
1070 	} else {
1071 		entry = kvm_get_supported_cpuid_entry(0x80000008);
1072 		*pa_bits = entry->eax & 0xff;
1073 		*va_bits = (entry->eax >> 8) & 0xff;
1074 	}
1075 }
1076 
1077 struct idt_entry {
1078 	uint16_t offset0;
1079 	uint16_t selector;
1080 	uint16_t ist : 3;
1081 	uint16_t : 5;
1082 	uint16_t type : 4;
1083 	uint16_t : 1;
1084 	uint16_t dpl : 2;
1085 	uint16_t p : 1;
1086 	uint16_t offset1;
	uint32_t offset2;
	uint32_t reserved;
1088 };
1089 
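/* Write a 64-bit interrupt gate for @vector into the VM's IDT. */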
1090 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
1091 			  int dpl, unsigned short selector)
1092 {
1093 	struct idt_entry *base =
1094 		(struct idt_entry *)addr_gva2hva(vm, vm->idt);
1095 	struct idt_entry *e = &base[vector];
1096 
1097 	memset(e, 0, sizeof(*e));
1098 	e->offset0 = addr;
1099 	e->selector = selector;
1100 	e->ist = 0;
1101 	e->type = 14;
1102 	e->dpl = dpl;
1103 	e->p = 1;
1104 	e->offset1 = addr >> 16;
1105 	e->offset2 = addr >> 32;
1106 }
1107 
1108 
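/*
 * Attempt to fix up an expected exception: if the guest pre-loaded r9 with
 * KVM_EXCEPTION_MAGIC and r10 with the faulting RIP, resume at the recovery
 * address stashed in r11 and report the vector back to the guest in r9.
 * #DE is deliberately never fixed up.
 */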
1109 static bool kvm_fixup_exception(struct ex_regs *regs)
1110 {
1111 	if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
1112 		return false;
1113 
1114 	if (regs->vector == DE_VECTOR)
1115 		return false;
1116 
1117 	regs->rip = regs->r11;
1118 	regs->r9 = regs->vector;
1119 	return true;
1120 }
1121 
1122 void kvm_exit_unexpected_vector(uint32_t value)
1123 {
1124 	ucall(UCALL_UNHANDLED, 1, value);
1125 }
1126 
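/*
 * Common guest-side exception dispatcher: invoke the test's registered
 * handler for the vector if one exists, otherwise try the generic fixup,
 * and as a last resort report the unhandled vector to userspace.
 */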
1127 void route_exception(struct ex_regs *regs)
1128 {
1129 	typedef void(*handler)(struct ex_regs *);
1130 	handler *handlers = (handler *)exception_handlers;
1131 
1132 	if (handlers && handlers[regs->vector]) {
1133 		handlers[regs->vector](regs);
1134 		return;
1135 	}
1136 
1137 	if (kvm_fixup_exception(regs))
1138 		return;
1139 
1140 	kvm_exit_unexpected_vector(regs->vector);
1141 }
1142 
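/*
 * Allocate the VM's IDT and exception handler table, and point every IDT
 * entry at the corresponding stub in the guest's idt_handlers array.
 */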
1143 void vm_init_descriptor_tables(struct kvm_vm *vm)
1144 {
1145 	extern void *idt_handlers;
1146 	int i;
1147 
1148 	vm->idt = vm_vaddr_alloc_page(vm);
1149 	vm->handlers = vm_vaddr_alloc_page(vm);
	/* Handlers have the same address in both address spaces. */
1151 	for (i = 0; i < NUM_INTERRUPTS; i++)
1152 		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
1153 			DEFAULT_CODE_SELECTOR);
1154 }
1155 
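/*
 * Load the VM's IDT and GDT into the vCPU, set up GS as a kernel data
 * segment, and publish the handler table's address to the guest via the
 * exception_handlers global.
 */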
1156 void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
1157 {
1158 	struct kvm_vm *vm = vcpu->vm;
1159 	struct kvm_sregs sregs;
1160 
1161 	vcpu_sregs_get(vcpu, &sregs);
1162 	sregs.idt.base = vm->idt;
1163 	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
1164 	sregs.gdt.base = vm->gdt;
1165 	sregs.gdt.limit = getpagesize() - 1;
1166 	kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
1167 	vcpu_sregs_set(vcpu, &sregs);
1168 	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
1169 }
1170 
1171 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
1172 			       void (*handler)(struct ex_regs *))
1173 {
1174 	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
1175 
1176 	handlers[vector] = (vm_vaddr_t)handler;
1177 }
1178 
1179 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
1180 {
1181 	struct ucall uc;
1182 
1183 	if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
1184 		uint64_t vector = uc.args[0];
1185 
1186 		TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
1187 			  vector);
1188 	}
1189 }
1190 
1191 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
1192 					       uint32_t function, uint32_t index)
1193 {
1194 	int i;
1195 
1196 	for (i = 0; i < cpuid->nent; i++) {
1197 		if (cpuid->entries[i].function == function &&
1198 		    cpuid->entries[i].index == index)
1199 			return &cpuid->entries[i];
1200 	}
1201 
	TEST_FAIL("CPUID function 0x%x index 0x%x not found", function, index);
1203 
1204 	return NULL;
1205 }
1206 
1207 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
1208 		       uint64_t a3)
1209 {
1210 	uint64_t r;
1211 
1212 	asm volatile("vmcall"
1213 		     : "=a"(r)
1214 		     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
1215 	return r;
1216 }
1217 
1218 const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
1219 {
1220 	static struct kvm_cpuid2 *cpuid;
1221 	int kvm_fd;
1222 
1223 	if (cpuid)
1224 		return cpuid;
1225 
1226 	cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
1227 	kvm_fd = open_kvm_dev_path_or_exit();
1228 
1229 	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
1230 
1231 	close(kvm_fd);
1232 	return cpuid;
1233 }
1234 
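/*
 * Set the vCPU's CPUID to the union of KVM's supported CPUID and the
 * Hyper-V CPUID leaves, with KVM's own 0x400000xx leaves filtered out.
 */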
1235 void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
1236 {
1237 	static struct kvm_cpuid2 *cpuid_full;
1238 	const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
1239 	int i, nent = 0;
1240 
1241 	if (!cpuid_full) {
1242 		cpuid_sys = kvm_get_supported_cpuid();
1243 		cpuid_hv = kvm_get_supported_hv_cpuid();
1244 
1245 		cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
1246 		if (!cpuid_full) {
1247 			perror("malloc");
1248 			abort();
1249 		}
1250 
1251 		/* Need to skip KVM CPUID leaves 0x400000xx */
1252 		for (i = 0; i < cpuid_sys->nent; i++) {
1253 			if (cpuid_sys->entries[i].function >= 0x40000000 &&
1254 			    cpuid_sys->entries[i].function < 0x40000100)
1255 				continue;
1256 			cpuid_full->entries[nent] = cpuid_sys->entries[i];
1257 			nent++;
1258 		}
1259 
1260 		memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
1261 		       cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
1262 		cpuid_full->nent = nent + cpuid_hv->nent;
1263 	}
1264 
1265 	vcpu_init_cpuid(vcpu, cpuid_full);
1266 }
1267 
1268 const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
1269 {
1270 	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
1271 
1272 	vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
1273 
1274 	return cpuid;
1275 }
1276 
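/*
 * Compute the maximum usable GFN, carving out the HyperTransport memory
 * hole on AMD parts: just below 1TiB before family 17h, otherwise at the
 * top of the (possibly SME-reduced) physical address space.
 */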
1277 unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
1278 {
1279 	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
1280 	unsigned long ht_gfn, max_gfn, max_pfn;
1281 	uint32_t eax, ebx, ecx, edx, max_ext_leaf;
1282 
1283 	max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
1284 
1285 	/* Avoid reserved HyperTransport region on AMD processors.  */
1286 	if (!is_amd_cpu())
1287 		return max_gfn;
1288 
1289 	/* On parts with <40 physical address bits, the area is fully hidden */
1290 	if (vm->pa_bits < 40)
1291 		return max_gfn;
1292 
1293 	/* Before family 17h, the HyperTransport area is just below 1T.  */
1294 	ht_gfn = (1 << 28) - num_ht_pages;
1295 	cpuid(1, &eax, &ebx, &ecx, &edx);
1296 	if (x86_family(eax) < 0x17)
1297 		goto done;
1298 
1299 	/*
1300 	 * Otherwise it's at the top of the physical address space, possibly
1301 	 * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX.  Use
1302 	 * the old conservative value if MAXPHYADDR is not enumerated.
1303 	 */
1304 	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
1305 	max_ext_leaf = eax;
1306 	if (max_ext_leaf < 0x80000008)
1307 		goto done;
1308 
1309 	cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1310 	max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
1311 	if (max_ext_leaf >= 0x8000001f) {
1312 		cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
1313 		max_pfn >>= (ebx >> 6) & 0x3f;
1314 	}
1315 
1316 	ht_gfn = max_pfn - num_ht_pages;
1317 done:
1318 	return min(max_gfn, ht_gfn - 1);
1319 }
1320 
1321 /* Returns true if kvm_intel was loaded with unrestricted_guest=1. */
1322 bool vm_is_unrestricted_guest(struct kvm_vm *vm)
1323 {
1324 	/* Ensure that a KVM vendor-specific module is loaded. */
1325 	if (vm == NULL)
1326 		close(open_kvm_dev_path_or_exit());
1327 
1328 	return get_kvm_intel_param_bool("unrestricted_guest");
1329 }
1330