/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2023 Oxide Computer Company
 */
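
/*
 * Exercise reads and writes of guest MSR state through the vmm-data
 * interface (class VDC_MSR): probe the required buffer size, read the
 * default set of MSRs, spot-check a few well-known entries, write the
 * data back, and perform targeted reads using VDX_FLAG_READ_COPYIN.
 */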

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <fcntl.h>
#include <libgen.h>
#include <sys/stat.h>
#include <errno.h>
#include <err.h>
#include <assert.h>
#include <sys/sysmacros.h>
#include <stdbool.h>

#include <sys/vmm.h>
#include <sys/vmm_dev.h>
#include <sys/vmm_data.h>
#include <vmmapi.h>
#include <sys/x86_archext.h>
#include <sys/controlregs.h>

#include "common.h"

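/*
 * Issue a VM_DATA_WRITE for the provided transfer descriptor, expecting it
 * to succeed and consume the entire payload (vdx_result_len == vdx_len).
 */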
static void
do_data_write(int vmfd, struct vm_data_xfer *vdx)
{
	if (ioctl(vmfd, VM_DATA_WRITE, vdx) != 0) {
		err(EXIT_FAILURE, "valid vmm_data_write failed");
	}
	if (vdx->vdx_result_len != vdx->vdx_len) {
		errx(EXIT_FAILURE, "unexpected vdx_result_len %u != %u",
		    vdx->vdx_result_len, vdx->vdx_len);
	}
}

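/*
 * Issue a VM_DATA_READ for the provided transfer descriptor, expecting it
 * to succeed and fill the full requested length.
 */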
static void
do_data_read(int vmfd, struct vm_data_xfer *vdx)
{
	if (ioctl(vmfd, VM_DATA_READ, vdx) != 0) {
		err(EXIT_FAILURE, "valid vmm_data_read failed");
	}
	if (vdx->vdx_result_len != vdx->vdx_len) {
		errx(EXIT_FAILURE, "unexpected vdx_result_len %u != %u",
		    vdx->vdx_result_len, vdx->vdx_len);
	}
}

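/*
 * Probe how much space a read of this data class requires: a zero-length
 * VM_DATA_READ is expected to fail with ENOSPC while reporting the
 * necessary size in vdx_result_len.
 */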
static uint32_t
query_data_size(int vmfd, struct vm_data_xfer *vdx)
{
	vdx->vdx_len = 0;
	vdx->vdx_data = NULL;
	vdx->vdx_flags = 0;

	if (ioctl(vmfd, VM_DATA_READ, vdx) == 0) {
		errx(EXIT_FAILURE,
		    "expected VM_DATA_READ to fail for size query");
	}
	if (errno != ENOSPC) {
		err(EXIT_FAILURE,
		    "expected ENOSPC error for VM_DATA_READ size query");
	}
	return (vdx->vdx_result_len);
}

int
main(int argc, char *argv[])
{
	const char *suite_name = basename(argv[0]);
	struct vmctx *ctx;

	ctx = create_test_vm(suite_name);
	if (ctx == NULL) {
		errx(EXIT_FAILURE, "could not open test VM");
	}

	if (vm_activate_cpu(ctx, 0) != 0) {
		err(EXIT_FAILURE, "could not activate vcpu0");
	}

	const int vmfd = vm_get_device_fd(ctx);

	/* Pause the instance before attempting to manipulate vcpu data */
	if (ioctl(vmfd, VM_PAUSE, 0) != 0) {
		err(EXIT_FAILURE, "VM_PAUSE failed");
	}

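	/*
	 * Transfer descriptor targeting version 1 of the MSR data class on
	 * vcpu 0; it is reused for the reads and writes below.
	 */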
	struct vm_data_xfer vdx = {
		.vdx_class = VDC_MSR,
		.vdx_version = 1,
		.vdx_vcpuid = 0,
	};

	const uint32_t msr_sz = query_data_size(vmfd, &vdx);
	const uint32_t msr_count = msr_sz / sizeof (struct vdi_field_entry_v1);

	struct vdi_field_entry_v1 *entries =
	    calloc(msr_count, sizeof (struct vdi_field_entry_v1));
	if (entries == NULL) {
		err(EXIT_FAILURE, "could not allocate space for MSR data");
	}

	/* Attempt to read all the (default) entries */
	vdx.vdx_data = entries;
	vdx.vdx_len = msr_sz;
	do_data_read(vmfd, &vdx);

	/* Spot check a few MSRs which we expect to be present */
	struct expected_msr {
		const char *name;
		uint32_t msr;
		bool present;
	} spot_check[] = {
		{ .msr = MSR_AMD_EFER, .name = "EFER" },
		{ .msr = REG_TSC, .name = "TSC" },
		{ .msr = MSR_AMD_CSTAR, .name = "CSTAR" },
		{ .msr = MSR_AMD_KGSBASE, .name = "KGSBASE" },
	};
	for (uint_t i = 0; i < msr_count; i++) {
		for (uint_t j = 0; j < ARRAY_SIZE(spot_check); j++) {
			if (spot_check[j].msr == entries[i].vfe_ident) {
				spot_check[j].present = true;
			}
		}
	}
	for (uint_t j = 0; j < ARRAY_SIZE(spot_check); j++) {
		if (!spot_check[j].present) {
			errx(EXIT_FAILURE,
			    "did not find %s(%x) MSR in VM_DATA_READ results",
			    spot_check[j].name, spot_check[j].msr);
		}
	}

	/* Attempt to write those same values back to the instance */
	do_data_write(vmfd, &vdx);
	free(entries);
	entries = NULL;

	/* Do a targeted read of a few values */
	struct vdi_field_entry_v1 small_list[] = {
		{ .vfe_ident = REG_TSC },
		{ .vfe_ident = MSR_INTC_SEP_EIP },
		{ .vfe_ident = REG_PAT },
	};
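	/*
	 * VDX_FLAG_READ_COPYIN requests that the identifiers above be copied
	 * in, limiting the read to those specific MSRs rather than the full
	 * default set.
	 */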
	vdx.vdx_data = small_list;
	vdx.vdx_len = sizeof (small_list);
	vdx.vdx_flags = VDX_FLAG_READ_COPYIN;
	do_data_read(vmfd, &vdx);

	/*
	 * Test access to DEBUGCTL and LBR-related MSRs on AMD.
	 *
	 * Because support for these varies between CPUs, they are (currently)
	 * not included in the default set of MSRs emitted by a blanket read of
	 * MSRs via the vmm-data interface.
	 */
	if (cpu_vendor_amd()) {
		struct vdi_field_entry_v1 dbg_entries[] = {
			{ .vfe_ident = MSR_DEBUGCTL },
			{ .vfe_ident = MSR_LBR_FROM },
			{ .vfe_ident = MSR_LBR_TO },
			{ .vfe_ident = MSR_LEX_FROM },
			{ .vfe_ident = MSR_LEX_TO },
		};

		vdx.vdx_data = dbg_entries;
		vdx.vdx_len = sizeof (dbg_entries);
		vdx.vdx_flags = VDX_FLAG_READ_COPYIN;

		do_data_read(vmfd, &vdx);

		vdx.vdx_flags = 0;
		do_data_write(vmfd, &vdx);
	}

	vm_destroy(ctx);
	(void) printf("%s\tPASS\n", suite_name);
	return (EXIT_SUCCESS);
}