xref: /openbsd/sys/dev/pv/vmt.c (revision b4155af8)
1 /*	$OpenBSD: vmt.c,v 1.32 2024/05/24 10:05:55 jsg Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Crawshaw <david@zentus.com>
5  * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #if !defined(__i386__) && !defined(__amd64__)
21 #error vmt(4) is only supported on i386 and amd64
22 #endif
23 
24 /*
25  * Protocol reverse engineered by Ken Kato:
26  * https://sites.google.com/site/chitchatvmback/backdoor
27  */
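
/*
 * In brief: the guest loads VM_MAGIC into EAX, a command into ECX and
 * the magic port number into EDX, then accesses that I/O port (inl for
 * register-only commands, rep insb/outsb for bulk RPC data).  The
 * hypervisor intercepts the access and passes results back in the
 * general-purpose registers; see vm_cmd(), vm_ins() and vm_outs() below.
 */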
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/timeout.h>
33 #include <sys/syslog.h>
34 #include <sys/mount.h>
35 #include <sys/task.h>
36 #include <sys/sensors.h>
37 
38 #include <net/if.h>
39 #include <net/if_dl.h>
40 #include <net/if_var.h>
41 #include <net/if_types.h>
42 #include <net/rtable.h>
43 #include <netinet/in.h>
44 #include <netinet/if_ether.h>
45 
46 #include <dev/pv/pvvar.h>
47 
48 /* "The" magic number, always occupies the EAX register. */
49 #define VM_MAGIC			0x564D5868
50 
51 /* Port numbers, passed on EDX.LOW. */
52 #define VM_PORT_CMD			0x5658
53 #define VM_PORT_RPC			0x5659
54 
55 /* Commands, passed on ECX.LOW. */
56 #define VM_CMD_GET_SPEED		0x01
57 #define VM_CMD_APM			0x02
58 #define VM_CMD_GET_MOUSEPOS		0x04
59 #define VM_CMD_SET_MOUSEPOS		0x05
60 #define VM_CMD_GET_CLIPBOARD_LEN	0x06
61 #define VM_CMD_GET_CLIPBOARD		0x07
62 #define VM_CMD_SET_CLIPBOARD_LEN	0x08
63 #define VM_CMD_SET_CLIPBOARD		0x09
64 #define VM_CMD_GET_VERSION		0x0a
65 #define  VM_VERSION_UNMANAGED			0x7fffffff
66 #define VM_CMD_GET_DEVINFO		0x0b
67 #define VM_CMD_DEV_ADDREMOVE		0x0c
68 #define VM_CMD_GET_GUI_OPTIONS		0x0d
69 #define VM_CMD_SET_GUI_OPTIONS		0x0e
70 #define VM_CMD_GET_SCREEN_SIZE		0x0f
71 #define VM_CMD_GET_HWVER		0x11
72 #define VM_CMD_POPUP_OSNOTFOUND		0x12
73 #define VM_CMD_GET_BIOS_UUID		0x13
74 #define VM_CMD_GET_MEM_SIZE		0x14
75 /*#define VM_CMD_GET_TIME		0x17 */	/* deprecated */
76 #define VM_CMD_RPC			0x1e
77 #define VM_CMD_GET_TIME_FULL		0x2e
78 
79 /* RPC sub-commands, passed on ECX.HIGH. */
80 #define VM_RPC_OPEN			0x00
81 #define VM_RPC_SET_LENGTH		0x01
82 #define VM_RPC_SET_DATA			0x02
83 #define VM_RPC_GET_LENGTH		0x03
84 #define VM_RPC_GET_DATA			0x04
85 #define VM_RPC_GET_END			0x05
86 #define VM_RPC_CLOSE			0x06
87 
88 /* RPC magic numbers, passed on EBX. */
89 #define VM_RPC_OPEN_RPCI	0x49435052UL /* with VM_RPC_OPEN. */
90 #define VM_RPC_OPEN_TCLO	0x4F4C4354UL /* with VM_RPC_OPEN. */
91 #define VM_RPC_ENH_DATA		0x00010000UL /* with enhanced RPC data calls. */
92 
93 #define VM_RPC_FLAG_COOKIE	0x80000000UL
94 
95 /* RPC reply flags */
96 #define VM_RPC_REPLY_SUCCESS	0x0001
97 #define VM_RPC_REPLY_DORECV	0x0002		/* incoming message available */
98 #define VM_RPC_REPLY_CLOSED	0x0004		/* RPC channel is closed */
99 #define VM_RPC_REPLY_UNSENT	0x0008		/* incoming message was removed? */
100 #define VM_RPC_REPLY_CHECKPOINT	0x0010		/* checkpoint occurred -> retry */
101 #define VM_RPC_REPLY_POWEROFF	0x0020		/* underlying device is powering off */
102 #define VM_RPC_REPLY_TIMEOUT	0x0040
103 #define VM_RPC_REPLY_HB		0x0080		/* high-bandwidth tx/rx available */
104 
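/*
 * A minimal sketch (not part of the original source) of how the RPC
 * sub-commands and reply flags above combine into a single RPCI
 * exchange; vm_rpc_send_rpci_tx_buf() below performs the same sequence
 * using the driver's shared buffer.
 */
#if 0
	struct vm_rpc rpci;
	uint32_t rlen;
	uint16_t ack;
	char buf[128];

	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0)	/* VM_RPC_OPEN */
		return;
	/* VM_RPC_SET_LENGTH, then the payload via enhanced-RPC "rep outsb" */
	vm_rpc_send_str(&rpci, "info-get guestinfo.ip");
	/* VM_RPC_GET_LENGTH; rlen stays 0 unless VM_RPC_REPLY_DORECV is set */
	vm_rpc_get_length(&rpci, &rlen, &ack);
	if (rlen > 0 && rlen < sizeof(buf)) {
		/* payload via enhanced-RPC "rep insb", acked with VM_RPC_GET_END */
		vm_rpc_get_data(&rpci, buf, rlen, ack);
	}
	vm_rpc_close(&rpci);				/* VM_RPC_CLOSE */
#endif
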
105 /* VM state change IDs */
106 #define VM_STATE_CHANGE_HALT	1
107 #define VM_STATE_CHANGE_REBOOT	2
108 #define VM_STATE_CHANGE_POWERON 3
109 #define VM_STATE_CHANGE_RESUME  4
110 #define VM_STATE_CHANGE_SUSPEND 5
111 
112 /* VM guest info keys */
113 #define VM_GUEST_INFO_DNS_NAME		1
114 #define VM_GUEST_INFO_IP_ADDRESS	2
115 #define VM_GUEST_INFO_DISK_FREE_SPACE	3
116 #define VM_GUEST_INFO_BUILD_NUMBER	4
117 #define VM_GUEST_INFO_OS_NAME_FULL	5
118 #define VM_GUEST_INFO_OS_NAME		6
119 #define VM_GUEST_INFO_UPTIME		7
120 #define VM_GUEST_INFO_MEMORY		8
121 #define VM_GUEST_INFO_IP_ADDRESS_V2	9
122 #define VM_GUEST_INFO_IP_ADDRESS_V3	10
123 
124 /* RPC responses */
125 #define VM_RPC_REPLY_OK			"OK "
126 #define VM_RPC_RESET_REPLY		"OK ATR toolbox"
127 #define VM_RPC_REPLY_ERROR		"ERROR Unknown command"
128 #define VM_RPC_REPLY_ERROR_IP_ADDR	"ERROR Unable to find guest IP address"
129 
130 /* VM backup error codes */
131 #define VM_BACKUP_SUCCESS		0
132 #define VM_BACKUP_SYNC_ERROR		3
133 #define VM_BACKUP_REMOTE_ABORT		4
134 
135 #define VM_BACKUP_TIMEOUT		30 /* seconds */
136 
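/*
 * Quiesce state, tracked in sc_quiesce below: 0 means no backup in
 * progress, a positive value counts the seconds since quiescing began
 * (the operation is aborted once it reaches VM_BACKUP_TIMEOUT), and -1
 * means an abort is pending completion.
 */
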
137 /* NIC/IP address stuff */
138 #define VM_NICINFO_VERSION		3
139 
140 #define VM_NICINFO_IP_LEN		64
141 #define VM_NICINFO_MAX_NICS		16
142 #define VM_NICINFO_MAX_ADDRS		2048
143 #define VM_NICINFO_MAC_LEN		20
144 
145 #define VM_NICINFO_ADDR_IPV4		1
146 #define VM_NICINFO_ADDR_IPV6		2
147 
148 struct vm_nicinfo_addr_v4 {
149 	uint32_t	v4_addr_type;
150 	uint32_t	v4_addr_len;
151 	struct in_addr	v4_addr;
152 	uint32_t	v4_prefix_len;
153 	uint32_t	v4_origin;
154 	uint32_t	v4_status;
155 };
156 
157 struct vm_nicinfo_addr_v6 {
158 	uint32_t	v6_addr_type;
159 	uint32_t	v6_addr_len;
160 	struct in6_addr v6_addr;
161 	uint32_t	v6_prefix_len;
162 	uint32_t	v6_origin;
163 	uint32_t	v6_status;
164 };
165 
166 struct vm_nicinfo_nic {
167 	uint32_t	ni_mac_len;
168 	char		ni_mac[VM_NICINFO_MAC_LEN];
169 	uint32_t	ni_num_addrs;
170 };
171 
172 struct vm_nicinfo_nic_nomac {
173 	uint32_t	nn_mac_len;
174 	uint32_t	nn_num_addrs;
175 };
176 
177 struct vm_nicinfo_nic_post {
178 	uint32_t	np_dns_config;
179 	uint32_t	np_wins_config;
180 	uint32_t	np_dhcpv4_config;
181 	uint32_t	np_dhcpv6_config;
182 };
183 
184 struct vm_nicinfo_nic_list {
185 	uint32_t	nl_version;
186 	uint32_t	nl_nic_list;
187 	uint32_t	nl_num_nics;
188 };
189 
190 struct vm_nicinfo_nic_list_post {
191 	uint32_t	nl_num_routes;
192 	uint32_t	nl_dns_config;
193 	uint32_t	nl_wins_config;
194 	uint32_t	nl_dhcpv4_config;
195 	uint32_t	nl_dhcpv6_config;
196 };
197 
198 #define VM_NICINFO_CMD			"SetGuestInfo  10 "
199 
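/*
 * Wire format of the guest NIC info message, as assembled by
 * vmt_xdr_nic_info() and vmt_xdr_nic_entry() below (all integer fields
 * are big-endian):
 *
 *	"SetGuestInfo  10 "			(VM_NICINFO_CMD, no NUL;
 *						 10 = VM_GUEST_INFO_IP_ADDRESS_V3)
 *	struct vm_nicinfo_nic_list		(version, nic_list = 1, NIC count)
 *	for each NIC:
 *		struct vm_nicinfo_nic or _nomac	(MAC length, MAC, address count)
 *		struct vm_nicinfo_addr_v4/_v6	(one per address)
 *		struct vm_nicinfo_nic_post	(left zeroed)
 *	struct vm_nicinfo_nic_list_post		(left zeroed)
 */
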
200 /* A register. */
201 union vm_reg {
202 	struct {
203 		uint16_t low;
204 		uint16_t high;
205 	} part;
206 	uint32_t word;
207 #ifdef __amd64__
208 	struct {
209 		uint32_t low;
210 		uint32_t high;
211 	} words;
212 	uint64_t quad;
213 #endif
214 } __packed;
215 
216 /* A register frame. */
217 struct vm_backdoor {
218 	union vm_reg eax;
219 	union vm_reg ebx;
220 	union vm_reg ecx;
221 	union vm_reg edx;
222 	union vm_reg esi;
223 	union vm_reg edi;
224 	union vm_reg ebp;
225 } __packed;
226 
227 /* RPC context. */
228 struct vm_rpc {
229 	uint16_t channel;
230 	uint32_t cookie1;
231 	uint32_t cookie2;
232 };
233 
234 struct vmt_softc {
235 	struct device		sc_dev;
236 
237 	struct vm_rpc		sc_tclo_rpc;
238 	char			*sc_rpc_buf;
239 	int			sc_rpc_error;
240 	int			sc_tclo_ping;
241 	int			sc_set_guest_os;
242 	int			sc_quiesce;
243 	struct task		sc_quiesce_task;
244 	struct task		sc_nicinfo_task;
245 #define VMT_RPC_BUFLEN		4096
246 
247 	struct timeout		sc_tick;
248 	struct timeout		sc_tclo_tick;
249 	struct ksensordev	sc_sensordev;
250 	struct ksensor		sc_sensor;
251 
252 	char			sc_hostname[MAXHOSTNAMELEN];
253 	size_t			sc_nic_info_size;
254 	char			*sc_nic_info;
255 };
256 
257 #ifdef VMT_DEBUG
258 #define DPRINTF(_arg...)	printf(_arg)
259 #else
260 #define DPRINTF(_arg...)	do {} while(0)
261 #endif
262 #define DEVNAME(_s)		((_s)->sc_dev.dv_xname)
263 
264 void	 vm_cmd(struct vm_backdoor *);
265 void	 vm_ins(struct vm_backdoor *);
266 void	 vm_outs(struct vm_backdoor *);
267 
268 /* Functions for communicating with the VM Host. */
269 int	 vm_rpc_open(struct vm_rpc *, uint32_t);
270 int	 vm_rpc_close(struct vm_rpc *);
271 int	 vm_rpc_send(const struct vm_rpc *, const uint8_t *, uint32_t);
272 int	 vm_rpc_send_str(const struct vm_rpc *, const uint8_t *);
273 int	 vm_rpc_get_length(const struct vm_rpc *, uint32_t *, uint16_t *);
274 int	 vm_rpc_get_data(const struct vm_rpc *, char *, uint32_t, uint16_t);
275 int	 vm_rpc_send_rpci_tx_buf(struct vmt_softc *, const uint8_t *, uint32_t);
276 int	 vm_rpc_send_rpci_tx(struct vmt_softc *, const char *, ...)
277 	    __attribute__((__format__(__kprintf__,2,3)));
278 int	 vm_rpci_response_successful(struct vmt_softc *);
279 
280 int	 vmt_kvop(void *, int, char *, char *, size_t);
281 
282 void	 vmt_probe_cmd(struct vm_backdoor *, uint16_t);
283 void	 vmt_tclo_state_change_success(struct vmt_softc *, int, char);
284 void	 vmt_do_reboot(struct vmt_softc *);
285 void	 vmt_do_shutdown(struct vmt_softc *);
286 void	 vmt_shutdown(void *);
287 
288 void	 vmt_clear_guest_info(struct vmt_softc *);
289 void	 vmt_update_guest_info(struct vmt_softc *);
290 void	 vmt_update_guest_uptime(struct vmt_softc *);
291 
292 void	 vmt_tick_hook(struct device *self);
293 void	 vmt_tick(void *);
294 void	 vmt_resume(void);
295 
296 int	 vmt_match(struct device *, void *, void *);
297 void	 vmt_attach(struct device *, struct device *, void *);
298 int	 vmt_activate(struct device *, int);
299 
300 void	 vmt_tclo_tick(void *);
301 int	 vmt_tclo_process(struct vmt_softc *, const char *);
302 void	 vmt_tclo_reset(struct vmt_softc *);
303 void	 vmt_tclo_ping(struct vmt_softc *);
304 void	 vmt_tclo_halt(struct vmt_softc *);
305 void	 vmt_tclo_reboot(struct vmt_softc *);
306 void	 vmt_tclo_poweron(struct vmt_softc *);
307 void	 vmt_tclo_suspend(struct vmt_softc *);
308 void	 vmt_tclo_resume(struct vmt_softc *);
309 void	 vmt_tclo_capreg(struct vmt_softc *);
310 void	 vmt_tclo_broadcastip(struct vmt_softc *);
311 
312 void	 vmt_set_backup_status(struct vmt_softc *, const char *, int,
313 	    const char *);
314 void	 vmt_quiesce_task(void *);
315 void	 vmt_quiesce_done_task(void *);
316 void	 vmt_tclo_abortbackup(struct vmt_softc *);
317 void	 vmt_tclo_startbackup(struct vmt_softc *);
318 void	 vmt_tclo_backupdone(struct vmt_softc *);
319 
320 size_t	 vmt_xdr_ifaddr(struct ifaddr *, char *);
321 size_t	 vmt_xdr_nic_entry(struct ifnet *, char *);
322 size_t	 vmt_xdr_nic_info(char *);
323 void	 vmt_nicinfo_task(void *);
324 
325 int	 vmt_probe(void);
326 
327 struct vmt_tclo_rpc {
328 	const char	*name;
329 	void		(*cb)(struct vmt_softc *);
330 } vmt_tclo_rpc[] = {
331 	/* Keep sorted by name (case-sensitive) */
332 	{ "Capabilities_Register",	vmt_tclo_capreg },
333 	{ "OS_Halt",			vmt_tclo_halt },
334 	{ "OS_PowerOn",			vmt_tclo_poweron },
335 	{ "OS_Reboot",			vmt_tclo_reboot },
336 	{ "OS_Resume",			vmt_tclo_resume },
337 	{ "OS_Suspend",			vmt_tclo_suspend },
338 	{ "Set_Option broadcastIP 1",	vmt_tclo_broadcastip },
339 	{ "ping",			vmt_tclo_ping },
340 	{ "reset",			vmt_tclo_reset },
341 	{ "vmbackup.abort",		vmt_tclo_abortbackup },
342 	{ "vmbackup.snapshotDone",	vmt_tclo_backupdone },
343 	{ "vmbackup.start 1",		vmt_tclo_startbackup },
344 	{ NULL },
345 #if 0
346 	/* Various unsupported commands */
347 	{ "Set_Option autohide 0" },
348 	{ "Set_Option copypaste 1" },
349 	{ "Set_Option enableDnD 1" },
350 	{ "Set_Option enableMessageBusTunnel 0" },
351 	{ "Set_Option linkRootHgfsShare 0" },
352 	{ "Set_Option mapRootHgfsShare 0" },
353 	{ "Set_Option synctime 1" },
354 	{ "Set_Option synctime.period 0" },
355 	{ "Set_Option time.synchronize.tools.enable 1" },
356 	{ "Set_Option time.synchronize.tools.percentCorrection 0" },
357 	{ "Set_Option time.synchronize.tools.slewCorrection 1" },
358 	{ "Set_Option time.synchronize.tools.startup 1" },
359 	{ "Set_Option toolScripts.afterPowerOn 1" },
360 	{ "Set_Option toolScripts.afterResume 1" },
361 	{ "Set_Option toolScripts.beforePowerOff 1" },
362 	{ "Set_Option toolScripts.beforeSuspend 1" },
363 	{ "Time_Synchronize 0" },
364 	{ "Vix_1_Relayed_Command \"38cdcae40e075d66\"" },
365 #endif
366 };
367 
368 const struct cfattach vmt_ca = {
369 	sizeof(struct vmt_softc),
370 	vmt_match,
371 	vmt_attach,
372 	NULL,
373 	vmt_activate
374 };
375 
376 struct cfdriver vmt_cd = {
377 	NULL,
378 	"vmt",
379 	DV_DULL
380 };
381 
382 extern char hostname[MAXHOSTNAMELEN];
383 
384 void
385 vmt_probe_cmd(struct vm_backdoor *frame, uint16_t cmd)
386 {
387 	bzero(frame, sizeof(*frame));
388 
389 	(frame->eax).word = VM_MAGIC;
390 	(frame->ebx).word = ~VM_MAGIC;
391 	(frame->ecx).part.low = cmd;
392 	(frame->ecx).part.high = 0xffff;
393 	(frame->edx).part.low  = VM_PORT_CMD;
394 	(frame->edx).part.high = 0;
395 
396 	vm_cmd(frame);
397 }
398 
399 int
400 vmt_probe(void)
401 {
402 	struct vm_backdoor frame;
403 
404 	vmt_probe_cmd(&frame, VM_CMD_GET_VERSION);
405 	if (frame.eax.word == 0xffffffff ||
406 	    frame.ebx.word != VM_MAGIC)
407 		return (0);
408 
409 	vmt_probe_cmd(&frame, VM_CMD_GET_SPEED);
410 	if (frame.eax.word == VM_MAGIC)
411 		return (0);
412 
413 	return (1);
414 }
415 
416 int
417 vmt_match(struct device *parent, void *match, void *aux)
418 {
419 	struct pv_attach_args	*pva = aux;
420 	struct pvbus_hv		*hv = &pva->pva_hv[PVBUS_VMWARE];
421 
422 	if (hv->hv_base == 0)
423 		return (0);
424 	if (!vmt_probe())
425 		return (0);
426 
427 	return (1);
428 }
429 
430 void
431 vmt_attach(struct device *parent, struct device *self, void *aux)
432 {
433 	struct vmt_softc *sc = (struct vmt_softc *)self;
434 	struct pv_attach_args	*pva = aux;
435 	struct pvbus_hv		*hv = &pva->pva_hv[PVBUS_VMWARE];
436 
437 	printf("\n");
438 	sc->sc_rpc_buf = malloc(VMT_RPC_BUFLEN, M_DEVBUF, M_NOWAIT);
439 	if (sc->sc_rpc_buf == NULL) {
440 		printf("%s: unable to allocate buffer for RPC\n",
441 		    DEVNAME(sc));
442 		return;
443 	}
444 
445 	if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
446 		printf("%s: failed to open backdoor RPC channel "
447 		    "(TCLO protocol)\n", DEVNAME(sc));
448 		goto free;
449 	}
450 
451 	/* don't know if this is important at all yet */
452 	if (vm_rpc_send_rpci_tx(sc,
453 	    "tools.capability.hgfs_server toolbox 1") != 0) {
454 		printf("%s: failed to set HGFS server capability\n", DEVNAME(sc));
455 		goto free;
456 	}
457 
458 	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
459 	    sizeof(sc->sc_sensordev.xname));
460 
461 	sc->sc_sensor.type = SENSOR_TIMEDELTA;
462 	sc->sc_sensor.status = SENSOR_S_UNKNOWN;
463 
464 	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
465 	sensordev_install(&sc->sc_sensordev);
466 
467 	config_mountroot(self, vmt_tick_hook);
468 
469 	timeout_set_proc(&sc->sc_tclo_tick, vmt_tclo_tick, sc);
470 	timeout_add_sec(&sc->sc_tclo_tick, 1);
471 	sc->sc_tclo_ping = 1;
472 
473 	task_set(&sc->sc_nicinfo_task, vmt_nicinfo_task, sc);
474 
475 	/* pvbus(4) key/value interface */
476 	hv->hv_kvop = vmt_kvop;
477 	hv->hv_arg = sc;
478 
479 	return;
480 
481 free:
482 	free(sc->sc_rpc_buf, M_DEVBUF, VMT_RPC_BUFLEN);
483 }
484 
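/*
 * pvbus(4) key/value backend: PVBUS_KVWRITE and PVBUS_KVREAD are
 * translated into "info-set <key> <value>" and "info-get <key>" RPCI
 * commands.  The host answers "1 <data>" on success; the status prefix
 * is checked and stripped before the value is returned to the caller.
 */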
485 int
486 vmt_kvop(void *arg, int op, char *key, char *value, size_t valuelen)
487 {
488 	struct vmt_softc *sc = arg;
489 	struct vm_rpc rpci;
490 	char *buf = NULL;
491 	size_t bufsz;
492 	int error = 0;
493 	uint32_t rlen;
494 	uint16_t ack;
495 
496 	bufsz = VMT_RPC_BUFLEN;
497 	buf = malloc(bufsz, M_TEMP, M_WAITOK | M_ZERO);
498 
499 	switch (op) {
500 	case PVBUS_KVWRITE:
501 		if ((size_t)snprintf(buf, bufsz, "info-set %s %s",
502 		    key, value) >= bufsz) {
503 			DPRINTF("%s: write command too long", DEVNAME(sc));
504 			error = EINVAL;
505 			goto done;
506 		}
507 		break;
508 	case PVBUS_KVREAD:
509 		if ((size_t)snprintf(buf, bufsz, "info-get %s",
510 		    key) >= bufsz) {
511 			DPRINTF("%s: read command too long", DEVNAME(sc));
512 			error = EINVAL;
513 			goto done;
514 		}
515 		break;
516 	default:
517 		error = EOPNOTSUPP;
518 		goto done;
519 	}
520 
521 	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
522 		DPRINTF("%s: rpci channel open failed\n", DEVNAME(sc));
523 		sc->sc_rpc_error = 1;
524 		error = EIO;
525 		goto done;
526 	}
527 
528 	if (vm_rpc_send(&rpci, buf, bufsz) != 0) {
529 		DPRINTF("%s: unable to send rpci command\n", DEVNAME(sc));
530 		sc->sc_rpc_error = 1;
531 		error = EIO;
532 		goto close;
533 	}
534 
535 	if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
536 		DPRINTF("%s: failed to get length of rpci response data\n",
537 		    DEVNAME(sc));
538 		sc->sc_rpc_error = 1;
539 		error = EIO;
540 		goto close;
541 	}
542 
543 	if (rlen > 0) {
544 		if (rlen + 1 > valuelen) {
545 			error = ERANGE;
546 			goto close;
547 		}
548 
549 		if (vm_rpc_get_data(&rpci, value, rlen, ack) != 0) {
550 			DPRINTF("%s: failed to get rpci response data\n",
551 			    DEVNAME(sc));
552 			sc->sc_rpc_error = 1;
553 			error = EIO;
554 			goto close;
555 		}
556 		/* check that the host reported success */
557 		if (rlen < 2 || value[0] != '1' || value[1] != ' ') {
558 			DPRINTF("%s: host rejected command: %s\n", DEVNAME(sc),
559 			    buf);
560 			error = EINVAL;
561 			goto close;
562 		}
563 		/* strip the "1 " status prefix just checked */
564 		bcopy(value + 2, value, valuelen - 2);
565 		value[rlen - 2] = '\0';
566 	}
567 
568  close:
569 	if (vm_rpc_close(&rpci) != 0)
570 		DPRINTF("%s: unable to close rpci channel\n", DEVNAME(sc));
571  done:
572 	free(buf, M_TEMP, bufsz);
573 	return (error);
574 }
575 
576 void
577 vmt_resume(void)
578 {
579 	struct vm_backdoor frame;
580 	extern void rdrand(void *);
581 
582 	bzero(&frame, sizeof(frame));
583 	frame.eax.word = VM_MAGIC;
584 	frame.ecx.part.low = VM_CMD_GET_TIME_FULL;
585 	frame.edx.part.low  = VM_PORT_CMD;
586 	vm_cmd(&frame);
587 
588 	rdrand(NULL);
589 	enqueue_randomness(frame.eax.word);
590 	enqueue_randomness(frame.esi.word);
591 	enqueue_randomness(frame.edx.word);
592 	enqueue_randomness(frame.ebx.word);
593 	resume_randomness(NULL, 0);
594 }
595 
596 int
597 vmt_activate(struct device *self, int act)
598 {
599 	int rv = 0;
600 
601 	switch (act) {
602 	case DVACT_POWERDOWN:
603 		vmt_shutdown(self);
604 		break;
605 	case DVACT_RESUME:
606 		vmt_resume();
607 		break;
608 	}
609 	return (rv);
610 }
611 
612 
613 void
614 vmt_update_guest_uptime(struct vmt_softc *sc)
615 {
616 	/* host wants uptime in hundredths of a second */
617 	if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %lld00",
618 	    VM_GUEST_INFO_UPTIME, (long long)getuptime()) != 0) {
619 		DPRINTF("%s: unable to set guest uptime", DEVNAME(sc));
620 		sc->sc_rpc_error = 1;
621 	}
622 }
623 
624 void
625 vmt_clear_guest_info(struct vmt_softc *sc)
626 {
627 	if (sc->sc_nic_info_size != 0) {
628 		free(sc->sc_nic_info, M_DEVBUF, sc->sc_nic_info_size);
629 		sc->sc_nic_info = NULL;
630 		sc->sc_nic_info_size = 0;
631 	}
632 	sc->sc_hostname[0] = '\0';
633 	sc->sc_set_guest_os = 0;
634 }
635 
636 void
637 vmt_update_guest_info(struct vmt_softc *sc)
638 {
639 	if (strncmp(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)) != 0) {
640 		strlcpy(sc->sc_hostname, hostname, sizeof(sc->sc_hostname));
641 
642 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s",
643 		    VM_GUEST_INFO_DNS_NAME, sc->sc_hostname) != 0) {
644 			DPRINTF("%s: unable to set hostname", DEVNAME(sc));
645 			sc->sc_rpc_error = 1;
646 		}
647 	}
648 
649 	if (sc->sc_set_guest_os == 0) {
650 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s %s %s",
651 		    VM_GUEST_INFO_OS_NAME_FULL,
652 		    ostype, osrelease, osversion) != 0) {
653 			DPRINTF("%s: unable to set full guest OS", DEVNAME(sc));
654 			sc->sc_rpc_error = 1;
655 		}
656 
657 		/*
658 		 * Host doesn't like it if we send an OS name it doesn't
659 		 * recognise, so use the closest match, which happens
660 		 * to be FreeBSD.
661 		 */
662 
663 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s",
664 		    VM_GUEST_INFO_OS_NAME, "FreeBSD") != 0) {
665 			DPRINTF("%s: unable to set guest OS", DEVNAME(sc));
666 			sc->sc_rpc_error = 1;
667 		}
668 
669 		sc->sc_set_guest_os = 1;
670 	}
671 
672 	task_add(systq, &sc->sc_nicinfo_task);
673 }
674 
675 void
676 vmt_tick_hook(struct device *self)
677 {
678 	struct vmt_softc *sc = (struct vmt_softc *)self;
679 
680 	timeout_set(&sc->sc_tick, vmt_tick, sc);
681 	vmt_tick(sc);
682 }
683 
684 void
685 vmt_tick(void *xarg)
686 {
687 	struct vmt_softc *sc = xarg;
688 	struct vm_backdoor frame;
689 	struct timeval *guest = &sc->sc_sensor.tv;
690 	struct timeval host, diff;
691 
692 	microtime(guest);
693 
694 	bzero(&frame, sizeof(frame));
695 	frame.eax.word = VM_MAGIC;
696 	frame.ecx.part.low = VM_CMD_GET_TIME_FULL;
697 	frame.edx.part.low  = VM_PORT_CMD;
698 	vm_cmd(&frame);
699 
700 	if (frame.eax.word != 0xffffffff) {
701 		host.tv_sec = ((uint64_t)frame.esi.word << 32) | frame.edx.word;
702 		host.tv_usec = frame.ebx.word;
703 
704 		timersub(guest, &host, &diff);
705 
706 		sc->sc_sensor.value = (u_int64_t)diff.tv_sec * 1000000000LL +
707 		    (u_int64_t)diff.tv_usec * 1000LL;
708 		sc->sc_sensor.status = SENSOR_S_OK;
709 	} else {
710 		sc->sc_sensor.status = SENSOR_S_UNKNOWN;
711 	}
712 
713 	vmt_update_guest_info(sc);
714 	vmt_update_guest_uptime(sc);
715 
716 	timeout_add_sec(&sc->sc_tick, 15);
717 }
718 
719 void
720 vmt_tclo_state_change_success(struct vmt_softc *sc, int success, char state)
721 {
722 	if (vm_rpc_send_rpci_tx(sc, "tools.os.statechange.status %d %d",
723 	    success, state) != 0) {
724 		DPRINTF("%s: unable to send state change result\n",
725 		    DEVNAME(sc));
726 		sc->sc_rpc_error = 1;
727 	}
728 }
729 
730 void
731 vmt_do_shutdown(struct vmt_softc *sc)
732 {
733 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_HALT);
734 	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
735 	pvbus_shutdown(&sc->sc_dev);
736 }
737 
738 void
739 vmt_do_reboot(struct vmt_softc *sc)
740 {
741 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_REBOOT);
742 	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
743 	pvbus_reboot(&sc->sc_dev);
744 }
745 
746 void
747 vmt_shutdown(void *arg)
748 {
749 	struct vmt_softc *sc = arg;
750 
751 	if (vm_rpc_send_rpci_tx(sc,
752 	    "tools.capability.hgfs_server toolbox 0") != 0) {
753 		DPRINTF("%s: failed to disable hgfs server capability\n",
754 		    DEVNAME(sc));
755 	}
756 
757 	if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
758 		DPRINTF("%s: failed to send shutdown ping\n", DEVNAME(sc));
759 	}
760 
761 	vm_rpc_close(&sc->sc_tclo_rpc);
762 }
763 
764 void
765 vmt_tclo_reset(struct vmt_softc *sc)
766 {
767 	if (sc->sc_rpc_error != 0) {
768 		DPRINTF("%s: resetting rpc\n", DEVNAME(sc));
769 		vm_rpc_close(&sc->sc_tclo_rpc);
770 
771 		/* reopen and send the reset reply next time around */
772 		sc->sc_rpc_error = 1;
773 		return;
774 	}
775 
776 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
777 		DPRINTF("%s: failed to send reset reply\n", DEVNAME(sc));
778 		sc->sc_rpc_error = 1;
779 	}
780 }
781 
782 void
783 vmt_tclo_ping(struct vmt_softc *sc)
784 {
785 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
786 		DPRINTF("%s: error sending ping response\n", DEVNAME(sc));
787 		sc->sc_rpc_error = 1;
788 	}
789 }
790 
791 void
792 vmt_tclo_halt(struct vmt_softc *sc)
793 {
794 	vmt_do_shutdown(sc);
795 }
796 
797 void
798 vmt_tclo_reboot(struct vmt_softc *sc)
799 {
800 	vmt_do_reboot(sc);
801 }
802 
803 void
804 vmt_tclo_poweron(struct vmt_softc *sc)
805 {
806 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_POWERON);
807 
808 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
809 		DPRINTF("%s: error sending poweron response\n", DEVNAME(sc));
810 		sc->sc_rpc_error = 1;
811 	}
812 }
813 
814 void
815 vmt_tclo_suspend(struct vmt_softc *sc)
816 {
817 	log(LOG_KERN | LOG_NOTICE,
818 	    "VMware guest entering suspended state\n");
819 
820 	suspend_randomness();
821 
822 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_SUSPEND);
823 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
824 		DPRINTF("%s: error sending suspend response\n", DEVNAME(sc));
825 		sc->sc_rpc_error = 1;
826 	}
827 }
828 
829 void
830 vmt_tclo_resume(struct vmt_softc *sc)
831 {
832 	log(LOG_KERN | LOG_NOTICE,
833 	    "VMware guest resuming from suspended state\n");
834 
835 	/* force guest info update */
836 	vmt_clear_guest_info(sc);
837 	vmt_update_guest_info(sc);
838 	vmt_resume();
839 
840 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_RESUME);
841 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
842 		DPRINTF("%s: error sending resume response\n", DEVNAME(sc));
843 		sc->sc_rpc_error = 1;
844 	}
845 }
846 
847 void
848 vmt_tclo_capreg(struct vmt_softc *sc)
849 {
850 	/* don't know if this is important at all */
851 	if (vm_rpc_send_rpci_tx(sc,
852 	    "vmx.capability.unified_loop toolbox") != 0) {
853 		DPRINTF("%s: unable to set unified loop\n", DEVNAME(sc));
854 		sc->sc_rpc_error = 1;
855 	}
856 
857 	if (vm_rpci_response_successful(sc) == 0) {
858 		DPRINTF("%s: host rejected unified loop setting\n",
859 		    DEVNAME(sc));
860 	}
861 
862 	/* the trailing space is apparently important here */
863 	if (vm_rpc_send_rpci_tx(sc,
864 	    "tools.capability.statechange ") != 0) {
865 		DPRINTF("%s: unable to send statechange capability\n",
866 		    DEVNAME(sc));
867 		sc->sc_rpc_error = 1;
868 	}
869 
870 	if (vm_rpci_response_successful(sc) == 0) {
871 		DPRINTF("%s: host rejected statechange capability\n",
872 		    DEVNAME(sc));
873 	}
874 
875 	if (vm_rpc_send_rpci_tx(sc, "tools.set.version %u",
876 	    VM_VERSION_UNMANAGED) != 0) {
877 		DPRINTF("%s: unable to set tools version\n",
878 		    DEVNAME(sc));
879 		sc->sc_rpc_error = 1;
880 	}
881 
882 	vmt_clear_guest_info(sc);
883 	vmt_update_guest_uptime(sc);
884 
885 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
886 		DPRINTF("%s: error sending capabilities_register"
887 		    " response\n", DEVNAME(sc));
888 		sc->sc_rpc_error = 1;
889 	}
890 }
891 
892 void
893 vmt_tclo_broadcastip(struct vmt_softc *sc)
894 {
895 	struct ifnet *iface;
896 	struct sockaddr_in *guest_ip;
897 	char ip[INET_ADDRSTRLEN];
898 
899 	/* find first available ipv4 address */
900 	guest_ip = NULL;
901 
902 	NET_LOCK_SHARED();
903 	TAILQ_FOREACH(iface, &ifnetlist, if_list) {
904 		struct ifaddr *iface_addr;
905 
906 		/* skip loopback */
907 		if (strncmp(iface->if_xname, "lo", 2) == 0 &&
908 		    iface->if_xname[2] >= '0' &&
909 		    iface->if_xname[2] <= '9') {
910 			continue;
911 		}
912 
913 		TAILQ_FOREACH(iface_addr, &iface->if_addrlist,
914 		    ifa_list) {
915 			if (iface_addr->ifa_addr->sa_family != AF_INET)
916 				continue;
917 
918 			guest_ip = satosin(iface_addr->ifa_addr);
919 			inet_ntop(AF_INET, &guest_ip->sin_addr, ip,
920 			    sizeof(ip));
921 			break;
922 		}
923 	}
924 	NET_UNLOCK_SHARED();
925 
926 	if (guest_ip != NULL) {
927 		if (vm_rpc_send_rpci_tx(sc, "info-set guestinfo.ip %s",
928 		    ip) != 0) {
929 			DPRINTF("%s: unable to send guest IP address\n",
930 			    DEVNAME(sc));
931 			sc->sc_rpc_error = 1;
932 		}
933 
934 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
935 		    VM_RPC_REPLY_OK) != 0) {
936 			DPRINTF("%s: error sending broadcastIP"
937 			    " response\n", DEVNAME(sc));
938 			sc->sc_rpc_error = 1;
939 		}
940 	} else {
941 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
942 		    VM_RPC_REPLY_ERROR_IP_ADDR) != 0) {
943 			DPRINTF("%s: error sending broadcastIP"
944 			    " error response\n", DEVNAME(sc));
945 			sc->sc_rpc_error = 1;
946 		}
947 	}
948 }
949 
950 void
951 vmt_set_backup_status(struct vmt_softc *sc, const char *state, int code,
952     const char *desc)
953 {
954 	if (vm_rpc_send_rpci_tx(sc, "vmbackup.eventSet %s %d %s",
955 	    state, code, desc) != 0) {
956 		DPRINTF("%s: setting backup status failed\n", DEVNAME(sc));
957 	}
958 }
959 
960 void
961 vmt_quiesce_task(void *data)
962 {
963 	struct vmt_softc *sc = data;
964 	int err;
965 
966 	DPRINTF("%s: quiescing filesystems for backup\n", DEVNAME(sc));
967 	err = vfs_stall(curproc, 1);
968 	if (err != 0) {
969 		printf("%s: unable to quiesce filesystems\n", DEVNAME(sc));
970 		vfs_stall(curproc, 0);
971 
972 		vmt_set_backup_status(sc, "req.aborted", VM_BACKUP_SYNC_ERROR,
973 		    "vfs_stall failed");
974 		vmt_set_backup_status(sc, "req.done", VM_BACKUP_SUCCESS, "");
975 		sc->sc_quiesce = 0;
976 		return;
977 	}
978 
979 	DPRINTF("%s: filesystems quiesced\n", DEVNAME(sc));
980 	vmt_set_backup_status(sc, "prov.snapshotCommit", VM_BACKUP_SUCCESS, "");
981 }
982 
983 void
984 vmt_quiesce_done_task(void *data)
985 {
986 	struct vmt_softc *sc = data;
987 
988 	vfs_stall(curproc, 0);
989 
990 	if (sc->sc_quiesce == -1)
991 		vmt_set_backup_status(sc, "req.aborted", VM_BACKUP_REMOTE_ABORT,
992 		    "");
993 
994 	vmt_set_backup_status(sc, "req.done", VM_BACKUP_SUCCESS, "");
995 	sc->sc_quiesce = 0;
996 }
997 
998 void
999 vmt_tclo_abortbackup(struct vmt_softc *sc)
1000 {
1001 	const char *reply = VM_RPC_REPLY_OK;
1002 
1003 	if (sc->sc_quiesce > 0) {
1004 		DPRINTF("%s: aborting backup\n", DEVNAME(sc));
1005 		sc->sc_quiesce = -1;
1006 		task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task, sc);
1007 		task_add(systq, &sc->sc_quiesce_task);
1008 	} else {
1009 		DPRINTF("%s: can't abort, no backup in progress\n",
1010 		    DEVNAME(sc));
1011 		reply = VM_RPC_REPLY_ERROR;
1012 	}
1013 
1014 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
1015 		DPRINTF("%s: error sending vmbackup.abort reply\n",
1016 		    DEVNAME(sc));
1017 		sc->sc_rpc_error = 1;
1018 	}
1019 }
1020 
1021 void
1022 vmt_tclo_startbackup(struct vmt_softc *sc)
1023 {
1024 	const char *reply = VM_RPC_REPLY_OK;
1025 
1026 	if (sc->sc_quiesce == 0) {
1027 		DPRINTF("%s: starting quiesce\n", DEVNAME(sc));
1028 		vmt_set_backup_status(sc, "reset", VM_BACKUP_SUCCESS, "");
1029 
1030 		task_set(&sc->sc_quiesce_task, vmt_quiesce_task, sc);
1031 		task_add(systq, &sc->sc_quiesce_task);
1032 		sc->sc_quiesce = 1;
1033 	} else {
1034 		DPRINTF("%s: can't start backup, already in progress\n",
1035 		    DEVNAME(sc));
1036 		reply = VM_RPC_REPLY_ERROR;
1037 	}
1038 
1039 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
1040 		DPRINTF("%s: error sending vmbackup.start reply\n",
1041 		    DEVNAME(sc));
1042 		sc->sc_rpc_error = 1;
1043 	}
1044 }
1045 
1046 void
1047 vmt_tclo_backupdone(struct vmt_softc *sc)
1048 {
1049 	const char *reply = VM_RPC_REPLY_OK;
1050 	if (sc->sc_quiesce > 0) {
1051 		DPRINTF("%s: backup complete\n", DEVNAME(sc));
1052 		task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task, sc);
1053 		task_add(systq, &sc->sc_quiesce_task);
1054 	} else {
1055 		DPRINTF("%s: got backup complete, but not doing a backup\n",
1056 		    DEVNAME(sc));
1057 		reply = VM_RPC_REPLY_ERROR;
1058 	}
1059 
1060 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
1061 		DPRINTF("%s: error sending vmbackup.snapshotDone reply\n",
1062 		    DEVNAME(sc));
1063 		sc->sc_rpc_error = 1;
1064 	}
1065 }
1066 
1067 int
1068 vmt_tclo_process(struct vmt_softc *sc, const char *name)
1069 {
1070 	int i;
1071 
1072 	/* Search for rpc command and call handler */
1073 	for (i = 0; vmt_tclo_rpc[i].name != NULL; i++) {
1074 		if (strcmp(vmt_tclo_rpc[i].name, sc->sc_rpc_buf) == 0) {
1075 			vmt_tclo_rpc[i].cb(sc);
1076 			return (0);
1077 		}
1078 	}
1079 
1080 	DPRINTF("%s: unknown command: \"%s\"\n", DEVNAME(sc), name);
1081 
1082 	return (-1);
1083 }
1084 
1085 void
1086 vmt_tclo_tick(void *xarg)
1087 {
1088 	struct vmt_softc *sc = xarg;
1089 	u_int32_t rlen;
1090 	u_int16_t ack;
1091 	int delay;
1092 
1093 	/* By default, poll every second for new messages */
1094 	delay = 1;
1095 
1096 	if (sc->sc_quiesce > 0) {
1097 		/* abort quiesce if it's taking too long */
1098 		if (sc->sc_quiesce++ == VM_BACKUP_TIMEOUT) {
1099 			printf("%s: aborting quiesce\n", DEVNAME(sc));
1100 			sc->sc_quiesce = -1;
1101 			task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task,
1102 			    sc);
1103 			task_add(systq, &sc->sc_quiesce_task);
1104 		} else
1105 			vmt_set_backup_status(sc, "req.keepAlive",
1106 			    VM_BACKUP_SUCCESS, "");
1107 	}
1108 
1109 	/* reopen tclo channel if it's currently closed */
1110 	if (sc->sc_tclo_rpc.channel == 0 &&
1111 	    sc->sc_tclo_rpc.cookie1 == 0 &&
1112 	    sc->sc_tclo_rpc.cookie2 == 0) {
1113 		if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
1114 			DPRINTF("%s: unable to reopen TCLO channel\n",
1115 			    DEVNAME(sc));
1116 			delay = 15;
1117 			goto out;
1118 		}
1119 
1120 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
1121 		    VM_RPC_RESET_REPLY) != 0) {
1122 			DPRINTF("%s: failed to send reset reply\n",
1123 			    DEVNAME(sc));
1124 			sc->sc_rpc_error = 1;
1125 			goto out;
1126 		} else {
1127 			sc->sc_rpc_error = 0;
1128 		}
1129 	}
1130 
1131 	if (sc->sc_tclo_ping) {
1132 		if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
1133 			DPRINTF("%s: failed to send TCLO outgoing ping\n",
1134 			    DEVNAME(sc));
1135 			sc->sc_rpc_error = 1;
1136 			goto out;
1137 		}
1138 	}
1139 
1140 	if (vm_rpc_get_length(&sc->sc_tclo_rpc, &rlen, &ack) != 0) {
1141 		DPRINTF("%s: failed to get length of incoming TCLO data\n",
1142 		    DEVNAME(sc));
1143 		sc->sc_rpc_error = 1;
1144 		goto out;
1145 	}
1146 
1147 	if (rlen == 0) {
1148 		sc->sc_tclo_ping = 1;
1149 		goto out;
1150 	}
1151 
1152 	if (rlen >= VMT_RPC_BUFLEN) {
1153 		rlen = VMT_RPC_BUFLEN - 1;
1154 	}
1155 	if (vm_rpc_get_data(&sc->sc_tclo_rpc, sc->sc_rpc_buf, rlen, ack) != 0) {
1156 		DPRINTF("%s: failed to get incoming TCLO data\n", DEVNAME(sc));
1157 		sc->sc_rpc_error = 1;
1158 		goto out;
1159 	}
1160 	sc->sc_tclo_ping = 0;
1161 
1162 	/* The VM host can queue multiple messages; continue without delay */
1163 	delay = 0;
1164 
1165 	if (vmt_tclo_process(sc, sc->sc_rpc_buf) != 0) {
1166 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
1167 		    VM_RPC_REPLY_ERROR) != 0) {
1168 			DPRINTF("%s: error sending unknown command reply\n",
1169 			    DEVNAME(sc));
1170 			sc->sc_rpc_error = 1;
1171 		}
1172 	}
1173 
1174 	if (sc->sc_rpc_error == 1) {
1175 		/* On error, give time to recover and wait a second */
1176 		delay = 1;
1177 	}
1178 
1179 out:
1180 	timeout_add_sec(&sc->sc_tclo_tick, delay);
1181 }
1182 
1183 size_t
1184 vmt_xdr_ifaddr(struct ifaddr *ifa, char *data)
1185 {
1186 	struct sockaddr_in *sin;
1187 	struct vm_nicinfo_addr_v4 v4;
1188 #ifdef INET6
1189 	struct sockaddr_in6 *sin6;
1190 	struct vm_nicinfo_addr_v6 v6;
1191 #endif
1192 
1193 	/* skip loopback addresses and anything that isn't ipv4/v6 */
1194 	switch (ifa->ifa_addr->sa_family) {
1195 	case AF_INET:
1196 		sin = satosin(ifa->ifa_addr);
1197 		if ((ntohl(sin->sin_addr.s_addr) >>
1198 		    IN_CLASSA_NSHIFT) != IN_LOOPBACKNET) {
1199 			if (data != NULL) {
1200 				memset(&v4, 0, sizeof(v4));
1201 				htobem32(&v4.v4_addr_type,
1202 				    VM_NICINFO_ADDR_IPV4);
1203 				htobem32(&v4.v4_addr_len,
1204 				    sizeof(struct in_addr));
1205 				memcpy(&v4.v4_addr, &sin->sin_addr.s_addr,
1206 				    sizeof(struct in_addr));
1207 				htobem32(&v4.v4_prefix_len,
1208 				    rtable_satoplen(AF_INET, ifa->ifa_netmask));
1209 				memcpy(data, &v4, sizeof(v4));
1210 			}
1211 			return (sizeof (v4));
1212 		}
1213 		break;
1214 
1215 #ifdef INET6
1216 	case AF_INET6:
1217 		sin6 = satosin6(ifa->ifa_addr);
1218 		if (!IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
1219 		    !IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
1220 			if (data != NULL) {
1221 				memset(&v6, 0, sizeof(v6));
1222 				htobem32(&v6.v6_addr_type,
1223 				    VM_NICINFO_ADDR_IPV6);
1224 				htobem32(&v6.v6_addr_len,
1225 				    sizeof(sin6->sin6_addr));
1226 				memcpy(&v6.v6_addr, &sin6->sin6_addr,
1227 				    sizeof(sin6->sin6_addr));
1228 				htobem32(&v6.v6_prefix_len,
1229 				    rtable_satoplen(AF_INET6,
1230 				        ifa->ifa_netmask));
1231 				memcpy(data, &v6, sizeof(v6));
1232 			}
1233 			return (sizeof (v6));
1234 		}
1235 		break;
1236 #endif
1237 
1238 	default:
1239 		break;
1240 	}
1241 
1242 	return (0);
1243 }
1244 
1245 size_t
1246 vmt_xdr_nic_entry(struct ifnet *iface, char *data)
1247 {
1248 	struct ifaddr *iface_addr;
1249 	struct sockaddr_dl *sdl;
1250 	struct vm_nicinfo_nic nic;
1251 	struct vm_nicinfo_nic_nomac nnic;
1252 	char *nicdata;
1253 	const char *mac;
1254 	size_t addrsize, total;
1255 	int addrs;
1256 
1257 	total = 0;
1258 	addrs = 0;
1259 
1260 	/* work out if we have a mac address */
1261 	sdl = iface->if_sadl;
1262 	if (sdl != NULL && sdl->sdl_alen &&
1263 	    (sdl->sdl_type == IFT_ETHER || sdl->sdl_type == IFT_CARP))
1264 		mac = ether_sprintf(sdl->sdl_data + sdl->sdl_nlen);
1265 	else
1266 		mac = NULL;
1267 
1268 	if (data != NULL) {
1269 		nicdata = data;
1270 		if (mac != NULL)
1271 			data += sizeof(nic);
1272 		else
1273 			data += sizeof(nnic);
1274 	}
1275 
1276 	TAILQ_FOREACH(iface_addr, &iface->if_addrlist, ifa_list) {
1277 		addrsize = vmt_xdr_ifaddr(iface_addr, data);
1278 		if (addrsize == 0)
1279 			continue;
1280 
1281 		if (data != NULL)
1282 			data += addrsize;
1283 		total += addrsize;
1284 		addrs++;
1285 		if (addrs == VM_NICINFO_MAX_ADDRS)
1286 			break;
1287 	}
1288 
1289 	if (addrs == 0)
1290 		return (0);
1291 
1292 	if (data != NULL) {
1293 		/* fill in mac address, if any */
1294 		if (mac != NULL) {
1295 			memset(&nic, 0, sizeof(nic));
1296 			htobem32(&nic.ni_mac_len, strlen(mac));
1297 			strncpy(nic.ni_mac, mac, VM_NICINFO_MAC_LEN);
1298 			htobem32(&nic.ni_num_addrs, addrs);
1299 			memcpy(nicdata, &nic, sizeof(nic));
1300 		} else {
1301 			nnic.nn_mac_len = 0;
1302 			htobem32(&nnic.nn_num_addrs, addrs);
1303 			memcpy(nicdata, &nnic, sizeof(nnic));
1304 		}
1305 
1306 		/* we don't actually set anything in vm_nicinfo_nic_post */
1307 	}
1308 
1309 	if (mac != NULL)
1310 		total += sizeof(nic);
1311 	else
1312 		total += sizeof(nnic);
1313 	total += sizeof(struct vm_nicinfo_nic_post);
1314 	return (total);
1315 }
1316 
1317 size_t
1318 vmt_xdr_nic_info(char *data)
1319 {
1320 	struct ifnet *iface;
1321 	struct vm_nicinfo_nic_list nl;
1322 	size_t total, nictotal;
1323 	char *listdata = NULL;
1324 	int nics;
1325 
1326 	NET_ASSERT_LOCKED();
1327 
1328 	total = sizeof(nl);
1329 	if (data != NULL) {
1330 		listdata = data;
1331 		data += sizeof(nl);
1332 	}
1333 
1334 	nics = 0;
1335 	TAILQ_FOREACH(iface, &ifnetlist, if_list) {
1336 		nictotal = vmt_xdr_nic_entry(iface, data);
1337 		if (nictotal == 0)
1338 			continue;
1339 
1340 		if (data != NULL)
1341 			data += nictotal;
1342 
1343 		total += nictotal;
1344 		nics++;
1345 		if (nics == VM_NICINFO_MAX_NICS)
1346 			break;
1347 	}
1348 
1349 	if (listdata != NULL) {
1350 		memset(&nl, 0, sizeof(nl));
1351 		htobem32(&nl.nl_version, VM_NICINFO_VERSION);
1352 		htobem32(&nl.nl_nic_list, 1);
1353 		htobem32(&nl.nl_num_nics, nics);
1354 		memcpy(listdata, &nl, sizeof(nl));
1355 	}
1356 
1357 	/* we don't actually set anything in vm_nicinfo_nic_list_post */
1358 	total += sizeof(struct vm_nicinfo_nic_list_post);
1359 
1360 	return (total);
1361 }
1362 
1363 void
1364 vmt_nicinfo_task(void *data)
1365 {
1366 	struct vmt_softc *sc = data;
1367 	size_t nic_info_size;
1368 	char *nic_info;
1369 
1370 	NET_LOCK();
1371 
1372 	nic_info_size = vmt_xdr_nic_info(NULL) + sizeof(VM_NICINFO_CMD) - 1;
1373 	nic_info = malloc(nic_info_size, M_DEVBUF, M_WAITOK | M_ZERO);
1374 
1375 	strncpy(nic_info, VM_NICINFO_CMD, nic_info_size);
1376 	vmt_xdr_nic_info(nic_info + sizeof(VM_NICINFO_CMD) - 1);
1377 
1378 	NET_UNLOCK();
1379 
1380 	if (nic_info_size != sc->sc_nic_info_size ||
1381 	    (memcmp(nic_info, sc->sc_nic_info, nic_info_size) != 0)) {
1382 		if (vm_rpc_send_rpci_tx_buf(sc, nic_info,
1383 		    nic_info_size) != 0) {
1384 			DPRINTF("%s: unable to send nic info",
1385 			    DEVNAME(sc));
1386 			sc->sc_rpc_error = 1;
1387 		}
1388 
1389 		free(sc->sc_nic_info, M_DEVBUF, sc->sc_nic_info_size);
1390 		sc->sc_nic_info = nic_info;
1391 		sc->sc_nic_info_size = nic_info_size;
1392 	} else {
1393 		free(nic_info, M_DEVBUF, nic_info_size);
1394 	}
1395 }
1396 
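/*
 * The BACKDOOR_OP macros load the register file described by struct
 * vm_backdoor into the CPU registers, execute the given I/O instruction
 * (which the hypervisor intercepts), and copy the resulting register
 * contents back into the frame.  vm_cmd() issues a single "inl" on
 * VM_PORT_CMD for register-only commands, while vm_ins()/vm_outs() use
 * "rep insb"/"rep outsb" on VM_PORT_RPC to move bulk enhanced-RPC data.
 */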
1397 #define BACKDOOR_OP_I386(op, frame)		\
1398 	__asm__ volatile (			\
1399 		"pushal;"			\
1400 		"pushl %%eax;"			\
1401 		"movl 0x18(%%eax), %%ebp;"	\
1402 		"movl 0x14(%%eax), %%edi;"	\
1403 		"movl 0x10(%%eax), %%esi;"	\
1404 		"movl 0x0c(%%eax), %%edx;"	\
1405 		"movl 0x08(%%eax), %%ecx;"	\
1406 		"movl 0x04(%%eax), %%ebx;"	\
1407 		"movl 0x00(%%eax), %%eax;"	\
1408 		op				\
1409 		"xchgl %%eax, 0x00(%%esp);"	\
1410 		"movl %%ebp, 0x18(%%eax);"	\
1411 		"movl %%edi, 0x14(%%eax);"	\
1412 		"movl %%esi, 0x10(%%eax);"	\
1413 		"movl %%edx, 0x0c(%%eax);"	\
1414 		"movl %%ecx, 0x08(%%eax);"	\
1415 		"movl %%ebx, 0x04(%%eax);"	\
1416 		"popl 0x00(%%eax);"		\
1417 		"popal;"			\
1418 		::"a"(frame)			\
1419 	)
1420 
1421 #define BACKDOOR_OP_AMD64(op, frame)		\
1422 	__asm__ volatile (			\
1423 		"pushq %%rbp;			\n\t" \
1424 		"pushq %%rax;			\n\t" \
1425 		"movq 0x30(%%rax), %%rbp;	\n\t" \
1426 		"movq 0x28(%%rax), %%rdi;	\n\t" \
1427 		"movq 0x20(%%rax), %%rsi;	\n\t" \
1428 		"movq 0x18(%%rax), %%rdx;	\n\t" \
1429 		"movq 0x10(%%rax), %%rcx;	\n\t" \
1430 		"movq 0x08(%%rax), %%rbx;	\n\t" \
1431 		"movq 0x00(%%rax), %%rax;	\n\t" \
1432 		op				"\n\t" \
1433 		"xchgq %%rax, 0x00(%%rsp);	\n\t" \
1434 		"movq %%rbp, 0x30(%%rax);	\n\t" \
1435 		"movq %%rdi, 0x28(%%rax);	\n\t" \
1436 		"movq %%rsi, 0x20(%%rax);	\n\t" \
1437 		"movq %%rdx, 0x18(%%rax);	\n\t" \
1438 		"movq %%rcx, 0x10(%%rax);	\n\t" \
1439 		"movq %%rbx, 0x08(%%rax);	\n\t" \
1440 		"popq 0x00(%%rax);		\n\t" \
1441 		"popq %%rbp;			\n\t" \
1442 		: /* No outputs. */ : "a" (frame) \
1443 		  /* No pushal on amd64 so warn gcc about the clobbered registers. */ \
1444 		: "rbx", "rcx", "rdx", "rdi", "rsi", "cc", "memory" \
1445 	)
1446 
1447 
1448 #ifdef __i386__
1449 #define BACKDOOR_OP(op, frame) BACKDOOR_OP_I386(op, frame)
1450 #else
1451 #define BACKDOOR_OP(op, frame) BACKDOOR_OP_AMD64(op, frame)
1452 #endif
1453 
1454 void
1455 vm_cmd(struct vm_backdoor *frame)
1456 {
1457 	BACKDOOR_OP("inl %%dx, %%eax;", frame);
1458 }
1459 
1460 void
1461 vm_ins(struct vm_backdoor *frame)
1462 {
1463 	BACKDOOR_OP("cld;\n\trep insb;", frame);
1464 }
1465 
1466 void
1467 vm_outs(struct vm_backdoor *frame)
1468 {
1469 	BACKDOOR_OP("cld;\n\trep outsb;", frame);
1470 }
1471 
1472 int
1473 vm_rpc_open(struct vm_rpc *rpc, uint32_t proto)
1474 {
1475 	struct vm_backdoor frame;
1476 
1477 	bzero(&frame, sizeof(frame));
1478 	frame.eax.word      = VM_MAGIC;
1479 	frame.ebx.word      = proto | VM_RPC_FLAG_COOKIE;
1480 	frame.ecx.part.low  = VM_CMD_RPC;
1481 	frame.ecx.part.high = VM_RPC_OPEN;
1482 	frame.edx.part.low  = VM_PORT_CMD;
1483 	frame.edx.part.high = 0;
1484 
1485 	vm_cmd(&frame);
1486 
1487 	if (frame.ecx.part.high != 1 || frame.edx.part.low != 0) {
1488 		/* open-vm-tools retries without VM_RPC_FLAG_COOKIE here. */
1489 		DPRINTF("vmware: open failed, eax=%08x, ecx=%08x, edx=%08x\n",
1490 		    frame.eax.word, frame.ecx.word, frame.edx.word);
1491 		return EIO;
1492 	}
1493 
1494 	rpc->channel = frame.edx.part.high;
1495 	rpc->cookie1 = frame.esi.word;
1496 	rpc->cookie2 = frame.edi.word;
1497 
1498 	return 0;
1499 }
1500 
1501 int
1502 vm_rpc_close(struct vm_rpc *rpc)
1503 {
1504 	struct vm_backdoor frame;
1505 
1506 	bzero(&frame, sizeof(frame));
1507 	frame.eax.word      = VM_MAGIC;
1508 	frame.ebx.word      = 0;
1509 	frame.ecx.part.low  = VM_CMD_RPC;
1510 	frame.ecx.part.high = VM_RPC_CLOSE;
1511 	frame.edx.part.low  = VM_PORT_CMD;
1512 	frame.edx.part.high = rpc->channel;
1513 	frame.edi.word      = rpc->cookie2;
1514 	frame.esi.word      = rpc->cookie1;
1515 
1516 	vm_cmd(&frame);
1517 
1518 	if (frame.ecx.part.high == 0 || frame.ecx.part.low != 0) {
1519 		DPRINTF("vmware: close failed, eax=%08x, ecx=%08x\n",
1520 		    frame.eax.word, frame.ecx.word);
1521 		return EIO;
1522 	}
1523 
1524 	rpc->channel = 0;
1525 	rpc->cookie1 = 0;
1526 	rpc->cookie2 = 0;
1527 
1528 	return 0;
1529 }
1530 
1531 int
1532 vm_rpc_send(const struct vm_rpc *rpc, const uint8_t *buf, uint32_t length)
1533 {
1534 	struct vm_backdoor frame;
1535 
1536 	/* Send the length of the command. */
1537 	bzero(&frame, sizeof(frame));
1538 	frame.eax.word = VM_MAGIC;
1539 	frame.ebx.word = length;
1540 	frame.ecx.part.low  = VM_CMD_RPC;
1541 	frame.ecx.part.high = VM_RPC_SET_LENGTH;
1542 	frame.edx.part.low  = VM_PORT_CMD;
1543 	frame.edx.part.high = rpc->channel;
1544 	frame.esi.word = rpc->cookie1;
1545 	frame.edi.word = rpc->cookie2;
1546 
1547 	vm_cmd(&frame);
1548 
1549 	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
1550 		DPRINTF("vmware: sending length failed, eax=%08x, ecx=%08x\n",
1551 		    frame.eax.word, frame.ecx.word);
1552 		return EIO;
1553 	}
1554 
1555 	if (length == 0)
1556 		return 0; /* Only need to poke once if command is null. */
1557 
1558 	/* Send the command using enhanced RPC. */
1559 	bzero(&frame, sizeof(frame));
1560 	frame.eax.word = VM_MAGIC;
1561 	frame.ebx.word = VM_RPC_ENH_DATA;
1562 	frame.ecx.word = length;
1563 	frame.edx.part.low  = VM_PORT_RPC;
1564 	frame.edx.part.high = rpc->channel;
1565 	frame.ebp.word = rpc->cookie1;
1566 	frame.edi.word = rpc->cookie2;
1567 #ifdef __amd64__
1568 	frame.esi.quad = (uint64_t)buf;
1569 #else
1570 	frame.esi.word = (uint32_t)buf;
1571 #endif
1572 
1573 	vm_outs(&frame);
1574 
1575 	if (frame.ebx.word != VM_RPC_ENH_DATA) {
1576 		/* open-vm-tools retries on VM_RPC_REPLY_CHECKPOINT */
1577 		DPRINTF("vmware: send failed, ebx=%08x\n", frame.ebx.word);
1578 		return EIO;
1579 	}
1580 
1581 	return 0;
1582 }
1583 
1584 int
1585 vm_rpc_send_str(const struct vm_rpc *rpc, const uint8_t *str)
1586 {
1587 	return vm_rpc_send(rpc, str, strlen(str));
1588 }
1589 
1590 int
1591 vm_rpc_get_data(const struct vm_rpc *rpc, char *data, uint32_t length,
1592     uint16_t dataid)
1593 {
1594 	struct vm_backdoor frame;
1595 
1596 	/* Get data using enhanced RPC. */
1597 	bzero(&frame, sizeof(frame));
1598 	frame.eax.word      = VM_MAGIC;
1599 	frame.ebx.word      = VM_RPC_ENH_DATA;
1600 	frame.ecx.word      = length;
1601 	frame.edx.part.low  = VM_PORT_RPC;
1602 	frame.edx.part.high = rpc->channel;
1603 	frame.esi.word      = rpc->cookie1;
1604 #ifdef __amd64__
1605 	frame.edi.quad      = (uint64_t)data;
1606 #else
1607 	frame.edi.word      = (uint32_t)data;
1608 #endif
1609 	frame.ebp.word      = rpc->cookie2;
1610 
1611 	vm_ins(&frame);
1612 
1613 	/* NUL-terminate the data */
1614 	data[length] = '\0';
1615 
1616 	if (frame.ebx.word != VM_RPC_ENH_DATA) {
1617 		DPRINTF("vmware: get data failed, ebx=%08x\n",
1618 		    frame.ebx.word);
1619 		return EIO;
1620 	}
1621 
1622 	/* Acknowledge data received. */
1623 	bzero(&frame, sizeof(frame));
1624 	frame.eax.word      = VM_MAGIC;
1625 	frame.ebx.word      = dataid;
1626 	frame.ecx.part.low  = VM_CMD_RPC;
1627 	frame.ecx.part.high = VM_RPC_GET_END;
1628 	frame.edx.part.low  = VM_PORT_CMD;
1629 	frame.edx.part.high = rpc->channel;
1630 	frame.esi.word      = rpc->cookie1;
1631 	frame.edi.word      = rpc->cookie2;
1632 
1633 	vm_cmd(&frame);
1634 
1635 	if (frame.ecx.part.high == 0) {
1636 		DPRINTF("vmware: ack data failed, eax=%08x, ecx=%08x\n",
1637 		    frame.eax.word, frame.ecx.word);
1638 		return EIO;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
1644 int
1645 vm_rpc_get_length(const struct vm_rpc *rpc, uint32_t *length, uint16_t *dataid)
1646 {
1647 	struct vm_backdoor frame;
1648 
1649 	bzero(&frame, sizeof(frame));
1650 	frame.eax.word      = VM_MAGIC;
1651 	frame.ebx.word      = 0;
1652 	frame.ecx.part.low  = VM_CMD_RPC;
1653 	frame.ecx.part.high = VM_RPC_GET_LENGTH;
1654 	frame.edx.part.low  = VM_PORT_CMD;
1655 	frame.edx.part.high = rpc->channel;
1656 	frame.esi.word      = rpc->cookie1;
1657 	frame.edi.word      = rpc->cookie2;
1658 
1659 	vm_cmd(&frame);
1660 
1661 	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
1662 		DPRINTF("vmware: get length failed, eax=%08x, ecx=%08x\n",
1663 		    frame.eax.word, frame.ecx.word);
1664 		return EIO;
1665 	}
1666 	if ((frame.ecx.part.high & VM_RPC_REPLY_DORECV) == 0) {
1667 		*length = 0;
1668 		*dataid = 0;
1669 	} else {
1670 		*length = frame.ebx.word;
1671 		*dataid = frame.edx.part.high;
1672 	}
1673 
1674 	return 0;
1675 }
1676 
1677 int
1678 vm_rpci_response_successful(struct vmt_softc *sc)
1679 {
1680 	return (sc->sc_rpc_buf[0] == '1' && sc->sc_rpc_buf[1] == ' ');
1681 }
1682 
1683 int
1684 vm_rpc_send_rpci_tx_buf(struct vmt_softc *sc, const uint8_t *buf,
1685     uint32_t length)
1686 {
1687 	struct vm_rpc rpci;
1688 	u_int32_t rlen;
1689 	u_int16_t ack;
1690 	int result = 0;
1691 
1692 	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
1693 		DPRINTF("%s: rpci channel open failed\n", DEVNAME(sc));
1694 		return EIO;
1695 	}
1696 
1697 	if (vm_rpc_send(&rpci, buf, length) != 0) {
1698 		DPRINTF("%s: unable to send rpci command\n", DEVNAME(sc));
1699 		result = EIO;
1700 		goto out;
1701 	}
1702 
1703 	if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
1704 		DPRINTF("%s: failed to get length of rpci response data\n",
1705 		    DEVNAME(sc));
1706 		result = EIO;
1707 		goto out;
1708 	}
1709 
1710 	if (rlen > 0) {
1711 		if (rlen >= VMT_RPC_BUFLEN) {
1712 			rlen = VMT_RPC_BUFLEN - 1;
1713 		}
1714 
1715 		if (vm_rpc_get_data(&rpci, sc->sc_rpc_buf, rlen, ack) != 0) {
1716 			DPRINTF("%s: failed to get rpci response data\n",
1717 			    DEVNAME(sc));
1718 			result = EIO;
1719 			goto out;
1720 		}
1721 	}
1722 
1723 out:
1724 	if (vm_rpc_close(&rpci) != 0) {
1725 		DPRINTF("%s: unable to close rpci channel\n", DEVNAME(sc));
1726 	}
1727 
1728 	return result;
1729 }
1730 
1731 int
1732 vm_rpc_send_rpci_tx(struct vmt_softc *sc, const char *fmt, ...)
1733 {
1734 	va_list args;
1735 	int len;
1736 
1737 	va_start(args, fmt);
1738 	len = vsnprintf(sc->sc_rpc_buf, VMT_RPC_BUFLEN, fmt, args);
1739 	va_end(args);
1740 
1741 	if (len >= VMT_RPC_BUFLEN) {
1742 		DPRINTF("%s: rpci command didn't fit in buffer\n", DEVNAME(sc));
1743 		return EIO;
1744 	}
1745 
1746 	return vm_rpc_send_rpci_tx_buf(sc, sc->sc_rpc_buf, len);
1747 }
1748 
1749 #if 0
1750 	struct vm_backdoor frame;
1751 
1752 	bzero(&frame, sizeof(frame));
1753 
1754 	frame.eax.word = VM_MAGIC;
1755 	frame.ecx.part.low = VM_CMD_GET_VERSION;
1756 	frame.edx.part.low  = VM_PORT_CMD;
1757 
1758 	printf("\n");
1759 	printf("eax 0x%08x\n", frame.eax.word);
1760 	printf("ebx 0x%08x\n", frame.ebx.word);
1761 	printf("ecx 0x%08x\n", frame.ecx.word);
1762 	printf("edx 0x%08x\n", frame.edx.word);
1763 	printf("ebp 0x%08x\n", frame.ebp.word);
1764 	printf("edi 0x%08x\n", frame.edi.word);
1765 	printf("esi 0x%08x\n", frame.esi.word);
1766 
1767 	vm_cmd(&frame);
1768 
1769 	printf("-\n");
1770 	printf("eax 0x%08x\n", frame.eax.word);
1771 	printf("ebx 0x%08x\n", frame.ebx.word);
1772 	printf("ecx 0x%08x\n", frame.ecx.word);
1773 	printf("edx 0x%08x\n", frame.edx.word);
1774 	printf("ebp 0x%08x\n", frame.ebp.word);
1775 	printf("edi 0x%08x\n", frame.edi.word);
1776 	printf("esi 0x%08x\n", frame.esi.word);
1777 #endif
1778 
1779 /*
1780  * Notes on tracing backdoor activity in vmware-guestd:
1781  *
1782  * - Find the addresses of the inl / rep insb / rep outsb
1783  *   instructions used to perform backdoor operations.
1784  *   One way to do this is to disassemble vmware-guestd:
1785  *
1786  *   $ objdump -S /emul/freebsd/sbin/vmware-guestd > vmware-guestd.S
1787  *
1788  *   and search for '<tab>in ' in the resulting file.  The rep insb and
1789  *   rep outsb code is directly below that.
1790  *
1791  * - Run vmware-guestd under gdb, setting up breakpoints as follows:
1792  *   (the addresses shown here are the ones from VMware-server-1.0.10-203137,
1793  *   the last version that actually works in FreeBSD emulation on OpenBSD)
1794  *
1795  * break *0x805497b   (address of 'in' instruction)
1796  * commands 1
1797  * silent
1798  * echo INOUT\n
1799  * print/x $ecx
1800  * print/x $ebx
1801  * print/x $edx
1802  * continue
1803  * end
1804  * break *0x805497c   (address of instruction after 'in')
1805  * commands 2
1806  * silent
1807  * echo ===\n
1808  * print/x $ecx
1809  * print/x $ebx
1810  * print/x $edx
1811  * echo \n
1812  * continue
1813  * end
1814  * break *0x80549b7   (address of instruction before 'rep insb')
1815  * commands 3
1816  * silent
1817  * set variable $inaddr = $edi
1818  * set variable $incount = $ecx
1819  * continue
1820  * end
1821  * break *0x80549ba   (address of instruction after 'rep insb')
1822  * commands 4
1823  * silent
1824  * echo IN\n
1825  * print $incount
1826  * x/s $inaddr
1827  * echo \n
1828  * continue
1829  * end
1830  * break *0x80549fb    (address of instruction before 'rep outsb')
1831  * commands 5
1832  * silent
1833  * echo OUT\n
1834  * print $ecx
1835  * x/s $esi
1836  * echo \n
1837  * continue
1838  * end
1839  *
1840  * This will produce a log of the backdoor operations, including the
1841  * data sent and received and the relevant register values.  You can then
1842  * match the register values to the various constants in this file.
1843  */
1844