xref: /openbsd/sys/dev/pv/hypervic.c (revision 771fbea0)
1 /*-
2  * Copyright (c) 2009-2016 Microsoft Corp.
3  * Copyright (c) 2012 NetApp Inc.
4  * Copyright (c) 2012 Citrix Inc.
5  * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * The OpenBSD port was done under funding by Esdenera Networks GmbH.
32  */
33 
34 #include <sys/param.h>
35 
36 /* Hyper-V requires locked atomic operations */
37 #ifndef MULTIPROCESSOR
38 #define _HYPERVMPATOMICS
39 #define MULTIPROCESSOR
40 #endif
41 #include <sys/atomic.h>
42 #ifdef _HYPERVMPATOMICS
43 #undef MULTIPROCESSOR
44 #undef _HYPERVMPATOMICS
45 #endif
46 
47 #include <sys/systm.h>
48 #include <sys/proc.h>
49 #include <sys/signal.h>
50 #include <sys/signalvar.h>
51 #include <sys/malloc.h>
52 #include <sys/kernel.h>
53 #include <sys/device.h>
54 #include <sys/pool.h>
55 #include <sys/timetc.h>
56 #include <sys/task.h>
57 #include <sys/syslog.h>
58 #include <sys/socket.h>
59 #include <sys/sensors.h>
60 
61 #include <machine/bus.h>
62 #include <machine/cpu.h>
63 #include <machine/cpufunc.h>
64 
65 #include <machine/i82489var.h>
66 
67 #include <net/if.h>
68 #include <net/if_dl.h>
69 #include <netinet/in.h>
70 #include <netinet/if_ether.h>
71 
72 #include <dev/pv/pvvar.h>
73 #include <dev/pv/pvreg.h>
74 #include <dev/pv/hypervreg.h>
75 #include <dev/pv/hypervvar.h>
76 #include <dev/pv/hypervicreg.h>
77 
78 struct hv_ic_dev;
79 
80 #define NKVPPOOLS			4
81 #define MAXPOOLENTS			1023
82 
83 struct kvp_entry {
84 	int				kpe_index;
85 	uint32_t			kpe_valtype;
86 	uint8_t				kpe_key[HV_KVP_MAX_KEY_SIZE / 2];
87 	uint8_t				kpe_val[HV_KVP_MAX_VAL_SIZE / 2];
88 	TAILQ_ENTRY(kvp_entry)		kpe_entry;
89 };
90 TAILQ_HEAD(kvp_list, kvp_entry);
91 
92 struct kvp_pool {
93 	struct kvp_list			kvp_entries;
94 	struct mutex			kvp_lock;
95 	u_int				kvp_index;
96 };
97 
98 struct pool				kvp_entry_pool;
99 
100 struct hv_kvp {
101 	struct kvp_pool			kvp_pool[NKVPPOOLS];
102 };
103 
104 int	hv_heartbeat_attach(struct hv_ic_dev *);
105 void	hv_heartbeat(void *);
106 int	hv_kvp_attach(struct hv_ic_dev *);
107 void	hv_kvp(void *);
108 int	hv_kvop(void *, int, char *, char *, size_t);
109 int	hv_shutdown_attach(struct hv_ic_dev *);
110 void	hv_shutdown(void *);
111 int	hv_timesync_attach(struct hv_ic_dev *);
112 void	hv_timesync(void *);
113 
114 static struct hv_ic_dev {
115 	const char		 *dv_name;
116 	const struct hv_guid	 *dv_type;
117 	int			(*dv_attach)(struct hv_ic_dev *);
118 	void			(*dv_handler)(void *);
119 	struct hv_channel	 *dv_ch;
120 	uint8_t			 *dv_buf;
121 	void			 *dv_priv;
122 } hv_ic_devs[] = {
123 	{
124 		"heartbeat",
125 		&hv_guid_heartbeat,
126 		hv_heartbeat_attach,
127 		hv_heartbeat
128 	},
129 	{
130 		"kvp",
131 		&hv_guid_kvp,
132 		hv_kvp_attach,
133 		hv_kvp
134 	},
135 	{
136 		"shutdown",
137 		&hv_guid_shutdown,
138 		hv_shutdown_attach,
139 		hv_shutdown
140 	},
141 	{
142 		"timesync",
143 		&hv_guid_timesync,
144 		hv_timesync_attach,
145 		hv_timesync
146 	}
147 };
148 
149 static const struct {
150 	enum hv_kvp_pool		 poolidx;
151 	const char			*poolname;
152 	size_t				 poolnamelen;
153 } kvp_pools[] = {
154 	{ HV_KVP_POOL_EXTERNAL,		"External",	sizeof("External") },
155 	{ HV_KVP_POOL_GUEST,		"Guest",	sizeof("Guest")	},
156 	{ HV_KVP_POOL_AUTO,		"Auto",		sizeof("Auto") },
157 	{ HV_KVP_POOL_AUTO_EXTERNAL,	"Guest/Parameters",
158 	  sizeof("Guest/Parameters") }
159 };
160 
161 static const struct {
162 	int				 keyidx;
163 	const char			*keyname;
164 	const char			*value;
165 } kvp_pool_auto[] = {
166 	{ 0, "FullyQualifiedDomainName",	hostname },
167 	{ 1, "IntegrationServicesVersion",	"6.6.6"	},
168 	{ 2, "NetworkAddressIPv4",		"127.0.0.1" },
169 	{ 3, "NetworkAddressIPv6",		"::1" },
170 	{ 4, "OSBuildNumber",			osversion },
171 	{ 5, "OSName",				ostype },
172 	{ 6, "OSMajorVersion",			"6" }, /* free commit for mike */
173 	{ 7, "OSMinorVersion",			&osrelease[2] },
174 	{ 8, "OSVersion",			osrelease },
175 #ifdef __amd64__ /* As specified in SYSTEM_INFO.wProcessorArchitecture */
176 	{ 9, "ProcessorArchitecture",		"9" }
177 #else
178 	{ 9, "ProcessorArchitecture",		"0" }
179 #endif
180 };
181 
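/*
 * Match and attach the Integration Components (IC) services: for each
 * known service look for an offered VMBus channel with a matching GUID,
 * disable batched reading (these channels carry one message at a time),
 * run the per-service attach hook and open the channel with the
 * service's message handler.
 */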
182 void
183 hv_attach_icdevs(struct hv_softc *sc)
184 {
185 	struct hv_ic_dev *dv;
186 	struct hv_channel *ch;
187 	int i, header = 0;
188 
189 	for (i = 0; i < nitems(hv_ic_devs); i++) {
190 		dv = &hv_ic_devs[i];
191 
192 		TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
193 			if (ch->ch_state != HV_CHANSTATE_OFFERED)
194 				continue;
195 			if (ch->ch_flags & CHF_MONITOR)
196 				continue;
197 			if (memcmp(dv->dv_type, &ch->ch_type,
198 			    sizeof(ch->ch_type)) == 0)
199 				break;
200 		}
201 		if (ch == NULL)
202 			continue;
203 
204 		dv->dv_ch = ch;
205 
206 		/*
207 		 * These services are not performance critical and
208 		 * do not need batched reading. Furthermore, some
209 		 * services such as KVP can only handle one message
210 		 * from the host at a time.
211 		 */
212 		dv->dv_ch->ch_flags &= ~CHF_BATCHED;
213 
214 		if (dv->dv_attach && dv->dv_attach(dv) != 0)
215 			continue;
216 
217 		if (hv_channel_open(ch, VMBUS_IC_BUFRINGSIZE, NULL, 0,
218 		    dv->dv_handler, dv)) {
219 			printf("%s: failed to open channel for %s\n",
220 			    sc->sc_dev.dv_xname, dv->dv_name);
221 			continue;
222 		}
223 		evcount_attach(&ch->ch_evcnt, dv->dv_name, &sc->sc_idtvec);
224 
225 		if (!header) {
226 			printf("%s: %s", sc->sc_dev.dv_xname, dv->dv_name);
227 			header = 1;
228 		} else
229 			printf(", %s", dv->dv_name);
230 	}
231 	if (header)
232 		printf("\n");
233 }
234 
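/*
 * Negotiate the IC protocol: pick the highest framework and message
 * versions offered by the host that do not exceed fwver/msgver, then
 * rewrite the request in place as the reply, advertising exactly one
 * framework and one message version.
 */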
235 static inline void
236 hv_ic_negotiate(struct vmbus_icmsg_hdr *hdr, uint32_t *rlen, uint32_t fwver,
237     uint32_t msgver)
238 {
239 	struct vmbus_icmsg_negotiate *msg;
240 	uint16_t propmin, propmaj, chosenmaj, chosenmin;
241 	int i;
242 
243 	msg = (struct vmbus_icmsg_negotiate *)hdr;
244 
245 	chosenmaj = chosenmin = 0;
246 	for (i = 0; i < msg->ic_fwver_cnt; i++) {
247 		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
248 		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
249 		if (propmaj > chosenmaj &&
250 		    propmaj <= VMBUS_ICVER_MAJOR(fwver) &&
251 		    propmin >= chosenmin &&
252 		    propmin <= VMBUS_ICVER_MINOR(fwver)) {
253 			chosenmaj = propmaj;
254 			chosenmin = propmin;
255 		}
256 	}
257 	fwver = VMBUS_IC_VERSION(chosenmaj, chosenmin);
258 
259 	chosenmaj = chosenmin = 0;
260 	for (; i < msg->ic_fwver_cnt + msg->ic_msgver_cnt; i++) {
261 		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
262 		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
263 		if (propmaj > chosenmaj &&
264 		    propmaj <= VMBUS_ICVER_MAJOR(msgver) &&
265 		    propmin >= chosenmin &&
266 		    propmin <= VMBUS_ICVER_MINOR(msgver)) {
267 			chosenmaj = propmaj;
268 			chosenmin = propmin;
269 		}
270 	}
271 	msgver = VMBUS_IC_VERSION(chosenmaj, chosenmin);
272 
273 	msg->ic_fwver_cnt = 1;
274 	msg->ic_ver[0] = fwver;
275 	msg->ic_msgver_cnt = 1;
276 	msg->ic_ver[1] = msgver;
277 	hdr->ic_dsize = sizeof(*msg) + 2 * sizeof(uint32_t) -
278 	    sizeof(struct vmbus_icmsg_hdr);
279 	if (*rlen < sizeof(*msg) + 2 * sizeof(uint32_t))
280 		*rlen = sizeof(*msg) + 2 * sizeof(uint32_t);
281 }
282 
283 int
284 hv_heartbeat_attach(struct hv_ic_dev *dv)
285 {
286 	struct hv_channel *ch = dv->dv_ch;
287 	struct hv_softc *sc = ch->ch_sc;
288 
289 	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
290 	    (cold ? M_NOWAIT : M_WAITOK));
291 	if (dv->dv_buf == NULL) {
292 		printf("%s: failed to allocate receive buffer\n",
293 		    sc->sc_dev.dv_xname);
294 		return (-1);
295 	}
296 	return (0);
297 }
298 
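/*
 * Heartbeat channel callback: read one message and either negotiate
 * the protocol version or bump the sequence number of the host's
 * heartbeat sample, then send the buffer back as the response.
 */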
299 void
300 hv_heartbeat(void *arg)
301 {
302 	struct hv_ic_dev *dv = arg;
303 	struct hv_channel *ch = dv->dv_ch;
304 	struct hv_softc *sc = ch->ch_sc;
305 	struct vmbus_icmsg_hdr *hdr;
306 	struct vmbus_icmsg_heartbeat *msg;
307 	uint64_t rid;
308 	uint32_t rlen;
309 	int rv;
310 
311 	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
312 	if (rv || rlen == 0) {
313 		if (rv != EAGAIN)
314 			DPRINTF("%s: heartbeat rv=%d rlen=%u\n",
315 			    sc->sc_dev.dv_xname, rv, rlen);
316 		return;
317 	}
318 	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
319 		DPRINTF("%s: heartbeat short read rlen=%u\n",
320 			    sc->sc_dev.dv_xname, rlen);
321 		return;
322 	}
323 	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
324 	switch (hdr->ic_type) {
325 	case VMBUS_ICMSG_TYPE_NEGOTIATE:
326 		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
327 		    VMBUS_IC_VERSION(3, 0));
328 		break;
329 	case VMBUS_ICMSG_TYPE_HEARTBEAT:
330 		msg = (struct vmbus_icmsg_heartbeat *)hdr;
331 		msg->ic_seq += 1;
332 		break;
333 	default:
334 		printf("%s: unhandled heartbeat message type %u\n",
335 		    sc->sc_dev.dv_xname, hdr->ic_type);
336 		return;
337 	}
338 	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
	    VMBUS_ICMSG_FLAG_RESPONSE;
339 	hv_channel_send(ch, dv->dv_buf, rlen, rid,
	    VMBUS_CHANPKT_TYPE_INBAND, 0);
340 }
341 
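/* Deferred shutdown work, run from the system task queue. */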
342 static void
343 hv_shutdown_task(void *arg)
344 {
345 	struct hv_softc *sc = arg;
346 	pvbus_shutdown(&sc->sc_dev);
347 }
348 
349 int
350 hv_shutdown_attach(struct hv_ic_dev *dv)
351 {
352 	struct hv_channel *ch = dv->dv_ch;
353 	struct hv_softc *sc = ch->ch_sc;
354 
355 	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
356 	    (cold ? M_NOWAIT : M_WAITOK));
357 	if (dv->dv_buf == NULL) {
358 		printf("%s: failed to allocate receive buffer\n",
359 		    sc->sc_dev.dv_xname);
360 		return (-1);
361 	}
362 
363 	task_set(&sc->sc_sdtask, hv_shutdown_task, sc);
364 
365 	return (0);
366 }
367 
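/*
 * Shutdown channel callback: acknowledge the host's request and, if
 * the halt flags are acceptable, schedule the actual shutdown on the
 * system task queue.
 */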
368 void
369 hv_shutdown(void *arg)
370 {
371 	struct hv_ic_dev *dv = arg;
372 	struct hv_channel *ch = dv->dv_ch;
373 	struct hv_softc *sc = ch->ch_sc;
374 	struct vmbus_icmsg_hdr *hdr;
375 	struct vmbus_icmsg_shutdown *msg;
376 	uint64_t rid;
377 	uint32_t rlen;
378 	int rv, shutdown = 0;
379 
380 	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
381 	if (rv || rlen == 0) {
382 		if (rv != EAGAIN)
383 			DPRINTF("%s: shutdown rv=%d rlen=%u\n",
384 			    sc->sc_dev.dv_xname, rv, rlen);
385 		return;
386 	}
387 	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
388 		DPRINTF("%s: shutdown short read rlen=%u\n",
389 			    sc->sc_dev.dv_xname, rlen);
390 		return;
391 	}
392 	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
393 	switch (hdr->ic_type) {
394 	case VMBUS_ICMSG_TYPE_NEGOTIATE:
395 		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
396 		    VMBUS_IC_VERSION(3, 0));
397 		break;
398 	case VMBUS_ICMSG_TYPE_SHUTDOWN:
399 		msg = (struct vmbus_icmsg_shutdown *)hdr;
400 		if (msg->ic_haltflags == 0 || msg->ic_haltflags == 1) {
401 			shutdown = 1;
402 			hdr->ic_status = VMBUS_ICMSG_STATUS_OK;
403 		} else
404 			hdr->ic_status = VMBUS_ICMSG_STATUS_FAIL;
405 		break;
406 	default:
407 		printf("%s: unhandled shutdown message type %u\n",
408 		    sc->sc_dev.dv_xname, hdr->ic_type);
409 		return;
410 	}
411 
412 	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
	    VMBUS_ICMSG_FLAG_RESPONSE;
413 	hv_channel_send(ch, dv->dv_buf, rlen, rid,
	    VMBUS_CHANPKT_TYPE_INBAND, 0);
414 
415 	if (shutdown)
416 		task_add(systq, &sc->sc_sdtask);
417 }
418 
419 int
420 hv_timesync_attach(struct hv_ic_dev *dv)
421 {
422 	struct hv_channel *ch = dv->dv_ch;
423 	struct hv_softc *sc = ch->ch_sc;
424 
425 	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
426 	    (cold ? M_NOWAIT : M_WAITOK));
427 	if (dv->dv_buf == NULL) {
428 		printf("%s: failed to allocate receive buffer\n",
429 		    sc->sc_dev.dv_xname);
430 		return (-1);
431 	}
432 
433 	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
434 	    sizeof(sc->sc_sensordev.xname));
435 
436 	sc->sc_sensor.type = SENSOR_TIMEDELTA;
437 	sc->sc_sensor.status = SENSOR_S_UNKNOWN;
438 
439 	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
440 	sensordev_install(&sc->sc_sensordev);
441 
442 	return (0);
443 }
444 
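/*
 * Timesync channel callback: convert the host's time sample (100ns
 * units since 1601, FILETIME-style) to a timespec and export the
 * guest-host delta through a timedelta sensor.
 */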
445 void
446 hv_timesync(void *arg)
447 {
448 	struct hv_ic_dev *dv = arg;
449 	struct hv_channel *ch = dv->dv_ch;
450 	struct hv_softc *sc = ch->ch_sc;
451 	struct vmbus_icmsg_hdr *hdr;
452 	struct vmbus_icmsg_timesync *msg;
453 	struct timespec guest, host, diff;
454 	uint64_t tns;
455 	uint64_t rid;
456 	uint32_t rlen;
457 	int rv;
458 
459 	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
460 	if (rv || rlen == 0) {
461 		if (rv != EAGAIN)
462 			DPRINTF("%s: timesync rv=%d rlen=%u\n",
463 			    sc->sc_dev.dv_xname, rv, rlen);
464 		return;
465 	}
466 	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
467 		DPRINTF("%s: timesync short read rlen=%u\n",
468 			    sc->sc_dev.dv_xname, rlen);
469 		return;
470 	}
471 	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
472 	switch (hdr->ic_type) {
473 	case VMBUS_ICMSG_TYPE_NEGOTIATE:
474 		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
475 		    VMBUS_IC_VERSION(3, 0));
476 		break;
477 	case VMBUS_ICMSG_TYPE_TIMESYNC:
478 		msg = (struct vmbus_icmsg_timesync *)hdr;
479 		if (msg->ic_tsflags == VMBUS_ICMSG_TS_FLAG_SAMPLE) {
480 			microtime(&sc->sc_sensor.tv);
481 			nanotime(&guest);
482 			tns = (msg->ic_hvtime - 116444736000000000LL) * 100;
483 			host.tv_sec = tns / 1000000000LL;
484 			host.tv_nsec = tns % 1000000000LL;
485 			timespecsub(&guest, &host, &diff);
486 			sc->sc_sensor.value = (int64_t)diff.tv_sec *
487 			    1000000000LL + diff.tv_nsec;
488 			sc->sc_sensor.status = SENSOR_S_OK;
489 		}
490 		break;
491 	default:
492 		printf("%s: unhandled timesync message type %u\n",
493 		    sc->sc_dev.dv_xname, hdr->ic_type);
494 		return;
495 	}
496 
497 	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
	    VMBUS_ICMSG_FLAG_RESPONSE;
498 	hv_channel_send(ch, dv->dv_buf, rlen, rid,
	    VMBUS_CHANPKT_TYPE_INBAND, 0);
499 }
500 
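/*
 * Minimal ASCII <-> UTF-16LE helpers.  KVP strings exchanged with the
 * host are UTF-16LE; the conversions below simply insert or drop the
 * high byte of each code unit, which is only correct for ASCII data.
 *
 * copyout_utf16le() widens an ASCII buffer into UTF-16LE and returns
 * the number of bytes written.
 */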
501 static inline int
502 copyout_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
503 {
504 	const uint8_t *sp = src;
505 	uint8_t *dp = dst;
506 	int i, j;
507 
508 	KASSERT(dlen >= slen * 2);
509 
510 	for (i = j = 0; i < slen; i++, j += 2) {
511 		dp[j] = sp[i];
512 		dp[j + 1] = '\0';
513 	}
514 	return (j);
515 }
516 
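/*
 * copyin_utf16le() narrows a UTF-16LE buffer to ASCII by keeping the
 * low byte of each code unit; returns the number of bytes produced.
 */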
517 static inline int
518 copyin_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
519 {
520 	const uint8_t *sp = src;
521 	uint8_t *dp = dst;
522 	int i, j;
523 
524 	KASSERT(dlen >= slen / 2);
525 
526 	for (i = j = 0; i < slen; i += 2, j++)
527 		dp[j] = sp[i];
528 	return (j);
529 }
530 
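/* Compare a stored ASCII key against a UTF-16LE key of ukeylen bytes. */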
531 static inline int
532 keycmp_utf16le(const uint8_t *key, const uint8_t *ukey, size_t ukeylen)
533 {
534 	int i, j;
535 
536 	for (i = j = 0; i < ukeylen; i += 2, j++) {
537 		if (key[j] != ukey[i])
538 			return (key[j] > ukey[i] ?
539 			    key[j] - ukey[i] :
540 			    ukey[i] - key[j]);
541 	}
542 	return (0);
543 }
544 
545 static void
546 kvp_pool_init(struct kvp_pool *kvpl)
547 {
548 	TAILQ_INIT(&kvpl->kvp_entries);
549 	mtx_init(&kvpl->kvp_lock, IPL_NET);
550 	kvpl->kvp_index = 0;
551 }
552 
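/*
 * Insert a new entry under a NUL-terminated ASCII key (kernel side).
 * Fails with EEXIST if the key is already present; string values are
 * copied as strings, anything else verbatim.
 */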
553 static int
554 kvp_pool_insert(struct kvp_pool *kvpl, const char *key, const char *val,
555     uint32_t vallen, uint32_t valtype)
556 {
557 	struct kvp_entry *kpe;
558 	int keylen = strlen(key);
559 
560 	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
561 		return (ERANGE);
562 
563 	mtx_enter(&kvpl->kvp_lock);
564 
565 	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
566 		if (strcmp(kpe->kpe_key, key) == 0) {
567 			mtx_leave(&kvpl->kvp_lock);
568 			return (EEXIST);
569 		}
570 	}
571 
572 	kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
573 	if (kpe == NULL) {
574 		mtx_leave(&kvpl->kvp_lock);
575 		return (ENOMEM);
576 	}
577 
578 	strlcpy(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2);
579 
580 	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
581 		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
582 	else
583 		memcpy(kpe->kpe_val, val, vallen);
584 
585 	kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;
586 
587 	TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);
588 
589 	mtx_leave(&kvpl->kvp_lock);
590 
591 	return (0);
592 }
593 
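/* Update the value of an existing entry looked up by its ASCII key. */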
594 static int
595 kvp_pool_update(struct kvp_pool *kvpl, const char *key, const char *val,
596     uint32_t vallen, uint32_t valtype)
597 {
598 	struct kvp_entry *kpe;
599 	int keylen = strlen(key);
600 
601 	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
602 		return (ERANGE);
603 
604 	mtx_enter(&kvpl->kvp_lock);
605 
606 	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
607 		if (strcmp(kpe->kpe_key, key) == 0)
608 			break;
609 	}
610 	if (kpe == NULL) {
611 		mtx_leave(&kvpl->kvp_lock);
612 		return (ENOENT);
613 	}
614 
615 	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
616 		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2);
617 	else
618 		memcpy(kpe->kpe_val, val, vallen);
619 
620 	mtx_leave(&kvpl->kvp_lock);
621 
622 	return (0);
623 }
624 
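/*
 * Insert or update an entry whose key and value are supplied by the
 * host as UTF-16LE strings (a KVP "set" operation).
 */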
625 static int
626 kvp_pool_import(struct kvp_pool *kvpl, const char *key, uint32_t keylen,
627     const char *val, uint32_t vallen, uint32_t valtype)
628 {
629 	struct kvp_entry *kpe;
630 
631 	if (keylen > HV_KVP_MAX_KEY_SIZE ||
632 	    vallen > HV_KVP_MAX_VAL_SIZE)
633 		return (ERANGE);
634 
635 	mtx_enter(&kvpl->kvp_lock);
636 
637 	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
638 		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
639 			break;
640 	}
641 	if (kpe == NULL) {
642 		kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
643 		if (kpe == NULL) {
644 			mtx_leave(&kvpl->kvp_lock);
645 			return (ENOMEM);
646 		}
647 
648 		copyin_utf16le(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2,
649 		    keylen);
650 
651 		kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;
652 
653 		TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);
654 	}
655 
656 	copyin_utf16le(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2, vallen);
657 	kpe->kpe_valtype = valtype;
658 
659 	mtx_leave(&kvpl->kvp_lock);
660 
661 	return (0);
662 }
663 
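/*
 * Export the entry with the given index, converting its key and value
 * to UTF-16LE for the host (used by KVP enumeration).
 */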
664 static int
665 kvp_pool_export(struct kvp_pool *kvpl, uint32_t index, char *key,
666     uint32_t *keylen, char *val, uint32_t *vallen, uint32_t *valtype)
667 {
668 	struct kvp_entry *kpe;
669 
670 	mtx_enter(&kvpl->kvp_lock);
671 
672 	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
673 		if (kpe->kpe_index == index)
674 			break;
675 	}
676 	if (kpe == NULL) {
677 		mtx_leave(&kvpl->kvp_lock);
678 		return (ENOENT);
679 	}
680 
681 	*keylen = copyout_utf16le(key, kpe->kpe_key, HV_KVP_MAX_KEY_SIZE,
682 	    strlen(kpe->kpe_key) + 1);
683 	*vallen = copyout_utf16le(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE,
684 	    strlen(kpe->kpe_val) + 1);
685 	*valtype = kpe->kpe_valtype;
686 
687 	mtx_leave(&kvpl->kvp_lock);
688 
689 	return (0);
690 }
691 
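/* Remove an entry looked up by its UTF-16LE key. */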
692 static int
693 kvp_pool_remove(struct kvp_pool *kvpl, const char *key, uint32_t keylen)
694 {
695 	struct kvp_entry *kpe;
696 
697 	mtx_enter(&kvpl->kvp_lock);
698 
699 	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
700 		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
701 			break;
702 	}
703 	if (kpe == NULL) {
704 		mtx_leave(&kvpl->kvp_lock);
705 		return (ENOENT);
706 	}
707 
708 	TAILQ_REMOVE(&kvpl->kvp_entries, kpe, kpe_entry);
709 
710 	mtx_leave(&kvpl->kvp_lock);
711 
712 	pool_put(&kvp_entry_pool, kpe);
713 
714 	return (0);
715 }
716 
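/*
 * Copy out the value of an entry looked up by its ASCII key, formatting
 * REG_U32/REG_U64 values as decimal strings (local hv_kvop() side).
 */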
717 static int
718 kvp_pool_extract(struct kvp_pool *kvpl, const char *key, char *val,
719     uint32_t vallen)
720 {
721 	struct kvp_entry *kpe;
722 
723 	if (vallen < HV_KVP_MAX_VAL_SIZE / 2)
724 		return (ERANGE);
725 
726 	mtx_enter(&kvpl->kvp_lock);
727 
728 	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
729 		if (strcmp(kpe->kpe_key, key) == 0)
730 			break;
731 	}
732 	if (kpe == NULL) {
733 		mtx_leave(&kvpl->kvp_lock);
734 		return (ENOENT);
735 	}
736 
737 	switch (kpe->kpe_valtype) {
738 	case HV_KVP_REG_SZ:
739 		strlcpy(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE / 2);
740 		break;
741 	case HV_KVP_REG_U32:
742 		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%u",
743 		    *(uint32_t *)kpe->kpe_val);
744 		break;
745 	case HV_KVP_REG_U64:
746 		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%llu",
747 		    *(uint64_t *)kpe->kpe_val);
748 		break;
749 	}
750 
751 	mtx_leave(&kvpl->kvp_lock);
752 
753 	return (0);
754 }
755 
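/*
 * Copy the key of the next-th entry into the caller's buffer; used by
 * hv_kvop() to enumerate a pool.  Returns -1 once the pool is exhausted.
 */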
756 static int
757 kvp_pool_keys(struct kvp_pool *kvpl, int next, char *key, size_t *keylen)
758 {
759 	struct kvp_entry *kpe;
760 	int iter = 0;
761 
	mtx_enter(&kvpl->kvp_lock);

762 	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
763 		if (iter++ < next)
764 			continue;
765 		*keylen = strlen(kpe->kpe_key) + 1;
766 		strlcpy(key, kpe->kpe_key, *keylen);
		mtx_leave(&kvpl->kvp_lock);
767 		return (0);
768 	}
769 
	mtx_leave(&kvpl->kvp_lock);

770 	return (-1);
771 }
772 
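/*
 * KVP service attach: allocate the receive buffer and per-service state,
 * initialize the pools, prepopulate the 'Auto' pool with guest details
 * for the host and register the hv_kvop() handler with pvbus.
 */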
773 int
774 hv_kvp_attach(struct hv_ic_dev *dv)
775 {
776 	struct hv_channel *ch = dv->dv_ch;
777 	struct hv_softc *sc = ch->ch_sc;
778 	struct hv_kvp *kvp;
779 	int i;
780 
781 	dv->dv_buf = malloc(2 * PAGE_SIZE, M_DEVBUF, M_ZERO |
782 	    (cold ? M_NOWAIT : M_WAITOK));
783 	if (dv->dv_buf == NULL) {
784 		printf("%s: failed to allocate receive buffer\n",
785 		    sc->sc_dev.dv_xname);
786 		return (-1);
787 	}
788 
789 	dv->dv_priv = malloc(sizeof(struct hv_kvp), M_DEVBUF, M_ZERO |
790 	    (cold ? M_NOWAIT : M_WAITOK));
791 	if (dv->dv_priv == NULL) {
792 		free(dv->dv_buf, M_DEVBUF, 2 * PAGE_SIZE);
793 		printf("%s: failed to allocate KVP private data\n",
794 		    sc->sc_dev.dv_xname);
795 		return (-1);
796 	}
797 	kvp = dv->dv_priv;
798 
799 	pool_init(&kvp_entry_pool, sizeof(struct kvp_entry), 0, IPL_NET, 0,
800 	    "hvkvpl", NULL);
801 
802 	for (i = 0; i < NKVPPOOLS; i++)
803 		kvp_pool_init(&kvp->kvp_pool[i]);
804 
805 	/* Initialize 'Auto' pool */
806 	for (i = 0; i < nitems(kvp_pool_auto); i++) {
807 		if (kvp_pool_insert(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
808 		    kvp_pool_auto[i].keyname, kvp_pool_auto[i].value,
809 		    strlen(kvp_pool_auto[i].value), HV_KVP_REG_SZ))
810 			DPRINTF("%s: failed to insert into 'Auto' pool\n",
811 			    sc->sc_dev.dv_xname);
812 	}
813 
814 	sc->sc_pvbus->hv_kvop = hv_kvop;
815 	sc->sc_pvbus->hv_arg = dv;
816 
817 	return (0);
818 }
819 
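/* Convert a hex digit to its numeric value, or -1 if it isn't one. */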
820 static int
821 nibble(int ch)
822 {
823 	if (ch >= '0' && ch <= '9')
824 		return (ch - '0');
825 	if (ch >= 'A' && ch <= 'F')
826 		return (10 + ch - 'A');
827 	if (ch >= 'a' && ch <= 'f')
828 		return (10 + ch - 'a');
829 	return (-1);
830 }
831 
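/*
 * Resolve the MAC address passed by the host (hex digit pairs in
 * UTF-16LE, one separator character between octets) to a local
 * interface and report its best address and netmask as UTF-16LE
 * strings, preferring IPv4, then a global IPv6 address, then a
 * link-local one.
 */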
832 static int
833 kvp_get_ip_info(struct hv_kvp *kvp, const uint8_t *mac, uint8_t *family,
834     uint8_t *addr, uint8_t *netmask, size_t addrlen)
835 {
836 	struct ifnet *ifp;
837 	struct ifaddr *ifa, *ifa6, *ifa6ll;
838 	struct sockaddr_in *sin;
839 	struct sockaddr_in6 *sin6, sa6;
840 	uint8_t	enaddr[ETHER_ADDR_LEN];
841 	uint8_t ipaddr[INET6_ADDRSTRLEN];
842 	int i, j, lo, hi, s, af;
843 
844 	/* Convert from the UTF-16LE string format to binary */
845 	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 6) {
846 		if ((hi = nibble(mac[i])) == -1 ||
847 		    (lo = nibble(mac[i+2])) == -1)
848 			return (-1);
849 		enaddr[j++] = hi << 4 | lo;
850 	}
851 
852 	switch (*family) {
853 	case ADDR_FAMILY_NONE:
854 		af = AF_UNSPEC;
855 		break;
856 	case ADDR_FAMILY_IPV4:
857 		af = AF_INET;
858 		break;
859 	case ADDR_FAMILY_IPV6:
860 		af = AF_INET6;
861 		break;
862 	default:
863 		return (-1);
864 	}
865 
866 	KERNEL_LOCK();
867 	s = splnet();
868 
869 	TAILQ_FOREACH(ifp, &ifnet, if_list) {
870 		if (!memcmp(LLADDR(ifp->if_sadl), enaddr, ETHER_ADDR_LEN))
871 			break;
872 	}
873 	if (ifp == NULL) {
874 		splx(s);
875 		KERNEL_UNLOCK();
876 		return (-1);
877 	}
878 
879 	ifa6 = ifa6ll = NULL;
880 
881 	/* Try to find a best matching address, preferring IPv4 */
882 	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
883 		/*
884 		 * First IPv4 address is always a best match unless
885 		 * we were asked for an IPv6 address.
886 		 */
887 		if ((af == AF_INET || af == AF_UNSPEC) &&
888 		    (ifa->ifa_addr->sa_family == AF_INET)) {
889 			af = AF_INET;
890 			goto found;
891 		}
892 		if ((af == AF_INET6 || af == AF_UNSPEC) &&
893 		    (ifa->ifa_addr->sa_family == AF_INET6)) {
894 			if (!IN6_IS_ADDR_LINKLOCAL(
895 			    &satosin6(ifa->ifa_addr)->sin6_addr)) {
896 				/* Done if we're looking for an IPv6 address */
897 				if (af == AF_INET6)
898 					goto found;
899 				/* Stick to the first one */
900 				if (ifa6 == NULL)
901 					ifa6 = ifa;
902 			} else	/* Pick the last one */
903 				ifa6ll = ifa;
904 		}
905 	}
906 	/* If we haven't found any IPv4 or IPv6 direct matches... */
907 	if (ifa == NULL) {
908 		/* ... try the last global IPv6 address... */
909 		if (ifa6 != NULL)
910 			ifa = ifa6;
911 		/* ... or the last link-local...  */
912 		else if (ifa6ll != NULL)
913 			ifa = ifa6ll;
914 		else {
915 			splx(s);
916 			KERNEL_UNLOCK();
917 			return (-1);
918 		}
919 	}
920  found:
921 	switch (af) {
922 	case AF_INET:
923 		sin = satosin(ifa->ifa_addr);
924 		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
925 		copyout_utf16le(addr, ipaddr, addrlen, INET_ADDRSTRLEN);
926 
927 		sin = satosin(ifa->ifa_netmask);
928 		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
929 		copyout_utf16le(netmask, ipaddr, addrlen, INET_ADDRSTRLEN);
930 
931 		*family = ADDR_FAMILY_IPV4;
932 		break;
933 	case AF_UNSPEC:
934 	case AF_INET6:
935 		sin6 = satosin6(ifa->ifa_addr);
936 		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
937 			sa6 = *satosin6(ifa->ifa_addr);
938 			sa6.sin6_addr.s6_addr16[1] = 0;
939 			sin6 = &sa6;
940 		}
941 		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
942 		copyout_utf16le(addr, ipaddr, addrlen, INET6_ADDRSTRLEN);
943 
944 		sin6 = satosin6(ifa->ifa_netmask);
945 		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
946 		copyout_utf16le(netmask, ipaddr, addrlen, INET6_ADDRSTRLEN);
947 
948 		*family = ADDR_FAMILY_IPV6;
949 		break;
950 	}
951 
952 	splx(s);
953 	KERNEL_UNLOCK();
954 
955 	return (0);
956 }
957 
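/*
 * Dispatch a single KVP request from the host: SET imports into the
 * 'External' and 'Guest/Parameters' pools, DELETE removes from
 * 'External', ENUMERATE walks the 'Auto' and 'Guest' pools and
 * GET_IP_INFO reports interface addresses.  The result is returned
 * to the host via kvh_err.
 */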
958 static void
959 hv_kvp_process(struct hv_kvp *kvp, struct vmbus_icmsg_kvp *msg)
960 {
961 	union hv_kvp_hdr *kvh = &msg->ic_kvh;
962 	union hv_kvp_msg *kvm = &msg->ic_kvm;
963 
964 	switch (kvh->kvh_op) {
965 	case HV_KVP_OP_SET:
966 		if (kvh->kvh_pool == HV_KVP_POOL_AUTO_EXTERNAL &&
967 		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_AUTO_EXTERNAL],
968 		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
969 		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
970 		    kvm->kvm_val.kvm_valtype)) {
971 			DPRINTF("%s: failed to import into 'Guest/Parameters'"
972 			    " pool\n", __func__);
973 			kvh->kvh_err = HV_KVP_S_CONT;
974 		} else if (kvh->kvh_pool == HV_KVP_POOL_EXTERNAL &&
975 		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
976 		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
977 		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
978 		    kvm->kvm_val.kvm_valtype)) {
979 			DPRINTF("%s: failed to import into 'External' pool\n",
980 			    __func__);
981 			kvh->kvh_err = HV_KVP_S_CONT;
982 		} else if (kvh->kvh_pool != HV_KVP_POOL_AUTO_EXTERNAL &&
983 		    kvh->kvh_pool != HV_KVP_POOL_EXTERNAL) {
984 			kvh->kvh_err = HV_KVP_S_CONT;
985 		} else
986 			kvh->kvh_err = HV_KVP_S_OK;
987 		break;
988 	case HV_KVP_OP_DELETE:
989 		if (kvh->kvh_pool != HV_KVP_POOL_EXTERNAL ||
990 		    kvp_pool_remove(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
991 		    kvm->kvm_del.kvm_key, kvm->kvm_del.kvm_keylen)) {
992 			DPRINTF("%s: failed to remove from 'External' pool\n",
993 			    __func__);
994 			kvh->kvh_err = HV_KVP_S_CONT;
995 		} else
996 			kvh->kvh_err = HV_KVP_S_OK;
997 		break;
998 	case HV_KVP_OP_ENUMERATE:
999 		if (kvh->kvh_pool == HV_KVP_POOL_AUTO &&
1000 		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
1001 		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
1002 		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
1003 		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
1004 			kvh->kvh_err = HV_KVP_S_CONT;
1005 		else if (kvh->kvh_pool == HV_KVP_POOL_GUEST &&
1006 		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_GUEST],
1007 		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
1008 		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
1009 		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
1010 			kvh->kvh_err = HV_KVP_S_CONT;
1011 		else
1012 			kvh->kvh_err = HV_KVP_S_OK;
1013 		break;
1014 	case HV_KVP_OP_GET_IP_INFO:
1015 		if (VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver) <= 4) {
1016 			struct vmbus_icmsg_kvp_addr *amsg;
1017 			struct hv_kvp_msg_addr *kva;
1018 
1019 			amsg = (struct vmbus_icmsg_kvp_addr *)msg;
1020 			kva = &amsg->ic_kvm;
1021 
1022 			if (kvp_get_ip_info(kvp, kva->kvm_mac,
1023 			    &kva->kvm_family, kva->kvm_addr,
1024 			    kva->kvm_netmask, sizeof(kva->kvm_addr)))
1025 				kvh->kvh_err = HV_KVP_S_CONT;
1026 			else
1027 				kvh->kvh_err = HV_KVP_S_OK;
1028 		} else {
1029 			DPRINTF("KVP GET_IP_INFO fw %u.%u msg %u.%u dsize=%u\n",
1030 			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_fwver),
1031 			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_fwver),
1032 			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver),
1033 			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_msgver),
1034 			    msg->ic_hdr.ic_dsize);
1035 			kvh->kvh_err = HV_KVP_S_CONT;
1036 		}
1037 		break;
1038 	default:
1039 		DPRINTF("KVP message op %u pool %u\n", kvh->kvh_op,
1040 		    kvh->kvh_pool);
1041 		kvh->kvh_err = HV_KVP_S_CONT;
1042 	}
1043 }
1044 
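/*
 * KVP channel callback: unlike the other IC services this drains all
 * pending messages, negotiating versions according to the VMBus
 * protocol in use and passing KVP payloads to hv_kvp_process().
 */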
1045 void
1046 hv_kvp(void *arg)
1047 {
1048 	struct hv_ic_dev *dv = arg;
1049 	struct hv_channel *ch = dv->dv_ch;
1050 	struct hv_softc *sc = ch->ch_sc;
1051 	struct hv_kvp *kvp = dv->dv_priv;
1052 	struct vmbus_icmsg_hdr *hdr;
1053 	uint64_t rid;
1054 	uint32_t fwver, msgver, rlen;
1055 	int rv;
1056 
1057 	for (;;) {
1058 		rv = hv_channel_recv(ch, dv->dv_buf, 2 * PAGE_SIZE,
1059 		    &rlen, &rid, 0);
1060 		if (rv || rlen == 0) {
1061 			if (rv != EAGAIN)
1062 				DPRINTF("%s: kvp rv=%d rlen=%u\n",
1063 				    sc->sc_dev.dv_xname, rv, rlen);
1064 			return;
1065 		}
1066 		if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
1067 			DPRINTF("%s: kvp short read rlen=%u\n",
1068 			    sc->sc_dev.dv_xname, rlen);
1069 			return;
1070 		}
1071 		hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
1072 		switch (hdr->ic_type) {
1073 		case VMBUS_ICMSG_TYPE_NEGOTIATE:
1074 			switch (sc->sc_proto) {
1075 			case VMBUS_VERSION_WS2008:
1076 				fwver = VMBUS_IC_VERSION(1, 0);
1077 				msgver = VMBUS_IC_VERSION(1, 0);
1078 				break;
1079 			case VMBUS_VERSION_WIN7:
1080 				fwver = VMBUS_IC_VERSION(3, 0);
1081 				msgver = VMBUS_IC_VERSION(3, 0);
1082 				break;
1083 			default:
1084 				fwver = VMBUS_IC_VERSION(3, 0);
1085 				msgver = VMBUS_IC_VERSION(4, 0);
1086 			}
1087 			hv_ic_negotiate(hdr, &rlen, fwver, msgver);
1088 			break;
1089 		case VMBUS_ICMSG_TYPE_KVP:
1090 			if (hdr->ic_dsize >= sizeof(union hv_kvp_hdr))
1091 				hv_kvp_process(kvp,
1092 				    (struct vmbus_icmsg_kvp *)hdr);
1093 			else
1094 				printf("%s: message too short: %u\n",
1095 				    sc->sc_dev.dv_xname, hdr->ic_dsize);
1096 			break;
1097 		default:
1098 			printf("%s: unhandled kvp message type %u\n",
1099 			    sc->sc_dev.dv_xname, hdr->ic_type);
1100 			continue;
1101 		}
1102 		hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
1103 		    VMBUS_ICMSG_FLAG_RESPONSE;
1104 		hv_channel_send(ch, dv->dv_buf, rlen, rid,
1105 		    VMBUS_CHANPKT_TYPE_INBAND, 0);
1106 	}
1107 }
1108 
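/*
 * Split a "<pool>/<key>" string from hv_kvop() into a pool index and
 * the bare key, e.g. "Guest/foo" selects the 'Guest' pool with key
 * "foo".  Returns -1 if the pool prefix is missing or unknown.
 */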
1109 static int
1110 kvp_poolname(char **key)
1111 {
1112 	char *p;
1113 	int i, rv = -1;
1114 
1115 	if ((p = strrchr(*key, '/')) == NULL)
1116 		return (rv);
1117 	*p = '\0';
1118 	for (i = 0; i < nitems(kvp_pools); i++) {
1119 		if (strncasecmp(*key, kvp_pools[i].poolname,
1120 		    kvp_pools[i].poolnamelen) == 0) {
1121 			rv = kvp_pools[i].poolidx;
1122 			break;
1123 		}
1124 	}
1125 	if (rv >= 0)
1126 		*key = ++p;
1127 	return (rv);
1128 }
1129 
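/*
 * pvbus key/value entry point: with an empty key, list the keys of the
 * selected pool as a newline-separated string; otherwise read the value
 * back, or write it into the 'Auto' or 'Guest' pool.
 */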
1130 int
1131 hv_kvop(void *arg, int op, char *key, char *val, size_t vallen)
1132 {
1133 	struct hv_ic_dev *dv = arg;
1134 	struct hv_kvp *kvp = dv->dv_priv;
1135 	struct kvp_pool *kvpl;
1136 	int next, pool, error = 0;
1137 	char *vp = val;
1138 	size_t keylen;
1139 
1140 	pool = kvp_poolname(&key);
1141 	if (pool == -1)
1142 		return (EINVAL);
1143 
1144 	kvpl = &kvp->kvp_pool[pool];
1145 	if (strlen(key) == 0) {
1146 		for (next = 0; next < MAXPOOLENTS; next++) {
1147 			if ((val + vallen < vp + HV_KVP_MAX_KEY_SIZE / 2) ||
1148 			    kvp_pool_keys(kvpl, next, vp, &keylen))
1149 				goto out;
1150 			if (strlcat(val, "\n", vallen) >= vallen)
1151 				goto out;
1152 			vp += keylen;
1153 		}
1154  out:
1155 		if (vp > val)
1156 			*(vp - 1) = '\0';
1157 		return (0);
1158 	}
1159 
1160 	if (op == PVBUS_KVWRITE) {
1161 		if (pool == HV_KVP_POOL_AUTO)
1162 			error = kvp_pool_update(kvpl, key, val, vallen,
1163 			    HV_KVP_REG_SZ);
1164 		else if (pool == HV_KVP_POOL_GUEST)
1165 			error = kvp_pool_insert(kvpl, key, val, vallen,
1166 			    HV_KVP_REG_SZ);
1167 		else
1168 			error = EINVAL;
1169 	} else
1170 		error = kvp_pool_extract(kvpl, key, val, vallen);
1171 
1172 	return (error);
1173 }
1174