/*-
 * Copyright (c) 2009-2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
 */

#include <sys/param.h>

/* Hyperv requires locked atomic operations */
#ifndef MULTIPROCESSOR
#define _HYPERVMPATOMICS
#define MULTIPROCESSOR
#endif
#include <sys/atomic.h>
#ifdef _HYPERVMPATOMICS
#undef MULTIPROCESSOR
#undef _HYPERVMPATOMICS
#endif

#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/task.h>
#include <sys/sensors.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pv/pvvar.h>
#include <dev/pv/hypervreg.h>
#include <dev/pv/hypervvar.h>
#include <dev/pv/hypervicreg.h>

struct hv_ic_dev;

#define NKVPPOOLS	4
#define MAXPOOLENTS	1023

struct kvp_entry {
	int			kpe_index;
	uint32_t		kpe_valtype;
	uint8_t			kpe_key[HV_KVP_MAX_KEY_SIZE / 2];
	uint8_t			kpe_val[HV_KVP_MAX_VAL_SIZE / 2];
	TAILQ_ENTRY(kvp_entry)	kpe_entry;
};
TAILQ_HEAD(kvp_list, kvp_entry);

struct kvp_pool {
	struct kvp_list		kvp_entries;
	struct mutex		kvp_lock;
	u_int			kvp_index;
};

struct pool kvp_entry_pool;

struct hv_kvp {
	struct kvp_pool		kvp_pool[NKVPPOOLS];
};

int	hv_heartbeat_attach(struct hv_ic_dev *);
void	hv_heartbeat(void *);
int	hv_kvp_attach(struct hv_ic_dev *);
void	hv_kvp(void *);
int	hv_kvop(void *, int, char *, char *, size_t);
int	hv_shutdown_attach(struct hv_ic_dev *);
void	hv_shutdown(void *);
int	hv_timesync_attach(struct hv_ic_dev *);
void	hv_timesync(void *);

static struct hv_ic_dev {
	const char		 *dv_name;
	const struct hv_guid	 *dv_type;
	int			(*dv_attach)(struct hv_ic_dev *);
	void			(*dv_handler)(void *);
	struct hv_channel	 *dv_ch;
	uint8_t			 *dv_buf;
	void			 *dv_priv;
} hv_ic_devs[] = {
	{
		"heartbeat",
		&hv_guid_heartbeat,
		hv_heartbeat_attach,
		hv_heartbeat
	},
	{
		"kvp",
		&hv_guid_kvp,
		hv_kvp_attach,
		hv_kvp
	},
	{
		"shutdown",
		&hv_guid_shutdown,
		hv_shutdown_attach,
		hv_shutdown
	},
	{
		"timesync",
		&hv_guid_timesync,
		hv_timesync_attach,
		hv_timesync
	}
};

static const struct {
	enum hv_kvp_pool	 poolidx;
	const char		*poolname;
	size_t			 poolnamelen;
} kvp_pools[] = {
	{ HV_KVP_POOL_EXTERNAL,		"External",	sizeof("External") },
	{ HV_KVP_POOL_GUEST,		"Guest",	sizeof("Guest") },
	{ HV_KVP_POOL_AUTO,		"Auto",		sizeof("Auto") },
	{ HV_KVP_POOL_AUTO_EXTERNAL,	"Guest/Parameters",
	  sizeof("Guest/Parameters") }
};

static const struct {
	int		 keyidx;
	const char	*keyname;
	const char	*value;
} kvp_pool_auto[] = {
	{ 0,	"FullyQualifiedDomainName",	hostname },
	{ 1,	"IntegrationServicesVersion",	"6.6.6" },
	{ 2,	"NetworkAddressIPv4",		"127.0.0.1" },
	{ 3,	"NetworkAddressIPv6",		"::1" },
	{ 4,	"OSBuildNumber",		osversion },
	{ 5,	"OSName",			ostype },
	{ 6,	"OSMajorVersion",		"6" },	/* free commit for mike */
	{ 7,	"OSMinorVersion",		&osrelease[2] },
	{ 8,	"OSVersion",			osrelease },
#ifdef __amd64__	/* As specified in SYSTEM_INFO.wProcessorArchitecture */
	{ 9,	"ProcessorArchitecture",	"9" }
#else
	{ 9,	"ProcessorArchitecture",	"0" }
#endif
};

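/*
 * Scan the channels offered by the host for the integration component
 * service GUIDs listed in hv_ic_devs[], run each service's attach
 * routine and open its channel with the service handler as callback.
 */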
void
hv_attach_icdevs(struct hv_softc *sc)
{
	struct hv_ic_dev *dv;
	struct hv_channel *ch;
	int i, header = 0;

	for (i = 0; i < nitems(hv_ic_devs); i++) {
		dv = &hv_ic_devs[i];

		TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
			if (ch->ch_state != HV_CHANSTATE_OFFERED)
				continue;
			if (ch->ch_flags & CHF_MONITOR)
				continue;
			if (memcmp(dv->dv_type, &ch->ch_type,
			    sizeof(ch->ch_type)) == 0)
				break;
		}
		if (ch == NULL)
			continue;

		dv->dv_ch = ch;

		/*
		 * These services are not performance critical and
		 * do not need batched reading.  Furthermore, some
		 * services such as KVP can only handle one message
		 * from the host at a time.
		 */
		dv->dv_ch->ch_flags &= ~CHF_BATCHED;

		if (dv->dv_attach && dv->dv_attach(dv) != 0)
			continue;

		if (hv_channel_open(ch, VMBUS_IC_BUFRINGSIZE, NULL, 0,
		    dv->dv_handler, dv)) {
			printf("%s: failed to open channel for %s\n",
			    sc->sc_dev.dv_xname, dv->dv_name);
			continue;
		}
		evcount_attach(&ch->ch_evcnt, dv->dv_name, &sc->sc_idtvec);

		if (!header) {
			printf("%s: %s", sc->sc_dev.dv_xname, dv->dv_name);
			header = 1;
		} else
			printf(", %s", dv->dv_name);
	}
	if (header)
		printf("\n");
}

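/*
 * Handle an IC version negotiation request: pick the highest framework
 * and message versions proposed by the host that we also support and
 * rewrite the message in place (bumping *rlen if needed) so the caller
 * can send it straight back as the response.
 */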
static inline void
hv_ic_negotiate(struct vmbus_icmsg_hdr *hdr, uint32_t *rlen, uint32_t fwver,
    uint32_t msgver)
{
	struct vmbus_icmsg_negotiate *msg;
	uint16_t propmin, propmaj, chosenmaj, chosenmin;
	int i;

	msg = (struct vmbus_icmsg_negotiate *)hdr;

	chosenmaj = chosenmin = 0;
	for (i = 0; i < msg->ic_fwver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(fwver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(fwver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	fwver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	chosenmaj = chosenmin = 0;
	for (; i < msg->ic_fwver_cnt + msg->ic_msgver_cnt; i++) {
		propmaj = VMBUS_ICVER_MAJOR(msg->ic_ver[i]);
		propmin = VMBUS_ICVER_MINOR(msg->ic_ver[i]);
		if (propmaj > chosenmaj &&
		    propmaj <= VMBUS_ICVER_MAJOR(msgver) &&
		    propmin >= chosenmin &&
		    propmin <= VMBUS_ICVER_MINOR(msgver)) {
			chosenmaj = propmaj;
			chosenmin = propmin;
		}
	}
	msgver = VMBUS_IC_VERSION(chosenmaj, chosenmin);

	msg->ic_fwver_cnt = 1;
	msg->ic_ver[0] = fwver;
	msg->ic_msgver_cnt = 1;
	msg->ic_ver[1] = msgver;
	hdr->ic_dsize = sizeof(*msg) + 2 * sizeof(uint32_t) -
	    sizeof(struct vmbus_icmsg_hdr);
	if (*rlen < sizeof(*msg) + 2 * sizeof(uint32_t))
		*rlen = sizeof(*msg) + 2 * sizeof(uint32_t);
}

int
hv_heartbeat_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	return (0);
}

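/*
 * Heartbeat channel callback: answer the host's heartbeat probe by
 * bumping the sequence counter and echoing the message back.
 */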
void
hv_heartbeat(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_heartbeat *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: heartbeat rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: heartbeat short read rlen=%u\n",
		    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_HEARTBEAT:
		msg = (struct vmbus_icmsg_heartbeat *)hdr;
		msg->ic_seq += 1;
		break;
	default:
		printf("%s: unhandled heartbeat message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}
	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);
}

static void
hv_shutdown_task(void *arg)
{
	struct hv_softc *sc = arg;
	pvbus_shutdown(&sc->sc_dev);
}

int
hv_shutdown_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	task_set(&sc->sc_sdtask, hv_shutdown_task, sc);

	return (0);
}

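/*
 * Shutdown channel callback: acknowledge a host-initiated shutdown
 * request and schedule the actual shutdown on the system task queue.
 */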
void
hv_shutdown(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_shutdown *msg;
	uint64_t rid;
	uint32_t rlen;
	int rv, shutdown = 0;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: shutdown rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: shutdown short read rlen=%u\n",
		    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_SHUTDOWN:
		msg = (struct vmbus_icmsg_shutdown *)hdr;
		if (msg->ic_haltflags == 0 || msg->ic_haltflags == 1) {
			shutdown = 1;
			hdr->ic_status = VMBUS_ICMSG_STATUS_OK;
		} else
			hdr->ic_status = VMBUS_ICMSG_STATUS_FAIL;
		break;
	default:
		printf("%s: unhandled shutdown message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);

	if (shutdown)
		task_add(systq, &sc->sc_sdtask);
}

int
hv_timesync_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;

	dv->dv_buf = malloc(PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));

	sc->sc_sensor.type = SENSOR_TIMEDELTA;
	sc->sc_sensor.status = SENSOR_S_UNKNOWN;

	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
	sensordev_install(&sc->sc_sensordev);

	return (0);
}

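/*
 * Timesync channel callback: compare the host-provided time sample
 * with the local clock and export the difference via the time delta
 * sensor.
 */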
void
hv_timesync(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct vmbus_icmsg_hdr *hdr;
	struct vmbus_icmsg_timesync *msg;
	struct timespec guest, host, diff;
	uint64_t tns;
	uint64_t rid;
	uint32_t rlen;
	int rv;

	rv = hv_channel_recv(ch, dv->dv_buf, PAGE_SIZE, &rlen, &rid, 0);
	if (rv || rlen == 0) {
		if (rv != EAGAIN)
			DPRINTF("%s: timesync rv=%d rlen=%u\n",
			    sc->sc_dev.dv_xname, rv, rlen);
		return;
	}
	if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
		DPRINTF("%s: timesync short read rlen=%u\n",
		    sc->sc_dev.dv_xname, rlen);
		return;
	}
	hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
	switch (hdr->ic_type) {
	case VMBUS_ICMSG_TYPE_NEGOTIATE:
		hv_ic_negotiate(hdr, &rlen, VMBUS_IC_VERSION(3, 0),
		    VMBUS_IC_VERSION(3, 0));
		break;
	case VMBUS_ICMSG_TYPE_TIMESYNC:
		msg = (struct vmbus_icmsg_timesync *)hdr;
		if (msg->ic_tsflags == VMBUS_ICMSG_TS_FLAG_SAMPLE) {
			microtime(&sc->sc_sensor.tv);
			nanotime(&guest);
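			/*
			 * ic_hvtime counts 100ns units since the Windows
			 * epoch (January 1, 1601 UTC); convert the sample
			 * to a Unix timespec before comparing clocks.
			 */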
			tns = (msg->ic_hvtime - 116444736000000000LL) * 100;
			host.tv_sec = tns / 1000000000LL;
			host.tv_nsec = tns % 1000000000LL;
			timespecsub(&guest, &host, &diff);
			sc->sc_sensor.value = (int64_t)diff.tv_sec *
			    1000000000LL + diff.tv_nsec;
			sc->sc_sensor.status = SENSOR_S_OK;
		}
		break;
	default:
		printf("%s: unhandled timesync message type %u\n",
		    sc->sc_dev.dv_xname, hdr->ic_type);
		return;
	}

	hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION | VMBUS_ICMSG_FLAG_RESPONSE;
	hv_channel_send(ch, dv->dv_buf, rlen, rid, VMBUS_CHANPKT_TYPE_INBAND, 0);
}

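/*
 * Minimal ASCII <-> UTF-16LE helpers: only the low byte of each 16-bit
 * code unit is carried over, so these are only correct for strings
 * that fit in 7-bit ASCII.
 */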
static inline int
copyout_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen * 2);

	for (i = j = 0; i < slen; i++, j += 2) {
		dp[j] = sp[i];
		dp[j + 1] = '\0';
	}
	return (j);
}

static inline int
copyin_utf16le(void *dst, const void *src, size_t dlen, size_t slen)
{
	const uint8_t *sp = src;
	uint8_t *dp = dst;
	int i, j;

	KASSERT(dlen >= slen / 2);

	for (i = j = 0; i < slen; i += 2, j++)
		dp[j] = sp[i];
	return (j);
}

static inline int
keycmp_utf16le(const uint8_t *key, const uint8_t *ukey, size_t ukeylen)
{
	int i, j;

	for (i = j = 0; i < ukeylen; i += 2, j++) {
		if (key[j] != ukey[i])
			return (key[j] > ukey[i] ?
			    key[j] - ukey[i] :
			    ukey[i] - key[j]);
	}
	return (0);
}

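/*
 * Backing store for the KVP pools, one list per HV_KVP_POOL_* index.
 * The import, export and remove helpers take keys and values in the
 * host's UTF-16LE encoding, while insert, update, extract and keys
 * operate on plain local strings.
 */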
static void
kvp_pool_init(struct kvp_pool *kvpl)
{
	TAILQ_INIT(&kvpl->kvp_entries);
	mtx_init(&kvpl->kvp_lock, IPL_NET);
	kvpl->kvp_index = 0;
}

static int
kvp_pool_insert(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0) {
			mtx_leave(&kvpl->kvp_lock);
			return (EEXIST);
		}
	}

	kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOMEM);
	}

	strlcpy(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2);

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_KEY_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

	TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_update(struct kvp_pool *kvpl, const char *key, const char *val,
    uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;
	int keylen = strlen(key);

	if (keylen > HV_KVP_MAX_KEY_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	if ((kpe->kpe_valtype = valtype) == HV_KVP_REG_SZ)
		strlcpy(kpe->kpe_val, val, HV_KVP_MAX_KEY_SIZE / 2);
	else
		memcpy(kpe->kpe_val, val, vallen);

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_import(struct kvp_pool *kvpl, const char *key, uint32_t keylen,
    const char *val, uint32_t vallen, uint32_t valtype)
{
	struct kvp_entry *kpe;

	if (keylen > HV_KVP_MAX_KEY_SIZE ||
	    vallen > HV_KVP_MAX_VAL_SIZE)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		kpe = pool_get(&kvp_entry_pool, PR_ZERO | PR_NOWAIT);
		if (kpe == NULL) {
			mtx_leave(&kvpl->kvp_lock);
			return (ENOMEM);
		}

		copyin_utf16le(kpe->kpe_key, key, HV_KVP_MAX_KEY_SIZE / 2,
		    keylen);

		kpe->kpe_index = kvpl->kvp_index++ & MAXPOOLENTS;

		TAILQ_INSERT_TAIL(&kvpl->kvp_entries, kpe, kpe_entry);
	}

	copyin_utf16le(kpe->kpe_val, val, HV_KVP_MAX_VAL_SIZE / 2, vallen);
	kpe->kpe_valtype = valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_export(struct kvp_pool *kvpl, uint32_t index, char *key,
    uint32_t *keylen, char *val, uint32_t *vallen, uint32_t *valtype)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (kpe->kpe_index == index)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	*keylen = copyout_utf16le(key, kpe->kpe_key, HV_KVP_MAX_KEY_SIZE,
	    strlen(kpe->kpe_key) + 1);
	*vallen = copyout_utf16le(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE,
	    strlen(kpe->kpe_val) + 1);
	*valtype = kpe->kpe_valtype;

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_remove(struct kvp_pool *kvpl, const char *key, uint32_t keylen)
{
	struct kvp_entry *kpe;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (keycmp_utf16le(kpe->kpe_key, key, keylen) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	TAILQ_REMOVE(&kvpl->kvp_entries, kpe, kpe_entry);

	mtx_leave(&kvpl->kvp_lock);

	pool_put(&kvp_entry_pool, kpe);

	return (0);
}

static int
kvp_pool_extract(struct kvp_pool *kvpl, const char *key, char *val,
    uint32_t vallen)
{
	struct kvp_entry *kpe;

	if (vallen < HV_KVP_MAX_VAL_SIZE / 2)
		return (ERANGE);

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (strcmp(kpe->kpe_key, key) == 0)
			break;
	}
	if (kpe == NULL) {
		mtx_leave(&kvpl->kvp_lock);
		return (ENOENT);
	}

	switch (kpe->kpe_valtype) {
	case HV_KVP_REG_SZ:
		strlcpy(val, kpe->kpe_val, HV_KVP_MAX_VAL_SIZE / 2);
		break;
	case HV_KVP_REG_U32:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%u",
		    *(uint32_t *)kpe->kpe_val);
		break;
	case HV_KVP_REG_U64:
		snprintf(val, HV_KVP_MAX_VAL_SIZE / 2, "%llu",
		    *(uint64_t *)kpe->kpe_val);
		break;
	}

	mtx_leave(&kvpl->kvp_lock);

	return (0);
}

static int
kvp_pool_keys(struct kvp_pool *kvpl, int next, char *key, size_t *keylen)
{
	struct kvp_entry *kpe;
	int iter = 0;

	mtx_enter(&kvpl->kvp_lock);

	TAILQ_FOREACH(kpe, &kvpl->kvp_entries, kpe_entry) {
		if (iter++ < next)
			continue;
		*keylen = strlen(kpe->kpe_key) + 1;
		strlcpy(key, kpe->kpe_key, *keylen);

		mtx_leave(&kvpl->kvp_lock);

		return (0);
	}

	mtx_leave(&kvpl->kvp_lock);

	return (-1);
}

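/*
 * Allocate the KVP receive buffer and per-pool state, seed the 'Auto'
 * pool with host-visible guest information and hook hv_kvop() up to
 * pvbus(4).
 */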
int
hv_kvp_attach(struct hv_ic_dev *dv)
{
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp;
	int i;

	dv->dv_buf = malloc(2 * PAGE_SIZE, M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_buf == NULL) {
		printf("%s: failed to allocate receive buffer\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}

	dv->dv_priv = malloc(sizeof(struct hv_kvp), M_DEVBUF, M_ZERO |
	    (cold ? M_NOWAIT : M_WAITOK));
	if (dv->dv_priv == NULL) {
		free(dv->dv_buf, M_DEVBUF, 2 * PAGE_SIZE);
		printf("%s: failed to allocate KVP private data\n",
		    sc->sc_dev.dv_xname);
		return (-1);
	}
	kvp = dv->dv_priv;

	pool_init(&kvp_entry_pool, sizeof(struct kvp_entry), 0, IPL_NET, 0,
	    "hvkvpl", NULL);

	for (i = 0; i < NKVPPOOLS; i++)
		kvp_pool_init(&kvp->kvp_pool[i]);

	/* Initialize 'Auto' pool */
	for (i = 0; i < nitems(kvp_pool_auto); i++) {
		if (kvp_pool_insert(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvp_pool_auto[i].keyname, kvp_pool_auto[i].value,
		    strlen(kvp_pool_auto[i].value), HV_KVP_REG_SZ))
			DPRINTF("%s: failed to insert into 'Auto' pool\n",
			    sc->sc_dev.dv_xname);
	}

	sc->sc_pvbus->hv_kvop = hv_kvop;
	sc->sc_pvbus->hv_arg = dv;

	return (0);
}

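/* Convert a single hexadecimal digit to its numeric value, -1 if invalid */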
static int
nibble(int ch)
{
	if (ch >= '0' && ch <= '9')
		return (ch - '0');
	if (ch >= 'A' && ch <= 'F')
		return (10 + ch - 'A');
	if (ch >= 'a' && ch <= 'f')
		return (10 + ch - 'a');
	return (-1);
}

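/*
 * Look up the interface matching the MAC address given by the host as
 * a UTF-16LE string and return its best matching address and netmask,
 * preferring IPv4 unless the host explicitly asked for IPv6.
 */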
static int
kvp_get_ip_info(struct hv_kvp *kvp, const uint8_t *mac, uint8_t *family,
    uint8_t *addr, uint8_t *netmask, size_t addrlen)
{
	struct ifnet *ifp;
	struct ifaddr *ifa, *ifa6, *ifa6ll;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sa6;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint8_t ipaddr[INET6_ADDRSTRLEN];
	int i, j, lo, hi, s, af;

	/* Convert from the UTF-16LE string format to binary */
	for (i = 0, j = 0; j < ETHER_ADDR_LEN; i += 6) {
		if ((hi = nibble(mac[i])) == -1 ||
		    (lo = nibble(mac[i+2])) == -1)
			return (-1);
		enaddr[j++] = hi << 4 | lo;
	}

	switch (*family) {
	case ADDR_FAMILY_NONE:
		af = AF_UNSPEC;
		break;
	case ADDR_FAMILY_IPV4:
		af = AF_INET;
		break;
	case ADDR_FAMILY_IPV6:
		af = AF_INET6;
		break;
	default:
		return (-1);
	}

	KERNEL_LOCK();
	s = splnet();

	TAILQ_FOREACH(ifp, &ifnetlist, if_list) {
		if (!memcmp(LLADDR(ifp->if_sadl), enaddr, ETHER_ADDR_LEN))
			break;
	}
	if (ifp == NULL) {
		splx(s);
		KERNEL_UNLOCK();
		return (-1);
	}

	ifa6 = ifa6ll = NULL;

	/* Try to find a best matching address, preferring IPv4 */
	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
		/*
		 * First IPv4 address is always a best match unless
		 * we were asked for an IPv6 address.
		 */
		if ((af == AF_INET || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET)) {
			af = AF_INET;
			goto found;
		}
		if ((af == AF_INET6 || af == AF_UNSPEC) &&
		    (ifa->ifa_addr->sa_family == AF_INET6)) {
			if (!IN6_IS_ADDR_LINKLOCAL(
			    &satosin6(ifa->ifa_addr)->sin6_addr)) {
				/* Done if we're looking for an IPv6 address */
				if (af == AF_INET6)
					goto found;
				/* Stick to the first one */
				if (ifa6 == NULL)
					ifa6 = ifa;
			} else	/* Pick the last one */
				ifa6ll = ifa;
		}
	}
	/* If we haven't found any IPv4 or IPv6 direct matches... */
	if (ifa == NULL) {
		/* ... try the first global IPv6 address... */
		if (ifa6 != NULL)
			ifa = ifa6;
		/* ... or the last link-local... */
		else if (ifa6ll != NULL)
			ifa = ifa6ll;
		else {
			splx(s);
			KERNEL_UNLOCK();
			return (-1);
		}
	}
 found:
	switch (af) {
	case AF_INET:
		sin = satosin(ifa->ifa_addr);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET_ADDRSTRLEN);

		sin = satosin(ifa->ifa_netmask);
		inet_ntop(AF_INET, &sin->sin_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV4;
		break;
	case AF_UNSPEC:
	case AF_INET6:
		sin6 = satosin6(ifa->ifa_addr);
		if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			sa6 = *satosin6(ifa->ifa_addr);
			sa6.sin6_addr.s6_addr16[1] = 0;
			sin6 = &sa6;
		}
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(addr, ipaddr, addrlen, INET6_ADDRSTRLEN);

		sin6 = satosin6(ifa->ifa_netmask);
		inet_ntop(AF_INET6, &sin6->sin6_addr, ipaddr, sizeof(ipaddr));
		copyout_utf16le(netmask, ipaddr, addrlen, INET6_ADDRSTRLEN);

		*family = ADDR_FAMILY_IPV6;
		break;
	}

	splx(s);
	KERNEL_UNLOCK();

	return (0);
}

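/*
 * Dispatch a single KVP request from the host: SET stores into the
 * 'Guest/Parameters' or 'External' pool, DELETE removes from the
 * 'External' pool, ENUMERATE walks the 'Auto' and 'Guest' pools and
 * GET_IP_INFO reports the address configuration of the interface
 * selected by its MAC address.
 */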
static void
hv_kvp_process(struct hv_kvp *kvp, struct vmbus_icmsg_kvp *msg)
{
	union hv_kvp_hdr *kvh = &msg->ic_kvh;
	union hv_kvp_msg *kvm = &msg->ic_kvm;

	switch (kvh->kvh_op) {
	case HV_KVP_OP_SET:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_AUTO_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'Guest/Parameters'"
			    " pool\n", __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool == HV_KVP_POOL_EXTERNAL &&
		    kvp_pool_import(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_val.kvm_key, kvm->kvm_val.kvm_keylen,
		    kvm->kvm_val.kvm_val, kvm->kvm_val.kvm_vallen,
		    kvm->kvm_val.kvm_valtype)) {
			DPRINTF("%s: failed to import into 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else if (kvh->kvh_pool != HV_KVP_POOL_AUTO_EXTERNAL &&
		    kvh->kvh_pool != HV_KVP_POOL_EXTERNAL) {
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_DELETE:
		if (kvh->kvh_pool != HV_KVP_POOL_EXTERNAL ||
		    kvp_pool_remove(&kvp->kvp_pool[HV_KVP_POOL_EXTERNAL],
		    kvm->kvm_del.kvm_key, kvm->kvm_del.kvm_keylen)) {
			DPRINTF("%s: failed to remove from 'External' pool\n",
			    __func__);
			kvh->kvh_err = HV_KVP_S_CONT;
		} else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_ENUMERATE:
		if (kvh->kvh_pool == HV_KVP_POOL_AUTO &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_AUTO],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else if (kvh->kvh_pool == HV_KVP_POOL_GUEST &&
		    kvp_pool_export(&kvp->kvp_pool[HV_KVP_POOL_GUEST],
		    kvm->kvm_enum.kvm_index, kvm->kvm_enum.kvm_key,
		    &kvm->kvm_enum.kvm_keylen, kvm->kvm_enum.kvm_val,
		    &kvm->kvm_enum.kvm_vallen, &kvm->kvm_enum.kvm_valtype))
			kvh->kvh_err = HV_KVP_S_CONT;
		else
			kvh->kvh_err = HV_KVP_S_OK;
		break;
	case HV_KVP_OP_GET_IP_INFO:
		if (VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver) <= 4) {
			struct vmbus_icmsg_kvp_addr *amsg;
			struct hv_kvp_msg_addr *kva;

			amsg = (struct vmbus_icmsg_kvp_addr *)msg;
			kva = &amsg->ic_kvm;

			if (kvp_get_ip_info(kvp, kva->kvm_mac,
			    &kva->kvm_family, kva->kvm_addr,
			    kva->kvm_netmask, sizeof(kva->kvm_addr)))
				kvh->kvh_err = HV_KVP_S_CONT;
			else
				kvh->kvh_err = HV_KVP_S_OK;
		} else {
			DPRINTF("KVP GET_IP_INFO fw %u.%u msg %u.%u dsize=%u\n",
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_fwver),
			    VMBUS_ICVER_MAJOR(msg->ic_hdr.ic_msgver),
			    VMBUS_ICVER_MINOR(msg->ic_hdr.ic_msgver),
			    msg->ic_hdr.ic_dsize);
			kvh->kvh_err = HV_KVP_S_CONT;
		}
		break;
	default:
		DPRINTF("KVP message op %u pool %u\n", kvh->kvh_op,
		    kvh->kvh_pool);
		kvh->kvh_err = HV_KVP_S_CONT;
	}
}

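/*
 * KVP channel callback: drain all pending messages, handle version
 * negotiation and KVP requests and send each response back in-band.
 */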
void
hv_kvp(void *arg)
{
	struct hv_ic_dev *dv = arg;
	struct hv_channel *ch = dv->dv_ch;
	struct hv_softc *sc = ch->ch_sc;
	struct hv_kvp *kvp = dv->dv_priv;
	struct vmbus_icmsg_hdr *hdr;
	uint64_t rid;
	uint32_t fwver, msgver, rlen;
	int rv;

	for (;;) {
		rv = hv_channel_recv(ch, dv->dv_buf, 2 * PAGE_SIZE,
		    &rlen, &rid, 0);
		if (rv || rlen == 0) {
			if (rv != EAGAIN)
				DPRINTF("%s: kvp rv=%d rlen=%u\n",
				    sc->sc_dev.dv_xname, rv, rlen);
			return;
		}
		if (rlen < sizeof(struct vmbus_icmsg_hdr)) {
			DPRINTF("%s: kvp short read rlen=%u\n",
			    sc->sc_dev.dv_xname, rlen);
			return;
		}
		hdr = (struct vmbus_icmsg_hdr *)dv->dv_buf;
		switch (hdr->ic_type) {
		case VMBUS_ICMSG_TYPE_NEGOTIATE:
			switch (sc->sc_proto) {
			case VMBUS_VERSION_WS2008:
				fwver = VMBUS_IC_VERSION(1, 0);
				msgver = VMBUS_IC_VERSION(1, 0);
				break;
			case VMBUS_VERSION_WIN7:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(3, 0);
				break;
			default:
				fwver = VMBUS_IC_VERSION(3, 0);
				msgver = VMBUS_IC_VERSION(4, 0);
			}
			hv_ic_negotiate(hdr, &rlen, fwver, msgver);
			break;
		case VMBUS_ICMSG_TYPE_KVP:
			if (hdr->ic_dsize >= sizeof(union hv_kvp_hdr))
				hv_kvp_process(kvp,
				    (struct vmbus_icmsg_kvp *)hdr);
			else
				printf("%s: message too short: %u\n",
				    sc->sc_dev.dv_xname, hdr->ic_dsize);
			break;
		default:
			printf("%s: unhandled kvp message type %u\n",
			    sc->sc_dev.dv_xname, hdr->ic_type);
			continue;
		}
		hdr->ic_flags = VMBUS_ICMSG_FLAG_TRANSACTION |
		    VMBUS_ICMSG_FLAG_RESPONSE;
		hv_channel_send(ch, dv->dv_buf, rlen, rid,
		    VMBUS_CHANPKT_TYPE_INBAND, 0);
	}
}

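/*
 * Split a "Pool/Key" string: return the pool index matching the
 * prefix and advance *key past the separator so it points at the
 * key name proper.
 */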
static int
kvp_poolname(char **key)
{
	char *p;
	int i, rv = -1;

	if ((p = strrchr(*key, '/')) == NULL)
		return (rv);
	*p = '\0';
	for (i = 0; i < nitems(kvp_pools); i++) {
		if (strncasecmp(*key, kvp_pools[i].poolname,
		    kvp_pools[i].poolnamelen) == 0) {
			rv = kvp_pools[i].poolidx;
			break;
		}
	}
	if (rv >= 0)
		*key = ++p;
	return (rv);
}

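/*
 * Key-value operation handler registered with pvbus(4): an empty key
 * name (just a "Pool/" prefix) lists the keys in the pool, writes go
 * to the 'Auto' or 'Guest' pools and reads return the value of the
 * named key.
 */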
int
hv_kvop(void *arg, int op, char *key, char *val, size_t vallen)
{
	struct hv_ic_dev *dv = arg;
	struct hv_kvp *kvp = dv->dv_priv;
	struct kvp_pool *kvpl;
	int next, pool, error = 0;
	char *vp = val;
	size_t keylen;

	pool = kvp_poolname(&key);
	if (pool == -1)
		return (EINVAL);

	kvpl = &kvp->kvp_pool[pool];
	if (strlen(key) == 0) {
		for (next = 0; next < MAXPOOLENTS; next++) {
			if (val + vallen < vp + HV_KVP_MAX_KEY_SIZE / 2)
				return (ERANGE);
			if (kvp_pool_keys(kvpl, next, vp, &keylen))
				goto out;
			if (strlcat(val, "\n", vallen) >= vallen)
				return (ERANGE);
			vp += keylen;
		}
 out:
		if (vp > val)
			*(vp - 1) = '\0';
		return (0);
	}

	if (op == PVBUS_KVWRITE) {
		if (pool == HV_KVP_POOL_AUTO)
			error = kvp_pool_update(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else if (pool == HV_KVP_POOL_GUEST)
			error = kvp_pool_insert(kvpl, key, val, vallen,
			    HV_KVP_REG_SZ);
		else
			error = EINVAL;
	} else
		error = kvp_pool_extract(kvpl, key, val, vallen);

	return (error);
}