1 /* $NetBSD: vmbus.c,v 1.18 2022/05/20 13:55:17 nonaka Exp $ */
2 /* $OpenBSD: hyperv.c,v 1.43 2017/06/27 13:56:15 mikeb Exp $ */
3
4 /*-
5 * Copyright (c) 2009-2012 Microsoft Corp.
6 * Copyright (c) 2012 NetApp Inc.
7 * Copyright (c) 2012 Citrix Inc.
8 * Copyright (c) 2016 Mike Belopuhov <mike@esdenera.com>
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
16 * disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * The OpenBSD port was done under funding by Esdenera Networks GmbH.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: vmbus.c,v 1.18 2022/05/20 13:55:17 nonaka Exp $");
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/device.h>
43 #include <sys/atomic.h>
44 #include <sys/bitops.h>
45 #include <sys/bus.h>
46 #include <sys/cpu.h>
47 #include <sys/intr.h>
48 #include <sys/kmem.h>
49 #include <sys/kthread.h>
50 #include <sys/module.h>
51 #include <sys/mutex.h>
52 #include <sys/xcall.h>
53
54 #include <uvm/uvm_extern.h>
55
56 #include <dev/hyperv/vmbusvar.h>
57
58 #define VMBUS_GPADL_START 0xffff /* 0x10000 effectively */
59
60 /* Command submission flags */
61 #define HCF_SLEEPOK 0x0000
62 #define HCF_NOSLEEP 0x0002 /* M_NOWAIT */
63 #define HCF_NOREPLY 0x0004
64
65 static void vmbus_attach_deferred(device_t);
66 static int vmbus_attach_print(void *, const char *);
67 static int vmbus_alloc_dma(struct vmbus_softc *);
68 static void vmbus_free_dma(struct vmbus_softc *);
69 static int vmbus_init_interrupts(struct vmbus_softc *);
70 static void vmbus_deinit_interrupts(struct vmbus_softc *);
71 static void vmbus_init_interrupts_pcpu(void *, void *);
72 static void vmbus_deinit_interrupts_pcpu(void *, void *);
73
74 static int vmbus_connect(struct vmbus_softc *);
75 static int vmbus_cmd(struct vmbus_softc *, void *, size_t, void *, size_t,
76 int);
77 static int vmbus_start(struct vmbus_softc *, struct vmbus_msg *, paddr_t);
78 static int vmbus_reply(struct vmbus_softc *, struct vmbus_msg *);
79 static uint16_t vmbus_intr_signal(struct vmbus_softc *, paddr_t);
80 static void vmbus_event_proc(void *, struct cpu_info *);
81 static void vmbus_event_proc_compat(void *, struct cpu_info *);
82 static void vmbus_message_proc(void *, struct cpu_info *);
83 static void vmbus_message_softintr(void *);
84 static void vmbus_channel_response(struct vmbus_softc *,
85 struct vmbus_chanmsg_hdr *);
86 static void vmbus_channel_offer(struct vmbus_softc *,
87 struct vmbus_chanmsg_hdr *);
88 static void vmbus_channel_rescind(struct vmbus_softc *,
89 struct vmbus_chanmsg_hdr *);
90 static void vmbus_channel_delivered(struct vmbus_softc *,
91 struct vmbus_chanmsg_hdr *);
92 static int vmbus_channel_scan(struct vmbus_softc *);
93 static void vmbus_channel_cpu_default(struct vmbus_channel *);
94 static void vmbus_process_offer(struct vmbus_softc *,
95 struct vmbus_chanmsg_choffer *);
96 static void vmbus_process_rescind(struct vmbus_softc *,
97 struct vmbus_chanmsg_chrescind *);
98 static struct vmbus_channel *
99 vmbus_channel_lookup(struct vmbus_softc *, uint32_t);
100 static int vmbus_channel_ring_create(struct vmbus_channel *, uint32_t);
101 static void vmbus_channel_ring_destroy(struct vmbus_channel *);
102 static void vmbus_channel_detach(struct vmbus_channel *);
103 static void vmbus_chevq_enqueue(struct vmbus_softc *, int, void *);
104 static void vmbus_process_chevq(void *);
105 static void vmbus_chevq_thread(void *);
106 static void vmbus_devq_enqueue(struct vmbus_softc *, int,
107 struct vmbus_channel *);
108 static void vmbus_process_devq(void *);
109 static void vmbus_devq_thread(void *);
110 static void vmbus_subchannel_devq_thread(void *);
111
112 static struct vmbus_softc *vmbus_sc;
113
114 static const struct {
115 int hmd_response;
116 int hmd_request;
117 void (*hmd_handler)(struct vmbus_softc *,
118 struct vmbus_chanmsg_hdr *);
119 } vmbus_msg_dispatch[] = {
120 { 0, 0, NULL },
121 { VMBUS_CHANMSG_CHOFFER, 0, vmbus_channel_offer },
122 { VMBUS_CHANMSG_CHRESCIND, 0, vmbus_channel_rescind },
123 { VMBUS_CHANMSG_CHREQUEST, VMBUS_CHANMSG_CHOFFER, NULL },
124 { VMBUS_CHANMSG_CHOFFER_DONE, 0, vmbus_channel_delivered },
125 { VMBUS_CHANMSG_CHOPEN, 0, NULL },
126 { VMBUS_CHANMSG_CHOPEN_RESP, VMBUS_CHANMSG_CHOPEN,
127 vmbus_channel_response },
128 { VMBUS_CHANMSG_CHCLOSE, 0, NULL },
129 { VMBUS_CHANMSG_GPADL_CONN, 0, NULL },
130 { VMBUS_CHANMSG_GPADL_SUBCONN, 0, NULL },
131 { VMBUS_CHANMSG_GPADL_CONNRESP, VMBUS_CHANMSG_GPADL_CONN,
132 vmbus_channel_response },
133 { VMBUS_CHANMSG_GPADL_DISCONN, 0, NULL },
134 { VMBUS_CHANMSG_GPADL_DISCONNRESP, VMBUS_CHANMSG_GPADL_DISCONN,
135 vmbus_channel_response },
136 { VMBUS_CHANMSG_CHFREE, 0, NULL },
137 { VMBUS_CHANMSG_CONNECT, 0, NULL },
138 { VMBUS_CHANMSG_CONNECT_RESP, VMBUS_CHANMSG_CONNECT,
139 vmbus_channel_response },
140 { VMBUS_CHANMSG_DISCONNECT, 0, NULL },
141 };
142
143 const struct hyperv_guid hyperv_guid_network = {
144 { 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46,
145 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e }
146 };
147
148 const struct hyperv_guid hyperv_guid_ide = {
149 { 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
150 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 }
151 };
152
153 const struct hyperv_guid hyperv_guid_scsi = {
154 { 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
155 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f }
156 };
157
158 const struct hyperv_guid hyperv_guid_shutdown = {
159 { 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49,
160 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb }
161 };
162
163 const struct hyperv_guid hyperv_guid_timesync = {
164 { 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
165 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf }
166 };
167
168 const struct hyperv_guid hyperv_guid_heartbeat = {
169 { 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
170 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d }
171 };
172
173 const struct hyperv_guid hyperv_guid_kvp = {
174 { 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
175 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6 }
176 };
177
178 const struct hyperv_guid hyperv_guid_vss = {
179 { 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42,
180 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 }
181 };
182
183 const struct hyperv_guid hyperv_guid_dynmem = {
184 { 0xdc, 0x74, 0x50, 0x52, 0x85, 0x89, 0xe2, 0x46,
185 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 }
186 };
187
188 const struct hyperv_guid hyperv_guid_mouse = {
189 { 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c,
190 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a }
191 };
192
193 const struct hyperv_guid hyperv_guid_kbd = {
194 { 0x6d, 0xad, 0x12, 0xf9, 0x17, 0x2b, 0xea, 0x48,
195 0xbd, 0x65, 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84 }
196 };
197
198 const struct hyperv_guid hyperv_guid_video = {
199 { 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a,
200 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 }
201 };
202
203 const struct hyperv_guid hyperv_guid_fc = {
204 { 0x4a, 0xcc, 0x9b, 0x2f, 0x69, 0x00, 0xf3, 0x4a,
205 0xb7, 0x6b, 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda }
206 };
207
208 const struct hyperv_guid hyperv_guid_fcopy = {
209 { 0xe3, 0x4b, 0xd1, 0x34, 0xe4, 0xde, 0xc8, 0x41,
210 0x9a, 0xe7, 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92 }
211 };
212
213 const struct hyperv_guid hyperv_guid_pcie = {
214 { 0x1d, 0xf6, 0xc4, 0x44, 0x44, 0x44, 0x00, 0x44,
215 0x9d, 0x52, 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f }
216 };
217
218 const struct hyperv_guid hyperv_guid_netdir = {
219 { 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b,
220 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 }
221 };
222
223 const struct hyperv_guid hyperv_guid_rdesktop = {
224 { 0xf4, 0xac, 0x6a, 0x27, 0x15, 0xac, 0x6c, 0x42,
225 0x98, 0xdd, 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe }
226 };
227
228 /* Automatic Virtual Machine Activation (AVMA) Services */
229 const struct hyperv_guid hyperv_guid_avma1 = {
230 { 0x55, 0xb2, 0x87, 0x44, 0x8c, 0xb8, 0x3f, 0x40,
231 0xbb, 0x51, 0xd1, 0xf6, 0x9c, 0xf1, 0x7f, 0x87 }
232 };
233
234 const struct hyperv_guid hyperv_guid_avma2 = {
235 { 0xf4, 0xba, 0x75, 0x33, 0x15, 0x9e, 0x30, 0x4b,
236 0xb7, 0x65, 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b }
237 };
238
239 const struct hyperv_guid hyperv_guid_avma3 = {
240 { 0xa0, 0x1f, 0x22, 0x99, 0xad, 0x24, 0xe2, 0x11,
241 0xbe, 0x98, 0x00, 0x1a, 0xa0, 0x1b, 0xbf, 0x6e }
242 };
243
244 const struct hyperv_guid hyperv_guid_avma4 = {
245 { 0x16, 0x57, 0xe6, 0xf8, 0xb3, 0x3c, 0x06, 0x4a,
246 0x9a, 0x60, 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5 }
247 };
248
249 int
250 vmbus_match(device_t parent, cfdata_t cf, void *aux)
251 {
252
253 if (cf->cf_unit != 0 ||
254 !hyperv_hypercall_enabled() ||
255 !hyperv_synic_supported())
256 return 0;
257
258 return 1;
259 }
260
261 int
262 vmbus_attach(struct vmbus_softc *sc)
263 {
264
265 aprint_naive("\n");
266 aprint_normal(": Hyper-V VMBus\n");
267
268 vmbus_sc = sc;
269
270 sc->sc_msgpool = pool_cache_init(sizeof(struct vmbus_msg), 8, 0, 0,
271 "hvmsg", NULL, IPL_NET, NULL, NULL, NULL);
272 hyperv_set_message_proc(vmbus_message_proc, sc);
273
274 sc->sc_chanmap = kmem_zalloc(sizeof(struct vmbus_channel *) *
275 VMBUS_CHAN_MAX, KM_SLEEP);
276
277 if (vmbus_alloc_dma(sc))
278 goto cleanup;
279
280 if (vmbus_init_interrupts(sc))
281 goto cleanup;
282
283 if (vmbus_connect(sc))
284 goto cleanup;
285
286 aprint_normal_dev(sc->sc_dev, "protocol %d.%d\n",
287 VMBUS_VERSION_MAJOR(sc->sc_proto),
288 VMBUS_VERSION_MINOR(sc->sc_proto));
289
290 if (sc->sc_proto == VMBUS_VERSION_WS2008 ||
291 sc->sc_proto == VMBUS_VERSION_WIN7) {
292 hyperv_set_event_proc(vmbus_event_proc_compat, sc);
293 sc->sc_channel_max = VMBUS_CHAN_MAX_COMPAT;
294 } else {
295 hyperv_set_event_proc(vmbus_event_proc, sc);
296 sc->sc_channel_max = VMBUS_CHAN_MAX;
297 }
298
299 if (vmbus_channel_scan(sc))
300 goto cleanup;
301
302 config_interrupts(sc->sc_dev, vmbus_attach_deferred);
303
304 return 0;
305
306 cleanup:
307 vmbus_deinit_interrupts(sc);
308 vmbus_free_dma(sc);
309 kmem_free(__UNVOLATILE(sc->sc_chanmap),
310 sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX);
311 return -1;
312 }
313
314 static void
315 vmbus_attach_deferred(device_t self)
316 {
317 struct vmbus_softc *sc = device_private(self);
318 uint64_t xc;
319
320 xc = xc_broadcast(0, vmbus_init_interrupts_pcpu,
321 sc, NULL);
322 xc_wait(xc);
323 }
324
325 int
326 vmbus_detach(struct vmbus_softc *sc, int flags)
327 {
328
329 vmbus_deinit_interrupts(sc);
330 vmbus_free_dma(sc);
331 kmem_free(__UNVOLATILE(sc->sc_chanmap),
332 sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX);
333
334 return 0;
335 }
336
337 static int
338 vmbus_alloc_dma(struct vmbus_softc *sc)
339 {
340 CPU_INFO_ITERATOR cii;
341 struct cpu_info *ci;
342 struct vmbus_percpu_data *pd;
343 int i;
344
345 /*
346 * Per-CPU messages and event flags.
347 */
348 for (CPU_INFO_FOREACH(cii, ci)) {
349 pd = &sc->sc_percpu[cpu_index(ci)];
350
351 pd->simp = hyperv_dma_alloc(sc->sc_dmat, &pd->simp_dma,
352 PAGE_SIZE, PAGE_SIZE, 0, 1);
353 if (pd->simp == NULL)
354 return ENOMEM;
355
356 pd->siep = hyperv_dma_alloc(sc->sc_dmat, &pd->siep_dma,
357 PAGE_SIZE, PAGE_SIZE, 0, 1);
358 if (pd->siep == NULL)
359 return ENOMEM;
360 }
361
362 sc->sc_events = hyperv_dma_alloc(sc->sc_dmat, &sc->sc_events_dma,
363 PAGE_SIZE, PAGE_SIZE, 0, 1);
364 if (sc->sc_events == NULL)
365 return ENOMEM;
366 sc->sc_wevents = (u_long *)sc->sc_events;
367 sc->sc_revents = (u_long *)((uint8_t *)sc->sc_events + (PAGE_SIZE / 2));
368
369 for (i = 0; i < __arraycount(sc->sc_monitor); i++) {
370 sc->sc_monitor[i] = hyperv_dma_alloc(sc->sc_dmat,
371 &sc->sc_monitor_dma[i], PAGE_SIZE, PAGE_SIZE, 0, 1);
372 if (sc->sc_monitor[i] == NULL)
373 return ENOMEM;
374 }
375
376 return 0;
377 }
378
379 static void
380 vmbus_free_dma(struct vmbus_softc *sc)
381 {
382 CPU_INFO_ITERATOR cii;
383 struct cpu_info *ci;
384 int i;
385
386 if (sc->sc_events != NULL) {
387 sc->sc_events = sc->sc_wevents = sc->sc_revents = NULL;
388 hyperv_dma_free(sc->sc_dmat, &sc->sc_events_dma);
389 }
390
391 for (i = 0; i < __arraycount(sc->sc_monitor); i++) {
392 sc->sc_monitor[i] = NULL;
393 hyperv_dma_free(sc->sc_dmat, &sc->sc_monitor_dma[i]);
394 }
395
396 for (CPU_INFO_FOREACH(cii, ci)) {
397 struct vmbus_percpu_data *pd = &sc->sc_percpu[cpu_index(ci)];
398
399 if (pd->simp != NULL) {
400 pd->simp = NULL;
401 hyperv_dma_free(sc->sc_dmat, &pd->simp_dma);
402 }
403 if (pd->siep != NULL) {
404 pd->siep = NULL;
405 hyperv_dma_free(sc->sc_dmat, &pd->siep_dma);
406 }
407 }
408 }
409
410 static int
411 vmbus_init_interrupts(struct vmbus_softc *sc)
412 {
413 uint64_t xc;
414
415 TAILQ_INIT(&sc->sc_reqs);
416 mutex_init(&sc->sc_req_lock, MUTEX_DEFAULT, IPL_NET);
417
418 TAILQ_INIT(&sc->sc_rsps);
419 mutex_init(&sc->sc_rsp_lock, MUTEX_DEFAULT, IPL_NET);
420
421 sc->sc_proto = VMBUS_VERSION_WS2008;
422
423 /* XXX event_tq */
424
425 sc->sc_msg_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
426 vmbus_message_softintr, sc);
427 if (sc->sc_msg_sih == NULL)
428 return -1;
429
430 kcpuset_create(&sc->sc_intr_cpuset, true);
431 if (cold) {
432 /* Initialize other CPUs later. */
433 vmbus_init_interrupts_pcpu(sc, NULL);
434 } else {
435 xc = xc_broadcast(0, vmbus_init_interrupts_pcpu,
436 sc, NULL);
437 xc_wait(xc);
438 }
439 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_SYNIC);
440
441 return 0;
442 }
443
444 static void
445 vmbus_deinit_interrupts(struct vmbus_softc *sc)
446 {
447 uint64_t xc;
448
449 if (cold) {
450 vmbus_deinit_interrupts_pcpu(sc, NULL);
451 } else {
452 xc = xc_broadcast(0, vmbus_deinit_interrupts_pcpu,
453 sc, NULL);
454 xc_wait(xc);
455 }
456 atomic_and_32(&sc->sc_flags, (uint32_t)~VMBUS_SCFLAG_SYNIC);
457
458 /* XXX event_tq */
459
460 if (sc->sc_msg_sih != NULL) {
461 softint_disestablish(sc->sc_msg_sih);
462 sc->sc_msg_sih = NULL;
463 }
464 }
465
466 static void
467 vmbus_init_interrupts_pcpu(void *arg1, void *arg2 __unused)
468 {
469 struct vmbus_softc *sc = arg1;
470 cpuid_t cpu;
471 int s;
472
473 s = splhigh();
474
475 cpu = cpu_index(curcpu());
476 if (!kcpuset_isset(sc->sc_intr_cpuset, cpu)) {
477 kcpuset_atomic_set(sc->sc_intr_cpuset, cpu);
478 vmbus_init_interrupts_md(sc, cpu);
479 vmbus_init_synic_md(sc, cpu);
480 }
481
482 splx(s);
483 }
484
485 static void
486 vmbus_deinit_interrupts_pcpu(void *arg1, void *arg2 __unused)
487 {
488 struct vmbus_softc *sc = arg1;
489 cpuid_t cpu;
490 int s;
491
492 s = splhigh();
493
494 cpu = cpu_index(curcpu());
495 if (kcpuset_isset(sc->sc_intr_cpuset, cpu)) {
496 if (ISSET(sc->sc_flags, VMBUS_SCFLAG_SYNIC))
497 vmbus_deinit_synic_md(sc, cpu);
498 vmbus_deinit_interrupts_md(sc, cpu);
499 kcpuset_atomic_clear(sc->sc_intr_cpuset, cpu);
500 }
501
502 splx(s);
503 }
504
505 static int
506 vmbus_connect(struct vmbus_softc *sc)
507 {
508 static const uint32_t versions[] = {
509 VMBUS_VERSION_WIN8_1,
510 VMBUS_VERSION_WIN8,
511 VMBUS_VERSION_WIN7,
512 VMBUS_VERSION_WS2008
513 };
514 struct vmbus_chanmsg_connect cmd;
515 struct vmbus_chanmsg_connect_resp rsp;
516 int i, rv;
517
518 memset(&cmd, 0, sizeof(cmd));
519 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CONNECT;
520 cmd.chm_evtflags = hyperv_dma_get_paddr(&sc->sc_events_dma);
521 cmd.chm_mnf1 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[0]);
522 cmd.chm_mnf2 = hyperv_dma_get_paddr(&sc->sc_monitor_dma[1]);
523
524 memset(&rsp, 0, sizeof(rsp));
525
526 for (i = 0; i < __arraycount(versions); i++) {
527 cmd.chm_ver = versions[i];
528 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp),
529 HCF_NOSLEEP);
530 if (rv) {
531 DPRINTF("%s: CONNECT failed\n",
532 device_xname(sc->sc_dev));
533 return rv;
534 }
535 if (rsp.chm_done) {
536 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_CONNECTED);
537 sc->sc_proto = versions[i];
538 sc->sc_handle = VMBUS_GPADL_START;
539 break;
540 }
541 }
542 if (i == __arraycount(versions)) {
543 device_printf(sc->sc_dev,
544 "failed to negotiate protocol version\n");
545 return ENXIO;
546 }
547
548 return 0;
549 }
550
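/*
 * Post a channel message to the host.  The request is copied into a
 * message taken from sc_msgpool and handed to vmbus_start(); unless
 * HCF_NOREPLY is given, vmbus_reply() then waits for the host's answer
 * to be copied back into rsp.  HCF_NOSLEEP makes the wait paths poll
 * instead of sleeping.
 */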
551 static int
552 vmbus_cmd(struct vmbus_softc *sc, void *cmd, size_t cmdlen, void *rsp,
553 size_t rsplen, int flags)
554 {
555 struct vmbus_msg *msg;
556 paddr_t pa;
557 int rv;
558
559 if (cmdlen > VMBUS_MSG_DSIZE_MAX) {
560 device_printf(sc->sc_dev, "payload too large (%zu)\n",
561 cmdlen);
562 return EMSGSIZE;
563 }
564
565 msg = pool_cache_get_paddr(sc->sc_msgpool, PR_WAITOK, &pa);
566 if (msg == NULL) {
567 device_printf(sc->sc_dev, "couldn't get msgpool\n");
568 return ENOMEM;
569 }
570 memset(msg, 0, sizeof(*msg));
571 msg->msg_req.hc_dsize = cmdlen;
572 memcpy(msg->msg_req.hc_data, cmd, cmdlen);
573
574 if (!(flags & HCF_NOREPLY)) {
575 msg->msg_rsp = rsp;
576 msg->msg_rsplen = rsplen;
577 } else
578 msg->msg_flags |= MSGF_NOQUEUE;
579
580 if (flags & HCF_NOSLEEP)
581 msg->msg_flags |= MSGF_NOSLEEP;
582
583 rv = vmbus_start(sc, msg, pa);
584 if (rv == 0)
585 rv = vmbus_reply(sc, msg);
586 pool_cache_put_paddr(sc->sc_msgpool, msg, pa);
587 return rv;
588 }
589
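/*
 * Queue the request on sc_reqs (unless MSGF_NOQUEUE) so the reply can
 * be matched later, then issue the post-message hypercall, retrying
 * transient failures with an increasing delay.
 */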
590 static int
591 vmbus_start(struct vmbus_softc *sc, struct vmbus_msg *msg, paddr_t msg_pa)
592 {
593 const char *wchan = "hvstart";
594 uint16_t status;
595 int wait_ms = 1; /* milliseconds */
596 int i, s;
597
598 msg->msg_req.hc_connid = VMBUS_CONNID_MESSAGE;
599 msg->msg_req.hc_msgtype = 1;
600
601 if (!(msg->msg_flags & MSGF_NOQUEUE)) {
602 mutex_enter(&sc->sc_req_lock);
603 TAILQ_INSERT_TAIL(&sc->sc_reqs, msg, msg_entry);
604 mutex_exit(&sc->sc_req_lock);
605 }
606
607 /*
608 * In order to cope with transient failures, e.g. insufficient
609 * resources on host side, we retry the post message Hypercall
610 * several times. 20 retries seem sufficient.
611 */
612 #define HC_RETRY_MAX 20
613 #define HC_WAIT_MAX (2 * 1000) /* 2s */
614
615 for (i = 0; i < HC_RETRY_MAX; i++) {
616 status = hyperv_hypercall_post_message(
617 msg_pa + offsetof(struct vmbus_msg, msg_req));
618 if (status == HYPERCALL_STATUS_SUCCESS)
619 return 0;
620
621 if (msg->msg_flags & MSGF_NOSLEEP) {
622 DELAY(wait_ms * 1000);
623 s = splnet();
624 hyperv_intr();
625 splx(s);
626 } else
627 tsleep(wchan, PRIBIO, wchan, uimax(1, mstohz(wait_ms)));
628
629 if (wait_ms < HC_WAIT_MAX)
630 wait_ms *= 2;
631 }
632
633 #undef HC_RETRY_MAX
634 #undef HC_WAIT_MAX
635
636 device_printf(sc->sc_dev,
637 "posting vmbus message failed with %d\n", status);
638
639 if (!(msg->msg_flags & MSGF_NOQUEUE)) {
640 mutex_enter(&sc->sc_req_lock);
641 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
642 mutex_exit(&sc->sc_req_lock);
643 }
644
645 return EIO;
646 }
647
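/*
 * vmbus_reply_done() checks whether vmbus_channel_response() has queued
 * the reply for this request on sc_rsps; vmbus_reply() polls or sleeps
 * until that happens and then dequeues the message.
 */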
648 static int
649 vmbus_reply_done(struct vmbus_softc *sc, struct vmbus_msg *msg)
650 {
651 struct vmbus_msg *m;
652
653 mutex_enter(&sc->sc_rsp_lock);
654 TAILQ_FOREACH(m, &sc->sc_rsps, msg_entry) {
655 if (m == msg) {
656 mutex_exit(&sc->sc_rsp_lock);
657 return 1;
658 }
659 }
660 mutex_exit(&sc->sc_rsp_lock);
661 return 0;
662 }
663
664 static int
665 vmbus_reply(struct vmbus_softc *sc, struct vmbus_msg *msg)
666 {
667 int s;
668
669 if (msg->msg_flags & MSGF_NOQUEUE)
670 return 0;
671
672 while (!vmbus_reply_done(sc, msg)) {
673 if (msg->msg_flags & MSGF_NOSLEEP) {
674 delay(1000);
675 s = splnet();
676 hyperv_intr();
677 splx(s);
678 } else
679 tsleep(msg, PRIBIO, "hvreply", uimax(1, mstohz(1)));
680 }
681
682 mutex_enter(&sc->sc_rsp_lock);
683 TAILQ_REMOVE(&sc->sc_rsps, msg, msg_entry);
684 mutex_exit(&sc->sc_rsp_lock);
685
686 return 0;
687 }
688
689 static uint16_t
690 vmbus_intr_signal(struct vmbus_softc *sc, paddr_t con_pa)
691 {
692 uint64_t status;
693
694 status = hyperv_hypercall_signal_event(con_pa);
695 return (uint16_t)status;
696 }
697
698 #if LONG_BIT == 64
699 #define ffsl(v) ffs64(v)
700 #elif LONG_BIT == 32
701 #define ffsl(v) ffs32(v)
702 #else
703 #error unsupported LONG_BIT
704 #endif /* LONG_BIT */
705
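/*
 * Walk the pending event-flag words: each set bit names a channel with
 * work to do.  Bits masked in sc_evtmask (paused channels) and bit 0
 * (reserved for channel protocol messages) are skipped; every other
 * opened channel found in sc_chanmap is scheduled.
 */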
706 static void
707 vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *revents,
708 int maxrow)
709 {
710 struct vmbus_channel *ch;
711 u_long pending;
712 uint32_t chanid, chanid_base;
713 int row, chanid_ofs;
714
715 for (row = 0; row < maxrow; row++) {
716 if (revents[row] == 0)
717 continue;
718
719 pending = atomic_swap_ulong(&revents[row], 0);
720 pending &= ~sc->sc_evtmask[row];
721 chanid_base = row * VMBUS_EVTFLAG_LEN;
722
723 while ((chanid_ofs = ffsl(pending)) != 0) {
724 chanid_ofs--; /* NOTE: ffs is 1-based */
725 pending &= ~(1UL << chanid_ofs);
726
727 chanid = chanid_base + chanid_ofs;
728 /* vmbus channel protocol message */
729 if (chanid == 0)
730 continue;
731
732 ch = sc->sc_chanmap[chanid];
733 if (__predict_false(ch == NULL)) {
734 /* Channel is closed. */
735 continue;
736 }
737 __insn_barrier();
738 if (ch->ch_state != VMBUS_CHANSTATE_OPENED) {
739 device_printf(sc->sc_dev,
740 "channel %d is not active\n", chanid);
741 continue;
742 }
743 ch->ch_evcnt.ev_count++;
744 vmbus_channel_schedule(ch);
745 }
746 }
747 }
748
749 static void
750 vmbus_event_proc(void *arg, struct cpu_info *ci)
751 {
752 struct vmbus_softc *sc = arg;
753 struct vmbus_evtflags *evt;
754
755 /*
756 * On Host with Win8 or above, the event page can be
757 * checked directly to get the id of the channel
758 * that has the pending interrupt.
759 */
760 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep +
761 VMBUS_SINT_MESSAGE;
762
763 vmbus_event_flags_proc(sc, evt->evt_flags,
764 __arraycount(evt->evt_flags));
765 }
766
767 static void
768 vmbus_event_proc_compat(void *arg, struct cpu_info *ci)
769 {
770 struct vmbus_softc *sc = arg;
771 struct vmbus_evtflags *evt;
772
773 evt = (struct vmbus_evtflags *)sc->sc_percpu[cpu_index(ci)].siep +
774 VMBUS_SINT_MESSAGE;
775
776 if (test_bit(0, &evt->evt_flags[0])) {
777 clear_bit(0, &evt->evt_flags[0]);
778 /*
779 * receive size is 1/2 page and divide that by 4 bytes
780 */
781 vmbus_event_flags_proc(sc, sc->sc_revents,
782 VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN);
783 }
784 }
785
786 static void
787 vmbus_message_proc(void *arg, struct cpu_info *ci)
788 {
789 struct vmbus_softc *sc = arg;
790 struct vmbus_message *msg;
791
792 msg = (struct vmbus_message *)sc->sc_percpu[cpu_index(ci)].simp +
793 VMBUS_SINT_MESSAGE;
794 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
795 if (__predict_true(!cold))
796 softint_schedule_cpu(sc->sc_msg_sih, ci);
797 else
798 vmbus_message_softintr(sc);
799 }
800 }
801
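/*
 * Drain this CPU's SynIC message page: dispatch each channel message
 * through vmbus_msg_dispatch[] and, when more messages are pending,
 * signal end-of-message so the hypervisor delivers the next one.
 */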
802 static void
803 vmbus_message_softintr(void *arg)
804 {
805 struct vmbus_softc *sc = arg;
806 struct vmbus_message *msg;
807 struct vmbus_chanmsg_hdr *hdr;
808 uint32_t type;
809 cpuid_t cpu;
810
811 cpu = cpu_index(curcpu());
812
813 for (;;) {
814 msg = (struct vmbus_message *)sc->sc_percpu[cpu].simp +
815 VMBUS_SINT_MESSAGE;
816 if (msg->msg_type == HYPERV_MSGTYPE_NONE)
817 break;
818
819 hdr = (struct vmbus_chanmsg_hdr *)msg->msg_data;
820 type = hdr->chm_type;
821 if (type >= VMBUS_CHANMSG_COUNT) {
822 device_printf(sc->sc_dev,
823 "unhandled message type %u flags %#x\n", type,
824 msg->msg_flags);
825 } else {
826 if (vmbus_msg_dispatch[type].hmd_handler) {
827 vmbus_msg_dispatch[type].hmd_handler(sc, hdr);
828 } else {
829 device_printf(sc->sc_dev,
830 "unhandled message type %u\n", type);
831 }
832 }
833
834 msg->msg_type = HYPERV_MSGTYPE_NONE;
835 membar_sync();
836 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING)
837 hyperv_send_eom();
838 }
839 }
840
841 static void
842 vmbus_channel_response(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *rsphdr)
843 {
844 struct vmbus_msg *msg;
845 struct vmbus_chanmsg_hdr *reqhdr;
846 int req;
847
848 req = vmbus_msg_dispatch[rsphdr->chm_type].hmd_request;
849 mutex_enter(&sc->sc_req_lock);
850 TAILQ_FOREACH(msg, &sc->sc_reqs, msg_entry) {
851 reqhdr = (struct vmbus_chanmsg_hdr *)&msg->msg_req.hc_data;
852 if (reqhdr->chm_type == req) {
853 TAILQ_REMOVE(&sc->sc_reqs, msg, msg_entry);
854 break;
855 }
856 }
857 mutex_exit(&sc->sc_req_lock);
858 if (msg != NULL) {
859 memcpy(msg->msg_rsp, rsphdr, msg->msg_rsplen);
860 mutex_enter(&sc->sc_rsp_lock);
861 TAILQ_INSERT_TAIL(&sc->sc_rsps, msg, msg_entry);
862 mutex_exit(&sc->sc_rsp_lock);
863 wakeup(msg);
864 }
865 }
866
867 static void
868 vmbus_channel_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr)
869 {
870 struct vmbus_chanmsg_choffer *co;
871
872 co = kmem_intr_alloc(sizeof(*co), KM_NOSLEEP);
873 if (co == NULL) {
874 device_printf(sc->sc_dev,
875 "failed to allocate an offer object\n");
876 return;
877 }
878
879 memcpy(co, hdr, sizeof(*co));
880 vmbus_chevq_enqueue(sc, VMBUS_CHEV_TYPE_OFFER, co);
881 }
882
883 static void
884 vmbus_channel_rescind(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr)
885 {
886 struct vmbus_chanmsg_chrescind *cr;
887
888 cr = kmem_intr_alloc(sizeof(*cr), KM_NOSLEEP);
889 if (cr == NULL) {
890 device_printf(sc->sc_dev,
891 		    "failed to allocate a rescind object\n");
892 return;
893 }
894
895 memcpy(cr, hdr, sizeof(*cr));
896 vmbus_chevq_enqueue(sc, VMBUS_CHEV_TYPE_RESCIND, cr);
897 }
898
899 static void
900 vmbus_channel_delivered(struct vmbus_softc *sc, struct vmbus_chanmsg_hdr *hdr)
901 {
902
903 atomic_or_32(&sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED);
904 wakeup(&sc->sc_devq);
905 }
906
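/*
 * Turn a channel type GUID into a human-readable identifier, falling
 * back to the plain GUID string for types not in the table.
 */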
907 static void
908 hyperv_guid_sprint(struct hyperv_guid *guid, char *str, size_t size)
909 {
910 static const struct {
911 const struct hyperv_guid *guid;
912 const char *ident;
913 } map[] = {
914 { &hyperv_guid_network, "network" },
915 { &hyperv_guid_ide, "ide" },
916 { &hyperv_guid_scsi, "scsi" },
917 { &hyperv_guid_shutdown, "shutdown" },
918 { &hyperv_guid_timesync, "timesync" },
919 { &hyperv_guid_heartbeat, "heartbeat" },
920 { &hyperv_guid_kvp, "kvp" },
921 { &hyperv_guid_vss, "vss" },
922 { &hyperv_guid_dynmem, "dynamic-memory" },
923 { &hyperv_guid_mouse, "mouse" },
924 { &hyperv_guid_kbd, "keyboard" },
925 { &hyperv_guid_video, "video" },
926 { &hyperv_guid_fc, "fiber-channel" },
927 { &hyperv_guid_fcopy, "file-copy" },
928 { &hyperv_guid_pcie, "pcie-passthrough" },
929 { &hyperv_guid_netdir, "network-direct" },
930 { &hyperv_guid_rdesktop, "remote-desktop" },
931 { &hyperv_guid_avma1, "avma-1" },
932 { &hyperv_guid_avma2, "avma-2" },
933 { &hyperv_guid_avma3, "avma-3" },
934 { &hyperv_guid_avma4, "avma-4" },
935 };
936 int i;
937
938 for (i = 0; i < __arraycount(map); i++) {
939 if (memcmp(guid, map[i].guid, sizeof(*guid)) == 0) {
940 strlcpy(str, map[i].ident, size);
941 return;
942 }
943 }
944 hyperv_guid2str(guid, str, size);
945 }
946
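/*
 * Set up the channel and device event queues with their worker
 * threads, request the initial channel offers from the host and
 * process them synchronously before returning.
 */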
947 static int
948 vmbus_channel_scan(struct vmbus_softc *sc)
949 {
950 struct vmbus_chanmsg_hdr hdr;
951 struct vmbus_chanmsg_choffer rsp;
952
953 TAILQ_INIT(&sc->sc_prichans);
954 mutex_init(&sc->sc_prichan_lock, MUTEX_DEFAULT, IPL_NET);
955 TAILQ_INIT(&sc->sc_channels);
956 mutex_init(&sc->sc_channel_lock, MUTEX_DEFAULT, IPL_NET);
957
958 /*
959 * This queue serializes vmbus channel offer and rescind messages.
960 */
961 SIMPLEQ_INIT(&sc->sc_chevq);
962 mutex_init(&sc->sc_chevq_lock, MUTEX_DEFAULT, IPL_NET);
963 cv_init(&sc->sc_chevq_cv, "hvchevcv");
964 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
965 vmbus_chevq_thread, sc, NULL, "hvchevq") != 0) {
966 DPRINTF("%s: failed to create prich chevq thread\n",
967 device_xname(sc->sc_dev));
968 return -1;
969 }
970
971 /*
972 * This queue serializes vmbus devices' attach and detach
973 * for channel offer and rescind messages.
974 */
975 SIMPLEQ_INIT(&sc->sc_devq);
976 mutex_init(&sc->sc_devq_lock, MUTEX_DEFAULT, IPL_NET);
977 cv_init(&sc->sc_devq_cv, "hvdevqcv");
978 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
979 vmbus_devq_thread, sc, NULL, "hvdevq") != 0) {
980 DPRINTF("%s: failed to create prich devq thread\n",
981 device_xname(sc->sc_dev));
982 return -1;
983 }
984
985 /*
986 * This queue handles sub-channel detach, so that vmbus
987 * device's detach running in sc_devq can drain its sub-channels.
988 */
989 SIMPLEQ_INIT(&sc->sc_subch_devq);
990 mutex_init(&sc->sc_subch_devq_lock, MUTEX_DEFAULT, IPL_NET);
991 cv_init(&sc->sc_subch_devq_cv, "hvsdvqcv");
992 if (kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
993 vmbus_subchannel_devq_thread, sc, NULL, "hvsdevq") != 0) {
994 DPRINTF("%s: failed to create subch devq thread\n",
995 device_xname(sc->sc_dev));
996 return -1;
997 }
998
999 memset(&hdr, 0, sizeof(hdr));
1000 hdr.chm_type = VMBUS_CHANMSG_CHREQUEST;
1001
1002 if (vmbus_cmd(sc, &hdr, sizeof(hdr), &rsp, sizeof(rsp),
1003 HCF_NOREPLY | HCF_NOSLEEP)) {
1004 DPRINTF("%s: CHREQUEST failed\n", device_xname(sc->sc_dev));
1005 return -1;
1006 }
1007
1008 while (!ISSET(sc->sc_flags, VMBUS_SCFLAG_OFFERS_DELIVERED))
1009 tsleep(&sc->sc_devq, PRIBIO, "hvscan", 1);
1010
1011 mutex_enter(&sc->sc_chevq_lock);
1012 vmbus_process_chevq(sc);
1013 mutex_exit(&sc->sc_chevq_lock);
1014 mutex_enter(&sc->sc_devq_lock);
1015 vmbus_process_devq(sc);
1016 mutex_exit(&sc->sc_devq_lock);
1017
1018 return 0;
1019 }
1020
1021 static struct vmbus_channel *
1022 vmbus_channel_alloc(struct vmbus_softc *sc)
1023 {
1024 struct vmbus_channel *ch;
1025
1026 ch = kmem_zalloc(sizeof(*ch), KM_SLEEP);
1027
1028 ch->ch_monprm = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_monprm_dma,
1029 sizeof(*ch->ch_monprm), 8, 0, 1);
1030 if (ch->ch_monprm == NULL) {
1031 device_printf(sc->sc_dev, "monprm alloc failed\n");
1032 kmem_free(ch, sizeof(*ch));
1033 return NULL;
1034 }
1035
1036 ch->ch_refs = 1;
1037 ch->ch_sc = sc;
1038 mutex_init(&ch->ch_event_lock, MUTEX_DEFAULT, IPL_NET);
1039 cv_init(&ch->ch_event_cv, "hvevwait");
1040 mutex_init(&ch->ch_subchannel_lock, MUTEX_DEFAULT, IPL_NET);
1041 cv_init(&ch->ch_subchannel_cv, "hvsubch");
1042 TAILQ_INIT(&ch->ch_subchannels);
1043
1044 ch->ch_state = VMBUS_CHANSTATE_CLOSED;
1045
1046 return ch;
1047 }
1048
1049 static void
1050 vmbus_channel_free(struct vmbus_channel *ch)
1051 {
1052 struct vmbus_softc *sc = ch->ch_sc;
1053
1054 KASSERTMSG(TAILQ_EMPTY(&ch->ch_subchannels) &&
1055 ch->ch_subchannel_count == 0, "still owns sub-channels");
1056 KASSERTMSG(ch->ch_state == 0 || ch->ch_state == VMBUS_CHANSTATE_CLOSED,
1057 "free busy channel");
1058 KASSERTMSG(ch->ch_refs == 0, "channel %u: invalid refcnt %d",
1059 ch->ch_id, ch->ch_refs);
1060
1061 hyperv_dma_free(sc->sc_dmat, &ch->ch_monprm_dma);
1062 mutex_destroy(&ch->ch_event_lock);
1063 cv_destroy(&ch->ch_event_cv);
1064 mutex_destroy(&ch->ch_subchannel_lock);
1065 cv_destroy(&ch->ch_subchannel_cv);
1066 /* XXX ch_evcnt */
1067 if (ch->ch_taskq != NULL)
1068 softint_disestablish(ch->ch_taskq);
1069 kmem_free(ch, sizeof(*ch));
1070 }
1071
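/*
 * Link a freshly offered channel into the softc.  Primary channels go
 * onto sc_prichans; sub-channels must match an existing primary by
 * type/instance GUID and are attached to it.  All channels end up on
 * sc_channels and are pinned to cpu0 by default.
 */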
1072 static int
1073 vmbus_channel_add(struct vmbus_channel *nch)
1074 {
1075 struct vmbus_softc *sc = nch->ch_sc;
1076 struct vmbus_channel *ch;
1077 int refs __diagused;
1078
1079 if (nch->ch_id == 0) {
1080 device_printf(sc->sc_dev, "got channel 0 offer, discard\n");
1081 return EINVAL;
1082 } else if (nch->ch_id >= sc->sc_channel_max) {
1083 device_printf(sc->sc_dev, "invalid channel %u offer\n",
1084 nch->ch_id);
1085 return EINVAL;
1086 }
1087
1088 mutex_enter(&sc->sc_prichan_lock);
1089 TAILQ_FOREACH(ch, &sc->sc_prichans, ch_prientry) {
1090 if (!memcmp(&ch->ch_type, &nch->ch_type, sizeof(ch->ch_type)) &&
1091 !memcmp(&ch->ch_inst, &nch->ch_inst, sizeof(ch->ch_inst)))
1092 break;
1093 }
1094 if (VMBUS_CHAN_ISPRIMARY(nch)) {
1095 if (ch == NULL) {
1096 TAILQ_INSERT_TAIL(&sc->sc_prichans, nch, ch_prientry);
1097 mutex_exit(&sc->sc_prichan_lock);
1098 goto done;
1099 } else {
1100 mutex_exit(&sc->sc_prichan_lock);
1101 device_printf(sc->sc_dev,
1102 "duplicated primary channel%u\n", nch->ch_id);
1103 return EINVAL;
1104 }
1105 } else {
1106 if (ch == NULL) {
1107 mutex_exit(&sc->sc_prichan_lock);
1108 device_printf(sc->sc_dev, "no primary channel%u\n",
1109 nch->ch_id);
1110 return EINVAL;
1111 }
1112 }
1113 mutex_exit(&sc->sc_prichan_lock);
1114
1115 KASSERT(!VMBUS_CHAN_ISPRIMARY(nch));
1116 KASSERT(ch != NULL);
1117
1118 refs = atomic_inc_uint_nv(&nch->ch_refs);
1119 KASSERT(refs == 2);
1120
1121 nch->ch_primary_channel = ch;
1122 nch->ch_dev = ch->ch_dev;
1123
1124 mutex_enter(&ch->ch_subchannel_lock);
1125 TAILQ_INSERT_TAIL(&ch->ch_subchannels, nch, ch_subentry);
1126 ch->ch_subchannel_count++;
1127 cv_signal(&ch->ch_subchannel_cv);
1128 mutex_exit(&ch->ch_subchannel_lock);
1129
1130 done:
1131 mutex_enter(&sc->sc_channel_lock);
1132 TAILQ_INSERT_TAIL(&sc->sc_channels, nch, ch_entry);
1133 mutex_exit(&sc->sc_channel_lock);
1134
1135 vmbus_channel_cpu_default(nch);
1136
1137 return 0;
1138 }
1139
1140 void
1141 vmbus_channel_cpu_set(struct vmbus_channel *ch, int cpu)
1142 {
1143 struct vmbus_softc *sc = ch->ch_sc;
1144
1145 KASSERTMSG(cpu >= 0 && cpu < ncpu, "invalid cpu %d", cpu);
1146
1147 if (sc->sc_proto == VMBUS_VERSION_WS2008 ||
1148 sc->sc_proto == VMBUS_VERSION_WIN7) {
1149 /* Only cpu0 is supported */
1150 cpu = 0;
1151 }
1152
1153 ch->ch_cpuid = cpu;
1154 ch->ch_vcpu = hyperv_get_vcpuid(cpu);
1155
1156 aprint_debug_dev(ch->ch_dev != NULL ? ch->ch_dev : sc->sc_dev,
1157 "channel %u assigned to cpu%u [vcpu%u]\n",
1158 ch->ch_id, ch->ch_cpuid, ch->ch_vcpu);
1159 }
1160
1161 void
1162 vmbus_channel_cpu_rr(struct vmbus_channel *ch)
1163 {
1164 static uint32_t vmbus_channel_nextcpu;
1165 int cpu;
1166
1167 cpu = atomic_inc_32_nv(&vmbus_channel_nextcpu) % ncpu;
1168 vmbus_channel_cpu_set(ch, cpu);
1169 }
1170
1171 static void
1172 vmbus_channel_cpu_default(struct vmbus_channel *ch)
1173 {
1174
1175 /*
1176 * By default, pin the channel to cpu0. Devices having
1177 * special channel-cpu mapping requirement should call
1178 * vmbus_channel_cpu_{set,rr}().
1179 */
1180 vmbus_channel_cpu_set(ch, 0);
1181 }
1182
1183 bool
1184 vmbus_channel_is_revoked(struct vmbus_channel *ch)
1185 {
1186
1187 return (ch->ch_flags & CHF_REVOKED) ? true : false;
1188 }
1189
1190 static void
1191 vmbus_process_offer(struct vmbus_softc *sc, struct vmbus_chanmsg_choffer *co)
1192 {
1193 struct vmbus_channel *ch;
1194
1195 ch = vmbus_channel_alloc(sc);
1196 if (ch == NULL) {
1197 device_printf(sc->sc_dev, "allocate channel %u failed\n",
1198 co->chm_chanid);
1199 return;
1200 }
1201
1202 /*
1203 * By default we setup state to enable batched reading.
1204 * A specific service can choose to disable this prior
1205 * to opening the channel.
1206 */
1207 ch->ch_flags |= CHF_BATCHED;
1208
1209 hyperv_guid_sprint(&co->chm_chtype, ch->ch_ident,
1210 sizeof(ch->ch_ident));
1211
1212 ch->ch_monprm->mp_connid = VMBUS_CONNID_EVENT;
1213 if (sc->sc_proto > VMBUS_VERSION_WS2008)
1214 ch->ch_monprm->mp_connid = co->chm_connid;
1215
1216 if (co->chm_flags1 & VMBUS_CHOFFER_FLAG1_HASMNF) {
1217 ch->ch_mgroup = co->chm_montrig / VMBUS_MONTRIG_LEN;
1218 ch->ch_mindex = co->chm_montrig % VMBUS_MONTRIG_LEN;
1219 ch->ch_flags |= CHF_MONITOR;
1220 }
1221
1222 ch->ch_id = co->chm_chanid;
1223 ch->ch_subidx = co->chm_subidx;
1224
1225 memcpy(&ch->ch_type, &co->chm_chtype, sizeof(ch->ch_type));
1226 memcpy(&ch->ch_inst, &co->chm_chinst, sizeof(ch->ch_inst));
1227
1228 if (vmbus_channel_add(ch) != 0) {
1229 atomic_dec_uint(&ch->ch_refs);
1230 vmbus_channel_free(ch);
1231 return;
1232 }
1233
1234 ch->ch_state = VMBUS_CHANSTATE_OFFERED;
1235
1236 vmbus_devq_enqueue(sc, VMBUS_DEV_TYPE_ATTACH, ch);
1237
1238 #ifdef HYPERV_DEBUG
1239 printf("%s: channel %u: \"%s\"", device_xname(sc->sc_dev), ch->ch_id,
1240 ch->ch_ident);
1241 if (ch->ch_flags & CHF_MONITOR)
1242 printf(", monitor %u\n", co->chm_montrig);
1243 else
1244 printf("\n");
1245 #endif
1246 }
1247
1248 static void
1249 vmbus_process_rescind(struct vmbus_softc *sc,
1250 struct vmbus_chanmsg_chrescind *cr)
1251 {
1252 struct vmbus_channel *ch;
1253
1254 if (cr->chm_chanid > VMBUS_CHAN_MAX) {
1255 device_printf(sc->sc_dev, "invalid revoked channel%u\n",
1256 cr->chm_chanid);
1257 return;
1258 }
1259
1260 mutex_enter(&sc->sc_channel_lock);
1261 ch = vmbus_channel_lookup(sc, cr->chm_chanid);
1262 if (ch == NULL) {
1263 mutex_exit(&sc->sc_channel_lock);
1264 device_printf(sc->sc_dev, "channel%u is not offered\n",
1265 cr->chm_chanid);
1266 return;
1267 }
1268 TAILQ_REMOVE(&sc->sc_channels, ch, ch_entry);
1269 mutex_exit(&sc->sc_channel_lock);
1270
1271 if (VMBUS_CHAN_ISPRIMARY(ch)) {
1272 mutex_enter(&sc->sc_prichan_lock);
1273 TAILQ_REMOVE(&sc->sc_prichans, ch, ch_prientry);
1274 mutex_exit(&sc->sc_prichan_lock);
1275 }
1276
1277 KASSERTMSG(!(ch->ch_flags & CHF_REVOKED),
1278 "channel%u has already been revoked", ch->ch_id);
1279 atomic_or_uint(&ch->ch_flags, CHF_REVOKED);
1280
1281 vmbus_channel_detach(ch);
1282 }
1283
1284 static int
1285 vmbus_channel_release(struct vmbus_channel *ch)
1286 {
1287 struct vmbus_softc *sc = ch->ch_sc;
1288 struct vmbus_chanmsg_chfree cmd;
1289 int rv;
1290
1291 memset(&cmd, 0, sizeof(cmd));
1292 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHFREE;
1293 cmd.chm_chanid = ch->ch_id;
1294
1295 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0,
1296 HCF_NOREPLY | HCF_SLEEPOK);
1297 if (rv) {
1298 DPRINTF("%s: CHFREE failed with %d\n", device_xname(sc->sc_dev),
1299 rv);
1300 }
1301 return rv;
1302 }
1303
1304 struct vmbus_channel **
1305 vmbus_subchannel_get(struct vmbus_channel *prich, int subchan_cnt)
1306 {
1307 struct vmbus_softc *sc = prich->ch_sc;
1308 struct vmbus_channel **ret, *ch;
1309 int i, s;
1310
1311 KASSERTMSG(subchan_cnt > 0,
1312 "invalid sub-channel count %d", subchan_cnt);
1313
1314 ret = kmem_zalloc(sizeof(struct vmbus_channel *) * subchan_cnt,
1315 KM_SLEEP);
1316
1317 mutex_enter(&prich->ch_subchannel_lock);
1318
1319 while (prich->ch_subchannel_count < subchan_cnt) {
1320 if (cold) {
1321 mutex_exit(&prich->ch_subchannel_lock);
1322 delay(1000);
1323 s = splnet();
1324 hyperv_intr();
1325 splx(s);
1326 mutex_enter(&sc->sc_chevq_lock);
1327 vmbus_process_chevq(sc);
1328 mutex_exit(&sc->sc_chevq_lock);
1329 mutex_enter(&prich->ch_subchannel_lock);
1330 } else {
1331 mtsleep(prich, PRIBIO, "hvsubch", 1,
1332 &prich->ch_subchannel_lock);
1333 }
1334 }
1335
1336 i = 0;
1337 TAILQ_FOREACH(ch, &prich->ch_subchannels, ch_subentry) {
1338 ret[i] = ch; /* XXX inc refs */
1339
1340 if (++i == subchan_cnt)
1341 break;
1342 }
1343
1344 KASSERTMSG(i == subchan_cnt, "invalid subchan count %d, should be %d",
1345 prich->ch_subchannel_count, subchan_cnt);
1346
1347 mutex_exit(&prich->ch_subchannel_lock);
1348
1349 return ret;
1350 }
1351
1352 void
1353 vmbus_subchannel_rel(struct vmbus_channel **subch, int cnt)
1354 {
1355
1356 kmem_free(subch, sizeof(struct vmbus_channel *) * cnt);
1357 }
1358
1359 void
1360 vmbus_subchannel_drain(struct vmbus_channel *prich)
1361 {
1362 int s;
1363
1364 mutex_enter(&prich->ch_subchannel_lock);
1365 while (prich->ch_subchannel_count > 0) {
1366 if (cold) {
1367 mutex_exit(&prich->ch_subchannel_lock);
1368 delay(1000);
1369 s = splnet();
1370 hyperv_intr();
1371 splx(s);
1372 mutex_enter(&prich->ch_subchannel_lock);
1373 } else {
1374 cv_wait(&prich->ch_subchannel_cv,
1375 &prich->ch_subchannel_lock);
1376 }
1377 }
1378 mutex_exit(&prich->ch_subchannel_lock);
1379 }
1380
1381 static struct vmbus_channel *
1382 vmbus_channel_lookup(struct vmbus_softc *sc, uint32_t chanid)
1383 {
1384 struct vmbus_channel *ch = NULL;
1385
1386 TAILQ_FOREACH(ch, &sc->sc_channels, ch_entry) {
1387 if (ch->ch_id == chanid)
1388 return ch;
1389 }
1390 return NULL;
1391 }
1392
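/*
 * Allocate one page-aligned buffer holding both ring buffers (the
 * first half is the transmit ring, the second half the receive ring,
 * each prefixed by its vmbus_bufring header) and register it with the
 * host as a GPADL.
 */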
1393 static int
1394 vmbus_channel_ring_create(struct vmbus_channel *ch, uint32_t buflen)
1395 {
1396 struct vmbus_softc *sc = ch->ch_sc;
1397
1398 buflen = roundup(buflen, PAGE_SIZE) + sizeof(struct vmbus_bufring);
1399 ch->ch_ring_size = 2 * buflen;
1400 /* page aligned memory */
1401 ch->ch_ring = hyperv_dma_alloc(sc->sc_dmat, &ch->ch_ring_dma,
1402 ch->ch_ring_size, PAGE_SIZE, 0, 1);
1403 if (ch->ch_ring == NULL) {
1404 device_printf(sc->sc_dev,
1405 "failed to allocate channel ring\n");
1406 return ENOMEM;
1407 }
1408
1409 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
1410 ch->ch_wrd.rd_ring = (struct vmbus_bufring *)ch->ch_ring;
1411 ch->ch_wrd.rd_size = buflen;
1412 ch->ch_wrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
1413 mutex_init(&ch->ch_wrd.rd_lock, MUTEX_DEFAULT, IPL_NET);
1414
1415 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
1416 ch->ch_rrd.rd_ring = (struct vmbus_bufring *)((uint8_t *)ch->ch_ring +
1417 buflen);
1418 ch->ch_rrd.rd_size = buflen;
1419 ch->ch_rrd.rd_dsize = buflen - sizeof(struct vmbus_bufring);
1420 mutex_init(&ch->ch_rrd.rd_lock, MUTEX_DEFAULT, IPL_NET);
1421
1422 if (vmbus_handle_alloc(ch, &ch->ch_ring_dma, ch->ch_ring_size,
1423 &ch->ch_ring_gpadl)) {
1424 device_printf(sc->sc_dev,
1425 "failed to obtain a PA handle for the ring\n");
1426 vmbus_channel_ring_destroy(ch);
1427 return ENOMEM;
1428 }
1429
1430 return 0;
1431 }
1432
1433 static void
1434 vmbus_channel_ring_destroy(struct vmbus_channel *ch)
1435 {
1436 struct vmbus_softc *sc = ch->ch_sc;
1437
1438 hyperv_dma_free(sc->sc_dmat, &ch->ch_ring_dma);
1439 ch->ch_ring = NULL;
1440 vmbus_handle_free(ch, ch->ch_ring_gpadl);
1441
1442 mutex_destroy(&ch->ch_wrd.rd_lock);
1443 memset(&ch->ch_wrd, 0, sizeof(ch->ch_wrd));
1444 mutex_destroy(&ch->ch_rrd.rd_lock);
1445 memset(&ch->ch_rrd, 0, sizeof(ch->ch_rrd));
1446 }
1447
1448 int
1449 vmbus_channel_open(struct vmbus_channel *ch, size_t buflen, void *udata,
1450 size_t udatalen, void (*handler)(void *), void *arg)
1451 {
1452 struct vmbus_softc *sc = ch->ch_sc;
1453 struct vmbus_chanmsg_chopen cmd;
1454 struct vmbus_chanmsg_chopen_resp rsp;
1455 int rv = EINVAL;
1456
1457 if (ch->ch_ring == NULL &&
1458 (rv = vmbus_channel_ring_create(ch, buflen))) {
1459 DPRINTF("%s: failed to create channel ring\n",
1460 device_xname(sc->sc_dev));
1461 return rv;
1462 }
1463
1464 __insn_barrier();
1465 sc->sc_chanmap[ch->ch_id] = ch;
1466
1467 memset(&cmd, 0, sizeof(cmd));
1468 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHOPEN;
1469 cmd.chm_openid = ch->ch_id;
1470 cmd.chm_chanid = ch->ch_id;
1471 cmd.chm_gpadl = ch->ch_ring_gpadl;
1472 cmd.chm_txbr_pgcnt = atop(ch->ch_wrd.rd_size);
1473 cmd.chm_vcpuid = ch->ch_vcpu;
1474 if (udata && udatalen > 0)
1475 memcpy(cmd.chm_udata, udata, udatalen);
1476
1477 memset(&rsp, 0, sizeof(rsp));
1478
1479 ch->ch_handler = handler;
1480 ch->ch_ctx = arg;
1481 ch->ch_state = VMBUS_CHANSTATE_OPENED;
1482
1483 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), HCF_NOSLEEP);
1484 if (rv) {
1485 sc->sc_chanmap[ch->ch_id] = NULL;
1486 vmbus_channel_ring_destroy(ch);
1487 DPRINTF("%s: CHOPEN failed with %d\n", device_xname(sc->sc_dev),
1488 rv);
1489 ch->ch_handler = NULL;
1490 ch->ch_ctx = NULL;
1491 ch->ch_state = VMBUS_CHANSTATE_OFFERED;
1492 return rv;
1493 }
1494 return 0;
1495 }
1496
1497 static void
1498 vmbus_channel_detach(struct vmbus_channel *ch)
1499 {
1500 u_int refs;
1501
1502 KASSERTMSG(ch->ch_refs > 0, "channel%u: invalid refcnt %d",
1503 ch->ch_id, ch->ch_refs);
1504
1505 membar_release();
1506 refs = atomic_dec_uint_nv(&ch->ch_refs);
1507 if (refs == 0) {
1508 membar_acquire();
1509 /* Detach the target channel. */
1510 vmbus_devq_enqueue(ch->ch_sc, VMBUS_DEV_TYPE_DETACH, ch);
1511 }
1512 }
1513
1514 static int
1515 vmbus_channel_close_internal(struct vmbus_channel *ch)
1516 {
1517 struct vmbus_softc *sc = ch->ch_sc;
1518 struct vmbus_chanmsg_chclose cmd;
1519 int rv;
1520
1521 sc->sc_chanmap[ch->ch_id] = NULL;
1522
1523 memset(&cmd, 0, sizeof(cmd));
1524 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_CHCLOSE;
1525 cmd.chm_chanid = ch->ch_id;
1526
1527 ch->ch_state = VMBUS_CHANSTATE_CLOSING;
1528 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), NULL, 0,
1529 HCF_NOREPLY | HCF_NOSLEEP);
1530 if (rv) {
1531 DPRINTF("%s: CHCLOSE failed with %d\n",
1532 device_xname(sc->sc_dev), rv);
1533 return rv;
1534 }
1535 ch->ch_state = VMBUS_CHANSTATE_CLOSED;
1536 vmbus_channel_ring_destroy(ch);
1537 return 0;
1538 }
1539
1540 int
1541 vmbus_channel_close_direct(struct vmbus_channel *ch)
1542 {
1543 int rv;
1544
1545 rv = vmbus_channel_close_internal(ch);
1546 if (!VMBUS_CHAN_ISPRIMARY(ch))
1547 vmbus_channel_detach(ch);
1548 return rv;
1549 }
1550
1551 int
1552 vmbus_channel_close(struct vmbus_channel *ch)
1553 {
1554 struct vmbus_channel **subch;
1555 int i, cnt, rv;
1556
1557 if (!VMBUS_CHAN_ISPRIMARY(ch))
1558 return 0;
1559
1560 cnt = ch->ch_subchannel_count;
1561 if (cnt > 0) {
1562 subch = vmbus_subchannel_get(ch, cnt);
1563 for (i = 0; i < ch->ch_subchannel_count; i++) {
1564 rv = vmbus_channel_close_internal(subch[i]);
1565 (void) rv; /* XXX */
1566 vmbus_channel_detach(ch);
1567 }
1568 vmbus_subchannel_rel(subch, cnt);
1569 }
1570
1571 return vmbus_channel_close_internal(ch);
1572 }
1573
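/*
 * Tell the host that data has been placed in the channel's transmit
 * ring, either through the monitor trigger page (CHF_MONITOR) or a
 * direct signal-event hypercall.
 */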
1574 static inline void
1575 vmbus_channel_setevent(struct vmbus_softc *sc, struct vmbus_channel *ch)
1576 {
1577 struct vmbus_mon_trig *mtg;
1578
1579 /* Each uint32_t represents 32 channels */
1580 set_bit(ch->ch_id, sc->sc_wevents);
1581 if (ch->ch_flags & CHF_MONITOR) {
1582 mtg = &sc->sc_monitor[1]->mnf_trigs[ch->ch_mgroup];
1583 set_bit(ch->ch_mindex, &mtg->mt_pending);
1584 } else
1585 vmbus_intr_signal(sc, hyperv_dma_get_paddr(&ch->ch_monprm_dma));
1586 }
1587
1588 static void
1589 vmbus_channel_intr(void *arg)
1590 {
1591 struct vmbus_channel *ch = arg;
1592
1593 if (vmbus_channel_ready(ch))
1594 ch->ch_handler(ch->ch_ctx);
1595
1596 if (vmbus_channel_unpause(ch) == 0)
1597 return;
1598
1599 vmbus_channel_pause(ch);
1600 vmbus_channel_schedule(ch);
1601 }
1602
1603 int
1604 vmbus_channel_setdeferred(struct vmbus_channel *ch, const char *name)
1605 {
1606
1607 ch->ch_taskq = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1608 vmbus_channel_intr, ch);
1609 if (ch->ch_taskq == NULL)
1610 return -1;
1611 return 0;
1612 }
1613
1614 void
1615 vmbus_channel_schedule(struct vmbus_channel *ch)
1616 {
1617
1618 if (ch->ch_handler) {
1619 if (!cold && (ch->ch_flags & CHF_BATCHED)) {
1620 vmbus_channel_pause(ch);
1621 softint_schedule(ch->ch_taskq);
1622 } else
1623 ch->ch_handler(ch->ch_ctx);
1624 }
1625 }
1626
1627 static __inline void
1628 vmbus_ring_put(struct vmbus_ring_data *wrd, uint8_t *data, uint32_t datalen)
1629 {
1630 int left = MIN(datalen, wrd->rd_dsize - wrd->rd_prod);
1631
1632 memcpy(&wrd->rd_ring->br_data[wrd->rd_prod], data, left);
1633 memcpy(&wrd->rd_ring->br_data[0], data + left, datalen - left);
1634 wrd->rd_prod += datalen;
1635 if (wrd->rd_prod >= wrd->rd_dsize)
1636 wrd->rd_prod -= wrd->rd_dsize;
1637 }
1638
1639 static inline void
1640 vmbus_ring_get(struct vmbus_ring_data *rrd, uint8_t *data, uint32_t datalen,
1641 int peek)
1642 {
1643 int left = MIN(datalen, rrd->rd_dsize - rrd->rd_cons);
1644
1645 memcpy(data, &rrd->rd_ring->br_data[rrd->rd_cons], left);
1646 memcpy(data + left, &rrd->rd_ring->br_data[0], datalen - left);
1647 if (!peek) {
1648 rrd->rd_cons += datalen;
1649 if (rrd->rd_cons >= rrd->rd_dsize)
1650 rrd->rd_cons -= rrd->rd_dsize;
1651 }
1652 }
1653
1654 static __inline void
1655 vmbus_ring_avail(struct vmbus_ring_data *rd, uint32_t *towrite,
1656 uint32_t *toread)
1657 {
1658 uint32_t ridx = rd->rd_ring->br_rindex;
1659 uint32_t widx = rd->rd_ring->br_windex;
1660 uint32_t r, w;
1661
1662 if (widx >= ridx)
1663 w = rd->rd_dsize - (widx - ridx);
1664 else
1665 w = ridx - widx;
1666 r = rd->rd_dsize - w;
1667 if (towrite)
1668 *towrite = w;
1669 if (toread)
1670 *toread = r;
1671 }
1672
1673 static bool
1674 vmbus_ring_is_empty(struct vmbus_ring_data *rd)
1675 {
1676
1677 return rd->rd_ring->br_rindex == rd->rd_ring->br_windex;
1678 }
1679
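/*
 * Copy the iovecs into the transmit ring followed by the 64-bit
 * producer-index record, publish the new write index, and tell the
 * caller to signal the host if the ring was empty and interrupts are
 * not masked.
 */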
1680 static int
1681 vmbus_ring_write(struct vmbus_ring_data *wrd, struct iovec *iov, int iov_cnt,
1682 int *needsig)
1683 {
1684 uint64_t indices = 0;
1685 uint32_t avail, oprod, datalen = sizeof(indices);
1686 int i;
1687
1688 for (i = 0; i < iov_cnt; i++)
1689 datalen += iov[i].iov_len;
1690
1691 KASSERT(datalen <= wrd->rd_dsize);
1692
1693 vmbus_ring_avail(wrd, &avail, NULL);
1694 if (avail <= datalen) {
1695 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
1696 return EAGAIN;
1697 }
1698
1699 oprod = wrd->rd_prod;
1700
1701 for (i = 0; i < iov_cnt; i++)
1702 vmbus_ring_put(wrd, iov[i].iov_base, iov[i].iov_len);
1703
1704 indices = (uint64_t)oprod << 32;
1705 vmbus_ring_put(wrd, (uint8_t *)&indices, sizeof(indices));
1706
1707 membar_sync();
1708 wrd->rd_ring->br_windex = wrd->rd_prod;
1709 membar_sync();
1710
1711 /* Signal when the ring transitions from being empty to non-empty */
1712 if (wrd->rd_ring->br_imask == 0 &&
1713 wrd->rd_ring->br_rindex == oprod)
1714 *needsig = 1;
1715 else
1716 *needsig = 0;
1717
1718 return 0;
1719 }
1720
1721 int
1722 vmbus_channel_send(struct vmbus_channel *ch, void *data, uint32_t datalen,
1723 uint64_t rid, int type, uint32_t flags)
1724 {
1725 struct vmbus_softc *sc = ch->ch_sc;
1726 struct vmbus_chanpkt cp;
1727 struct iovec iov[3];
1728 uint32_t pktlen, pktlen_aligned;
1729 uint64_t zeropad = 0;
1730 int rv, needsig = 0;
1731
1732 pktlen = sizeof(cp) + datalen;
1733 pktlen_aligned = roundup(pktlen, sizeof(uint64_t));
1734
1735 cp.cp_hdr.cph_type = type;
1736 cp.cp_hdr.cph_flags = flags;
1737 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp));
1738 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
1739 cp.cp_hdr.cph_tid = rid;
1740
1741 iov[0].iov_base = &cp;
1742 iov[0].iov_len = sizeof(cp);
1743
1744 iov[1].iov_base = data;
1745 iov[1].iov_len = datalen;
1746
1747 iov[2].iov_base = &zeropad;
1748 iov[2].iov_len = pktlen_aligned - pktlen;
1749
1750 mutex_enter(&ch->ch_wrd.rd_lock);
1751 rv = vmbus_ring_write(&ch->ch_wrd, iov, 3, &needsig);
1752 mutex_exit(&ch->ch_wrd.rd_lock);
1753 if (rv == 0 && needsig)
1754 vmbus_channel_setevent(sc, ch);
1755
1756 return rv;
1757 }
1758
1759 int
1760 vmbus_channel_send_sgl(struct vmbus_channel *ch, struct vmbus_gpa *sgl,
1761 uint32_t nsge, void *data, uint32_t datalen, uint64_t rid)
1762 {
1763 struct vmbus_softc *sc = ch->ch_sc;
1764 struct vmbus_chanpkt_sglist cp;
1765 struct iovec iov[4];
1766 uint32_t buflen, pktlen, pktlen_aligned;
1767 uint64_t zeropad = 0;
1768 int rv, needsig = 0;
1769
1770 buflen = sizeof(struct vmbus_gpa) * nsge;
1771 pktlen = sizeof(cp) + datalen + buflen;
1772 pktlen_aligned = roundup(pktlen, sizeof(uint64_t));
1773
1774 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
1775 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
1776 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
1777 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
1778 cp.cp_hdr.cph_tid = rid;
1779 cp.cp_gpa_cnt = nsge;
1780 cp.cp_rsvd = 0;
1781
1782 iov[0].iov_base = &cp;
1783 iov[0].iov_len = sizeof(cp);
1784
1785 iov[1].iov_base = sgl;
1786 iov[1].iov_len = buflen;
1787
1788 iov[2].iov_base = data;
1789 iov[2].iov_len = datalen;
1790
1791 iov[3].iov_base = &zeropad;
1792 iov[3].iov_len = pktlen_aligned - pktlen;
1793
1794 mutex_enter(&ch->ch_wrd.rd_lock);
1795 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig);
1796 mutex_exit(&ch->ch_wrd.rd_lock);
1797 if (rv == 0 && needsig)
1798 vmbus_channel_setevent(sc, ch);
1799
1800 return rv;
1801 }
1802
1803 int
1804 vmbus_channel_send_prpl(struct vmbus_channel *ch, struct vmbus_gpa_range *prpl,
1805 uint32_t nprp, void *data, uint32_t datalen, uint64_t rid)
1806 {
1807 struct vmbus_softc *sc = ch->ch_sc;
1808 struct vmbus_chanpkt_prplist cp;
1809 struct iovec iov[4];
1810 uint32_t buflen, pktlen, pktlen_aligned;
1811 uint64_t zeropad = 0;
1812 int rv, needsig = 0;
1813
1814 buflen = sizeof(struct vmbus_gpa_range) * (nprp + 1);
1815 pktlen = sizeof(cp) + datalen + buflen;
1816 pktlen_aligned = roundup(pktlen, sizeof(uint64_t));
1817
1818 cp.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
1819 cp.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
1820 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_hlen, sizeof(cp) + buflen);
1821 VMBUS_CHANPKT_SETLEN(cp.cp_hdr.cph_tlen, pktlen_aligned);
1822 cp.cp_hdr.cph_tid = rid;
1823 cp.cp_range_cnt = 1;
1824 cp.cp_rsvd = 0;
1825
1826 iov[0].iov_base = &cp;
1827 iov[0].iov_len = sizeof(cp);
1828
1829 iov[1].iov_base = prpl;
1830 iov[1].iov_len = buflen;
1831
1832 iov[2].iov_base = data;
1833 iov[2].iov_len = datalen;
1834
1835 iov[3].iov_base = &zeropad;
1836 iov[3].iov_len = pktlen_aligned - pktlen;
1837
1838 mutex_enter(&ch->ch_wrd.rd_lock);
1839 rv = vmbus_ring_write(&ch->ch_wrd, iov, 4, &needsig);
1840 mutex_exit(&ch->ch_wrd.rd_lock);
1841 if (rv == 0 && needsig)
1842 vmbus_channel_setevent(sc, ch);
1843
1844 return rv;
1845 }
1846
1847 static int
1848 vmbus_ring_peek(struct vmbus_ring_data *rrd, void *data, uint32_t datalen)
1849 {
1850 uint32_t avail;
1851
1852 KASSERT(datalen <= rrd->rd_dsize);
1853
1854 vmbus_ring_avail(rrd, NULL, &avail);
1855 if (avail < datalen)
1856 return EAGAIN;
1857
1858 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 1);
1859 return 0;
1860 }
1861
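/*
 * Consume data from the RX ring: optionally skip "offset" bytes of
 * header, copy out the payload and the trailing index field, then
 * publish the new read index to the host.
 */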
1862 static int
1863 vmbus_ring_read(struct vmbus_ring_data *rrd, void *data, uint32_t datalen,
1864 uint32_t offset)
1865 {
1866 uint64_t indices;
1867 uint32_t avail;
1868
1869 KASSERT(datalen <= rrd->rd_dsize);
1870
1871 vmbus_ring_avail(rrd, NULL, &avail);
1872 if (avail < datalen) {
1873 DPRINTF("%s: avail %u datalen %u\n", __func__, avail, datalen);
1874 return EAGAIN;
1875 }
1876
1877 if (offset) {
1878 rrd->rd_cons += offset;
1879 if (rrd->rd_cons >= rrd->rd_dsize)
1880 rrd->rd_cons -= rrd->rd_dsize;
1881 }
1882
1883 vmbus_ring_get(rrd, (uint8_t *)data, datalen, 0);
1884 vmbus_ring_get(rrd, (uint8_t *)&indices, sizeof(indices), 0);
1885
1886 membar_sync();
1887 rrd->rd_ring->br_rindex = rrd->rd_cons;
1888
1889 return 0;
1890 }
1891
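/*
 * Receive one packet from the channel's RX ring.  With "raw" set the
 * packet header is returned along with the payload; otherwise only
 * the data beyond the header is copied.  Returns EAGAIN if a complete
 * packet is not yet available and EINVAL if the caller's buffer is
 * too small for it.
 */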
1892 int
1893 vmbus_channel_recv(struct vmbus_channel *ch, void *data, uint32_t datalen,
1894 uint32_t *rlen, uint64_t *rid, int raw)
1895 {
1896 struct vmbus_softc *sc = ch->ch_sc;
1897 struct vmbus_chanpkt_hdr cph;
1898 uint32_t offset, pktlen;
1899 int rv;
1900
1901 *rlen = 0;
1902
1903 mutex_enter(&ch->ch_rrd.rd_lock);
1904
1905 if ((rv = vmbus_ring_peek(&ch->ch_rrd, &cph, sizeof(cph))) != 0) {
1906 mutex_exit(&ch->ch_rrd.rd_lock);
1907 return rv;
1908 }
1909
1910 offset = raw ? 0 : VMBUS_CHANPKT_GETLEN(cph.cph_hlen);
1911 pktlen = VMBUS_CHANPKT_GETLEN(cph.cph_tlen) - offset;
1912 if (pktlen > datalen) {
1913 mutex_exit(&ch->ch_rrd.rd_lock);
1914 device_printf(sc->sc_dev, "%s: pktlen %u datalen %u\n",
1915 __func__, pktlen, datalen);
1916 return EINVAL;
1917 }
1918
1919 rv = vmbus_ring_read(&ch->ch_rrd, data, pktlen, offset);
1920 if (rv == 0) {
1921 *rlen = pktlen;
1922 *rid = cph.cph_tid;
1923 }
1924
1925 mutex_exit(&ch->ch_rrd.rd_lock);
1926
1927 return rv;
1928 }
1929
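/*
 * Mask/unmask the ring's interrupt flag to stop or resume host
 * signalling for this channel.
 */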
1930 static inline void
1931 vmbus_ring_mask(struct vmbus_ring_data *rd)
1932 {
1933
1934 membar_sync();
1935 rd->rd_ring->br_imask = 1;
1936 membar_sync();
1937 }
1938
1939 static inline void
1940 vmbus_ring_unmask(struct vmbus_ring_data *rd)
1941 {
1942
1943 membar_sync();
1944 rd->rd_ring->br_imask = 0;
1945 membar_sync();
1946 }
1947
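/*
 * Pause event processing for the channel: flag it in the softc event
 * mask and mask interrupts on the RX ring.
 */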
1948 void
1949 vmbus_channel_pause(struct vmbus_channel *ch)
1950 {
1951
1952 atomic_or_ulong(&ch->ch_sc->sc_evtmask[ch->ch_id / VMBUS_EVTFLAG_LEN],
1953 __BIT(ch->ch_id % VMBUS_EVTFLAG_LEN));
1954 vmbus_ring_mask(&ch->ch_rrd);
1955 }
1956
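/*
 * Resume event processing and return the number of bytes pending in
 * the RX ring so the caller can drain anything that arrived while
 * the channel was paused.
 */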
1957 uint32_t
1958 vmbus_channel_unpause(struct vmbus_channel *ch)
1959 {
1960 uint32_t avail;
1961
1962 atomic_and_ulong(&ch->ch_sc->sc_evtmask[ch->ch_id / VMBUS_EVTFLAG_LEN],
1963 ~__BIT(ch->ch_id % VMBUS_EVTFLAG_LEN));
1964 vmbus_ring_unmask(&ch->ch_rrd);
1965 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail);
1966
1967 return avail;
1968 }
1969
1970 uint32_t
1971 vmbus_channel_ready(struct vmbus_channel *ch)
1972 {
1973 uint32_t avail;
1974
1975 vmbus_ring_avail(&ch->ch_rrd, NULL, &avail);
1976
1977 return avail;
1978 }
1979
1980 bool
1981 vmbus_channel_tx_empty(struct vmbus_channel *ch)
1982 {
1983
1984 return vmbus_ring_is_empty(&ch->ch_wrd);
1985 }
1986
1987 bool
1988 vmbus_channel_rx_empty(struct vmbus_channel *ch)
1989 {
1990
1991 return vmbus_ring_is_empty(&ch->ch_rrd);
1992 }
1993
1994 /* How many PFNs can be referenced by the header */
1995 #define VMBUS_NPFNHDR ((VMBUS_MSG_DSIZE_MAX - \
1996 sizeof(struct vmbus_chanmsg_gpadl_conn)) / sizeof(uint64_t))
1997
1998 /* How many PFNs can be referenced by the body */
1999 #define VMBUS_NPFNBODY ((VMBUS_MSG_DSIZE_MAX - \
2000 sizeof(struct vmbus_chanmsg_gpadl_subconn)) / sizeof(uint64_t))
2001
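/*
 * Establish a GPADL (guest physical address descriptor list) for the
 * given DMA buffer.  As many page frames as possible are packed into
 * the GPADL_CONN message and the remainder is sent in GPADL_SUBCONN
 * messages; the handle in the host's reply is checked against the
 * one we allocated.
 */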
2002 int
2003 vmbus_handle_alloc(struct vmbus_channel *ch, const struct hyperv_dma *dma,
2004 uint32_t buflen, uint32_t *handle)
2005 {
2006 struct vmbus_softc *sc = ch->ch_sc;
2007 struct vmbus_chanmsg_gpadl_conn *hdr;
2008 struct vmbus_chanmsg_gpadl_subconn *cmd;
2009 struct vmbus_chanmsg_gpadl_connresp rsp;
2010 struct vmbus_msg *msg;
2011 int i, j, last, left, rv;
2012 int bodylen = 0, ncmds = 0, pfn = 0;
2013 uint64_t *frames;
2014 paddr_t pa;
2015 uint8_t *body;
2016 /* Total number of pages to reference */
2017 int total = atop(buflen);
2018 /* Number of pages that will fit the header */
2019 int inhdr = MIN(total, VMBUS_NPFNHDR);
2020
2021 KASSERT((buflen & PAGE_MASK) == 0);
2022 KASSERT(buflen == (uint32_t)dma->map->dm_mapsize);
2023
2024 msg = pool_cache_get_paddr(sc->sc_msgpool, PR_WAITOK, &pa);
2025
2026 /* Prepare array of frame addresses */
2027 frames = kmem_zalloc(total * sizeof(*frames), KM_SLEEP);
2028 for (i = 0, j = 0; i < dma->map->dm_nsegs && j < total; i++) {
2029 bus_dma_segment_t *seg = &dma->map->dm_segs[i];
2030 bus_addr_t addr = seg->ds_addr;
2031
2032 KASSERT((addr & PAGE_MASK) == 0);
2033 KASSERT((seg->ds_len & PAGE_MASK) == 0);
2034
2035 while (addr < seg->ds_addr + seg->ds_len && j < total) {
2036 frames[j++] = atop(addr);
2037 addr += PAGE_SIZE;
2038 }
2039 }
2040
2041 memset(msg, 0, sizeof(*msg));
2042 msg->msg_req.hc_dsize = sizeof(struct vmbus_chanmsg_gpadl_conn) +
2043 inhdr * sizeof(uint64_t);
2044 hdr = (struct vmbus_chanmsg_gpadl_conn *)msg->msg_req.hc_data;
2045 msg->msg_rsp = &rsp;
2046 msg->msg_rsplen = sizeof(rsp);
2047 msg->msg_flags = MSGF_NOSLEEP;
2048
2049 left = total - inhdr;
2050
2051 /* Allocate additional gpadl_body structures if required */
2052 if (left > 0) {
2053 ncmds = howmany(left, VMBUS_NPFNBODY);
2054 bodylen = ncmds * VMBUS_MSG_DSIZE_MAX;
2055 body = kmem_zalloc(bodylen, KM_SLEEP);
2056 }
2057
2058 *handle = atomic_inc_32_nv(&sc->sc_handle);
2059
2060 hdr->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_CONN;
2061 hdr->chm_chanid = ch->ch_id;
2062 hdr->chm_gpadl = *handle;
2063
2064 /* Single range for a contiguous buffer */
2065 hdr->chm_range_cnt = 1;
2066 hdr->chm_range_len = sizeof(struct vmbus_gpa_range) + total *
2067 sizeof(uint64_t);
2068 hdr->chm_range.gpa_ofs = 0;
2069 hdr->chm_range.gpa_len = buflen;
2070
2071 /* Fit as many pages as possible into the header */
2072 for (i = 0; i < inhdr; i++)
2073 hdr->chm_range.gpa_page[i] = frames[pfn++];
2074
2075 for (i = 0; i < ncmds; i++) {
2076 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
2077 VMBUS_MSG_DSIZE_MAX * i);
2078 cmd->chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_SUBCONN;
2079 cmd->chm_gpadl = *handle;
2080 last = MIN(left, VMBUS_NPFNBODY);
2081 for (j = 0; j < last; j++)
2082 cmd->chm_gpa_page[j] = frames[pfn++];
2083 left -= last;
2084 }
2085
2086 rv = vmbus_start(sc, msg, pa);
2087 if (rv != 0) {
2088 DPRINTF("%s: GPADL_CONN failed\n", device_xname(sc->sc_dev));
2089 goto out;
2090 }
2091 for (i = 0; i < ncmds; i++) {
2092 int cmdlen = sizeof(*cmd);
2093 cmd = (struct vmbus_chanmsg_gpadl_subconn *)(body +
2094 VMBUS_MSG_DSIZE_MAX * i);
2095 /* Last element can be short */
2096 if (i == ncmds - 1)
2097 cmdlen += last * sizeof(uint64_t);
2098 else
2099 cmdlen += VMBUS_NPFNBODY * sizeof(uint64_t);
2100 rv = vmbus_cmd(sc, cmd, cmdlen, NULL, 0,
2101 HCF_NOREPLY | HCF_NOSLEEP);
2102 if (rv != 0) {
2103 DPRINTF("%s: GPADL_SUBCONN (iteration %d/%d) failed "
2104 "with %d\n", device_xname(sc->sc_dev), i, ncmds,
2105 rv);
2106 goto out;
2107 }
2108 }
2109 rv = vmbus_reply(sc, msg);
2110 if (rv != 0) {
2111 DPRINTF("%s: GPADL allocation failed with %d\n",
2112 device_xname(sc->sc_dev), rv);
2113 }
2114
2115 out:
2116 if (bodylen > 0)
2117 kmem_free(body, bodylen);
2118 kmem_free(frames, total * sizeof(*frames));
2119 pool_cache_put_paddr(sc->sc_msgpool, msg, pa);
2120 if (rv)
2121 return rv;
2122
2123 KASSERT(*handle == rsp.chm_gpadl);
2124
2125 return 0;
2126 }
2127
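/* Tear down a GPADL previously set up with vmbus_handle_alloc() */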
2128 void
2129 vmbus_handle_free(struct vmbus_channel *ch, uint32_t handle)
2130 {
2131 struct vmbus_softc *sc = ch->ch_sc;
2132 struct vmbus_chanmsg_gpadl_disconn cmd;
2133 struct vmbus_chanmsg_gpadl_disconn rsp;
2134 int rv;
2135
2136 memset(&cmd, 0, sizeof(cmd));
2137 cmd.chm_hdr.chm_type = VMBUS_CHANMSG_GPADL_DISCONN;
2138 cmd.chm_chanid = ch->ch_id;
2139 cmd.chm_gpadl = handle;
2140
2141 rv = vmbus_cmd(sc, &cmd, sizeof(cmd), &rsp, sizeof(rsp), HCF_NOSLEEP);
2142 if (rv) {
2143 DPRINTF("%s: GPADL_DISCONN failed with %d\n",
2144 device_xname(sc->sc_dev), rv);
2145 }
2146 }
2147
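/*
 * Queue a channel offer/rescind event for the channel event thread;
 * the allocation must not sleep as this may run from soft interrupt
 * context.
 */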
2148 static void
2149 vmbus_chevq_enqueue(struct vmbus_softc *sc, int type, void *arg)
2150 {
2151 struct vmbus_chev *vce;
2152
2153 vce = kmem_intr_alloc(sizeof(*vce), KM_NOSLEEP);
2154 if (vce == NULL) {
2155 device_printf(sc->sc_dev, "failed to allocate chev\n");
2156 return;
2157 }
2158
2159 vce->vce_type = type;
2160 vce->vce_arg = arg;
2161
2162 mutex_enter(&sc->sc_chevq_lock);
2163 SIMPLEQ_INSERT_TAIL(&sc->sc_chevq, vce, vce_entry);
2164 cv_broadcast(&sc->sc_chevq_cv);
2165 mutex_exit(&sc->sc_chevq_lock);
2166 }
2167
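/*
 * Drain the channel event queue, dispatching offers and rescinds with
 * the queue lock dropped around each handler.
 */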
2168 static void
2169 vmbus_process_chevq(void *arg)
2170 {
2171 struct vmbus_softc *sc = arg;
2172 struct vmbus_chev *vce;
2173 struct vmbus_chanmsg_choffer *co;
2174 struct vmbus_chanmsg_chrescind *cr;
2175
2176 KASSERT(mutex_owned(&sc->sc_chevq_lock));
2177
2178 while (!SIMPLEQ_EMPTY(&sc->sc_chevq)) {
2179 vce = SIMPLEQ_FIRST(&sc->sc_chevq);
2180 SIMPLEQ_REMOVE_HEAD(&sc->sc_chevq, vce_entry);
2181 mutex_exit(&sc->sc_chevq_lock);
2182
2183 switch (vce->vce_type) {
2184 case VMBUS_CHEV_TYPE_OFFER:
2185 co = vce->vce_arg;
2186 vmbus_process_offer(sc, co);
2187 kmem_free(co, sizeof(*co));
2188 break;
2189
2190 case VMBUS_CHEV_TYPE_RESCIND:
2191 cr = vce->vce_arg;
2192 vmbus_process_rescind(sc, cr);
2193 kmem_free(cr, sizeof(*cr));
2194 break;
2195
2196 default:
2197 DPRINTF("%s: unknown chevq type %d\n",
2198 device_xname(sc->sc_dev), vce->vce_type);
2199 break;
2200 }
2201 kmem_free(vce, sizeof(*vce));
2202
2203 mutex_enter(&sc->sc_chevq_lock);
2204 }
2205 }
2206
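/* Worker thread that waits for and processes channel events */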
2207 static void
2208 vmbus_chevq_thread(void *arg)
2209 {
2210 struct vmbus_softc *sc = arg;
2211
2212 mutex_enter(&sc->sc_chevq_lock);
2213 for (;;) {
2214 if (SIMPLEQ_EMPTY(&sc->sc_chevq)) {
2215 cv_wait(&sc->sc_chevq_cv, &sc->sc_chevq_lock);
2216 continue;
2217 }
2218
2219 vmbus_process_chevq(sc);
2220 }
2221 mutex_exit(&sc->sc_chevq_lock);
2222
2223 kthread_exit(0);
2224 }
2225
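/*
 * Queue an attach/detach request: primary channels go to the main
 * device queue, sub-channels to their own queue and worker thread.
 */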
2226 static void
2227 vmbus_devq_enqueue(struct vmbus_softc *sc, int type, struct vmbus_channel *ch)
2228 {
2229 struct vmbus_dev *vd;
2230
2231 vd = kmem_zalloc(sizeof(*vd), KM_SLEEP);
2232 if (vd == NULL) {
2233 device_printf(sc->sc_dev, "failed to allocate devq\n");
2234 return;
2235 }
2236
2237 vd->vd_type = type;
2238 vd->vd_chan = ch;
2239
2240 if (VMBUS_CHAN_ISPRIMARY(ch)) {
2241 mutex_enter(&sc->sc_devq_lock);
2242 SIMPLEQ_INSERT_TAIL(&sc->sc_devq, vd, vd_entry);
2243 cv_broadcast(&sc->sc_devq_cv);
2244 mutex_exit(&sc->sc_devq_lock);
2245 } else {
2246 mutex_enter(&sc->sc_subch_devq_lock);
2247 SIMPLEQ_INSERT_TAIL(&sc->sc_subch_devq, vd, vd_entry);
2248 cv_broadcast(&sc->sc_subch_devq_cv);
2249 mutex_exit(&sc->sc_subch_devq_lock);
2250 }
2251 }
2252
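/*
 * Drain the primary-channel device queue: attach a child device for
 * each offered channel and detach/free channels that were rescinded.
 */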
2253 static void
2254 vmbus_process_devq(void *arg)
2255 {
2256 struct vmbus_softc *sc = arg;
2257 struct vmbus_dev *vd;
2258 struct vmbus_channel *ch;
2259 struct vmbus_attach_args vaa;
2260
2261 KASSERT(mutex_owned(&sc->sc_devq_lock));
2262
2263 while (!SIMPLEQ_EMPTY(&sc->sc_devq)) {
2264 vd = SIMPLEQ_FIRST(&sc->sc_devq);
2265 SIMPLEQ_REMOVE_HEAD(&sc->sc_devq, vd_entry);
2266 mutex_exit(&sc->sc_devq_lock);
2267
2268 switch (vd->vd_type) {
2269 case VMBUS_DEV_TYPE_ATTACH:
2270 ch = vd->vd_chan;
2271 vaa.aa_type = &ch->ch_type;
2272 vaa.aa_inst = &ch->ch_inst;
2273 vaa.aa_ident = ch->ch_ident;
2274 vaa.aa_chan = ch;
2275 vaa.aa_iot = sc->sc_iot;
2276 vaa.aa_memt = sc->sc_memt;
2277 ch->ch_dev = config_found(sc->sc_dev,
2278 &vaa, vmbus_attach_print, CFARGS_NONE);
2279 break;
2280
2281 case VMBUS_DEV_TYPE_DETACH:
2282 ch = vd->vd_chan;
2283 if (ch->ch_dev != NULL) {
2284 config_detach(ch->ch_dev, DETACH_FORCE);
2285 ch->ch_dev = NULL;
2286 }
2287 vmbus_channel_release(ch);
2288 vmbus_channel_free(ch);
2289 break;
2290
2291 default:
2292 DPRINTF("%s: unknown devq type %d\n",
2293 device_xname(sc->sc_dev), vd->vd_type);
2294 break;
2295 }
2296 kmem_free(vd, sizeof(*vd));
2297
2298 mutex_enter(&sc->sc_devq_lock);
2299 }
2300 }
2301
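/* Worker thread that waits for and processes primary-channel device events */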
2302 static void
2303 vmbus_devq_thread(void *arg)
2304 {
2305 struct vmbus_softc *sc = arg;
2306
2307 mutex_enter(&sc->sc_devq_lock);
2308 for (;;) {
2309 if (SIMPLEQ_EMPTY(&sc->sc_devq)) {
2310 cv_wait(&sc->sc_devq_cv, &sc->sc_devq_lock);
2311 continue;
2312 }
2313
2314 vmbus_process_devq(sc);
2315 }
2316 mutex_exit(&sc->sc_devq_lock);
2317
2318 kthread_exit(0);
2319 }
2320
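/*
 * Worker thread for sub-channel device events; a detach unlinks the
 * sub-channel from its primary channel and wakes anyone waiting for
 * the sub-channel count to drop before the channel is freed.
 */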
2321 static void
2322 vmbus_subchannel_devq_thread(void *arg)
2323 {
2324 struct vmbus_softc *sc = arg;
2325 struct vmbus_dev *vd;
2326 struct vmbus_channel *ch, *prich;
2327
2328 mutex_enter(&sc->sc_subch_devq_lock);
2329 for (;;) {
2330 if (SIMPLEQ_EMPTY(&sc->sc_subch_devq)) {
2331 cv_wait(&sc->sc_subch_devq_cv, &sc->sc_subch_devq_lock);
2332 continue;
2333 }
2334
2335 while (!SIMPLEQ_EMPTY(&sc->sc_subch_devq)) {
2336 vd = SIMPLEQ_FIRST(&sc->sc_subch_devq);
2337 SIMPLEQ_REMOVE_HEAD(&sc->sc_subch_devq, vd_entry);
2338 mutex_exit(&sc->sc_subch_devq_lock);
2339
2340 switch (vd->vd_type) {
2341 case VMBUS_DEV_TYPE_ATTACH:
2342 /* Nothing to do */
2343 break;
2344
2345 case VMBUS_DEV_TYPE_DETACH:
2346 ch = vd->vd_chan;
2347
2348 vmbus_channel_release(ch);
2349
2350 prich = ch->ch_primary_channel;
2351 mutex_enter(&prich->ch_subchannel_lock);
2352 TAILQ_REMOVE(&prich->ch_subchannels, ch,
2353 ch_subentry);
2354 prich->ch_subchannel_count--;
2355 mutex_exit(&prich->ch_subchannel_lock);
2356 wakeup(prich);
2357
2358 vmbus_channel_free(ch);
2359 break;
2360
2361 default:
2362 DPRINTF("%s: unknown devq type %d\n",
2363 device_xname(sc->sc_dev), vd->vd_type);
2364 break;
2365 }
2366
2367 kmem_free(vd, sizeof(*vd));
2368
2369 mutex_enter(&sc->sc_subch_devq_lock);
2370 }
2371 }
2372 mutex_exit(&sc->sc_subch_devq_lock);
2373
2374 kthread_exit(0);
2375 }
2376
2377
2378 static int
2379 vmbus_attach_print(void *aux, const char *name)
2380 {
2381 struct vmbus_attach_args *aa = aux;
2382
2383 if (name)
2384 printf("\"%s\" at %s", aa->aa_ident, name);
2385
2386 return UNCONF;
2387 }
2388
2389 MODULE(MODULE_CLASS_DRIVER, vmbus, "hyperv");
2390
2391 #ifdef _MODULE
2392 #include "ioconf.c"
2393 #endif
2394
2395 static int
2396 vmbus_modcmd(modcmd_t cmd, void *aux)
2397 {
2398 int rv = 0;
2399
2400 switch (cmd) {
2401 case MODULE_CMD_INIT:
2402 #ifdef _MODULE
2403 rv = config_init_component(cfdriver_ioconf_vmbus,
2404 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus);
2405 #endif
2406 break;
2407
2408 case MODULE_CMD_FINI:
2409 #ifdef _MODULE
2410 rv = config_fini_component(cfdriver_ioconf_vmbus,
2411 cfattach_ioconf_vmbus, cfdata_ioconf_vmbus);
2412 #endif
2413 break;
2414
2415 default:
2416 rv = ENOTTY;
2417 break;
2418 }
2419
2420 return rv;
2421 }
2422