xref: /freebsd/sys/dev/virtio/mmio/virtio_mmio.c (revision fdafd315)
/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * VirtIO MMIO interface.
 * This driver is heavily based on the VirtIO PCI interface driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>

#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"

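/*
 * Per-virtqueue bookkeeping: the virtqueue itself and a flag noting
 * whether the child driver registered an interrupt handler for it.
 */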
struct vtmmio_virtqueue {
	struct virtqueue	*vtv_vq;
	int			 vtv_no_intr;
};

static int	vtmmio_detach(device_t);
static int	vtmmio_suspend(device_t);
static int	vtmmio_resume(device_t);
static int	vtmmio_shutdown(device_t);
static void	vtmmio_driver_added(device_t, driver_t *);
static void	vtmmio_child_detached(device_t, device_t);
static int	vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int	vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t	vtmmio_negotiate_features(device_t, uint64_t);
static int	vtmmio_finalize_features(device_t);
static bool	vtmmio_with_feature(device_t, uint64_t);
static void	vtmmio_set_virtqueue(struct vtmmio_softc *sc,
		    struct virtqueue *vq, uint32_t size);
static int	vtmmio_alloc_virtqueues(device_t, int,
		    struct vq_alloc_info *);
static int	vtmmio_setup_intr(device_t, enum intr_type);
static void	vtmmio_stop(device_t);
static void	vtmmio_poll(device_t);
static int	vtmmio_reinit(device_t, uint64_t);
static void	vtmmio_reinit_complete(device_t);
static void	vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
static int	vtmmio_config_generation(device_t);
static uint8_t	vtmmio_get_status(device_t);
static void	vtmmio_set_status(device_t, uint8_t);
static void	vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static uint64_t	vtmmio_read_dev_config_8(struct vtmmio_softc *, bus_size_t);
static void	vtmmio_write_dev_config(device_t, bus_size_t, const void *, int);
static void	vtmmio_describe_features(struct vtmmio_softc *, const char *,
		    uint64_t);
static void	vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int	vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_free_interrupts(struct vtmmio_softc *);
static void	vtmmio_free_virtqueues(struct vtmmio_softc *);
static void	vtmmio_release_child_resources(struct vtmmio_softc *);
static void	vtmmio_reset(struct vtmmio_softc *);
static void	vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_vq_intr(void *);

/*
 * MMIO register read/write wrappers.
 */
#define vtmmio_write_config_1(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_1((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define vtmmio_write_config_2(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_2((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define vtmmio_write_config_4(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_4((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)

#define vtmmio_read_config_1(sc, o) \
	bus_read_1((sc)->res[0], (o))
#define vtmmio_read_config_2(sc, o) \
	bus_read_2((sc)->res[0], (o))
#define vtmmio_read_config_4(sc, o) \
	bus_read_4((sc)->res[0], (o))

static device_method_t vtmmio_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_attach,		  vtmmio_attach),
	DEVMETHOD(device_detach,		  vtmmio_detach),
	DEVMETHOD(device_suspend,		  vtmmio_suspend),
	DEVMETHOD(device_resume,		  vtmmio_resume),
	DEVMETHOD(device_shutdown,		  vtmmio_shutdown),

	/* Bus interface. */
	DEVMETHOD(bus_driver_added,		  vtmmio_driver_added),
	DEVMETHOD(bus_child_detached,		  vtmmio_child_detached),
	DEVMETHOD(bus_child_pnpinfo,		  virtio_child_pnpinfo),
	DEVMETHOD(bus_read_ivar,		  vtmmio_read_ivar),
	DEVMETHOD(bus_write_ivar,		  vtmmio_write_ivar),

	/* VirtIO bus interface. */
	DEVMETHOD(virtio_bus_negotiate_features,  vtmmio_negotiate_features),
	DEVMETHOD(virtio_bus_finalize_features,	  vtmmio_finalize_features),
	DEVMETHOD(virtio_bus_with_feature,	  vtmmio_with_feature),
	DEVMETHOD(virtio_bus_alloc_virtqueues,	  vtmmio_alloc_virtqueues),
	DEVMETHOD(virtio_bus_setup_intr,	  vtmmio_setup_intr),
	DEVMETHOD(virtio_bus_stop,		  vtmmio_stop),
	DEVMETHOD(virtio_bus_poll,		  vtmmio_poll),
	DEVMETHOD(virtio_bus_reinit,		  vtmmio_reinit),
	DEVMETHOD(virtio_bus_reinit_complete,	  vtmmio_reinit_complete),
	DEVMETHOD(virtio_bus_notify_vq,		  vtmmio_notify_virtqueue),
	DEVMETHOD(virtio_bus_config_generation,	  vtmmio_config_generation),
	DEVMETHOD(virtio_bus_read_device_config,  vtmmio_read_dev_config),
	DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));

MODULE_VERSION(virtio_mmio, 1);

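/*
 * Probe: map the device registers and verify the magic value, the
 * device version (legacy 1 or modern 2), and that a device type is
 * actually present before claiming the device.
 */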
int
vtmmio_probe(device_t dev)
{
	struct vtmmio_softc *sc;
	int rid;
	uint32_t magic, version;

	sc = device_get_softc(dev);

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != VIRTIO_MMIO_MAGIC_VIRT) {
		device_printf(dev, "Bad magic value %#x\n", magic);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
	if (version < 1 || version > 2) {
		device_printf(dev, "Unsupported version: %#x\n", version);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);

	device_set_desc(dev, "VirtIO MMIO adapter");
	return (BUS_PROBE_DEFAULT);
}

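/*
 * Hook up the virtqueue interrupt handler.  A platform back-end, if
 * registered, may supply its own interrupt mechanism; otherwise fall
 * back to allocating and wiring up a regular IRQ resource.
 */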
static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
	struct vtmmio_softc *sc;
	int rid;
	int err;

	sc = device_get_softc(dev);

	if (sc->platform != NULL) {
		err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
					vtmmio_vq_intr, sc);
		if (err == 0) {
			/* Okay we have backend-specific interrupts */
			return (0);
		}
	}

	rid = 0;
	sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		RF_ACTIVE);
	if (!sc->res[1]) {
		device_printf(dev, "Can't allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
		NULL, vtmmio_vq_intr, sc, &sc->ih)) {
		device_printf(dev, "Can't setup the interrupt\n");
		return (ENXIO);
	}

	return (0);
}

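/*
 * Attach: map the register window, reset the device, acknowledge it,
 * and add an anonymous child device to be claimed by the matching
 * VirtIO device driver.
 */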
int
vtmmio_attach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
			RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);

	vtmmio_reset(sc);

	/* Tell the host we've noticed this device. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
		device_printf(dev, "Cannot create child device.\n");
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_detach(dev);
		return (ENOMEM);
	}

	sc->vtmmio_child_dev = child;
	vtmmio_probe_and_attach_child(sc);

	return (0);
}

static int
vtmmio_detach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int error;

	sc = device_get_softc(dev);

	if ((child = sc->vtmmio_child_dev) != NULL) {
		error = device_delete_child(dev, child);
		if (error)
			return (error);
		sc->vtmmio_child_dev = NULL;
	}

	vtmmio_reset(sc);

	if (sc->res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->res[0]);
		sc->res[0] = NULL;
	}

	return (0);
}

static int
vtmmio_suspend(device_t dev)
{

	return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{

	return (bus_generic_resume(dev));
}

static int
vtmmio_shutdown(device_t dev)
{

	(void) bus_generic_shutdown(dev);

	/* Forcibly stop the host device. */
	vtmmio_stop(dev);

	return (0);
}

static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_probe_and_attach_child(sc);
}

static void
vtmmio_child_detached(device_t dev, device_t child)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_reset(sc);
	vtmmio_release_child_resources(sc);
}

static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_DEVTYPE:
	case VIRTIO_IVAR_SUBDEVICE:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
		break;
	case VIRTIO_IVAR_VENDOR:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
		break;
	case VIRTIO_IVAR_SUBVENDOR:
	case VIRTIO_IVAR_DEVICE:
		/*
		 * Dummy value for fields not present in this bus.  Used by
		 * bus-agnostic virtio_child_pnpinfo.
		 */
		*result = 0;
		break;
	case VIRTIO_IVAR_MODERN:
		/*
		 * There are several modern (aka MMIO v2) spec compliance
		 * issues with this driver, but keep the status quo.
		 */
		*result = sc->vtmmio_version > 1;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_FEATURE_DESC:
		sc->vtmmio_child_feat_desc = (void *) value;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

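/*
 * Feature negotiation: read the 64-bit host feature set in two 32-bit
 * halves (selected with HOST_FEATURES_SEL), intersect it with what the
 * child driver supports, filter the transport features, and write the
 * accepted set back through GUEST_FEATURES.
 */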
static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
	struct vtmmio_softc *sc;
	uint64_t host_features, features;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		child_features |= VIRTIO_F_VERSION_1;
	}

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
	host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
	host_features <<= 32;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
	host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

	vtmmio_describe_features(sc, "host", host_features);

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & child_features;
	features = virtio_filter_transport_features(features);
	sc->vtmmio_features = features;

	vtmmio_describe_features(sc, "negotiated", features);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

	return (features);
}

static int
vtmmio_finalize_features(device_t dev)
{
	struct vtmmio_softc *sc;
	uint8_t status;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		/*
		 * Must re-read the status after setting it to verify the
		 * negotiated features were accepted by the device.
		 */
		vtmmio_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);

		status = vtmmio_get_status(dev);
		if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
			device_printf(dev, "desired features were not accepted\n");
			return (ENOTSUP);
		}
	}

	return (0);
}

static bool
vtmmio_with_feature(device_t dev, uint64_t feature)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return ((sc->vtmmio_features & feature) != 0);
}

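/*
 * Program the ring addresses for a virtqueue.  Legacy (version 1)
 * devices take a single guest page frame number via QUEUE_PFN; modern
 * (version 2) devices take the 64-bit physical addresses of the
 * descriptor, available, and used rings and are then marked live via
 * QUEUE_READY.
 */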
static void
vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq,
    uint32_t size)
{
	vm_paddr_t paddr;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
		    VIRTIO_MMIO_VRING_ALIGN);
		paddr = virtqueue_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
		    paddr >> PAGE_SHIFT);
	} else {
		paddr = virtqueue_desc_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_avail_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_used_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH,
		    ((uint64_t)paddr) >> 32);

		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1);
	}
}

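/*
 * Allocate all virtqueues requested by the child driver: select each
 * queue index, read the maximum ring size supported by the device,
 * allocate the ring, and program its addresses into the device.
 */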
static int
vtmmio_alloc_virtqueues(device_t dev, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtmmio_virtqueue *vqx;
	struct vq_alloc_info *info;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t size;
	int idx, error;

	sc = device_get_softc(dev);

	if (sc->vtmmio_nvqs != 0)
		return (EALREADY);
	if (nvqs <= 0)
		return (EINVAL);

	sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vtmmio_vqs == NULL)
		return (ENOMEM);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];
		info = &vq_info[idx];

		vtmmio_select_virtqueue(sc, idx);
		size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

		error = virtqueue_alloc(dev, idx, size,
		    VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN,
		    ~(vm_paddr_t)0, info, &vq);
		if (error) {
			device_printf(dev,
			    "cannot allocate virtqueue %d: %d\n",
			    idx, error);
			break;
		}

		vtmmio_set_virtqueue(sc, vq, size);

		vqx->vtv_vq = *info->vqai_vq = vq;
		vqx->vtv_no_intr = info->vqai_intr == NULL;

		sc->vtmmio_nvqs++;
	}

	if (error)
		vtmmio_free_virtqueues(sc);

	return (error);
}

static void
vtmmio_stop(device_t dev)
{

	vtmmio_reset(device_get_softc(dev));
}

static void
vtmmio_poll(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->platform != NULL)
		VIRTIO_MMIO_POLL(sc->platform);
}

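/*
 * Reinitialize the device after the child driver has stopped it: walk
 * the status back through ACK and DRIVER, renegotiate and finalize
 * features, and reprogram every previously allocated virtqueue.  The
 * device only becomes live again in vtmmio_reinit_complete().
 */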
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
	struct vtmmio_softc *sc;
	int idx, error;

	sc = device_get_softc(dev);

	if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtmmio_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtmmio_reinit_complete().
	 */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	/*
	 * TODO: Check that no features were added beyond what was
	 * originally negotiated.
	 */
	vtmmio_negotiate_features(dev, features);
	error = vtmmio_finalize_features(dev);
	if (error) {
		device_printf(dev, "cannot finalize features during reinit\n");
		return (error);
	}

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		error = vtmmio_reinit_virtqueue(sc, idx);
		if (error)
			return (error);
	}

	return (0);
}

static void
vtmmio_reinit_complete(device_t dev)
{

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);
	MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY);

	vtmmio_write_config_4(sc, offset, queue);
}

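/*
 * Modern (version 2) devices expose a configuration generation
 * counter; legacy devices do not, so report generation 0 for them.
 */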
static int
vtmmio_config_generation(device_t dev)
{
	struct vtmmio_softc *sc;
	uint32_t gen;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1)
		gen = vtmmio_read_config_4(sc, VIRTIO_MMIO_CONFIG_GENERATION);
	else
		gen = 0;

	return (gen);
}

static uint8_t
vtmmio_get_status(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

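/*
 * Status bits are cumulative: everything except a reset request is
 * OR-ed into the current device status before being written back.
 */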
static void
vtmmio_set_status(device_t dev, uint8_t status)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= vtmmio_get_status(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}

static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *d;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds the following restriction:
	 *
	 *   4.2.2.2: For the device-specific configuration space, the driver
	 *   MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
	 *   and aligned accesses for 16 bit wide fields and 32 bit wide and
	 *   aligned accesses for 32 and 64 bit wide fields.
	 *
	 * The endianness also varies between non-legacy and legacy:
	 *
	 *   2.4: Note: The device configuration space uses the little-endian
	 *   format for multi-byte fields.
	 *
	 *   2.4.3: Note that for legacy interfaces, device configuration space
	 *   is generally the guest’s native endian, rather than PCI’s
	 *   little-endian. The correct endian-ness is documented for each
	 *   device.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			*(uint8_t *)dst = vtmmio_read_config_1(sc, off);
			break;
		case 2:
			*(uint16_t *)dst =
			    le16toh(vtmmio_read_config_2(sc, off));
			break;
		case 4:
			*(uint32_t *)dst =
			    le32toh(vtmmio_read_config_4(sc, off));
			break;
		case 8:
			*(uint64_t *)dst = vtmmio_read_dev_config_8(sc, off);
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			*(uint32_t *)d = vtmmio_read_config_4(sc, off);
		} else if (length >= 2) {
			size = 2;
			*(uint16_t *)d = vtmmio_read_config_2(sc, off);
		} else
#endif
		{
			size = 1;
			*d = vtmmio_read_config_1(sc, off);
		}
	}
}

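/*
 * Read a 64-bit config field as two 32-bit halves, retrying until the
 * configuration generation is stable so the two halves are consistent.
 */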
static uint64_t
vtmmio_read_dev_config_8(struct vtmmio_softc *sc, bus_size_t off)
{
	device_t dev;
	int gen;
	uint32_t val0, val1;

	dev = sc->dev;

	do {
		gen = vtmmio_config_generation(dev);
		val0 = le32toh(vtmmio_read_config_4(sc, off));
		val1 = le32toh(vtmmio_read_config_4(sc, off + 4));
	} while (gen != vtmmio_config_generation(dev));

	return (((uint64_t) val1 << 32) | val0);
}

static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    const void *src, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	const uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds size and alignment
	 * restrictions. It also changes the endianness from native-endian
	 * to little-endian. See vtmmio_read_dev_config.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			vtmmio_write_config_1(sc, off, *(const uint8_t *)src);
			break;
		case 2:
			vtmmio_write_config_2(sc, off,
			    htole16(*(const uint16_t *)src));
			break;
		case 4:
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint32_t *)src));
			break;
		case 8:
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint64_t *)src));
			vtmmio_write_config_4(sc, off + 4,
			    htole32((*(const uint64_t *)src) >> 32));
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			vtmmio_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtmmio_write_config_2(sc, off, *(uint16_t *)s);
		} else
#endif
		{
			size = 1;
			vtmmio_write_config_1(sc, off, *s);
		}
	}
}

static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (device_is_attached(child) || bootverbose == 0)
		return;

	virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}

static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (child == NULL)
		return;

	if (device_get_state(child) != DS_NOTPRESENT) {
		return;
	}

	if (device_probe(child) != 0) {
		return;
	}

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	if (device_attach(child) != 0) {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_reset(sc);
		vtmmio_release_child_resources(sc);
		/* Reset status for future attempt. */
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	} else {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
	struct vtmmio_virtqueue *vqx;
	struct virtqueue *vq;
	int error;
	uint16_t size;

	vqx = &sc->vtmmio_vqs[idx];
	vq = vqx->vtv_vq;

	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

	vtmmio_select_virtqueue(sc, idx);
	size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

	error = virtqueue_reinit(vq, size);
	if (error)
		return (error);

	vtmmio_set_virtqueue(sc, vq, size);

	return (0);
}

static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{

	if (sc->ih != NULL)
		bus_teardown_intr(sc->dev, sc->res[1], sc->ih);

	if (sc->res[1] != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}

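/*
 * Tear down all virtqueues: disable each queue in the device
 * (QUEUE_READY for modern devices, QUEUE_PFN for legacy ones) before
 * freeing the ring memory.
 */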
static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
	struct vtmmio_virtqueue *vqx;
	int idx;

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];

		vtmmio_select_virtqueue(sc, idx);
		if (sc->vtmmio_version > 1) {
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);
			vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_READY);
		} else
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);

		virtqueue_free(vqx->vtv_vq);
		vqx->vtv_vq = NULL;
	}

	free(sc->vtmmio_vqs, M_DEVBUF);
	sc->vtmmio_vqs = NULL;
	sc->vtmmio_nvqs = 0;
}

static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{

	vtmmio_free_interrupts(sc);
	vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{

	/*
	 * Setting the status to RESET sets the host device to
	 * the original, uninitialized state.
	 */
	vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}

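/*
 * Shared interrupt handler: read and acknowledge the interrupt status,
 * report configuration changes to the child driver, and run the
 * handlers for any virtqueues that requested interrupts.
 */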
static void
vtmmio_vq_intr(void *arg)
{
	struct vtmmio_virtqueue *vqx;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t status;
	int idx;

	sc = arg;

	status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

	/* The config changed */
	if (status & VIRTIO_MMIO_INT_CONFIG)
		if (sc->vtmmio_child_dev != NULL)
			VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

	/* Notify all virtqueues. */
	if (status & VIRTIO_MMIO_INT_VRING) {
		for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
			vqx = &sc->vtmmio_vqs[idx];
			if (vqx->vtv_no_intr == 0) {
				vq = vqx->vtv_vq;
				virtqueue_intr(vq);
			}
		}
	}
}