xref: /freebsd/sys/dev/virtio/block/virtio_blk.c (revision e17f5b1d)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;
	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};

enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};

struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_BARRIER	0x0010
#define VTBLK_FLAG_WC_CONFIG	0x0020
#define VTBLK_FLAG_DISCARD	0x0040

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct bio_queue	 vtblk_dump_queue;
	struct vtblk_request	 vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
	{ VIRTIO_BLK_F_DISCARD,		"Discard"	},

	{ 0, NULL }
};

static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static void	vtblk_setup_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_request_prealloc(struct vtblk_softc *);
static void	vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_dequeue(struct vtblk_softc *);
static void	vtblk_request_enqueue(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next_ready(struct vtblk_softc *);
static void	vtblk_request_requeue_ready(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_bio(struct vtblk_softc *);
static int	vtblk_request_execute(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_request_error(struct vtblk_request *);

static void	vtblk_queue_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_done_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_drain_vq(struct vtblk_softc *);
static void	vtblk_drain(struct vtblk_softc *);

static void	vtblk_startio(struct vtblk_softc *);
static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_ident(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_vq_intr(void *);
static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_dump_quiesce(struct vtblk_softc *);
static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_dump_flush(struct vtblk_softc *);
static void	vtblk_dump_complete(struct vtblk_softc *);

static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_BLK_F_DISCARD		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2

static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_blk);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_blk);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
}

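/*
 * Attach: negotiate features, size the sglist from the maximum segment
 * count, allocate the request virtqueue, preallocate requests, and
 * create the disk(9) device before enabling the virtqueue interrupt.
 */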
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	virtio_set_feature_desc(dev, vtblk_feature_desc);

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;
	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_dump_queue);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	vtblk_setup_sysctl(sc);
	vtblk_setup_features(sc);

	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_request_prealloc(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}

static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}

static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}

static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	vtblk_startio(sc);
	VTBLK_UNLOCK(sc);

	return (0);
}

static int
vtblk_shutdown(device_t dev)
{

	return (0);
}

static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * VTBLK_BSIZE;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}

static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}

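/*
 * disk(9) d_dump entry point for kernel crash dumps. Runs polled with
 * the driver lock held; in-flight requests are quiesced first and
 * their bios completed only after the dump finishes.
 */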
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;
	error = 0;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	vtblk_dump_quiesce(sc);

	if (length > 0)
		error = vtblk_dump_write(sc, virtual, offset, length);
	if (error || (virtual == NULL && offset == 0))
		vtblk_dump_complete(sc);

	VTBLK_UNLOCK(sc);

	return (error);
}

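/*
 * GEOM strategy routine. Validates the bio against the negotiated
 * features (read-only, discard) before queueing it and kicking the
 * virtqueue.
 */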
static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_bio_done(NULL, bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if RO. Unfortunately, there does not seem to
	 * be a better way to report our read-only status to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH ||
	    bp->bio_cmd == BIO_DELETE)) {
		vtblk_bio_done(sc, bp, EROFS);
		return;
	}

	if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE) &&
	    (bp->bio_cmd != BIO_FLUSH) && (bp->bio_cmd != BIO_DELETE)) {
		vtblk_bio_done(sc, bp, EOPNOTSUPP);
		return;
	}

	VTBLK_LOCK(sc);

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, ENXIO);
		return;
	}

	if ((bp->bio_cmd == BIO_DELETE) &&
	    !(sc->vtblk_flags & VTBLK_FLAG_DISCARD)) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, EOPNOTSUPP);
		return;
	}

	bioq_insert_tail(&sc->vtblk_bioq, bp);
	vtblk_startio(sc);

	VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
	device_t dev;

	dev = sc->vtblk_dev;

	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD))
		sc->vtblk_flags |= VTBLK_FLAG_DISCARD;
}

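/*
 * Compute the per-request segment budget: VTBLK_MIN_SEGMENTS for the
 * header and status byte, plus the host's seg_max capped at what a
 * MAXPHYS-sized transfer can use. Indirect descriptors are further
 * limited to VIRTIO_MAX_INDIRECT.
 */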
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error) {
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
	}
}

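/*
 * Initialize the disk(9) structure from the device configuration,
 * translating negotiated features (block size, geometry, topology,
 * discard, write cache) into disk attributes.
 */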
static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
	    DISKFLAG_DIRECT_COMPLETION;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * VTBLK_BSIZE;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = VTBLK_BSIZE;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of
	 * non-page-aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
	    blkcfg->topology.physical_block_exp > 0) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD)) {
		dp->d_flags |= DISKFLAG_CANDELETE;
		dp->d_delmaxsize = blkcfg->max_discard_sectors * VTBLK_BSIZE;
	}

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	vtblk_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

		sc->vtblk_request_count++;
		vtblk_request_enqueue(sc, req);
	}

	return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

	while ((req = vtblk_request_dequeue(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

	return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	/* NOTE: Currently, there will be at most one request in the queue. */
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	if (req != NULL)
		return (req);

	return (vtblk_request_bio(sc));
}

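/*
 * Turn the bio at the head of the queue into a block request,
 * translating the bio command into the corresponding VirtIO request
 * type.
 */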
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	case BIO_DELETE:
		req->vbr_hdr.type = VIRTIO_BLK_T_DISCARD;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

	return (req);
}

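/*
 * Build the request's scatter/gather list (header, optional data or
 * discard payload, status byte) and enqueue it on the virtqueue.
 * Returns EBUSY when an ordered request must wait for the queue to
 * drain.
 */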
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	} else if (bp->bio_cmd == BIO_DELETE) {
		struct virtio_blk_discard_write_zeroes *discard;

		discard = malloc(sizeof(*discard), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (discard == NULL)
			return (ENOMEM);
		discard->sector = bp->bio_offset / VTBLK_BSIZE;
		discard->num_sectors = bp->bio_bcount / VTBLK_BSIZE;
		bp->bio_driver1 = discard;
		error = sglist_append(sg, discard, sizeof(*discard));
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}
	}

	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}

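/*
 * Dequeue completed requests and collect their bios on a local queue.
 * The bios are finished later by vtblk_done_completed() without the
 * lock held, as required by GEOM direct dispatch.
 */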
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct vtblk_request *req;
	struct bio *bp;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp = req->vbr_bp;
		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct bio *bp, *tmp;

	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
		if (bp->bio_error != 0)
			disk_err(bp, "hard error", -1, 1);
		vtblk_bio_done(sc, bp, bp->bio_error);
	}
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

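/*
 * Fail every outstanding and queued bio with ENXIO and release the
 * preallocated requests; used when the device is detached.
 */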
static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue queue;
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;
	TAILQ_INIT(&queue);

	if (sc->vtblk_vq != NULL) {
		vtblk_queue_completed(sc, &queue);
		vtblk_done_completed(sc, &queue);

		vtblk_drain_vq(sc);
	}

	while ((req = vtblk_request_next_ready(sc)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_bio_done(sc, bp, ENXIO);
	}

	vtblk_request_free(sc);
}

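/*
 * Submit ready and queued bios until the virtqueue fills, then notify
 * the host once if anything was enqueued.
 */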
static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	VTBLK_LOCK_ASSERT(sc);
	vq = sc->vtblk_vq;
	enq = 0;

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	while (!virtqueue_full(vq)) {
		req = vtblk_request_next(sc);
		if (req == NULL)
			break;

		if (vtblk_request_execute(sc, req) != 0) {
			vtblk_request_requeue_ready(sc, req);
			break;
		}

		enq++;
	}

	if (enq > 0)
		virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	if (bp->bio_driver1 != NULL) {
		free(bp->bio_driver1, M_DEVBUF);
		bp->bio_driver1 = NULL;
	}

	biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_sectors,
	    blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_seg, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, discard_sector_alignment,
	    blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}

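/*
 * Execute a single request synchronously by polling the virtqueue;
 * used by the ident and dump paths. The virtqueue must be empty.
 */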
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_request_execute(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	VTBLK_LOCK_ASSERT(sc);
	error = 0;

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}

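/*
 * Virtqueue interrupt handler: drain completions, restart I/O, and
 * re-check after re-enabling interrupts to avoid losing a race with
 * the host.
 */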
static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	sc = xsc;
	vq = sc->vtblk_vq;
	TAILQ_INIT(&queue);

	VTBLK_LOCK(sc);

again:
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		goto out;

	vtblk_queue_completed(sc, &queue);
	vtblk_startio(sc);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

out:
	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

	/*
	 * Spin here until all the requests in-flight at the time of the
	 * dump are completed and queued. The queued requests will be
	 * biodone'd once the dump is finished.
	 */
	while (!virtqueue_empty(sc->vtblk_vq))
		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

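/*
 * Issue one polled write for the dump path using the preallocated
 * dump request and an on-stack bio.
 */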
static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / VTBLK_BSIZE;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

	vtblk_dump_flush(sc);

	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
	VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, wce), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->wce;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    vtblk_write_cache_sysctl, "I",
	    "Write cache mode (writethrough (0) or writeback (1))");
}

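/*
 * Fetch a per-device tunable, e.g. hw.vtblk.<unit>.<knob>, falling
 * back to the supplied global default.
 */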
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}
1457