/*	$OpenBSD: vioblk.c,v 1.26 2020/07/22 13:16:05 krw Exp $	*/

/*
 * Copyright (c) 2012 Stefan Fritsch.
 * Copyright (c) 2010 Minoura Makoto.
 * Copyright (c) 1998, 2001 Manuel Bouyer.
 * All rights reserved.
 *
 * This code is based in part on the NetBSD ld_virtio driver and the
 * OpenBSD vdsk driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2009, 2011 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/bus.h>

#include <sys/device.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/mutex.h>
#include <dev/pv/virtioreg.h>
#include <dev/pv/virtiovar.h>
#include <dev/pv/vioblkreg.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#define VIOBLK_DONE	-1

/* Number of DMA segments for buffers that the device must support */
#define SEG_MAX		(MAXPHYS/PAGE_SIZE + 1)
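/* The +1 allows a MAXPHYS-sized buffer that does not begin on a page boundary. */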
/* In the virtqueue, we need space for header and footer, too */
#define ALLOC_SEGS	(SEG_MAX + 2)

struct virtio_feature_name vioblk_feature_names[] = {
#if VIRTIO_DEBUG
	{ VIRTIO_BLK_F_BARRIER,		"Barrier" },
	{ VIRTIO_BLK_F_SIZE_MAX,	"SizeMax" },
	{ VIRTIO_BLK_F_SEG_MAX,		"SegMax" },
	{ VIRTIO_BLK_F_GEOMETRY,	"Geometry" },
	{ VIRTIO_BLK_F_RO,		"RO" },
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlkSize" },
	{ VIRTIO_BLK_F_SCSI,		"SCSI" },
	{ VIRTIO_BLK_F_FLUSH,		"Flush" },
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology" },
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE" },
	{ VIRTIO_BLK_F_DISCARD,		"Discard" },
	{ VIRTIO_BLK_F_WRITE_ZEROES,	"Write0s" },
#endif
	{ 0,				NULL }
};

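/*
 * A preallocated request. Everything up to VR_DMA_END is mapped by
 * vr_cmdsts and shared with the device: the header is device-readable,
 * the status byte is device-writable. The remaining fields are driver
 * bookkeeping. vr_len doubles as a state marker: VIOBLK_DONE means the
 * request is not in flight.
 */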
struct virtio_blk_req {
	struct virtio_blk_req_hdr	 vr_hdr;
	uint8_t				 vr_status;
#define VR_DMA_END	offsetof(struct virtio_blk_req, vr_qe_index)
	int16_t				 vr_qe_index;
	int				 vr_len;
	struct scsi_xfer		*vr_xs;
	bus_dmamap_t			 vr_cmdsts;
	bus_dmamap_t			 vr_payload;
	SLIST_ENTRY(virtio_blk_req)	 vr_list;
};

struct vioblk_softc {
	struct device		 sc_dev;
	struct virtio_softc	*sc_virtio;

	struct virtqueue         sc_vq[1];
	struct virtio_blk_req   *sc_reqs;
	bus_dma_segment_t        sc_reqs_segs[1];
	int			 sc_nreqs;

	struct scsi_iopool	 sc_iopool;
	struct mutex		 sc_vr_mtx;
	SLIST_HEAD(, virtio_blk_req) sc_freelist;

	int			 sc_notify_on_empty;

	uint32_t		 sc_queued;

	uint64_t		 sc_capacity;
};

int	vioblk_match(struct device *, void *, void *);
void	vioblk_attach(struct device *, struct device *, void *);
int	vioblk_alloc_reqs(struct vioblk_softc *, int);
int	vioblk_vq_done(struct virtqueue *);
void	vioblk_vq_done1(struct vioblk_softc *, struct virtio_softc *,
			struct virtqueue *, int);
void	vioblk_reset(struct vioblk_softc *);

void	vioblk_scsi_cmd(struct scsi_xfer *);

void   *vioblk_req_get(void *);
void	vioblk_req_put(void *, void *);

void	vioblk_scsi_inq(struct scsi_xfer *);
void	vioblk_scsi_capacity(struct scsi_xfer *);
void	vioblk_scsi_capacity16(struct scsi_xfer *);
void	vioblk_scsi_done(struct scsi_xfer *, int);

struct cfattach vioblk_ca = {
	sizeof(struct vioblk_softc),
	vioblk_match,
	vioblk_attach,
	NULL
};

struct cfdriver vioblk_cd = {
	NULL, "vioblk", DV_DULL
};

struct scsi_adapter vioblk_switch = {
	vioblk_scsi_cmd, NULL, NULL, NULL, NULL
};

int
vioblk_match(struct device *parent, void *match, void *aux)
{
	struct virtio_softc *va = aux;
	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
		return 1;
	return 0;
}

#define DNPRINTF(n,x...)				\
    do { if (VIRTIO_DEBUG >= n) printf(x); } while (0)

void
vioblk_attach(struct device *parent, struct device *self, void *aux)
{
	struct vioblk_softc *sc = (struct vioblk_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	struct scsibus_attach_args saa;
	int qsize;

	vsc->sc_vqs = &sc->sc_vq[0];
	vsc->sc_nvqs = 1;
	vsc->sc_config_change = 0;
	if (vsc->sc_child)
		panic("already attached to something else");
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	sc->sc_virtio = vsc;
	vsc->sc_driver_features = VIRTIO_BLK_F_RO | VIRTIO_F_NOTIFY_ON_EMPTY |
	     VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_FLUSH;

	virtio_negotiate_features(vsc, vioblk_feature_names);

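	/*
	 * Sanity-check the device limits against what the payload dmamaps
	 * will produce: up to SEG_MAX segments per request, and segments
	 * that may be at least a page long.
	 */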
	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SIZE_MAX)) {
		uint32_t size_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SIZE_MAX);
		if (size_max < PAGE_SIZE) {
			printf("\nMax segment size %u too low\n", size_max);
			goto err;
		}
	}

	if (virtio_has_feature(vsc, VIRTIO_BLK_F_SEG_MAX)) {
		uint32_t seg_max = virtio_read_device_config_4(vsc,
		    VIRTIO_BLK_CONFIG_SEG_MAX);
		if (seg_max < SEG_MAX) {
			printf("\nMax number of segments %u too small\n",
			    seg_max);
			goto err;
		}
	}

	sc->sc_capacity = virtio_read_device_config_8(vsc,
	    VIRTIO_BLK_CONFIG_CAPACITY);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS, ALLOC_SEGS,
	    "I/O request") != 0) {
		printf("\nCan't alloc virtqueue\n");
		goto err;
	}
	qsize = sc->sc_vq[0].vq_num;
	sc->sc_vq[0].vq_done = vioblk_vq_done;

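	/*
	 * With NOTIFY_ON_EMPTY the device interrupts once the queue runs
	 * out of pending requests even while per-request interrupts are
	 * suppressed, so completions can be reaped in batches.
	 */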
	if (virtio_has_feature(vsc, VIRTIO_F_NOTIFY_ON_EMPTY)) {
		virtio_stop_vq_intr(vsc, &sc->sc_vq[0]);
		sc->sc_notify_on_empty = 1;
	} else {
		sc->sc_notify_on_empty = 0;
	}

	sc->sc_queued = 0;

	SLIST_INIT(&sc->sc_freelist);
	mtx_init(&sc->sc_vr_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, vioblk_req_get, vioblk_req_put);

	sc->sc_nreqs = vioblk_alloc_reqs(sc, qsize);
	if (sc->sc_nreqs == 0) {
		printf("\nCan't alloc reqs\n");
		goto err;
	}
	DNPRINTF(1, "%s: qsize: %d\n", __func__, qsize);
	printf("\n");

	saa.saa_adapter = &vioblk_switch;
	saa.saa_adapter_softc = self;
	saa.saa_adapter_buswidth = 1;
	saa.saa_luns = 1;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_openings = sc->sc_nreqs;
	saa.saa_pool = &sc->sc_iopool;
	if (virtio_has_feature(vsc, VIRTIO_BLK_F_RO))
		saa.saa_flags = SDEV_READONLY;
	else
		saa.saa_flags = 0;
	saa.saa_quirks = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	config_found(self, &saa, scsiprint);

	return;
err:
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}

/*
 * vioblk_req_get() provides the SCSI layer with all the
 * resources necessary to start an I/O on the device.
 *
 * Since the size of the I/O is unknown at this time the
 * resources allocated (a.k.a. reserved) must be sufficient
 * to allow the maximum possible I/O size.
 *
 * When the I/O is actually attempted via vioblk_scsi_cmd()
 * excess resources will be returned via virtio_enqueue_trim().
 */
void *
vioblk_req_get(void *cookie)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = NULL;

	mtx_enter(&sc->sc_vr_mtx);
	vr = SLIST_FIRST(&sc->sc_freelist);
	if (vr != NULL)
		SLIST_REMOVE_HEAD(&sc->sc_freelist, vr_list);
	mtx_leave(&sc->sc_vr_mtx);

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	return vr;
}

void
vioblk_req_put(void *cookie, void *io)
{
	struct vioblk_softc *sc = cookie;
	struct virtio_blk_req *vr = io;

	DNPRINTF(2, "%s: %p\n", __func__, vr);

	mtx_enter(&sc->sc_vr_mtx);
	/*
	 * Do *NOT* call virtio_dequeue_commit()!
	 *
	 * Descriptors are permanently associated with the virtio_blk_req
	 * and should not be placed on the free list!
	 */
	SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	mtx_leave(&sc->sc_vr_mtx);
}

int
vioblk_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct vioblk_softc *sc = (struct vioblk_softc *)vsc->sc_child;
	struct vq_entry *qe;
	int slot;
	int ret = 0;

	if (!sc->sc_notify_on_empty)
		virtio_stop_vq_intr(vsc, vq);
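	/*
	 * Reap all finished requests. Without NOTIFY_ON_EMPTY, a
	 * completion can slip in between the final dequeue and re-enabling
	 * interrupts, so dequeue once more after virtio_start_vq_intr()
	 * before giving up.
	 */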
	for (;;) {
		if (virtio_dequeue(vsc, vq, &slot, NULL) != 0) {
			if (sc->sc_notify_on_empty)
				break;
			virtio_start_vq_intr(vsc, vq);
			if (virtio_dequeue(vsc, vq, &slot, NULL) != 0)
				break;
		}
		qe = &vq->vq_entries[slot];
		vioblk_vq_done1(sc, vsc, vq, qe->qe_vr_index);
		ret = 1;
	}
	return ret;
}

void
vioblk_vq_done1(struct vioblk_softc *sc, struct virtio_softc *vsc,
    struct virtqueue *vq, int slot)
{
	struct virtio_blk_req *vr = &sc->sc_reqs[slot];
	struct scsi_xfer *xs = vr->vr_xs;
	KASSERT(vr->vr_len != VIOBLK_DONE);
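	/*
	 * Sync everything the device touched: the header the driver wrote,
	 * the payload (direction depends on the request type) and the
	 * status byte the device wrote back.
	 */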
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE);
	if (vr->vr_hdr.type != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, vr->vr_len,
		    (vr->vr_hdr.type == VIRTIO_BLK_T_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
	    BUS_DMASYNC_POSTREAD);

	if (vr->vr_status != VIRTIO_BLK_S_OK) {
		DNPRINTF(1, "%s: EIO\n", __func__);
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		xs->error = XS_NOERROR;
		xs->resid = xs->datalen - vr->vr_len;
	}
	vr->vr_len = VIOBLK_DONE;
	scsi_done(xs);
}

void
vioblk_reset(struct vioblk_softc *sc)
{
	int i;

	/* reset device to stop DMA */
	virtio_reset(sc->sc_virtio);

	/* finish requests that have been completed */
	vioblk_vq_done(&sc->sc_vq[0]);

	/* abort all remaining requests */
	for (i = 0; i < sc->sc_nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		struct scsi_xfer *xs = vr->vr_xs;

		if (vr->vr_len == VIOBLK_DONE)
			continue;

		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		scsi_done(xs);
	}
}

void
vioblk_scsi_cmd(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct virtqueue *vq = &sc->sc_vq[0];
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtio_blk_req *vr;
	int len, s, timeout, isread, slot, ret, nsegs;
	int error = XS_DRIVER_STUFFUP;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	struct scsi_rw_12 *rw12;
	struct scsi_rw_16 *rw16;
	u_int64_t lba = 0;
	u_int32_t sector_count = 0;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
	case READ_12:
	case READ_16:
		operation = VIRTIO_BLK_T_IN;
		isread = 1;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
	case WRITE_12:
	case WRITE_16:
		operation = VIRTIO_BLK_T_OUT;
		isread = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (!virtio_has_feature(vsc, VIRTIO_BLK_F_FLUSH)) {
			vioblk_scsi_done(xs, XS_NOERROR);
			return;
		}
		operation = VIRTIO_BLK_T_FLUSH;
		break;

	case INQUIRY:
		vioblk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vioblk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vioblk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vioblk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
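		/* FALLTHROUGH */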
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE/SYNCHRONIZE commands. SYNCHRONIZE CACHE has the same
	 * layout as 10-byte READ/WRITE commands.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else if (xs->cmdlen == 10) {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	} else if (xs->cmdlen == 12) {
		rw12 = (struct scsi_rw_12 *)xs->cmd;
		lba = _4btol(rw12->addr);
		sector_count = _4btol(rw12->length);
	} else if (xs->cmdlen == 16) {
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		lba = _8btol(rw16->addr);
		sector_count = _4btol(rw16->length);
	}

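	/* Block I/O interrupts while the request is built and enqueued. */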
	s = splbio();
	vr = xs->io;
	slot = vr->vr_qe_index;
	if (operation != VIRTIO_BLK_T_FLUSH) {
		len = MIN(xs->datalen, sector_count * VIRTIO_BLK_SECTOR_SIZE);
		ret = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
		    xs->data, len, NULL,
		    ((isread ? BUS_DMA_READ : BUS_DMA_WRITE) |
		     BUS_DMA_NOWAIT));
		if (ret) {
			printf("%s: bus_dmamap_load: %d\n", __func__, ret);
			error = XS_DRIVER_STUFFUP;
			goto out_done;
		}
		nsegs = vr->vr_payload->dm_nsegs + 2;
	} else {
		len = 0;
		nsegs = 2;
	}

	/*
	 * Adjust reservation to the number needed, or virtio gets upset. Note
	 * that it may trim UP if 'xs' is being recycled w/o getting a new
	 * reservation!
	 */
	virtio_enqueue_trim(vq, slot, nsegs);

	vr->vr_xs = xs;
	vr->vr_hdr.type = operation;
	vr->vr_hdr.ioprio = 0;
	vr->vr_hdr.sector = lba;
	vr->vr_len = len;

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
			0, sizeof(struct virtio_blk_req_hdr),
			BUS_DMASYNC_PREWRITE);
	if (operation != VIRTIO_BLK_T_FLUSH) {
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t),
	    BUS_DMASYNC_PREREAD);

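	/*
	 * Chain the descriptors in the order the device expects:
	 * device-readable header, payload, device-writable status byte.
	 */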
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts, 0,
	    sizeof(struct virtio_blk_req_hdr), 1);
	if (operation != VIRTIO_BLK_T_FLUSH)
		virtio_enqueue(vq, slot, vr->vr_payload, !isread);
	virtio_enqueue_p(vq, slot, vr->vr_cmdsts,
	    offsetof(struct virtio_blk_req, vr_status), sizeof(uint8_t), 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);
	sc->sc_queued++;

	if (!ISSET(xs->flags, SCSI_POLL)) {
		/* check if some xfers are done: */
		if (sc->sc_queued > 1)
			vioblk_vq_done(vq);
		splx(s);
		return;
	}

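	/* SCSI_POLL: spin in 1ms steps for up to 15 seconds, then reset. */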
	timeout = 15 * 1000;
	do {
		if (virtio_poll_intr(vsc) && vr->vr_len == VIOBLK_DONE)
			break;

		delay(1000);
	} while (--timeout > 0);
	if (timeout <= 0) {
		printf("%s: SCSI_POLL timed out\n", __func__);
		vioblk_reset(sc);
		virtio_reinit_start(vsc);
		virtio_reinit_end(vsc);
	}
	splx(s);
	return;

out_done:
	splx(s);
	vioblk_scsi_done(xs, error);
}

void
vioblk_scsi_inq(struct scsi_xfer *xs)
{
	struct scsi_inquiry *inq = (struct scsi_inquiry *)xs->cmd;
	struct scsi_inquiry_data inqd;

	if (ISSET(inq->flags, SI_EVPD)) {
		vioblk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	bzero(&inqd, sizeof(inqd));

	inqd.device = T_DIRECT;
	inqd.version = 0x05; /* SPC-3 */
	inqd.response_format = 2;
	inqd.additional_length = 32;
	inqd.flags |= SID_CmdQue;
	bcopy("VirtIO  ", inqd.vendor, sizeof(inqd.vendor));
	bcopy("Block Device    ", inqd.product, sizeof(inqd.product));

	bcopy(&inqd, xs->data, MIN(sizeof(inqd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data rcd;
	uint64_t capacity;

	bzero(&rcd, sizeof(rcd));

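	/*
	 * READ CAPACITY(10) can report at most 0xffffffff blocks; the
	 * clamped value tells the caller to use READ CAPACITY(16) instead.
	 */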
	capacity = sc->sc_capacity - 1;
	if (capacity > 0xffffffff)
		capacity = 0xffffffff;

	_lto4b(capacity, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_capacity16(struct scsi_xfer *xs)
{
	struct vioblk_softc *sc = xs->sc_link->bus->sb_adapter_softc;
	struct scsi_read_cap_data_16 rcd;

	bzero(&rcd, sizeof(rcd));

	_lto8b(sc->sc_capacity - 1, rcd.addr);
	_lto4b(VIRTIO_BLK_SECTOR_SIZE, rcd.length);

	bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
	vioblk_scsi_done(xs, XS_NOERROR);
}

void
vioblk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}

int
vioblk_alloc_reqs(struct vioblk_softc *sc, int qsize)
{
	struct virtqueue *vq = &sc->sc_vq[0];
	struct vring_desc *vd;
	int allocsize, nreqs, r, rsegs, slot, i;
	void *vaddr;

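	/*
	 * With indirect descriptors each request needs only one slot in
	 * the ring; without them, every request permanently claims
	 * ALLOC_SEGS descriptors.
	 */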
	if (vq->vq_indirect != NULL)
		nreqs = qsize;
	else
		nreqs = qsize / ALLOC_SEGS;

	allocsize = sizeof(struct virtio_blk_req) * nreqs;
	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory allocation failed, size %d, error %d\n",
		    allocsize, r);
		goto err_none;
	}
	r = bus_dmamem_map(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1,
	    allocsize, (caddr_t *)&vaddr, BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("DMA memory map failed, error %d\n", r);
		goto err_dmamem_alloc;
	}
	sc->sc_reqs = vaddr;
	memset(vaddr, 0, allocsize);
	for (i = 0; i < nreqs; i++) {
		/*
		 * Assign descriptors and create the DMA maps for each
		 * allocated request.
		 */
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		r = virtio_enqueue_prep(vq, &slot);
		if (r == 0)
			r = virtio_enqueue_reserve(vq, slot, ALLOC_SEGS);
		if (r != 0)
			return i;

		if (vq->vq_indirect == NULL) {
			/*
			 * The reserved slots must be a contiguous block
			 * starting at vq_desc[slot].
			 */
			vd = &vq->vq_desc[slot];
			for (r = 0; r < ALLOC_SEGS - 1; r++) {
				DNPRINTF(2, "%s: vd[%d].next = %d should be "
				    "%d\n", __func__, r, vd[r].next,
				    (slot + r + 1));
				if (vd[r].next != (slot + r + 1))
					return i;
			}
			if (r == (ALLOC_SEGS - 1) && vd[r].next != 0)
				return i;
			DNPRINTF(2, "%s: reserved slots are contiguous "
			    "(good!)\n", __func__);
		}

		vr->vr_qe_index = slot;
		vq->vq_entries[slot].qe_vr_index = i;
		vr->vr_len = VIOBLK_DONE;

		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
		    VR_DMA_END, 1, VR_DMA_END, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_cmdsts);
		if (r != 0) {
			printf("cmd dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
		    &vr->vr_hdr, VR_DMA_END, NULL, BUS_DMA_NOWAIT);
		if (r != 0) {
			printf("command dmamap load failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		r = bus_dmamap_create(sc->sc_virtio->sc_dmat, MAXPHYS,
		    SEG_MAX, MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &vr->vr_payload);
		if (r != 0) {
			printf("payload dmamap creation failed, err %d\n", r);
			nreqs = i;
			goto err_reqs;
		}
		SLIST_INSERT_HEAD(&sc->sc_freelist, vr, vr_list);
	}
	return nreqs;

err_reqs:
	for (i = 0; i < nreqs; i++) {
		struct virtio_blk_req *vr = &sc->sc_reqs[i];
		if (vr->vr_cmdsts) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_cmdsts);
			vr->vr_cmdsts = 0;
		}
		if (vr->vr_payload) {
			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
			    vr->vr_payload);
			vr->vr_payload = 0;
		}
	}
	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, (caddr_t)sc->sc_reqs,
	    allocsize);
err_dmamem_alloc:
	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_segs[0], 1);
err_none:
	return 0;
}
741