xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision a3557ef0)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* Driver for VirtIO SCSI devices. */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/sglist.h>
41 #include <sys/sysctl.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/callout.h>
45 #include <sys/queue.h>
46 #include <sys/sbuf.h>
47 
48 #include <machine/stdarg.h>
49 
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
54 
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_sim.h>
60 #include <cam/cam_debug.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 
64 #include <dev/virtio/virtio.h>
65 #include <dev/virtio/virtqueue.h>
66 #include <dev/virtio/scsi/virtio_scsi.h>
67 #include <dev/virtio/scsi/virtio_scsivar.h>
68 
69 #include "virtio_if.h"
70 
71 static int	vtscsi_modevent(module_t, int, void *);
72 
73 static int	vtscsi_probe(device_t);
74 static int	vtscsi_attach(device_t);
75 static int	vtscsi_detach(device_t);
76 static int	vtscsi_suspend(device_t);
77 static int	vtscsi_resume(device_t);
78 
79 static void	vtscsi_negotiate_features(struct vtscsi_softc *);
80 static void	vtscsi_read_config(struct vtscsi_softc *,
81 		    struct virtio_scsi_config *);
82 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
83 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
84 static void	vtscsi_check_sizes(struct vtscsi_softc *);
85 static void	vtscsi_write_device_config(struct vtscsi_softc *);
86 static int	vtscsi_reinit(struct vtscsi_softc *);
87 
88 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
89 static int	vtscsi_register_cam(struct vtscsi_softc *);
90 static void	vtscsi_free_cam(struct vtscsi_softc *);
91 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
92 static int	vtscsi_register_async(struct vtscsi_softc *);
93 static void	vtscsi_deregister_async(struct vtscsi_softc *);
94 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
95 static void	vtscsi_cam_poll(struct cam_sim *);
96 
97 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
98 		    union ccb *);
99 static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
100 		    union ccb *);
101 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
102 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
103 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
104 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
105 		    struct cam_sim *, union ccb *);
106 
107 static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
108 		    struct sglist *, struct ccb_scsiio *);
109 static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
110 		    struct vtscsi_request *, int *, int *);
111 static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
112 		    struct vtscsi_request *);
113 static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
114 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
115 		    struct vtscsi_request *);
116 static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
117 		    struct vtscsi_request *);
118 static void	vtscsi_timedout_scsi_cmd(void *);
119 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
120 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
121 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
122 static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
123 		    struct vtscsi_request *);
124 
125 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
126 		    struct vtscsi_request *);
127 static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
128 		    struct vtscsi_request *, struct sglist *, int, int, int);
129 static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
130 		    struct vtscsi_request *);
131 static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
132 		    struct vtscsi_request *);
133 static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
134 		    struct vtscsi_request *);
135 
136 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
137 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
138 static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
139 		    struct virtio_scsi_cmd_req *);
140 static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
141 		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
142 
143 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
144 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
145 
146 static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
147 		    lun_id_t);
148 static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
149 		    lun_id_t);
150 static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
151 
152 static void	vtscsi_handle_event(struct vtscsi_softc *,
153 		    struct virtio_scsi_event *);
154 static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
155 		    struct virtio_scsi_event *);
156 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
157 static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
158 static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
159 
160 static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
161 static void	vtscsi_complete_vqs(struct vtscsi_softc *);
162 static void	vtscsi_drain_vqs(struct vtscsi_softc *);
163 static void	vtscsi_cancel_request(struct vtscsi_softc *,
164 		    struct vtscsi_request *);
165 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
166 static void	vtscsi_stop(struct vtscsi_softc *);
167 static int	vtscsi_reset_bus(struct vtscsi_softc *);
168 
169 static void	vtscsi_init_request(struct vtscsi_softc *,
170 		    struct vtscsi_request *);
171 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
172 static void	vtscsi_free_requests(struct vtscsi_softc *);
173 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
174 		    struct vtscsi_request *);
175 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
176 
177 static void	vtscsi_complete_request(struct vtscsi_request *);
178 static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
179 
180 static void	vtscsi_control_vq_intr(void *);
181 static void	vtscsi_event_vq_intr(void *);
182 static void	vtscsi_request_vq_intr(void *);
183 static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
184 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
185 
186 static void	vtscsi_get_tunables(struct vtscsi_softc *);
187 static void	vtscsi_add_sysctl(struct vtscsi_softc *);
188 
189 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
190 		    const char *, ...);
191 
192 /* Global tunables. */
193 /*
194  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
195  * IO during virtio_stop(). So in-flight requests still complete after the
196  * device reset. We would have to wait for all the in-flight IO to complete,
197  * which defeats the typical purpose of a bus reset. We could simulate the
198  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
199  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
200  * control virtqueue). But this isn't very useful if things really go off
201  * the rails, so default to disabled for now.
202  */
/* Non-zero disables XPT_RESET_BUS handling; see rationale above. */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
205 
/* Human-readable names for negotiated features, shown by the virtio bus. */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},

	{ 0, NULL }
};
212 
/* newbus method table; no bus or virtio-specific methods are overridden. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};
223 
static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;
230 
/* Attach to both the MMIO and PCI virtio transports. */
DRIVER_MODULE(virtio_scsi, virtio_mmio, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_scsi);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_scsi);
242 
243 static int
244 vtscsi_modevent(module_t mod, int type, void *unused)
245 {
246 	int error;
247 
248 	switch (type) {
249 	case MOD_LOAD:
250 	case MOD_QUIESCE:
251 	case MOD_UNLOAD:
252 	case MOD_SHUTDOWN:
253 		error = 0;
254 		break;
255 	default:
256 		error = EOPNOTSUPP;
257 		break;
258 	}
259 
260 	return (error);
261 }
262 
static int
vtscsi_probe(device_t dev)
{
	/* Match against the VirtIO SCSI device ID via the PNP table above. */
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
268 
/*
 * Attach: negotiate features, read the device configuration, allocate
 * the sglist, virtqueues, requests and CAM structures, then enable
 * interrupts and register with CAM. On any failure, vtscsi_detach()
 * is called to unwind whatever was set up.
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtscsi_feature_desc);
	vtscsi_negotiate_features(sc);

	/* Cache negotiated features as softc flags for cheap tests later. */
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	vtscsi_read_config(sc, &scsicfg);

	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	/* Tell the device the sense/CDB sizes this driver uses. */
	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	/* May clamp vtscsi_max_nsegs to the request virtqueue size. */
	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	/* On the success path error is 0, so this detach is skipped. */
	if (error)
		vtscsi_detach(dev);

	return (error);
}
362 
/*
 * Detach: mark the softc as detaching (so vtscsi_cam_action() rejects
 * new CCBs while the lock is briefly dropped), stop the device, then
 * drain the virtqueues and release CAM, request, and sglist resources.
 * Also used as the unwind path from a failed attach, so every free is
 * guarded against a NULL/unallocated resource.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
391 
static int
vtscsi_suspend(device_t dev)
{

	/* No driver-side quiescing is performed; always succeeds. */
	return (0);
}
398 
static int
vtscsi_resume(device_t dev)
{

	/* No driver-side restore is performed; always succeeds. */
	return (0);
}
405 
406 static void
407 vtscsi_negotiate_features(struct vtscsi_softc *sc)
408 {
409 	device_t dev;
410 	uint64_t features;
411 
412 	dev = sc->vtscsi_dev;
413 	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
414 	sc->vtscsi_features = features;
415 }
416 
/* Read one virtio_scsi_config field from device config space. */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/*
 * Populate *scsicfg field-by-field from the device's configuration
 * space. The struct is zeroed first so any unread fields are 0.
 */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
445 
446 static int
447 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
448 {
449 	int nsegs;
450 
451 	nsegs = VTSCSI_MIN_SEGMENTS;
452 
453 	if (seg_max > 0) {
454 		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
455 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
456 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
457 	} else
458 		nsegs += 1;
459 
460 	return (nsegs);
461 }
462 
/*
 * Allocate the three virtqueues in the order mandated by the VirtIO
 * SCSI spec: control, event, request. Only the request queue carries
 * scatter/gather data, so only it is sized with vtscsi_max_nsegs for
 * indirect descriptors.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
485 
486 static void
487 vtscsi_check_sizes(struct vtscsi_softc *sc)
488 {
489 	int rqsize;
490 
491 	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
492 		/*
493 		 * Ensure the assertions in virtqueue_enqueue(),
494 		 * even if the hypervisor reports a bad seg_max.
495 		 */
496 		rqsize = virtqueue_size(sc->vtscsi_request_vq);
497 		if (sc->vtscsi_max_nsegs > rqsize) {
498 			device_printf(sc->vtscsi_dev,
499 			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
500 			    rqsize);
501 			sc->vtscsi_max_nsegs = rqsize;
502 		}
503 	}
504 }
505 
/*
 * Write the sense and CDB sizes this driver uses into the device's
 * configuration space so both sides agree on the request layout.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
522 
/*
 * Reinitialize the device with the previously negotiated features
 * (used e.g. after a bus reset), restore our config-space writes,
 * repopulate the event virtqueue, and re-enable interrupts.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		vtscsi_reinit_event_vq(sc);
		virtio_reinit_complete(dev);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
544 
/*
 * Allocate the CAM SIM and its device queue. Some requests are held
 * back (VTSCSI_RESERVED_REQUESTS) for task-management commands, so
 * CAM is only given the remainder as openings.
 */
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	/* The SIM shares the softc mutex; cam_sim_free() releases devq. */
	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
572 
/*
 * Register the SIM with the XPT layer, create the wildcard bus path,
 * and subscribe to async events. On failure, everything registered so
 * far is unwound under the same lock hold.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	/* Remember the bus is registered so the fail path deregisters it. */
	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
623 
/*
 * Tear down the CAM state created by vtscsi_alloc_cam() and
 * vtscsi_register_cam(). A non-NULL path implies the bus was
 * registered; a NULL sim means alloc never succeeded, making this
 * safe to call from the failed-attach unwind.
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
646 
/*
 * CAM async event callback (registered with the SIM as cb_arg).
 * Currently only traces the event; both subscribed codes are no-ops.
 */
static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct vtscsi_softc *sc;

	sim = cb_arg;
	sc = cam_sim_softc(sim);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);

	/*
	 * TODO Once QEMU supports event reporting, we should
	 *      (un)subscribe to events here.
	 */
	switch (code) {
	case AC_FOUND_DEVICE:
		break;
	case AC_LOST_DEVICE:
		break;
	}
}
669 
/*
 * Subscribe to device found/lost async events on the wildcard path.
 * Returns the resulting CCB status (CAM_REQ_CMP on success).
 */
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	/* Priority 5 matches the other immediate CCBs in this driver. */
	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}
685 
/*
 * Unsubscribe from async events: same CCB as vtscsi_register_async()
 * but with an empty event mask. The status is intentionally ignored.
 */
static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);
}
699 
/*
 * SIM action entry point: dispatch an incoming CCB by function code.
 * Called by CAM with the SIM (softc) mutex held.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* The emulated transport settings are not adjustable. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		/* Use CAM's extended-translation geometry. */
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
766 
/*
 * SIM poll entry point, used when interrupts are unavailable (e.g.
 * while crash dumping): process all completed virtqueue entries.
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
776 
/*
 * Handle an XPT_SCSI_IO CCB: validate it against the device's limits
 * and submit it to the request virtqueue. On any error the CCB is
 * completed here; otherwise completion happens from the interrupt
 * path via vtscsi_complete_scsi_cmd().
 */
static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	int error;

	ccbh = &ccb->ccb_h;
	csio = &ccb->csio;

	/* The CDB must fit in the fixed-size virtio request field. */
	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	/* Bidirectional transfers require the negotiated INOUT feature. */
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

	error = vtscsi_start_scsi_cmd(sc, ccb);

done:
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
		xpt_done(ccb);
	}
}
810 
811 static void
812 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
813 {
814 	struct ccb_trans_settings *cts;
815 	struct ccb_trans_settings_scsi *scsi;
816 
817 	cts = &ccb->cts;
818 	scsi = &cts->proto_specific.scsi;
819 
820 	cts->protocol = PROTO_SCSI;
821 	cts->protocol_version = SCSI_REV_SPC3;
822 	cts->transport = XPORT_SAS;
823 	cts->transport_version = 0;
824 
825 	scsi->valid = CTS_SCSI_VALID_TQ;
826 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
827 
828 	ccb->ccb_h.status = CAM_REQ_CMP;
829 	xpt_done(ccb);
830 }
831 
832 static void
833 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
834 {
835 	int error;
836 
837 	error = vtscsi_reset_bus(sc);
838 	if (error == 0)
839 		ccb->ccb_h.status = CAM_REQ_CMP;
840 	else
841 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
842 
843 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
844 	    error, ccb, ccb->ccb_h.status);
845 
846 	xpt_done(ccb);
847 }
848 
/*
 * Handle XPT_RESET_DEV by issuing a LUN reset task-management command
 * on the control virtqueue. On success the CCB is completed later by
 * the control-queue completion; on failure it is completed here and
 * the request is returned to the free list.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* No free requests: freeze the SIM queue until one returns. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
884 
/*
 * Handle XPT_ABORT by issuing an abort-task task-management command
 * on the control virtqueue. Mirrors vtscsi_cam_reset_dev(): success
 * defers CCB completion to the control-queue path, failure completes
 * the CCB here and recycles the request.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* No free requests: freeze the SIM queue until one returns. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
920 
/*
 * Handle XPT_PATH_INQ: describe the (emulated) HBA to CAM — limits
 * from the device config, fixed SPC-3/SAS transport identity, and
 * a maxio derived from the usable data segments per request.
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	/* Place the initiator just past the last valid target ID. */
	cpi->initiator_id = cpi->max_target + 1;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	/* Data segments left after the reserved header/response entries. */
	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
970 
/*
 * Append the CCB's data buffer to the sglist, handling every CAM data
 * addressing mode (virtual, physical, S/G lists of either, and bio).
 * Returns 0 or an errno from the sglist routines (EINVAL for an
 * unrecognized mode).
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		/* Stop at the first segment that fails to append. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_BIO:
		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1014 
/*
 * Build the shared sglist for a SCSI command in virtqueue order:
 * request header, write-direction data, response, read-direction
 * data. Returns via *readable/*writable the split the virtqueue
 * needs (device-readable vs device-writable segment counts).
 * Returns EFBIG if the CCB's data does not fit, which should only
 * happen if maxio was misreported.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1070 
/*
 * Submit a SCSI command on the request virtqueue: fill in the virtio
 * request header from the CCB, build the sglist, enqueue, notify the
 * device, and arm the per-request timeout callout if the CCB carries
 * one. A full virtqueue requeues the CCB and freezes the SIM queue.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Poison the response so a completed request is distinguishable. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	/* Link the CCB back to its request for the abort path. */
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1124 
1125 static int
1126 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1127 {
1128 	struct vtscsi_request *req;
1129 	int error;
1130 
1131 	req = vtscsi_dequeue_request(sc);
1132 	if (req == NULL) {
1133 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1134 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1135 		return (ENOBUFS);
1136 	}
1137 
1138 	req->vsr_ccb = ccb;
1139 
1140 	error = vtscsi_execute_scsi_cmd(sc, req);
1141 	if (error)
1142 		vtscsi_enqueue_request(sc, req);
1143 
1144 	return (error);
1145 }
1146 
/*
 * Completion handler for the ABORT_TASK TMF issued on behalf of a
 * timed-out SCSI command. If the abort did not complete and neither a
 * detach nor a reset is already in progress, fall back to resetting
 * the bus to recover the timed-out request.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done; return it to the free list. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1185 
/*
 * Issue an asynchronous ABORT_TASK TMF on the control virtqueue for a
 * timed-out request. The TMF tag is the timed-out CCB header pointer,
 * matching the tag set in vtscsi_init_scsi_cmd_req(). Returns zero if
 * the TMF was submitted; otherwise the borrowed request is returned to
 * the free list and an errno is returned so the caller can escalate
 * (e.g. to a bus reset).
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	/* A separate request is needed to carry the TMF itself. */
	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	/* One readable segment (request), one writable (response). */
	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	/* Preset an invalid response value before the host overwrites it. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1233 
/*
 * Callout handler fired when a SCSI command exceeds its CCB timeout.
 * Runs with the softc mutex held (the callout is initialized with
 * VTSCSI_MTX in vtscsi_init_request()). Attempts to abort the request
 * via an ABORT_TASK TMF and falls back to a bus reset if that cannot
 * be issued.
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	/* Mark as timed out so the TMF completion knows to check on it. */
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1275 
1276 static cam_status
1277 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1278 {
1279 	cam_status status;
1280 
1281 	switch (cmd_resp->response) {
1282 	case VIRTIO_SCSI_S_OK:
1283 		status = CAM_REQ_CMP;
1284 		break;
1285 	case VIRTIO_SCSI_S_OVERRUN:
1286 		status = CAM_DATA_RUN_ERR;
1287 		break;
1288 	case VIRTIO_SCSI_S_ABORTED:
1289 		status = CAM_REQ_ABORTED;
1290 		break;
1291 	case VIRTIO_SCSI_S_BAD_TARGET:
1292 		status = CAM_SEL_TIMEOUT;
1293 		break;
1294 	case VIRTIO_SCSI_S_RESET:
1295 		status = CAM_SCSI_BUS_RESET;
1296 		break;
1297 	case VIRTIO_SCSI_S_BUSY:
1298 		status = CAM_SCSI_BUSY;
1299 		break;
1300 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1301 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1302 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1303 		status = CAM_SCSI_IT_NEXUS_LOST;
1304 		break;
1305 	default: /* VIRTIO_SCSI_S_FAILURE */
1306 		status = CAM_REQ_CMP_ERR;
1307 		break;
1308 	}
1309 
1310 	return (status);
1311 }
1312 
/*
 * Translate a successful virtio command completion into a CAM status
 * for the CCB: copy the SCSI status and residual, and if the host
 * returned sense data, copy it (bounded by the CCB's sense buffer
 * size), set sense_resid accordingly, and OR in CAM_AUTOSNS_VALID.
 * Returns CAM_REQ_CMP for SCSI_STATUS_OK, else CAM_SCSI_STATUS_ERROR.
 */
static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
	cam_status status;

	csio->scsi_status = cmd_resp->status;
	csio->resid = cmd_resp->resid;

	if (csio->scsi_status == SCSI_STATUS_OK)
		status = CAM_REQ_CMP;
	else
		status = CAM_SCSI_STATUS_ERROR;

	if (cmd_resp->sense_len > 0) {
		status |= CAM_AUTOSNS_VALID;

		/* sense_resid reflects how much of the buffer went unused. */
		if (cmd_resp->sense_len < csio->sense_len)
			csio->sense_resid = csio->sense_len -
			    cmd_resp->sense_len;
		else
			csio->sense_resid = 0;

		/* Copy min(sense_len, csio->sense_len) bytes of sense data. */
		memcpy(&csio->sense_data, cmd_resp->sense,
		    csio->sense_len - csio->sense_resid);
	}

	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
	    csio, csio->scsi_status, csio->resid, csio->sense_resid);

	return (status);
}
1346 
/*
 * Completion handler for SCSI command requests dequeued from the
 * request virtqueue. Stops any pending timeout callout, derives the
 * final CAM status, freezes the device queue on error, releases the
 * SIMQ if it can be thawed, completes the CCB, and returns the
 * request to the free list.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort we triggered for a timeout reports as a timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* A slot just opened in both the free list and the virtqueue. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1387 
/*
 * Busy-wait for a control request to complete by repeatedly servicing
 * the control virtqueue. The POLLED flag makes vtscsi_complete_request()
 * set COMPLETE when this request's completion fires. The loop body must
 * run at least once (do/while), since the completion may already be
 * pending in the virtqueue.
 */
static void
vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	/* XXX We probably shouldn't poll forever. */
	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
	do
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);

	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
}
1400 
/*
 * Enqueue a request on the control virtqueue and notify the host.
 * With VTSCSI_EXECUTE_POLL the call spins until completion; otherwise
 * the request's vsr_complete callback fires later. Virtqueue-full
 * errors (ENOSPC/EMSGSIZE) are normalized to EAGAIN for callers.
 */
static int
vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
    struct sglist *sg, int readable, int writable, int flag)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtscsi_control_vq;

	/* Async requests must have a completion handler to run later. */
	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		/*
		 * Return EAGAIN when the virtqueue does not have enough
		 * descriptors available.
		 */
		if (error == ENOSPC || error == EMSGSIZE)
			error = EAGAIN;

		return (error);
	}

	virtqueue_notify(vq);
	if (flag == VTSCSI_EXECUTE_POLL)
		vtscsi_poll_ctrl_req(sc, req);

	return (0);
}
1430 
1431 static void
1432 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1433     struct vtscsi_request *req)
1434 {
1435 	union ccb *ccb;
1436 	struct ccb_hdr *ccbh;
1437 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1438 
1439 	ccb = req->vsr_ccb;
1440 	ccbh = &ccb->ccb_h;
1441 	tmf_resp = &req->vsr_tmf_resp;
1442 
1443 	switch (tmf_resp->response) {
1444 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1445 		ccbh->status = CAM_REQ_CMP;
1446 		break;
1447 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1448 		ccbh->status = CAM_UA_ABORT;
1449 		break;
1450 	default:
1451 		ccbh->status = CAM_REQ_CMP_ERR;
1452 		break;
1453 	}
1454 
1455 	xpt_done(ccb);
1456 	vtscsi_enqueue_request(sc, req);
1457 }
1458 
/*
 * Handle an XPT_ABORT CCB by sending an asynchronous ABORT_TASK TMF
 * for the target CCB's in-flight request. Fails with EINVAL if the
 * CCB to abort is not a SCSI I/O with an attached request, and with
 * EALREADY if that request is no longer in flight. The abort CCB
 * itself completes from vtscsi_complete_abort_task_cmd().
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Prevent the timeout path from also acting on this request. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The TMF tag is the aborted CCB header, matching the cmd tag. */
	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	/* Preset an invalid response value before the host overwrites it. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1516 
1517 static void
1518 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1519     struct vtscsi_request *req)
1520 {
1521 	union ccb *ccb;
1522 	struct ccb_hdr *ccbh;
1523 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1524 
1525 	ccb = req->vsr_ccb;
1526 	ccbh = &ccb->ccb_h;
1527 	tmf_resp = &req->vsr_tmf_resp;
1528 
1529 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1530 	    req, ccb, tmf_resp->response);
1531 
1532 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1533 		ccbh->status = CAM_REQ_CMP;
1534 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1535 		    ccbh->target_lun);
1536 	} else
1537 		ccbh->status = CAM_REQ_CMP_ERR;
1538 
1539 	xpt_done(ccb);
1540 	vtscsi_enqueue_request(sc, req);
1541 }
1542 
/*
 * Handle an XPT_RESET_DEV CCB by issuing an asynchronous reset TMF:
 * an I_T nexus reset for a wildcard LUN, otherwise a logical unit
 * reset. Completion is handled by vtscsi_complete_reset_dev_cmd().
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	/* Resets are not tied to a specific task, so the tag is zero. */
	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	/* Preset an invalid response value before the host overwrites it. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}
1583 
/*
 * Decode the 8-byte virtio LUN field of a host event into a CAM
 * target id (byte 1) and LUN id (bytes 2-3, big-endian).
 *
 * NOTE(review): vtscsi_set_request_lun() encodes byte 2 as
 * 0x40 | (lun >> 8), but the 0x40 flag is not masked off here.
 * Whether the host reports event LUNs with that bit set should be
 * confirmed against the VirtIO SCSI specification.
 */
static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{

	*target_id = lun[1];
	*lun_id = (lun[2] << 8) | lun[3];
}
1591 
/*
 * Encode a CCB's target and LUN into the 8-byte virtio LUN format:
 * byte 0 is 1, byte 1 the target id, and bytes 2-3 carry the 14-bit
 * LUN with 0x40 set in the high byte (presumably SAM flat-space
 * addressing — confirm against the VirtIO SCSI specification).
 */
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}
1601 
/*
 * Fill in a virtio SCSI command request from a CCB: map the CAM tag
 * action to a virtio task attribute, encode the LUN, use the CCB
 * pointer as the command tag (so TMFs can reference it), and copy in
 * the CDB, honoring CAM_CDB_POINTER for indirect CDBs.
 */
static void
vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
	uint8_t attr;

	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		attr = VIRTIO_SCSI_S_HEAD;
		break;
	case MSG_ORDERED_Q_TAG:
		attr = VIRTIO_SCSI_S_ORDERED;
		break;
	case MSG_ACA_TASK:
		attr = VIRTIO_SCSI_S_ACA;
		break;
	default: /* MSG_SIMPLE_Q_TAG */
		attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}

	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
	cmd_req->tag = (uintptr_t) csio;
	cmd_req->task_attr = attr;

	memcpy(cmd_req->cdb,
	    csio->ccb_h.flags & CAM_CDB_POINTER ?
	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
	    csio->cdb_len);
}
1632 
/*
 * Fill in a task-management request: encode the LUN from the CCB
 * header and set the TMF type, subtype (e.g. ABORT_TASK), and tag
 * identifying the affected task (zero when not task-specific).
 */
static void
vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = subtype;
	tmf_req->tag = tag;
}
1644 
/*
 * Record the given freeze reason(s) (out of free requests and/or
 * request virtqueue full) and freeze the SIMQ exactly once, on the
 * transition from not-frozen to frozen.
 */
static void
vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
{
	int frozen;

	frozen = sc->vtscsi_frozen;

	if (reason & VTSCSI_REQUEST &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;

	/* Freeze the SIMQ if transitioned to frozen. */
	if (frozen == 0 && sc->vtscsi_frozen != 0) {
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
		xpt_freeze_simq(sc->vtscsi_sim, 1);
	}
}
1666 
/*
 * Clear the given freeze reason(s). Returns non-zero when all reasons
 * are cleared, i.e. the SIMQ may be released; the caller is
 * responsible for the actual xpt_release_simq()/CAM_RELEASE_SIMQ.
 */
static int
vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
{
	int thawed;

	if (sc->vtscsi_frozen == 0 || reason == 0)
		return (0);

	if (reason & VTSCSI_REQUEST &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;

	thawed = sc->vtscsi_frozen == 0;
	if (thawed != 0)
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");

	return (thawed);
}
1689 
/*
 * Send an async event (e.g. AC_SENT_BDR, AC_BUS_RESET) to CAM for the
 * given target/LUN, creating a temporary path when the event is not
 * bus-wide. Path creation failure is logged and the event dropped.
 */
static void
vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	struct cam_path *path;

	/* Use the wildcard path from our softc for bus announcements. */
	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
		xpt_async(ac_code, sc->vtscsi_path, NULL);
		return;
	}

	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
	    target_id, lun_id) != CAM_REQ_CMP) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}
1711 
/*
 * Kick off an asynchronous CAM rescan of the given target/LUN.
 * Allocation or path-creation failures are simply dropped; on success
 * xpt_rescan() takes ownership of the CCB, so it is not freed here.
 */
static void
vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
    lun_id_t lun_id)
{
	union ccb *ccb;
	cam_status status;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
		return;
	}

	status = xpt_create_path(&ccb->ccb_h.path, NULL,
	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
	if (status != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}

	xpt_rescan(ccb);
}
1734 
/* Rescan the entire bus using wildcard target and LUN ids. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1741 
1742 static void
1743 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1744     struct virtio_scsi_event *event)
1745 {
1746 	target_id_t target_id;
1747 	lun_id_t lun_id;
1748 
1749 	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1750 
1751 	switch (event->reason) {
1752 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1753 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1754 		vtscsi_execute_rescan(sc, target_id, lun_id);
1755 		break;
1756 	default:
1757 		device_printf(sc->vtscsi_dev,
1758 		    "unhandled transport event reason: %d\n", event->reason);
1759 		break;
1760 	}
1761 }
1762 
/*
 * Process one event dequeued from the event virtqueue. If the host
 * flagged missed events, fall back to a full bus rescan; otherwise
 * dispatch by event type. The buffer is requeued afterwards so the
 * host can report future events.
 *
 * NOTE(review): 'error' is only consumed by KASSERT, so this likely
 * draws a set-but-unused warning when INVARIANTS is disabled — confirm.
 */
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
	int error;

	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
		switch (event->event) {
		case VIRTIO_SCSI_T_TRANSPORT_RESET:
			vtscsi_transport_reset_event(sc, event);
			break;
		default:
			device_printf(sc->vtscsi_dev,
			    "unhandled event: %d\n", event->event);
			break;
		}
	} else
		vtscsi_execute_rescan_bus(sc);

	/*
	 * This should always be successful since the buffer
	 * was just dequeued.
	 */
	error = vtscsi_enqueue_event_buf(sc, event);
	KASSERT(error == 0,
	    ("cannot requeue event buffer: %d", error));
}
1789 
/*
 * Post an event buffer to the event virtqueue for the host to fill.
 * The buffer is zeroed first and enqueued as entirely writable
 * (0 readable segments). Returns 0 or an sglist/virtqueue errno.
 */
static int
vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	struct sglist *sg;
	struct virtqueue *vq;
	int size, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_event_vq;
	size = sc->vtscsi_event_buf_size;

	bzero(event, size);

	sglist_reset(sg);
	error = sglist_append(sg, event, size);
	if (error)
		return (error);

	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
	if (error)
		return (error);

	virtqueue_notify(vq);

	return (0);
}
1817 
/*
 * Post the initial set of event buffers to the event virtqueue.
 * Skipped entirely (returning success) when hotplug is not negotiated
 * or the configured buffer size is too small for an event structure.
 * Partial success is tolerated: one posted buffer is enough.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1854 
/*
 * Repost all event buffers after a device reinit (e.g. bus reset),
 * under the same hotplug/size preconditions as vtscsi_init_event_vq().
 * At least one buffer must be posted or we would stop hearing events.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1875 
1876 static void
1877 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1878 {
1879 	struct virtqueue *vq;
1880 	int last;
1881 
1882 	vq = sc->vtscsi_event_vq;
1883 	last = 0;
1884 
1885 	while (virtqueue_drain(vq, &last) != NULL)
1886 		;
1887 
1888 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1889 }
1890 
/*
 * Run pending completions on the request and control virtqueues.
 * Caller must hold the softc mutex. The event virtqueue is handled
 * separately (its buffers are not requests).
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1902 
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1911 
/*
 * Cancel a request pulled off a virtqueue during drain. Completes its
 * CCB with CAM_NO_HBA when detaching, or CAM_REQUEUE_REQ during a bus
 * reset so CAM retries it, then returns the request to the free list.
 * The locking expectations differ between the two cases; see below.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* In the detach case the lock is taken just for xpt_done(). */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1955 
/*
 * Remove every outstanding request from a (stopped) virtqueue and
 * cancel each one, completing its CCB back to CAM.
 */
static void
vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;
	int last;

	last = 0;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);

	while ((req = virtqueue_drain(vq, &last)) != NULL)
		vtscsi_cancel_request(sc, req);

	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}
1971 
/*
 * Drain all three virtqueues. The control and request queues carry
 * requests that must be cancelled; the event queue only holds event
 * buffers and is drained separately.
 */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
1983 
/* Quiesce the device: mask virtqueue interrupts, then stop VirtIO. */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
1991 
/*
 * Perform a full bus reset: stop the device, complete and drain all
 * outstanding requests (so CAM requeues them), thaw the SIMQ if it
 * was frozen, and reinitialize the device. Caller must hold the softc
 * mutex. Returns the vtscsi_reinit() result; on failure the device is
 * left stopped. VTSCSI_FLAG_RESET is held across the operation so
 * other paths (e.g. abort handling) do not also trigger a reset.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2037 
/*
 * One-time initialization of a freshly allocated request: link it to
 * the softc and set up its timeout callout under the softc mutex.
 * With INVARIANTS, verify the request/response structures each fit in
 * a single sglist segment (i.e. do not cross a page boundary).
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2055 
/*
 * Allocate the pool of request structures and place them on the free
 * list. Returns ENOMEM on allocation failure; any requests already
 * allocated remain on the free list for the caller's cleanup path.
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		/* Without indirect descriptors each request needs several. */
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		/* enqueue also zeroes the per-command state. */
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}
2087 
/*
 * Free every request on the free list. All requests must already have
 * been returned (callouts stopped, no in-flight I/O) or the KASSERTs
 * fire.
 */
static void
vtscsi_free_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
		KASSERT(callout_active(&req->vsr_callout) == 0,
		    ("request callout still active"));

		sc->vtscsi_nrequests--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
	    sc->vtscsi_nrequests));
}
2104 
/*
 * Return a request to the free list, resetting its per-command state
 * (CCB, completion callback, flags, and the virtio request/response
 * buffers) so it is clean for reuse. Thaws the SIMQ if it was frozen
 * for lack of requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2134 
2135 static struct vtscsi_request *
2136 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2137 {
2138 	struct vtscsi_request *req;
2139 
2140 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2141 	if (req != NULL) {
2142 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2143 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2144 	} else
2145 		sc->vtscsi_stats.dequeue_no_requests++;
2146 
2147 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2148 
2149 	return (req);
2150 }
2151 
/*
 * Run a request's completion. For polled control requests, first set
 * the COMPLETE flag that vtscsi_poll_ctrl_req() spins on.
 */
static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}
2162 
/*
 * Complete every finished request on the given virtqueue. Caller must
 * hold the softc mutex.
 */
static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}
2173 
/*
 * Control virtqueue interrupt handler. After servicing completions,
 * re-enables the interrupt; if virtqueue_enable_intr() reports more
 * work arrived in the meantime, loop and service it again so no
 * completion is lost.
 */
static void
vtscsi_control_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_control_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2196 
/*
 * Event virtqueue interrupt handler. Dequeues and handles each event
 * buffer (which also requeues it), then re-enables the interrupt,
 * looping if more events arrived in the meantime.
 */
static void
vtscsi_event_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;
	struct virtio_scsi_event *event;

	sc = xsc;
	vq = sc->vtscsi_event_vq;

again:
	VTSCSI_LOCK(sc);

	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_handle_event(sc, event);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2221 
/*
 * Request virtqueue interrupt handler. Same re-check pattern as the
 * control queue handler: service completions, re-enable the
 * interrupt, and loop if more work raced in.
 */
static void
vtscsi_request_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_request_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2244 
2245 static void
2246 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2247 {
2248 
2249 	virtqueue_disable_intr(sc->vtscsi_control_vq);
2250 	virtqueue_disable_intr(sc->vtscsi_event_vq);
2251 	virtqueue_disable_intr(sc->vtscsi_request_vq);
2252 }
2253 
2254 static void
2255 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2256 {
2257 
2258 	virtqueue_enable_intr(sc->vtscsi_control_vq);
2259 	virtqueue_enable_intr(sc->vtscsi_event_vq);
2260 	virtqueue_enable_intr(sc->vtscsi_request_vq);
2261 }
2262 
2263 static void
2264 vtscsi_get_tunables(struct vtscsi_softc *sc)
2265 {
2266 	char tmpstr[64];
2267 
2268 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2269 
2270 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2271 	    device_get_unit(sc->vtscsi_dev));
2272 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2273 }
2274 
2275 static void
2276 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2277 {
2278 	device_t dev;
2279 	struct vtscsi_statistics *stats;
2280         struct sysctl_ctx_list *ctx;
2281 	struct sysctl_oid *tree;
2282 	struct sysctl_oid_list *child;
2283 
2284 	dev = sc->vtscsi_dev;
2285 	stats = &sc->vtscsi_stats;
2286 	ctx = device_get_sysctl_ctx(dev);
2287 	tree = device_get_sysctl_tree(dev);
2288 	child = SYSCTL_CHILDREN(tree);
2289 
2290 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2291 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2292 	    "Debug level");
2293 
2294 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2295 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2296 	    "SCSI command timeouts");
2297 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2298 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2299 	    "No available requests to dequeue");
2300 }
2301 
/*
 * printf()-style debug output for a request, prefixed with its CAM path
 * (or the SIM identity when no CCB is attached).  The message is built
 * in an on-stack sbuf and emitted with a single printf().
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];		/* Backing store for the fixed-length sbuf. */
	char path_str[64];

	/* Quietly tolerate a NULL request so callers need not check. */
	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	/* Fixed-length sbuf over the on-stack buffer; no allocation. */
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		/* No CCB: identify the request by SIM name/unit/bus. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		/* Prefix with the CAM path; append the CDB for SCSI I/O. */
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2342