xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision ac4b6bcd)
1 /*-
2  * Copyright (c) 2012, Bryan Venteicher <bryanv@daemoninthecloset.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /* Driver for VirtIO SCSI devices. */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/sglist.h>
39 #include <sys/sysctl.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/callout.h>
43 #include <sys/taskqueue.h>
44 #include <sys/queue.h>
45 #include <sys/sbuf.h>
46 
47 #include <machine/stdarg.h>
48 
49 #include <machine/bus.h>
50 #include <machine/resource.h>
51 #include <sys/bus.h>
52 #include <sys/rman.h>
53 
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_periph.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_debug.h>
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/scsi/scsi_message.h>
62 
63 #include <dev/virtio/virtio.h>
64 #include <dev/virtio/virtqueue.h>
65 #include <dev/virtio/scsi/virtio_scsi.h>
66 #include <dev/virtio/scsi/virtio_scsivar.h>
67 
68 #include "virtio_if.h"
69 
70 static int	vtscsi_modevent(module_t, int, void *);
71 
72 static int	vtscsi_probe(device_t);
73 static int	vtscsi_attach(device_t);
74 static int	vtscsi_detach(device_t);
75 static int	vtscsi_suspend(device_t);
76 static int	vtscsi_resume(device_t);
77 
78 static void	vtscsi_negotiate_features(struct vtscsi_softc *);
79 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
80 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
81 static void	vtscsi_write_device_config(struct vtscsi_softc *);
82 static int	vtscsi_reinit(struct vtscsi_softc *);
83 
84 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
85 static int 	vtscsi_register_cam(struct vtscsi_softc *);
86 static void	vtscsi_free_cam(struct vtscsi_softc *);
87 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
88 static int	vtscsi_register_async(struct vtscsi_softc *);
89 static void	vtscsi_deregister_async(struct vtscsi_softc *);
90 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
91 static void	vtscsi_cam_poll(struct cam_sim *);
92 
93 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
94 		    union ccb *);
95 static void 	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
96 		    union ccb *);
97 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
98 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
99 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
100 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
101 		    struct cam_sim *, union ccb *);
102 
103 static int 	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
104 		    struct sglist *, struct ccb_scsiio *);
105 static int 	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
106 		    struct vtscsi_request *, int *, int *);
107 static int 	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
108 		    struct vtscsi_request *);
109 static int 	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
110 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
111 		    struct vtscsi_request *);
112 static int 	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
113 		    struct vtscsi_request *);
114 static void	vtscsi_timedout_scsi_cmd(void *);
115 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
116 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
117 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
118 static void 	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
119 		    struct vtscsi_request *);
120 
121 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
122 		    struct vtscsi_request *);
123 static int 	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
124 		    struct vtscsi_request *, struct sglist *, int, int, int);
125 static void 	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
126 		    struct vtscsi_request *);
127 static int 	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
128 		    struct vtscsi_request *);
129 static int 	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
130 		    struct vtscsi_request *);
131 
132 static void 	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
133 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
134 static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
135 		    struct virtio_scsi_cmd_req *);
136 static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
137 		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
138 
139 static void 	vtscsi_freeze_simq(struct vtscsi_softc *, int);
140 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
141 
142 static void 	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
143 		    lun_id_t);
144 static void 	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
145 		    lun_id_t);
146 static void 	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
147 
148 static void 	vtscsi_handle_event(struct vtscsi_softc *,
149 		    struct virtio_scsi_event *);
150 static int 	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
151 		    struct virtio_scsi_event *);
152 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
153 static void 	vtscsi_reinit_event_vq(struct vtscsi_softc *);
154 static void 	vtscsi_drain_event_vq(struct vtscsi_softc *);
155 
156 static void 	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
157 static void 	vtscsi_complete_vqs(struct vtscsi_softc *);
158 static void 	vtscsi_drain_vqs(struct vtscsi_softc *);
159 static void 	vtscsi_cancel_request(struct vtscsi_softc *,
160 		    struct vtscsi_request *);
161 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
162 static void	vtscsi_stop(struct vtscsi_softc *);
163 static int	vtscsi_reset_bus(struct vtscsi_softc *);
164 
165 static void 	vtscsi_init_request(struct vtscsi_softc *,
166 		    struct vtscsi_request *);
167 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
168 static void	vtscsi_free_requests(struct vtscsi_softc *);
169 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
170 		    struct vtscsi_request *);
171 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
172 
173 static void	vtscsi_complete_request(struct vtscsi_request *);
174 static void 	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
175 static void	vtscsi_control_vq_task(void *, int);
176 static void	vtscsi_event_vq_task(void *, int);
177 static void	vtscsi_request_vq_task(void *, int);
178 
179 static int	vtscsi_control_vq_intr(void *);
180 static int	vtscsi_event_vq_intr(void *);
181 static int	vtscsi_request_vq_intr(void *);
182 static void 	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
183 static void 	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
184 
185 static void 	vtscsi_get_tunables(struct vtscsi_softc *);
186 static void 	vtscsi_add_sysctl(struct vtscsi_softc *);
187 
188 static void 	vtscsi_printf_req(struct vtscsi_request *, const char *,
189 		    const char *, ...);
190 
191 /* Global tunables. */
192 /*
193  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
194  * IO during virtio_stop(). So in-flight requests still complete after the
195  * device reset. We would have to wait for all the in-flight IO to complete,
196  * which defeats the typical purpose of a bus reset. We could simulate the
197  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
198  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
199  * control virtqueue). But this isn't very useful if things really go off
200  * the rails, so default to disabled for now.
201  */
/* Disabled by default; see the rationale in the comment block above. */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

/* Human-readable names reported for the negotiated VirtIO SCSI features. */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},

	{ 0, NULL }
};

static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

/* Attach to the virtio_pci bus; vtscsi_probe() filters on the device type. */
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
235 
236 static int
237 vtscsi_modevent(module_t mod, int type, void *unused)
238 {
239 	int error;
240 
241 	switch (type) {
242 	case MOD_LOAD:
243 	case MOD_QUIESCE:
244 	case MOD_UNLOAD:
245 	case MOD_SHUTDOWN:
246 		error = 0;
247 		break;
248 	default:
249 		error = EOPNOTSUPP;
250 		break;
251 	}
252 
253 	return (error);
254 }
255 
256 static int
257 vtscsi_probe(device_t dev)
258 {
259 
260 	if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
261 		return (ENXIO);
262 
263 	device_set_desc(dev, "VirtIO SCSI Adapter");
264 
265 	return (BUS_PROBE_DEFAULT);
266 }
267 
/*
 * Attach the device: negotiate features, size the driver from the device
 * config, allocate virtqueues/requests/CAM state, wire up interrupts, and
 * finally register with CAM. On any failure, vtscsi_detach() unwinds
 * whatever was set up.
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtscsi_feature_desc);
	vtscsi_negotiate_features(sc);

	/* Cache the negotiated feature bits as driver flags. */
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	virtio_read_device_config(dev, 0, &scsicfg,
	    sizeof(struct virtio_scsi_config));

	/* Topology limits and event buffer size come from the device. */
	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	/* Pre-post event buffers so hotplug notifications are not lost. */
	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	TASK_INIT(&sc->vtscsi_control_intr_task, 0,
	    vtscsi_control_vq_task, sc);
	TASK_INIT(&sc->vtscsi_event_intr_task, 0,
	    vtscsi_event_vq_task, sc);
	TASK_INIT(&sc->vtscsi_request_intr_task, 0,
	    vtscsi_request_vq_task, sc);

	sc->vtscsi_tq = taskqueue_create_fast("vtscsi_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtscsi_tq);
	if (sc->vtscsi_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	taskqueue_start_threads(&sc->vtscsi_tq, 1, PI_DISK, "%s taskq",
	    device_get_nameunit(dev));

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

	/* On success, error is 0 here and we fall through to return it. */
fail:
	if (error)
		vtscsi_detach(dev);

	return (error);
}
378 
/*
 * Detach the device. Also used by vtscsi_attach() to unwind a partial
 * attach, so every teardown step must tolerate its resource never having
 * been allocated.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	/*
	 * Mark the detach first so vtscsi_cam_action() rejects any CCBs
	 * that arrive while we tear things down, then quiesce the device.
	 */
	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	/* Wait for any queued interrupt tasks before freeing the queue. */
	if (sc->vtscsi_tq != NULL) {
		taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_control_intr_task);
		taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_event_intr_task);
		taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_request_intr_task);
		taskqueue_free(sc->vtscsi_tq);
		sc->vtscsi_tq = NULL;
	}

	/* Complete then discard anything still sitting in the virtqueues. */
	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
415 
/* Suspend: nothing to do; the virtio bus handles device state. */
static int
vtscsi_suspend(device_t dev)
{

	return (0);
}
422 
/* Resume: nothing to do; the virtio bus handles device state. */
static int
vtscsi_resume(device_t dev)
{

	return (0);
}
429 
430 static void
431 vtscsi_negotiate_features(struct vtscsi_softc *sc)
432 {
433 	device_t dev;
434 	uint64_t features;
435 
436 	dev = sc->vtscsi_dev;
437 	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
438 	sc->vtscsi_features = features;
439 }
440 
441 static int
442 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
443 {
444 	int nsegs;
445 
446 	nsegs = VTSCSI_MIN_SEGMENTS;
447 
448 	if (seg_max > 0) {
449 		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
450 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
451 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
452 	} else
453 		nsegs += 1;
454 
455 	return (nsegs);
456 }
457 
/*
 * Allocate the three virtqueues. The order (control, event, request)
 * matches the queue indices mandated by the VirtIO SCSI specification.
 * Only the request queue carries data payloads, so only it is sized
 * for vtscsi_max_nsegs indirect segments.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
480 
481 static void
482 vtscsi_write_device_config(struct vtscsi_softc *sc)
483 {
484 
485 	virtio_write_dev_config_4(sc->vtscsi_dev,
486 	    offsetof(struct virtio_scsi_config, sense_size),
487 	    VIRTIO_SCSI_SENSE_SIZE);
488 
489 	/*
490 	 * This is the size in the virtio_scsi_cmd_req structure. Note
491 	 * this value (32) is larger than the maximum CAM CDB size (16).
492 	 */
493 	virtio_write_dev_config_4(sc->vtscsi_dev,
494 	    offsetof(struct virtio_scsi_config, cdb_size),
495 	    VIRTIO_SCSI_CDB_SIZE);
496 }
497 
498 static int
499 vtscsi_reinit(struct vtscsi_softc *sc)
500 {
501 	device_t dev;
502 	int error;
503 
504 	dev = sc->vtscsi_dev;
505 
506 	error = virtio_reinit(dev, sc->vtscsi_features);
507 	if (error == 0) {
508 		vtscsi_write_device_config(sc);
509 		vtscsi_reinit_event_vq(sc);
510 		virtio_reinit_complete(dev);
511 
512 		vtscsi_enable_vqs_intr(sc);
513 	}
514 
515 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
516 
517 	return (error);
518 }
519 
520 static int
521 vtscsi_alloc_cam(struct vtscsi_softc *sc)
522 {
523 	device_t dev;
524 	struct cam_devq *devq;
525 	int openings;
526 
527 	dev = sc->vtscsi_dev;
528 	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
529 
530 	devq = cam_simq_alloc(openings);
531 	if (devq == NULL) {
532 		device_printf(dev, "cannot allocate SIM queue\n");
533 		return (ENOMEM);
534 	}
535 
536 	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
537 	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
538 	    openings, devq);
539 	if (sc->vtscsi_sim == NULL) {
540 		cam_simq_free(devq);
541 		device_printf(dev, "cannot allocate SIM\n");
542 		return (ENOMEM);
543 	}
544 
545 	return (0);
546 }
547 
/*
 * Register the SIM with the XPT layer and create the wildcard bus path.
 * The lock is dropped for the async registration (see comment below);
 * the fail path unwinds only what was successfully set up.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	/*
	 * The async register apparently needs to be done without
	 * the lock held, otherwise it can recurse on the lock.
	 */
	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		/* Reacquire the lock; the fail path expects it held. */
		VTSCSI_LOCK(sc);
		goto fail;
	}

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
603 
/*
 * Undo vtscsi_register_cam() and vtscsi_alloc_cam(). A non-NULL path
 * implies full CAM registration, so the async callback and bus are
 * deregistered before the SIM itself is freed.
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		/* Second argument frees the attached devq as well. */
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
626 
627 static void
628 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
629 {
630 	struct cam_sim *sim;
631 	struct vtscsi_softc *sc;
632 
633 	sim = cb_arg;
634 	sc = cam_sim_softc(sim);
635 
636 	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
637 
638 	/*
639 	 * TODO Once QEMU supports event reporting, we should
640 	 *      (un)subscribe to events here.
641 	 */
642 	switch (code) {
643 	case AC_FOUND_DEVICE:
644 		break;
645 	case AC_LOST_DEVICE:
646 		break;
647 	}
648 }
649 
650 static int
651 vtscsi_register_async(struct vtscsi_softc *sc)
652 {
653 	struct ccb_setasync csa;
654 
655 	VTSCSI_LOCK_NOTOWNED(sc);
656 
657 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
658 	csa.ccb_h.func_code = XPT_SASYNC_CB;
659 	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
660 	csa.callback = vtscsi_cam_async;
661 	csa.callback_arg = sc->vtscsi_sim;
662 
663 	xpt_action((union ccb *) &csa);
664 
665 	return (csa.ccb_h.status);
666 }
667 
668 static void
669 vtscsi_deregister_async(struct vtscsi_softc *sc)
670 {
671 	struct ccb_setasync csa;
672 
673 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
674 	csa.ccb_h.func_code = XPT_SASYNC_CB;
675 	csa.event_enable = 0;
676 	csa.callback = vtscsi_cam_async;
677 	csa.callback_arg = sc->vtscsi_sim;
678 
679 	xpt_action((union ccb *) &csa);
680 }
681 
/*
 * Main CAM entry point: dispatch an incoming CCB to the matching
 * handler. Called with the SIM (softc) lock held. Each handler is
 * responsible for completing the CCB with xpt_done().
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings are fixed; nothing can be changed. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		/* Use CAM's default extended-translation geometry. */
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
748 
/*
 * CAM polling entry point: service all virtqueue completions without
 * relying on interrupts (e.g. during kernel dumps).
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
758 
759 static void
760 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
761     union ccb *ccb)
762 {
763 	struct ccb_hdr *ccbh;
764 	struct ccb_scsiio *csio;
765 	int error;
766 
767 	ccbh = &ccb->ccb_h;
768 	csio = &ccb->csio;
769 
770 	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
771 		error = EINVAL;
772 		ccbh->status = CAM_REQ_INVALID;
773 		goto done;
774 	}
775 
776 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
777 	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
778 		error = EINVAL;
779 		ccbh->status = CAM_REQ_INVALID;
780 		goto done;
781 	}
782 
783 	error = vtscsi_start_scsi_cmd(sc, ccb);
784 
785 done:
786 	if (error) {
787 		vtscsi_dprintf(sc, VTSCSI_ERROR,
788 		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
789 		xpt_done(ccb);
790 	}
791 }
792 
793 static void
794 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
795 {
796 	struct ccb_trans_settings *cts;
797 	struct ccb_trans_settings_scsi *scsi;
798 
799 	cts = &ccb->cts;
800 	scsi = &cts->proto_specific.scsi;
801 
802 	cts->protocol = PROTO_SCSI;
803 	cts->protocol_version = SCSI_REV_SPC3;
804 	cts->transport = XPORT_SAS;
805 	cts->transport_version = 0;
806 
807 	scsi->valid = CTS_SCSI_VALID_TQ;
808 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
809 
810 	ccb->ccb_h.status = CAM_REQ_CMP;
811 	xpt_done(ccb);
812 }
813 
814 static void
815 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
816 {
817 	int error;
818 
819 	error = vtscsi_reset_bus(sc);
820 	if (error == 0)
821 		ccb->ccb_h.status = CAM_REQ_CMP;
822 	else
823 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
824 
825 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
826 	    error, ccb, ccb->ccb_h.status);
827 
828 	xpt_done(ccb);
829 }
830 
/*
 * Handle an XPT_RESET_DEV CCB: grab a request and issue a LUN reset
 * task-management command. On success the CCB completes from the
 * control virtqueue; on failure it is completed here.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* Out of requests; freeze until one is returned. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* The command was never enqueued; recycle the request. */
	vtscsi_enqueue_request(sc, req);

fail:
	/* Note: req is NULL here when the dequeue above failed. */
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
866 
/*
 * Handle an XPT_ABORT CCB: grab a request and issue an abort-task
 * task-management command for the CCB to be aborted. Mirrors
 * vtscsi_cam_reset_dev() in structure and error handling.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* Out of requests; freeze until one is returned. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* The command was never enqueued; recycle the request. */
	vtscsi_enqueue_request(sc, req);

fail:
	/* Note: req is NULL here when the dequeue above failed. */
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
902 
903 static void
904 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
905     union ccb *ccb)
906 {
907 	device_t dev;
908 	struct ccb_pathinq *cpi;
909 
910 	dev = sc->vtscsi_dev;
911 	cpi = &ccb->cpi;
912 
913 	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
914 
915 	cpi->version_num = 1;
916 	cpi->hba_inquiry = PI_TAG_ABLE;
917 	cpi->target_sprt = 0;
918 	cpi->hba_misc = PIM_SEQSCAN;
919 	if (vtscsi_bus_reset_disable != 0)
920 		cpi->hba_misc |= PIM_NOBUSRESET;
921 	cpi->hba_eng_cnt = 0;
922 
923 	cpi->max_target = sc->vtscsi_max_target;
924 	cpi->max_lun = sc->vtscsi_max_lun;
925 	cpi->initiator_id = VTSCSI_INITIATOR_ID;
926 
927 	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
928 	strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
929 	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
930 
931 	cpi->unit_number = cam_sim_unit(sim);
932 	cpi->bus_id = cam_sim_bus(sim);
933 
934 	cpi->base_transfer_speed = 300000;
935 
936 	cpi->protocol = PROTO_SCSI;
937 	cpi->protocol_version = SCSI_REV_SPC3;
938 	cpi->transport = XPORT_SAS;
939 	cpi->transport_version = 0;
940 
941 	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
942 	    PAGE_SIZE;
943 
944 	cpi->hba_vendor = virtio_get_vendor(dev);
945 	cpi->hba_device = virtio_get_device(dev);
946 	cpi->hba_subvendor = virtio_get_subvendor(dev);
947 	cpi->hba_subdevice = virtio_get_subdevice(dev);
948 
949 	ccb->ccb_h.status = CAM_REQ_CMP;
950 	xpt_done(ccb);
951 }
952 
/*
 * Append the data buffer(s) of a SCSI I/O CCB to the sglist. Handles
 * the two CCB data layouts visible here: a single virtual or physical
 * buffer, or a bus_dma_segment scatter list (virtual or physical,
 * per CAM_SG_LIST_PHYS). Returns the first sglist_append* error, or 0.
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {

		/* Single contiguous buffer, virtual or physical. */
		if ((ccbh->flags & CAM_DATA_PHYS) == 0)
			error = sglist_append(sg,
			    csio->data_ptr, csio->dxfer_len);
		else
			error = sglist_append_phys(sg,
			    (vm_paddr_t)(vm_offset_t) csio->data_ptr,
			    csio->dxfer_len);
	} else {

		/* data_ptr points at an array of bus_dma segments. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];

			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
				error = sglist_append(sg,
				    (void *)(vm_offset_t) dseg->ds_addr,
				    dseg->ds_len);
			else
				error = sglist_append_phys(sg,
				    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
	}

	return (error);
}
990 
/*
 * Build the sglist for a SCSI command. Layout required by VirtIO SCSI:
 * device-readable segments first (command header, then data-out if any),
 * followed by device-writable segments (response, then data-in if any).
 * On success, *readable/*writable receive the two segment counts for
 * virtqueue_enqueue(); on overflow the CCB status is set and EFBIG is
 * returned.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	/* First append cannot overflow the freshly reset sglist. */
	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1046 
/*
 * Enqueue a SCSI command on the request virtqueue, notify the host, and
 * arm the per-request timeout callout if the CCB has one. On an enqueue
 * failure (ring full) the CCB is marked for requeue and the SIM queue
 * is frozen; the caller recycles the request.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Sentinel; the host overwrites this with the real response code. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* CCB timeout is in milliseconds; callout takes ticks. */
		callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
		    vtscsi_timedout_scsi_cmd, req);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1100 
1101 static int
1102 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1103 {
1104 	struct vtscsi_request *req;
1105 	int error;
1106 
1107 	req = vtscsi_dequeue_request(sc);
1108 	if (req == NULL) {
1109 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1110 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1111 		return (ENOBUFS);
1112 	}
1113 
1114 	req->vsr_ccb = ccb;
1115 
1116 	error = vtscsi_execute_scsi_cmd(sc, req);
1117 	if (error)
1118 		vtscsi_enqueue_request(sc, req);
1119 
1120 	return (error);
1121 }
1122 
/*
 * Completion handler for the ABORT_TASK TMF issued on behalf of a
 * timed-out SCSI command. If the abort did not take effect (and the
 * adapter is not detaching or already resetting), escalate to a full
 * bus reset to recover the stuck command.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done; return it to the free pool. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	/* Last resort: reset the bus. */
	vtscsi_reset_bus(sc);
}
1161 
/*
 * Send an asynchronous VIRTIO_SCSI_T_TMF_ABORT_TASK for a SCSI command
 * whose timeout fired; the outcome is handled by
 * vtscsi_complete_abort_timedout_scsi_cmd(). Returns 0 if the TMF was
 * submitted, otherwise an errno (ENOBUFS when no request is free).
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The tag identifies to the host which command to abort. */
	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	/* One readable (request) and one writable (response) segment. */
	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	/* Invalid response value; the host overwrites it on completion. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1209 
/*
 * Callout handler fired when a SCSI command's timeout expires. In
 * order: bail if the request already completed or the callout raced
 * with being stopped, complete the request queue in case the command
 * is merely pending, send an ABORT_TASK TMF, and finally fall back
 * to a bus reset. Runs with the softc mutex held (callout_init_mtx).
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	/* Try a targeted abort first; only reset the bus if that fails. */
	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1251 
1252 static cam_status
1253 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1254 {
1255 	cam_status status;
1256 
1257 	switch (cmd_resp->response) {
1258 	case VIRTIO_SCSI_S_OK:
1259 		status = CAM_REQ_CMP;
1260 		break;
1261 	case VIRTIO_SCSI_S_OVERRUN:
1262 		status = CAM_DATA_RUN_ERR;
1263 		break;
1264 	case VIRTIO_SCSI_S_ABORTED:
1265 		status = CAM_REQ_ABORTED;
1266 		break;
1267 	case VIRTIO_SCSI_S_BAD_TARGET:
1268 		status = CAM_TID_INVALID;
1269 		break;
1270 	case VIRTIO_SCSI_S_RESET:
1271 		status = CAM_SCSI_BUS_RESET;
1272 		break;
1273 	case VIRTIO_SCSI_S_BUSY:
1274 		status = CAM_SCSI_BUSY;
1275 		break;
1276 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1277 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1278 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1279 		status = CAM_SCSI_IT_NEXUS_LOST;
1280 		break;
1281 	default: /* VIRTIO_SCSI_S_FAILURE */
1282 		status = CAM_REQ_CMP_ERR;
1283 		break;
1284 	}
1285 
1286 	return (status);
1287 }
1288 
1289 static cam_status
1290 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1291     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1292 {
1293 	cam_status status;
1294 
1295 	csio->scsi_status = cmd_resp->status;
1296 	csio->resid = cmd_resp->resid;
1297 
1298 	if (csio->scsi_status == SCSI_STATUS_OK)
1299 		status = CAM_REQ_CMP;
1300 	else
1301 		status = CAM_SCSI_STATUS_ERROR;
1302 
1303 	if (cmd_resp->sense_len > 0) {
1304 		status |= CAM_AUTOSNS_VALID;
1305 
1306 		if (cmd_resp->sense_len < csio->sense_len)
1307 			csio->sense_resid = csio->sense_len -
1308 			    cmd_resp->sense_len;
1309 		else
1310 			csio->sense_resid = 0;
1311 
1312 		bzero(&csio->sense_data, sizeof(csio->sense_data));
1313 		memcpy(cmd_resp->sense, &csio->sense_data,
1314 		    csio->sense_len - csio->sense_resid);
1315 	}
1316 
1317 	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1318 	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1319 	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1320 
1321 	return (status);
1322 }
1323 
/*
 * Completion handler for a SCSI command. Stops the timeout callout,
 * maps the VirtIO response to a CAM status (treating an abort of a
 * timed-out command as CAM_CMD_TIMEOUT and folding in SCSI status and
 * sense data on success), freezes the device queue on error, and
 * returns the CCB to CAM and the request to the free pool.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort we issued after a timeout reports as a timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* A request and a virtqueue slot just freed up; thaw the SIMQ. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1364 
1365 static void
1366 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1367 {
1368 
1369 	/* XXX We probably shouldn't poll forever. */
1370 	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1371 	do
1372 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1373 	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1374 
1375 	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1376 }
1377 
1378 static int
1379 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1380     struct sglist *sg, int readable, int writable, int flag)
1381 {
1382 	struct virtqueue *vq;
1383 	int error;
1384 
1385 	vq = sc->vtscsi_control_vq;
1386 
1387 	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1388 
1389 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1390 	if (error) {
1391 		/*
1392 		 * Return EAGAIN when the virtqueue does not have enough
1393 		 * descriptors available.
1394 		 */
1395 		if (error == ENOSPC || error == EMSGSIZE)
1396 			error = EAGAIN;
1397 
1398 		return (error);
1399 	}
1400 
1401 	virtqueue_notify(vq);
1402 	if (flag == VTSCSI_EXECUTE_POLL)
1403 		vtscsi_poll_ctrl_req(sc, req);
1404 
1405 	return (0);
1406 }
1407 
1408 static void
1409 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1410     struct vtscsi_request *req)
1411 {
1412 	union ccb *ccb;
1413 	struct ccb_hdr *ccbh;
1414 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1415 
1416 	ccb = req->vsr_ccb;
1417 	ccbh = &ccb->ccb_h;
1418 	tmf_resp = &req->vsr_tmf_resp;
1419 
1420 	switch (tmf_resp->response) {
1421 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1422 		ccbh->status = CAM_REQ_CMP;
1423 		break;
1424 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1425 		ccbh->status = CAM_UA_ABORT;
1426 		break;
1427 	default:
1428 		ccbh->status = CAM_REQ_CMP_ERR;
1429 		break;
1430 	}
1431 
1432 	xpt_done(ccb);
1433 	vtscsi_enqueue_request(sc, req);
1434 }
1435 
/*
 * Service an XPT_ABORT CCB by sending an asynchronous ABORT_TASK TMF
 * for the CCB it references. Only XPT_SCSI_IO CCBs that are currently
 * in flight can be aborted; completion is handled by
 * vtscsi_complete_abort_task_cmd(). Returns 0 when the TMF was
 * submitted, otherwise an errno describing why it was not attempted.
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Stop its timeout callout so it does not fire mid-abort. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The tag tells the host which command to abort. */
	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	/* Invalid response value; the host overwrites it on completion. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1493 
1494 static void
1495 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1496     struct vtscsi_request *req)
1497 {
1498 	union ccb *ccb;
1499 	struct ccb_hdr *ccbh;
1500 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1501 
1502 	ccb = req->vsr_ccb;
1503 	ccbh = &ccb->ccb_h;
1504 	tmf_resp = &req->vsr_tmf_resp;
1505 
1506 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1507 	    req, ccb, tmf_resp->response);
1508 
1509 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1510 		ccbh->status = CAM_REQ_CMP;
1511 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1512 		    ccbh->target_lun);
1513 	} else
1514 		ccbh->status = CAM_REQ_CMP_ERR;
1515 
1516 	xpt_done(ccb);
1517 	vtscsi_enqueue_request(sc, req);
1518 }
1519 
1520 static int
1521 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1522     struct vtscsi_request *req)
1523 {
1524 	struct sglist *sg;
1525 	struct ccb_resetdev *crd;
1526 	struct ccb_hdr *ccbh;
1527 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
1528 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1529 	uint32_t subtype;
1530 	int error;
1531 
1532 	sg = sc->vtscsi_sglist;
1533 	crd = &req->vsr_ccb->crd;
1534 	ccbh = &crd->ccb_h;
1535 	tmf_req = &req->vsr_tmf_req;
1536 	tmf_resp = &req->vsr_tmf_resp;
1537 
1538 	if (ccbh->target_lun == CAM_LUN_WILDCARD)
1539 		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1540 	else
1541 		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
1542 
1543 	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
1544 
1545 	sglist_reset(sg);
1546 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1547 	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1548 
1549 	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
1550 	tmf_resp->response = -1;
1551 
1552 	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1553 	    VTSCSI_EXECUTE_ASYNC);
1554 
1555 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
1556 	    error, req, ccbh);
1557 
1558 	return (error);
1559 }
1560 
1561 static void
1562 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1563 {
1564 
1565 	*target_id = lun[1];
1566 	*lun_id = (lun[2] << 8) | lun[3];
1567 }
1568 
1569 static void
1570 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1571 {
1572 
1573 	lun[0] = 1;
1574 	lun[1] = ccbh->target_id;
1575 	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1576 	lun[3] = (ccbh->target_lun >> 8) & 0xFF;
1577 }
1578 
1579 static void
1580 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1581     struct virtio_scsi_cmd_req *cmd_req)
1582 {
1583 	uint8_t attr;
1584 
1585 	switch (csio->tag_action) {
1586 	case MSG_HEAD_OF_Q_TAG:
1587 		attr = VIRTIO_SCSI_S_HEAD;
1588 		break;
1589 	case MSG_ORDERED_Q_TAG:
1590 		attr = VIRTIO_SCSI_S_ORDERED;
1591 		break;
1592 	case MSG_ACA_TASK:
1593 		attr = VIRTIO_SCSI_S_ACA;
1594 		break;
1595 	default: /* MSG_SIMPLE_Q_TAG */
1596 		attr = VIRTIO_SCSI_S_SIMPLE;
1597 		break;
1598 	}
1599 
1600 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1601 	cmd_req->tag = (uintptr_t) csio;
1602 	cmd_req->task_attr = attr;
1603 
1604 	memcpy(cmd_req->cdb,
1605 	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1606 	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1607 	    csio->cdb_len);
1608 }
1609 
1610 static void
1611 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
1612     uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1613 {
1614 
1615 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
1616 
1617 	tmf_req->type = VIRTIO_SCSI_T_TMF;
1618 	tmf_req->subtype = subtype;
1619 	tmf_req->tag = tag;
1620 }
1621 
1622 static void
1623 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1624 {
1625 	int frozen;
1626 
1627 	frozen = sc->vtscsi_frozen;
1628 
1629 	if (reason & VTSCSI_REQUEST &&
1630 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1631 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1632 
1633 	if (reason & VTSCSI_REQUEST_VQ &&
1634 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1635 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1636 
1637 	/* Freeze the SIMQ if transitioned to frozen. */
1638 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1639 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1640 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1641 	}
1642 }
1643 
1644 static int
1645 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1646 {
1647 	int thawed;
1648 
1649 	if (sc->vtscsi_frozen == 0 || reason == 0)
1650 		return (0);
1651 
1652 	if (reason & VTSCSI_REQUEST &&
1653 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1654 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1655 
1656 	if (reason & VTSCSI_REQUEST_VQ &&
1657 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1658 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1659 
1660 	thawed = sc->vtscsi_frozen == 0;
1661 	if (thawed != 0)
1662 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1663 
1664 	return (thawed);
1665 }
1666 
1667 static void
1668 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1669     target_id_t target_id, lun_id_t lun_id)
1670 {
1671 	struct cam_path *path;
1672 
1673 	/* Use the wildcard path from our softc for bus announcements. */
1674 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1675 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1676 		return;
1677 	}
1678 
1679 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1680 	    target_id, lun_id) != CAM_REQ_CMP) {
1681 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1682 		return;
1683 	}
1684 
1685 	xpt_async(ac_code, path, NULL);
1686 	xpt_free_path(path);
1687 }
1688 
1689 static void
1690 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1691     lun_id_t lun_id)
1692 {
1693 	union ccb *ccb;
1694 	cam_status status;
1695 
1696 	ccb = xpt_alloc_ccb_nowait();
1697 	if (ccb == NULL) {
1698 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1699 		return;
1700 	}
1701 
1702 	status = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1703 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1704 	if (status != CAM_REQ_CMP) {
1705 		xpt_free_ccb(ccb);
1706 		return;
1707 	}
1708 
1709 	xpt_rescan(ccb);
1710 }
1711 
1712 static void
1713 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
1714 {
1715 
1716 	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1717 }
1718 
1719 static void
1720 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1721     struct virtio_scsi_event *event)
1722 {
1723 	target_id_t target_id;
1724 	lun_id_t lun_id;
1725 
1726 	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1727 
1728 	switch (event->reason) {
1729 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1730 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1731 		vtscsi_execute_rescan(sc, target_id, lun_id);
1732 		break;
1733 	default:
1734 		device_printf(sc->vtscsi_dev,
1735 		    "unhandled transport event reason: %d\n", event->reason);
1736 		break;
1737 	}
1738 }
1739 
1740 static void
1741 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1742 {
1743 	int error;
1744 
1745 	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1746 		switch (event->event) {
1747 		case VIRTIO_SCSI_T_TRANSPORT_RESET:
1748 			vtscsi_transport_reset_event(sc, event);
1749 			break;
1750 		default:
1751 			device_printf(sc->vtscsi_dev,
1752 			    "unhandled event: %d\n", event->event);
1753 			break;
1754 		}
1755 	} else
1756 		vtscsi_execute_rescan_bus(sc);
1757 
1758 	/*
1759 	 * This should always be successful since the buffer
1760 	 * was just dequeued.
1761 	 */
1762 	error = vtscsi_enqueue_event_buf(sc, event);
1763 	KASSERT(error == 0,
1764 	    ("cannot requeue event buffer: %d", error));
1765 }
1766 
1767 static int
1768 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1769     struct virtio_scsi_event *event)
1770 {
1771 	struct sglist *sg;
1772 	struct virtqueue *vq;
1773 	int size, error;
1774 
1775 	sg = sc->vtscsi_sglist;
1776 	vq = sc->vtscsi_event_vq;
1777 	size = sc->vtscsi_event_buf_size;
1778 
1779 	bzero(event, size);
1780 
1781 	sglist_reset(sg);
1782 	error = sglist_append(sg, event, size);
1783 	if (error)
1784 		return (error);
1785 
1786 	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1787 	if (error)
1788 		return (error);
1789 
1790 	virtqueue_notify(vq);
1791 
1792 	return (0);
1793 }
1794 
/*
 * Post the initial set of event buffers to the event virtqueue.
 * Returns 0 when at least one buffer was posted, or when event
 * buffers are disabled entirely; otherwise the enqueue error.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	/* Event buffers disabled or too small; nothing to post. */
	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1831 
/*
 * Repost all event buffers after the device was reinitialized (e.g.
 * following a bus reset); the virtqueues were drained, so every
 * buffer must be enqueued again.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	/* Same hotplug/size gate as the initial setup. */
	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/* One buffer suffices; missed events are flagged by the host. */
	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1852 
1853 static void
1854 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1855 {
1856 	struct virtqueue *vq;
1857 	int last;
1858 
1859 	vq = sc->vtscsi_event_vq;
1860 	last = 0;
1861 
1862 	while (virtqueue_drain(vq, &last) != NULL)
1863 		;
1864 
1865 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1866 }
1867 
1868 static void
1869 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1870 {
1871 
1872 	VTSCSI_LOCK_OWNED(sc);
1873 
1874 	if (sc->vtscsi_request_vq != NULL)
1875 		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1876 	if (sc->vtscsi_control_vq != NULL)
1877 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1878 }
1879 
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1888 
/*
 * Complete a request being cancelled because its virtqueue is drained
 * (device detach or bus reset). The CCB, if any, is failed back to
 * CAM with CAM_NO_HBA on detach or CAM_REQUEUE_REQ on reset, and the
 * request is returned to the free pool.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* On detach the lock was not held; take it for xpt_done(). */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1932 
1933 static void
1934 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1935 {
1936 	struct vtscsi_request *req;
1937 	int last;
1938 
1939 	last = 0;
1940 
1941 	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1942 
1943 	while ((req = virtqueue_drain(vq, &last)) != NULL)
1944 		vtscsi_cancel_request(sc, req);
1945 
1946 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1947 }
1948 
1949 static void
1950 vtscsi_drain_vqs(struct vtscsi_softc *sc)
1951 {
1952 
1953 	if (sc->vtscsi_control_vq != NULL)
1954 		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
1955 	if (sc->vtscsi_request_vq != NULL)
1956 		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
1957 	if (sc->vtscsi_event_vq != NULL)
1958 		vtscsi_drain_event_vq(sc);
1959 }
1960 
1961 static void
1962 vtscsi_stop(struct vtscsi_softc *sc)
1963 {
1964 
1965 	vtscsi_disable_vqs_intr(sc);
1966 	virtio_stop(sc->vtscsi_dev);
1967 }
1968 
/*
 * Reset the emulated SCSI bus: stop the device, complete and drain
 * all in-flight requests so CAM retries them, thaw the SIMQ, and
 * reinitialize the device. Must be called with the softc mutex held.
 * Returns 0 on success or the error from reinitialization.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	/* Flag the reset so other paths do not initiate a second one. */
	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2014 
/*
 * One-time initialization of a request: sanity-check that the
 * device-readable and device-writable structures each map to a single
 * sglist segment (i.e. do not cross a page boundary), then attach the
 * softc and set up the timeout callout under the softc mutex.
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2032 
2033 static int
2034 vtscsi_alloc_requests(struct vtscsi_softc *sc)
2035 {
2036 	struct vtscsi_request *req;
2037 	int i, nreqs;
2038 
2039 	/*
2040 	 * Commands destined for either the request or control queues come
2041 	 * from the same SIM queue. Use the size of the request virtqueue
2042 	 * as it (should) be much more frequently used. Some additional
2043 	 * requests are allocated for internal (TMF) use.
2044 	 */
2045 	nreqs = virtqueue_size(sc->vtscsi_request_vq);
2046 	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2047 		nreqs /= VTSCSI_MIN_SEGMENTS;
2048 	nreqs += VTSCSI_RESERVED_REQUESTS;
2049 
2050 	for (i = 0; i < nreqs; i++) {
2051 		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
2052 		    M_NOWAIT);
2053 		if (req == NULL)
2054 			return (ENOMEM);
2055 
2056 		vtscsi_init_request(sc, req);
2057 
2058 		sc->vtscsi_nrequests++;
2059 		vtscsi_enqueue_request(sc, req);
2060 	}
2061 
2062 	return (0);
2063 }
2064 
2065 static void
2066 vtscsi_free_requests(struct vtscsi_softc *sc)
2067 {
2068 	struct vtscsi_request *req;
2069 
2070 	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2071 		KASSERT(callout_active(&req->vsr_callout) == 0,
2072 		    ("request callout still active"));
2073 
2074 		sc->vtscsi_nrequests--;
2075 		free(req, M_DEVBUF);
2076 	}
2077 
2078 	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2079 	    sc->vtscsi_nrequests));
2080 }
2081 
/*
 * Return a request to the free pool, clearing all per-command state.
 * Since a request just became available, the SIMQ is thawed if it was
 * frozen for lack of requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2111 
2112 static struct vtscsi_request *
2113 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2114 {
2115 	struct vtscsi_request *req;
2116 
2117 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2118 	if (req != NULL) {
2119 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2120 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2121 	} else
2122 		sc->vtscsi_stats.dequeue_no_requests++;
2123 
2124 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2125 
2126 	return (req);
2127 }
2128 
2129 static void
2130 vtscsi_complete_request(struct vtscsi_request *req)
2131 {
2132 
2133 	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2134 		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2135 
2136 	if (req->vsr_complete != NULL)
2137 		req->vsr_complete(req->vsr_softc, req);
2138 }
2139 
2140 static void
2141 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2142 {
2143 	struct vtscsi_request *req;
2144 
2145 	VTSCSI_LOCK_OWNED(sc);
2146 
2147 	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2148 		vtscsi_complete_request(req);
2149 }
2150 
2151 static void
2152 vtscsi_control_vq_task(void *arg, int pending)
2153 {
2154 	struct vtscsi_softc *sc;
2155 	struct virtqueue *vq;
2156 
2157 	sc = arg;
2158 	vq = sc->vtscsi_control_vq;
2159 
2160 	VTSCSI_LOCK(sc);
2161 
2162 	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2163 
2164 	if (virtqueue_enable_intr(vq) != 0) {
2165 		virtqueue_disable_intr(vq);
2166 		VTSCSI_UNLOCK(sc);
2167 		taskqueue_enqueue_fast(sc->vtscsi_tq,
2168 		    &sc->vtscsi_control_intr_task);
2169 		return;
2170 	}
2171 
2172 	VTSCSI_UNLOCK(sc);
2173 }
2174 
2175 static void
2176 vtscsi_event_vq_task(void *arg, int pending)
2177 {
2178 	struct vtscsi_softc *sc;
2179 	struct virtqueue *vq;
2180 	struct virtio_scsi_event *event;
2181 
2182 	sc = arg;
2183 	vq = sc->vtscsi_event_vq;
2184 
2185 	VTSCSI_LOCK(sc);
2186 
2187 	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2188 		vtscsi_handle_event(sc, event);
2189 
2190 	if (virtqueue_enable_intr(vq) != 0) {
2191 		virtqueue_disable_intr(vq);
2192 		VTSCSI_UNLOCK(sc);
2193 		taskqueue_enqueue_fast(sc->vtscsi_tq,
2194 		    &sc->vtscsi_control_intr_task);
2195 		return;
2196 	}
2197 
2198 	VTSCSI_UNLOCK(sc);
2199 }
2200 
2201 static void
2202 vtscsi_request_vq_task(void *arg, int pending)
2203 {
2204 	struct vtscsi_softc *sc;
2205 	struct virtqueue *vq;
2206 
2207 	sc = arg;
2208 	vq = sc->vtscsi_request_vq;
2209 
2210 	VTSCSI_LOCK(sc);
2211 
2212 	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2213 
2214 	if (virtqueue_enable_intr(vq) != 0) {
2215 		virtqueue_disable_intr(vq);
2216 		VTSCSI_UNLOCK(sc);
2217 		taskqueue_enqueue_fast(sc->vtscsi_tq,
2218 		    &sc->vtscsi_request_intr_task);
2219 		return;
2220 	}
2221 
2222 	VTSCSI_UNLOCK(sc);
2223 }
2224 
2225 static int
2226 vtscsi_control_vq_intr(void *xsc)
2227 {
2228 	struct vtscsi_softc *sc;
2229 
2230 	sc = xsc;
2231 
2232 	virtqueue_disable_intr(sc->vtscsi_control_vq);
2233 	taskqueue_enqueue_fast(sc->vtscsi_tq,
2234 	    &sc->vtscsi_control_intr_task);
2235 
2236 	return (1);
2237 }
2238 
2239 static int
2240 vtscsi_event_vq_intr(void *xsc)
2241 {
2242 	struct vtscsi_softc *sc;
2243 
2244 	sc = xsc;
2245 
2246 	virtqueue_disable_intr(sc->vtscsi_event_vq);
2247 	taskqueue_enqueue_fast(sc->vtscsi_tq,
2248 	    &sc->vtscsi_event_intr_task);
2249 
2250 	return (1);
2251 }
2252 
2253 static int
2254 vtscsi_request_vq_intr(void *xsc)
2255 {
2256 	struct vtscsi_softc *sc;
2257 
2258 	sc = xsc;
2259 
2260 	virtqueue_disable_intr(sc->vtscsi_request_vq);
2261 	taskqueue_enqueue_fast(sc->vtscsi_tq,
2262 	    &sc->vtscsi_request_intr_task);
2263 
2264 	return (1);
2265 }
2266 
/* Mask interrupts on all three virtqueues (control, event, request). */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2275 
/*
 * Unmask interrupts on all three virtqueues.  The virtqueue_enable_intr()
 * return values (which indicate entries already pending) are ignored here;
 * NOTE(review): presumably the caller handles any pending completions
 * separately — confirm at the call sites.
 */
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}
2284 
2285 static void
2286 vtscsi_get_tunables(struct vtscsi_softc *sc)
2287 {
2288 	char tmpstr[64];
2289 
2290 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2291 
2292 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2293 	    device_get_unit(sc->vtscsi_dev));
2294 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2295 }
2296 
2297 static void
2298 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2299 {
2300 	device_t dev;
2301 	struct vtscsi_statistics *stats;
2302         struct sysctl_ctx_list *ctx;
2303 	struct sysctl_oid *tree;
2304 	struct sysctl_oid_list *child;
2305 
2306 	dev = sc->vtscsi_dev;
2307 	stats = &sc->vtscsi_stats;
2308 	ctx = device_get_sysctl_ctx(dev);
2309 	tree = device_get_sysctl_tree(dev);
2310 	child = SYSCTL_CHILDREN(tree);
2311 
2312 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2313 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2314 	    "Debug level");
2315 
2316 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2317 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2318 	    "SCSI command timeouts");
2319 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2320 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2321 	    "No available requests to dequeue");
2322 }
2323 
/*
 * Debug/diagnostic printf for a request: prints the device name, the
 * calling function, a CAM path description (or a "noperiph" sim tuple
 * when no CCB is attached), the SCSI CDB for XPT_SCSI_IO requests, and
 * then the caller-supplied printf-style message.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];		/* fixed backing store for the sbuf */
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	/* Flags 0: fixed-length sbuf over the local str[] buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		/* No CCB: identify the request by its SIM instead. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* Include the decoded CDB and transfer length. */
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2364