xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision 64d8b0b4)
1 /*-
2  * Copyright (c) 2012, Bryan Venteicher <bryanv@daemoninthecloset.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /* Driver for VirtIO SCSI devices. */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/sglist.h>
39 #include <sys/sysctl.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/callout.h>
43 #include <sys/taskqueue.h>
44 #include <sys/queue.h>
45 #include <sys/sbuf.h>
46 
47 #include <machine/stdarg.h>
48 
49 #include <machine/bus.h>
50 #include <machine/resource.h>
51 #include <sys/bus.h>
52 #include <sys/rman.h>
53 
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_sim.h>
57 #include <cam/cam_periph.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_debug.h>
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/scsi/scsi_message.h>
62 
63 #include <dev/virtio/virtio.h>
64 #include <dev/virtio/virtqueue.h>
65 #include <dev/virtio/scsi/virtio_scsi.h>
66 #include <dev/virtio/scsi/virtio_scsivar.h>
67 
68 #include "virtio_if.h"
69 
70 static int	vtscsi_modevent(module_t, int, void *);
71 
72 static int	vtscsi_probe(device_t);
73 static int	vtscsi_attach(device_t);
74 static int	vtscsi_detach(device_t);
75 static int	vtscsi_suspend(device_t);
76 static int	vtscsi_resume(device_t);
77 
78 static void	vtscsi_negotiate_features(struct vtscsi_softc *);
79 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
80 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
81 static void	vtscsi_write_device_config(struct vtscsi_softc *);
82 static int	vtscsi_reinit(struct vtscsi_softc *);
83 
84 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
85 static int 	vtscsi_register_cam(struct vtscsi_softc *);
86 static void	vtscsi_free_cam(struct vtscsi_softc *);
87 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
88 static int	vtscsi_register_async(struct vtscsi_softc *);
89 static void	vtscsi_deregister_async(struct vtscsi_softc *);
90 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
91 static void	vtscsi_cam_poll(struct cam_sim *);
92 
93 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
94 		    union ccb *);
95 static void 	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
96 		    union ccb *);
97 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
98 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
99 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
100 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
101 		    struct cam_sim *, union ccb *);
102 
103 static int 	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
104 		    struct sglist *, struct ccb_scsiio *);
105 static int 	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
106 		    struct vtscsi_request *, int *, int *);
107 static int 	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
108 		    struct vtscsi_request *);
109 static int 	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
110 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
111 		    struct vtscsi_request *);
112 static int 	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
113 		    struct vtscsi_request *);
114 static void	vtscsi_timedout_scsi_cmd(void *);
115 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
116 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
117 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
118 static void 	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
119 		    struct vtscsi_request *);
120 
121 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
122 		    struct vtscsi_request *);
123 static int 	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
124 		    struct vtscsi_request *, struct sglist *, int, int, int);
125 static void 	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
126 		    struct vtscsi_request *);
127 static int 	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
128 		    struct vtscsi_request *);
129 static int 	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
130 		    struct vtscsi_request *);
131 
132 static void 	vtscsi_get_request_lun(uint8_t lun[], target_id_t *, lun_id_t *);
133 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
134 static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
135 		    struct virtio_scsi_cmd_req *);
136 static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
137 		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
138 
139 static void 	vtscsi_freeze_simq(struct vtscsi_softc *, int);
140 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
141 
142 static void 	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
143 		    lun_id_t);
144 static void 	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
145 		    lun_id_t);
146 static void 	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
147 
148 static void 	vtscsi_handle_event(struct vtscsi_softc *,
149 		    struct virtio_scsi_event *);
150 static int 	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
151 		    struct virtio_scsi_event *);
152 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
153 static void 	vtscsi_reinit_event_vq(struct vtscsi_softc *);
154 static void 	vtscsi_drain_event_vq(struct vtscsi_softc *);
155 
156 static void 	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
157 static void 	vtscsi_complete_vqs(struct vtscsi_softc *);
158 static void 	vtscsi_drain_vqs(struct vtscsi_softc *);
159 static void 	vtscsi_cancel_request(struct vtscsi_softc *,
160 		    struct vtscsi_request *);
161 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
162 static void	vtscsi_stop(struct vtscsi_softc *);
163 static int	vtscsi_reset_bus(struct vtscsi_softc *);
164 
165 static void 	vtscsi_init_request(struct vtscsi_softc *,
166 		    struct vtscsi_request *);
167 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
168 static void	vtscsi_free_requests(struct vtscsi_softc *);
169 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
170 		    struct vtscsi_request *);
171 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
172 
173 static void	vtscsi_complete_request(struct vtscsi_request *);
174 static void 	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
175 static void	vtscsi_control_vq_task(void *, int);
176 static void	vtscsi_event_vq_task(void *, int);
177 static void	vtscsi_request_vq_task(void *, int);
178 
179 static int	vtscsi_control_vq_intr(void *);
180 static int	vtscsi_event_vq_intr(void *);
181 static int	vtscsi_request_vq_intr(void *);
182 static void 	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
183 static void 	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
184 
185 static void 	vtscsi_get_tunables(struct vtscsi_softc *);
186 static void 	vtscsi_add_sysctl(struct vtscsi_softc *);
187 
188 static void 	vtscsi_printf_req(struct vtscsi_request *, const char *,
189 		    const char *, ...);
190 
191 /* Global tunables. */
192 /*
193  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
194  * IO during virtio_stop(). So in-flight requests still complete after the
195  * device reset. We would have to wait for all the in-flight IO to complete,
196  * which defeats the typical purpose of a bus reset. We could simulate the
197  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
198  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
199  * control virtqueue). But this isn't very useful if things really go off
200  * the rails, so default to disabled for now.
201  */
/* Disabled by default; see the rationale in the comment above. */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

/* Human-readable names for the negotiated VirtIO SCSI feature bits. */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},

	{ 0, NULL }
};

static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

/* Attach below virtio_pci; depends on the virtio core and the CAM layer. */
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
235 
236 static int
237 vtscsi_modevent(module_t mod, int type, void *unused)
238 {
239 	int error;
240 
241 	switch (type) {
242 	case MOD_LOAD:
243 	case MOD_QUIESCE:
244 	case MOD_UNLOAD:
245 	case MOD_SHUTDOWN:
246 		error = 0;
247 		break;
248 	default:
249 		error = EOPNOTSUPP;
250 		break;
251 	}
252 
253 	return (error);
254 }
255 
256 static int
257 vtscsi_probe(device_t dev)
258 {
259 
260 	if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
261 		return (ENXIO);
262 
263 	device_set_desc(dev, "VirtIO SCSI Adapter");
264 
265 	return (BUS_PROBE_DEFAULT);
266 }
267 
/*
 * Attach method: negotiate features, read the device configuration,
 * allocate the virtqueues, requests, taskqueue and CAM structures,
 * then enable interrupts and finally register with CAM.  On any
 * failure the shared "fail" path calls vtscsi_detach() to unwind
 * whatever was allocated so far.
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtscsi_feature_desc);
	vtscsi_negotiate_features(sc);

	/* Cache the negotiated features as softc flags for cheap tests. */
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	virtio_read_device_config(dev, 0, &scsicfg,
	    sizeof(struct virtio_scsi_config));

	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	/* Push our sense/CDB sizes to the device before any requests. */
	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	/* One deferred task per virtqueue, all serviced by one taskqueue. */
	TASK_INIT(&sc->vtscsi_control_intr_task, 0,
	    vtscsi_control_vq_task, sc);
	TASK_INIT(&sc->vtscsi_event_intr_task, 0,
	    vtscsi_event_vq_task, sc);
	TASK_INIT(&sc->vtscsi_request_intr_task, 0,
	    vtscsi_request_vq_task, sc);

	sc->vtscsi_tq = taskqueue_create_fast("vtscsi_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtscsi_tq);
	if (sc->vtscsi_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		goto fail;
	}
	error = taskqueue_start_threads(&sc->vtscsi_tq, 1, PI_DISK, "%s taskq",
	    device_get_nameunit(dev));
	if (error) {
		device_printf(dev, "cannot start taskqueue threads\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

	/* The success path falls through here with error == 0. */
fail:
	if (error)
		vtscsi_detach(dev);

	return (error);
}
381 
/*
 * Detach method; also used by vtscsi_attach() to unwind a partial
 * attach, so every teardown step tolerates state that was never set
 * up.  The DETACH flag is raised under the lock first so in-progress
 * CCBs and completions see it, then the taskqueue is drained before
 * the virtqueues and CAM state are torn down.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	/* Drain with the lock dropped; the tasks take the lock themselves. */
	if (sc->vtscsi_tq != NULL) {
		taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_control_intr_task);
		taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_event_intr_task);
		taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_request_intr_task);
		taskqueue_free(sc->vtscsi_tq);
		sc->vtscsi_tq = NULL;
	}

	/* Complete anything outstanding, then cancel what remains. */
	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
418 
419 static int
420 vtscsi_suspend(device_t dev)
421 {
422 
423 	return (0);
424 }
425 
426 static int
427 vtscsi_resume(device_t dev)
428 {
429 
430 	return (0);
431 }
432 
433 static void
434 vtscsi_negotiate_features(struct vtscsi_softc *sc)
435 {
436 	device_t dev;
437 	uint64_t features;
438 
439 	dev = sc->vtscsi_dev;
440 	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
441 	sc->vtscsi_features = features;
442 }
443 
444 static int
445 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
446 {
447 	int nsegs;
448 
449 	nsegs = VTSCSI_MIN_SEGMENTS;
450 
451 	if (seg_max > 0) {
452 		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
453 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
454 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
455 	} else
456 		nsegs += 1;
457 
458 	return (nsegs);
459 }
460 
/*
 * Allocate the device's three virtqueues: control, event, and request.
 * Only the request virtqueue carries data payloads, so only it is
 * given an indirect segment count (vtscsi_max_nsegs).
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
483 
/*
 * Write the sense buffer and CDB sizes this driver uses into the
 * device configuration space.  Called at attach and again from
 * vtscsi_reinit() after a device reset.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
500 
501 static int
502 vtscsi_reinit(struct vtscsi_softc *sc)
503 {
504 	device_t dev;
505 	int error;
506 
507 	dev = sc->vtscsi_dev;
508 
509 	error = virtio_reinit(dev, sc->vtscsi_features);
510 	if (error == 0) {
511 		vtscsi_write_device_config(sc);
512 		vtscsi_reinit_event_vq(sc);
513 		virtio_reinit_complete(dev);
514 
515 		vtscsi_enable_vqs_intr(sc);
516 	}
517 
518 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
519 
520 	return (error);
521 }
522 
523 static int
524 vtscsi_alloc_cam(struct vtscsi_softc *sc)
525 {
526 	device_t dev;
527 	struct cam_devq *devq;
528 	int openings;
529 
530 	dev = sc->vtscsi_dev;
531 	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
532 
533 	devq = cam_simq_alloc(openings);
534 	if (devq == NULL) {
535 		device_printf(dev, "cannot allocate SIM queue\n");
536 		return (ENOMEM);
537 	}
538 
539 	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
540 	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
541 	    openings, devq);
542 	if (sc->vtscsi_sim == NULL) {
543 		cam_simq_free(devq);
544 		device_printf(dev, "cannot allocate SIM\n");
545 		return (ENOMEM);
546 	}
547 
548 	return (0);
549 }
550 
/*
 * Register the SIM with the XPT layer and create the wildcard bus
 * path.  The async callback registration is done with the lock
 * dropped (see the comment below); the fail path undoes whatever
 * succeeded, tracked by the 'registered' flag.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	/* Wildcard target/LUN path used for async events and rescans. */
	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	/*
	 * The async register apparently needs to be done without
	 * the lock held, otherwise it can recurse on the lock.
	 */
	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		/* Reacquire the lock; the fail path unlocks. */
		VTSCSI_LOCK(sc);
		goto fail;
	}

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
606 
/*
 * Tear down the CAM state created by vtscsi_alloc_cam() and
 * vtscsi_register_cam().  A non-NULL path implies the bus was
 * registered, so the async callback is removed and the bus
 * deregistered before the SIM itself is freed.
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		/* The '1' also frees the devq allocated in vtscsi_alloc_cam(). */
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
629 
630 static void
631 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
632 {
633 	struct cam_sim *sim;
634 	struct vtscsi_softc *sc;
635 
636 	sim = cb_arg;
637 	sc = cam_sim_softc(sim);
638 
639 	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
640 
641 	/*
642 	 * TODO Once QEMU supports event reporting, we should
643 	 *      (un)subscribe to events here.
644 	 */
645 	switch (code) {
646 	case AC_FOUND_DEVICE:
647 		break;
648 	case AC_LOST_DEVICE:
649 		break;
650 	}
651 }
652 
653 static int
654 vtscsi_register_async(struct vtscsi_softc *sc)
655 {
656 	struct ccb_setasync csa;
657 
658 	VTSCSI_LOCK_NOTOWNED(sc);
659 
660 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
661 	csa.ccb_h.func_code = XPT_SASYNC_CB;
662 	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
663 	csa.callback = vtscsi_cam_async;
664 	csa.callback_arg = sc->vtscsi_sim;
665 
666 	xpt_action((union ccb *) &csa);
667 
668 	return (csa.ccb_h.status);
669 }
670 
671 static void
672 vtscsi_deregister_async(struct vtscsi_softc *sc)
673 {
674 	struct ccb_setasync csa;
675 
676 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
677 	csa.ccb_h.func_code = XPT_SASYNC_CB;
678 	csa.event_enable = 0;
679 	csa.callback = vtscsi_cam_async;
680 	csa.callback_arg = sc->vtscsi_sim;
681 
682 	xpt_action((union ccb *) &csa);
683 }
684 
/*
 * CAM action entry point: dispatch an incoming CCB to its handler.
 * Called by CAM with the SIM lock (our softc mutex) held.  Handlers
 * are responsible for completing the CCB with xpt_done().
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings are fixed; nothing is settable. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
751 
/*
 * CAM poll entry point: service virtqueue completions without
 * relying on interrupts (e.g. for dumping).
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
761 
762 static void
763 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
764     union ccb *ccb)
765 {
766 	struct ccb_hdr *ccbh;
767 	struct ccb_scsiio *csio;
768 	int error;
769 
770 	ccbh = &ccb->ccb_h;
771 	csio = &ccb->csio;
772 
773 	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
774 		error = EINVAL;
775 		ccbh->status = CAM_REQ_INVALID;
776 		goto done;
777 	}
778 
779 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
780 	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
781 		error = EINVAL;
782 		ccbh->status = CAM_REQ_INVALID;
783 		goto done;
784 	}
785 
786 	error = vtscsi_start_scsi_cmd(sc, ccb);
787 
788 done:
789 	if (error) {
790 		vtscsi_dprintf(sc, VTSCSI_ERROR,
791 		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
792 		xpt_done(ccb);
793 	}
794 }
795 
796 static void
797 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
798 {
799 	struct ccb_trans_settings *cts;
800 	struct ccb_trans_settings_scsi *scsi;
801 
802 	cts = &ccb->cts;
803 	scsi = &cts->proto_specific.scsi;
804 
805 	cts->protocol = PROTO_SCSI;
806 	cts->protocol_version = SCSI_REV_SPC3;
807 	cts->transport = XPORT_SAS;
808 	cts->transport_version = 0;
809 
810 	scsi->valid = CTS_SCSI_VALID_TQ;
811 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
812 
813 	ccb->ccb_h.status = CAM_REQ_CMP;
814 	xpt_done(ccb);
815 }
816 
817 static void
818 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
819 {
820 	int error;
821 
822 	error = vtscsi_reset_bus(sc);
823 	if (error == 0)
824 		ccb->ccb_h.status = CAM_REQ_CMP;
825 	else
826 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
827 
828 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
829 	    error, ccb, ccb->ccb_h.status);
830 
831 	xpt_done(ccb);
832 }
833 
/*
 * Handle XPT_RESET_DEV by issuing a LOGICAL_UNIT_RESET task management
 * command.  On success the request's completion handler finishes the
 * CCB; on failure the request is returned to the free list and the
 * CCB completed here.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* No free requests; freeze the simq until one completes. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
869 
/*
 * Handle XPT_ABORT by issuing an ABORT_TASK task management command.
 * Mirrors vtscsi_cam_reset_dev(): on success the completion handler
 * finishes the CCB, on failure it is completed here.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* No free requests; freeze the simq until one completes. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
905 
906 static void
907 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
908     union ccb *ccb)
909 {
910 	device_t dev;
911 	struct ccb_pathinq *cpi;
912 
913 	dev = sc->vtscsi_dev;
914 	cpi = &ccb->cpi;
915 
916 	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
917 
918 	cpi->version_num = 1;
919 	cpi->hba_inquiry = PI_TAG_ABLE;
920 	cpi->target_sprt = 0;
921 	cpi->hba_misc = PIM_SEQSCAN;
922 	if (vtscsi_bus_reset_disable != 0)
923 		cpi->hba_misc |= PIM_NOBUSRESET;
924 	cpi->hba_eng_cnt = 0;
925 
926 	cpi->max_target = sc->vtscsi_max_target;
927 	cpi->max_lun = sc->vtscsi_max_lun;
928 	cpi->initiator_id = VTSCSI_INITIATOR_ID;
929 
930 	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
931 	strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
932 	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
933 
934 	cpi->unit_number = cam_sim_unit(sim);
935 	cpi->bus_id = cam_sim_bus(sim);
936 
937 	cpi->base_transfer_speed = 300000;
938 
939 	cpi->protocol = PROTO_SCSI;
940 	cpi->protocol_version = SCSI_REV_SPC3;
941 	cpi->transport = XPORT_SAS;
942 	cpi->transport_version = 0;
943 
944 	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
945 	    PAGE_SIZE;
946 
947 	cpi->hba_vendor = virtio_get_vendor(dev);
948 	cpi->hba_device = virtio_get_device(dev);
949 	cpi->hba_subvendor = virtio_get_subvendor(dev);
950 	cpi->hba_subdevice = virtio_get_subdevice(dev);
951 
952 	ccb->ccb_h.status = CAM_REQ_CMP;
953 	xpt_done(ccb);
954 }
955 
/*
 * Append a CCB's data buffer to the scatter/gather list, decoding the
 * CAM data flags: either a single virtual or physical buffer, or an
 * S/G list of virtual or physical bus_dma segments.  Returns the
 * first sglist_append*() error, or 0.
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {

		/* Single contiguous buffer, virtual or physical. */
		if ((ccbh->flags & CAM_DATA_PHYS) == 0)
			error = sglist_append(sg,
			    csio->data_ptr, csio->dxfer_len);
		else
			error = sglist_append_phys(sg,
			    (vm_paddr_t)(vm_offset_t) csio->data_ptr,
			    csio->dxfer_len);
	} else {

		/* data_ptr is an array of bus_dma segments; stop on error. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];

			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
				error = sglist_append(sg,
				    (void *)(vm_offset_t) dseg->ds_addr,
				    dseg->ds_len);
			else
				error = sglist_append_phys(sg,
				    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
	}

	return (error);
}
993 
/*
 * Build the scatter/gather list for a SCSI command in the order the
 * device expects: the request header and any write (out) data are
 * device-readable, followed by the response structure and any read
 * (in) data, which are device-writable.  On success *readable and
 * *writable hold the respective segment counts.  Returns EFBIG with
 * the CCB status set to CAM_REQ_TOO_BIG if the data does not fit.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	/* First append after reset; cannot fail given maxseg >= minimum. */
	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1049 
/*
 * Enqueue a SCSI command on the request virtqueue: build the request
 * header and scatter/gather list, submit the descriptor, notify the
 * host, and arm the timeout callout if the CCB has one.  On an
 * enqueue failure the CCB is marked for requeue and the simq frozen
 * until a descriptor slot opens up; the caller retains the request.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Poison the response so a missing host write is detectable. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	/* Link the CCB back to its request for abort handling. */
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* CCB timeout is in milliseconds; convert to ticks. */
		callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
		    vtscsi_timedout_scsi_cmd, req);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1103 
/*
 * Dispatch a new SCSI I/O CCB: dequeue a free request, attach the CCB,
 * and execute it. If no free request is available, the CCB is flagged
 * for requeue and the SIMQ frozen; if execution fails, the request is
 * returned to the free list. Returns 0 or an errno.
 */
static int
vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	int error;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		return (ENOBUFS);
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_scsi_cmd(sc, req);
	if (error)
		vtscsi_enqueue_request(sc, req);

	return (error);
}
1125 
/*
 * Completion handler for the ABORT_TASK TMF issued for a timed out
 * command. If the timed out request is still outstanding and the host
 * did not complete the abort, the bus is reset as a last resort
 * (unless a detach or reset is already in progress).
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1164 
/*
 * Issue an asynchronous ABORT_TASK TMF on the control virtqueue for a
 * command that has timed out. Completion is handled by
 * vtscsi_complete_abort_timedout_scsi_cmd(). Returns 0 on successful
 * submission or an errno.
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The tag identifies the command to abort to the host. */
	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1212 
/*
 * Callout handler fired when a SCSI command's timeout expires. After
 * ruling out races with completion, mark the request timed out and try
 * to abort it via a TMF; fall back to a full bus reset if the abort
 * cannot be issued. Runs with the softc mutex held (callout_init_mtx).
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1254 
1255 static cam_status
1256 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1257 {
1258 	cam_status status;
1259 
1260 	switch (cmd_resp->response) {
1261 	case VIRTIO_SCSI_S_OK:
1262 		status = CAM_REQ_CMP;
1263 		break;
1264 	case VIRTIO_SCSI_S_OVERRUN:
1265 		status = CAM_DATA_RUN_ERR;
1266 		break;
1267 	case VIRTIO_SCSI_S_ABORTED:
1268 		status = CAM_REQ_ABORTED;
1269 		break;
1270 	case VIRTIO_SCSI_S_BAD_TARGET:
1271 		status = CAM_TID_INVALID;
1272 		break;
1273 	case VIRTIO_SCSI_S_RESET:
1274 		status = CAM_SCSI_BUS_RESET;
1275 		break;
1276 	case VIRTIO_SCSI_S_BUSY:
1277 		status = CAM_SCSI_BUSY;
1278 		break;
1279 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1280 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1281 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1282 		status = CAM_SCSI_IT_NEXUS_LOST;
1283 		break;
1284 	default: /* VIRTIO_SCSI_S_FAILURE */
1285 		status = CAM_REQ_CMP_ERR;
1286 		break;
1287 	}
1288 
1289 	return (status);
1290 }
1291 
1292 static cam_status
1293 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1294     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1295 {
1296 	cam_status status;
1297 
1298 	csio->scsi_status = cmd_resp->status;
1299 	csio->resid = cmd_resp->resid;
1300 
1301 	if (csio->scsi_status == SCSI_STATUS_OK)
1302 		status = CAM_REQ_CMP;
1303 	else
1304 		status = CAM_SCSI_STATUS_ERROR;
1305 
1306 	if (cmd_resp->sense_len > 0) {
1307 		status |= CAM_AUTOSNS_VALID;
1308 
1309 		if (cmd_resp->sense_len < csio->sense_len)
1310 			csio->sense_resid = csio->sense_len -
1311 			    cmd_resp->sense_len;
1312 		else
1313 			csio->sense_resid = 0;
1314 
1315 		bzero(&csio->sense_data, sizeof(csio->sense_data));
1316 		memcpy(cmd_resp->sense, &csio->sense_data,
1317 		    csio->sense_len - csio->sense_resid);
1318 	}
1319 
1320 	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1321 	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1322 	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1323 
1324 	return (status);
1325 }
1326 
/*
 * Completion handler for a SCSI command dequeued from the request
 * virtqueue: stop any pending timeout callout, translate the response
 * into a CAM status, freeze the device queue on error, thaw the SIMQ
 * if resources are available again, complete the CCB, and return the
 * request to the free list.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort issued for a timed out command reports timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1367 
/*
 * Busy-wait for a polled control request to complete by repeatedly
 * servicing the control virtqueue until the request's COMPLETE flag
 * is set by vtscsi_complete_request().
 */
static void
vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	/* XXX We probably shouldn't poll forever. */
	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
	do
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);

	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
}
1380 
/*
 * Enqueue a request on the control virtqueue and notify the host.
 * `flag` selects polled (spin until complete) or async (completion via
 * the request's vsr_complete callback) execution. Returns 0, EAGAIN
 * when the virtqueue is out of descriptors, or another errno.
 */
static int
vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
    struct sglist *sg, int readable, int writable, int flag)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtscsi_control_vq;

	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		/*
		 * Return EAGAIN when the virtqueue does not have enough
		 * descriptors available.
		 */
		if (error == ENOSPC || error == EMSGSIZE)
			error = EAGAIN;

		return (error);
	}

	virtqueue_notify(vq);
	if (flag == VTSCSI_EXECUTE_POLL)
		vtscsi_poll_ctrl_req(sc, req);

	return (0);
}
1410 
1411 static void
1412 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1413     struct vtscsi_request *req)
1414 {
1415 	union ccb *ccb;
1416 	struct ccb_hdr *ccbh;
1417 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1418 
1419 	ccb = req->vsr_ccb;
1420 	ccbh = &ccb->ccb_h;
1421 	tmf_resp = &req->vsr_tmf_resp;
1422 
1423 	switch (tmf_resp->response) {
1424 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1425 		ccbh->status = CAM_REQ_CMP;
1426 		break;
1427 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1428 		ccbh->status = CAM_UA_ABORT;
1429 		break;
1430 	default:
1431 		ccbh->status = CAM_REQ_CMP_ERR;
1432 		break;
1433 	}
1434 
1435 	xpt_done(ccb);
1436 	vtscsi_enqueue_request(sc, req);
1437 }
1438 
/*
 * Handle an XPT_ABORT CCB by issuing an asynchronous ABORT_TASK TMF
 * for the target CCB's in-flight request. Returns EINVAL if the CCB
 * to abort is not a SCSI I/O with an attached request, EALREADY if
 * that request is no longer in flight, or the control queue submit
 * result.
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The tag identifies the command to abort to the host. */
	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1496 
/*
 * Completion handler for an XPT_RESET_DEV TMF: set the CCB status from
 * the host's response, announce the bus device reset to CAM on success,
 * complete the CCB, and return the request to the free list.
 */
static void
vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	union ccb *ccb;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

	ccb = req->vsr_ccb;
	ccbh = &ccb->ccb_h;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
	    req, ccb, tmf_resp->response);

	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
		ccbh->status = CAM_REQ_CMP;
		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
		    ccbh->target_lun);
	} else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
	vtscsi_enqueue_request(sc, req);
}
1522 
/*
 * Handle an XPT_RESET_DEV CCB by issuing an asynchronous TMF on the
 * control virtqueue: an I_T nexus reset when the LUN is the wildcard,
 * otherwise a logical unit reset. Returns the control queue submit
 * result.
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}
1563 
1564 static void
1565 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1566 {
1567 
1568 	*target_id = lun[1];
1569 	*lun_id = (lun[2] << 8) | lun[3];
1570 }
1571 
1572 static void
1573 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1574 {
1575 
1576 	lun[0] = 1;
1577 	lun[1] = ccbh->target_id;
1578 	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1579 	lun[3] = (ccbh->target_lun >> 8) & 0xFF;
1580 }
1581 
/*
 * Populate a VirtIO SCSI command request header from the CCB: LUN
 * address, tag (the CCB pointer, echoed back in the response), task
 * attribute derived from the CCB's tag action, and the CDB bytes.
 */
static void
vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
	uint8_t attr;

	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		attr = VIRTIO_SCSI_S_HEAD;
		break;
	case MSG_ORDERED_Q_TAG:
		attr = VIRTIO_SCSI_S_ORDERED;
		break;
	case MSG_ACA_TASK:
		attr = VIRTIO_SCSI_S_ACA;
		break;
	default: /* MSG_SIMPLE_Q_TAG */
		attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}

	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
	cmd_req->tag = (uintptr_t) csio;
	cmd_req->task_attr = attr;

	/* The CDB may be inline in the CCB or referenced by pointer. */
	memcpy(cmd_req->cdb,
	    csio->ccb_h.flags & CAM_CDB_POINTER ?
	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
	    csio->cdb_len);
}
1612 
/*
 * Populate a task management function request: LUN address from the
 * CCB, the TMF subtype (abort, LUN reset, ...), and the tag naming
 * the command the TMF applies to (zero when not applicable).
 */
static void
vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = subtype;
	tmf_req->tag = tag;
}
1624 
1625 static void
1626 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1627 {
1628 	int frozen;
1629 
1630 	frozen = sc->vtscsi_frozen;
1631 
1632 	if (reason & VTSCSI_REQUEST &&
1633 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1634 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1635 
1636 	if (reason & VTSCSI_REQUEST_VQ &&
1637 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1638 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1639 
1640 	/* Freeze the SIMQ if transitioned to frozen. */
1641 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1642 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1643 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1644 	}
1645 }
1646 
1647 static int
1648 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1649 {
1650 	int thawed;
1651 
1652 	if (sc->vtscsi_frozen == 0 || reason == 0)
1653 		return (0);
1654 
1655 	if (reason & VTSCSI_REQUEST &&
1656 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1657 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1658 
1659 	if (reason & VTSCSI_REQUEST_VQ &&
1660 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1661 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1662 
1663 	thawed = sc->vtscsi_frozen == 0;
1664 	if (thawed != 0)
1665 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1666 
1667 	return (thawed);
1668 }
1669 
1670 static void
1671 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1672     target_id_t target_id, lun_id_t lun_id)
1673 {
1674 	struct cam_path *path;
1675 
1676 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1677 		return;
1678 
1679 	/* Use the wildcard path from our softc for bus announcements. */
1680 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1681 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1682 		return;
1683 	}
1684 
1685 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1686 	    target_id, lun_id) != CAM_REQ_CMP) {
1687 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1688 		return;
1689 	}
1690 
1691 	xpt_async(ac_code, path, NULL);
1692 	xpt_free_path(path);
1693 }
1694 
/*
 * Kick off an asynchronous CAM rescan of the given target/LUN. The
 * allocated CCB is consumed by xpt_rescan() on success and freed here
 * only when path creation fails.
 */
static void
vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
    lun_id_t lun_id)
{
	union ccb *ccb;
	cam_status status;

	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
		return;
	}

	status = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
	if (status != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}

	xpt_rescan(ccb);
}
1717 
/* Rescan the entire bus using the wildcard target and LUN. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1724 
/*
 * Handle a transport reset event from the host by rescanning the
 * affected target/LUN so CAM notices added or removed devices.
 */
static void
vtscsi_transport_reset_event(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	target_id_t target_id;
	lun_id_t lun_id;

	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		vtscsi_execute_rescan(sc, target_id, lun_id);
		break;
	default:
		device_printf(sc->vtscsi_dev,
		    "unhandled transport event reason: %d\n", event->reason);
		break;
	}
}
1745 
/*
 * Process one event dequeued from the event virtqueue. A set
 * EVENTS_MISSED flag means the host dropped events, so rescan the
 * whole bus instead of acting on this event. The buffer is requeued
 * on the virtqueue afterwards.
 */
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
	int error;

	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
		switch (event->event) {
		case VIRTIO_SCSI_T_TRANSPORT_RESET:
			vtscsi_transport_reset_event(sc, event);
			break;
		default:
			device_printf(sc->vtscsi_dev,
			    "unhandled event: %d\n", event->event);
			break;
		}
	} else
		vtscsi_execute_rescan_bus(sc);

	/*
	 * This should always be successful since the buffer
	 * was just dequeued.
	 */
	error = vtscsi_enqueue_event_buf(sc, event);
	KASSERT(error == 0,
	    ("cannot requeue event buffer: %d", error));
}
1772 
/*
 * Zero an event buffer and post it (write-only) on the event
 * virtqueue so the host can fill it in. Returns 0 or an errno.
 */
static int
vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	struct sglist *sg;
	struct virtqueue *vq;
	int size, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_event_vq;
	size = sc->vtscsi_event_buf_size;

	bzero(event, size);

	sglist_reset(sg);
	error = sglist_append(sg, event, size);
	if (error)
		return (error);

	/* The buffer is entirely host-writable: zero readable segments. */
	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
	if (error)
		return (error);

	virtqueue_notify(vq);

	return (0);
}
1800 
/*
 * Post the initial set of event buffers on the event virtqueue.
 * Returns 0 when at least one buffer was posted (or when events are
 * disabled entirely), otherwise the enqueue error.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1837 
/*
 * Repost the event buffers after a device reinit (e.g. bus reset).
 * A no-op when hotplug events are disabled or the buffers are too
 * small to hold an event.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1858 
/*
 * Remove all buffers from the event virtqueue. The buffers live in
 * the softc's static array, so nothing is freed here.
 */
static void
vtscsi_drain_event_vq(struct vtscsi_softc *sc)
{
	struct virtqueue *vq;
	int last;

	vq = sc->vtscsi_event_vq;
	last = 0;

	while (virtqueue_drain(vq, &last) != NULL)
		;

	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
}
1873 
/*
 * Service all completed requests on the request and control
 * virtqueues. The softc mutex must be held by the caller.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1885 
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1894 
/*
 * Cancel a request drained from a virtqueue: stop or drain its
 * timeout callout, complete its CCB with CAM_NO_HBA (detach) or
 * CAM_REQUEUE_REQ (bus reset), and return the request to the free
 * list. The lock discipline differs by cause; see the comment below.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1938 
/*
 * Remove every outstanding request from a virtqueue, canceling each
 * one so its CCB is completed and the request is freed back.
 */
static void
vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;
	int last;

	last = 0;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);

	while ((req = virtqueue_drain(vq, &last)) != NULL)
		vtscsi_cancel_request(sc, req);

	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}
1954 
/* Drain all three virtqueues: control, request, and event. */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
1966 
/* Quiesce the device: mask virtqueue interrupts and stop VirtIO. */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
1974 
/*
 * Reset the bus: stop the device, complete and drain all outstanding
 * requests (so CAM retries them), thaw the SIMQ, reinitialize the
 * device, and announce the reset to CAM. VTSCSI_FLAG_RESET is held
 * set for the duration so other paths do not trigger nested resets.
 * Called with the softc mutex held. Returns the reinit result.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2020 
/*
 * One-time initialization of a request: link it to the softc and set
 * up its timeout callout under the softc mutex. The INVARIANTS check
 * asserts the request and response structures each fit in a single
 * sglist segment (i.e. do not cross a page boundary).
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2038 
/*
 * Allocate the pool of request structures and place them on the free
 * list. Returns ENOMEM if any allocation fails (already-allocated
 * requests are left on the free list for the caller to clean up).
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}
2070 
/*
 * Free every request on the free list. All requests must be idle by
 * now; the KASSERTs catch active callouts and leaked requests.
 */
static void
vtscsi_free_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
		KASSERT(callout_active(&req->vsr_callout) == 0,
		    ("request callout still active"));

		sc->vtscsi_nrequests--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
	    sc->vtscsi_nrequests));
}
2087 
/*
 * Return a request to the free list: reset its per-command state,
 * zero the request/response unions, and thaw the SIMQ if it was
 * frozen waiting for a free request.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2117 
/*
 * Take a request from the free list and mark it in use. Returns NULL
 * (and counts the shortage in the stats) when the list is empty.
 */
static struct vtscsi_request *
vtscsi_dequeue_request(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	req = TAILQ_FIRST(&sc->vtscsi_req_free);
	if (req != NULL) {
		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
	} else
		sc->vtscsi_stats.dequeue_no_requests++;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	return (req);
}
2134 
/*
 * Complete a request dequeued from a virtqueue: flag polled requests
 * as complete (so vtscsi_poll_ctrl_req() stops spinning) and invoke
 * the request's completion callback, if any.
 */
static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}
2145 
/*
 * Dequeue and complete every finished request on a virtqueue. The
 * softc mutex must be held.
 */
static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}
2156 
/*
 * Taskqueue handler for the control virtqueue: complete pending
 * requests, then re-enable the virtqueue interrupt. If more work
 * arrived while re-enabling, requeue this task to pick it up.
 */
static void
vtscsi_control_vq_task(void *arg, int pending)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = arg;
	vq = sc->vtscsi_control_vq;

	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		taskqueue_enqueue_fast(sc->vtscsi_tq,
		    &sc->vtscsi_control_intr_task);
		return;
	}

	VTSCSI_UNLOCK(sc);
}
2180 
2181 static void
2182 vtscsi_event_vq_task(void *arg, int pending)
2183 {
2184 	struct vtscsi_softc *sc;
2185 	struct virtqueue *vq;
2186 	struct virtio_scsi_event *event;
2187 
2188 	sc = arg;
2189 	vq = sc->vtscsi_event_vq;
2190 
2191 	VTSCSI_LOCK(sc);
2192 
2193 	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2194 		vtscsi_handle_event(sc, event);
2195 
2196 	if (virtqueue_enable_intr(vq) != 0) {
2197 		virtqueue_disable_intr(vq);
2198 		VTSCSI_UNLOCK(sc);
2199 		taskqueue_enqueue_fast(sc->vtscsi_tq,
2200 		    &sc->vtscsi_control_intr_task);
2201 		return;
2202 	}
2203 
2204 	VTSCSI_UNLOCK(sc);
2205 }
2206 
/*
 * Taskqueue handler for the request virtqueue: complete finished
 * commands, then re-enable the virtqueue interrupt. If more work
 * arrived while re-enabling, requeue this task to pick it up.
 */
static void
vtscsi_request_vq_task(void *arg, int pending)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = arg;
	vq = sc->vtscsi_request_vq;

	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		taskqueue_enqueue_fast(sc->vtscsi_tq,
		    &sc->vtscsi_request_intr_task);
		return;
	}

	VTSCSI_UNLOCK(sc);
}
2230 
2231 static int
2232 vtscsi_control_vq_intr(void *xsc)
2233 {
2234 	struct vtscsi_softc *sc;
2235 
2236 	sc = xsc;
2237 
2238 	virtqueue_disable_intr(sc->vtscsi_control_vq);
2239 	taskqueue_enqueue_fast(sc->vtscsi_tq,
2240 	    &sc->vtscsi_control_intr_task);
2241 
2242 	return (1);
2243 }
2244 
2245 static int
2246 vtscsi_event_vq_intr(void *xsc)
2247 {
2248 	struct vtscsi_softc *sc;
2249 
2250 	sc = xsc;
2251 
2252 	virtqueue_disable_intr(sc->vtscsi_event_vq);
2253 	taskqueue_enqueue_fast(sc->vtscsi_tq,
2254 	    &sc->vtscsi_event_intr_task);
2255 
2256 	return (1);
2257 }
2258 
2259 static int
2260 vtscsi_request_vq_intr(void *xsc)
2261 {
2262 	struct vtscsi_softc *sc;
2263 
2264 	sc = xsc;
2265 
2266 	virtqueue_disable_intr(sc->vtscsi_request_vq);
2267 	taskqueue_enqueue_fast(sc->vtscsi_tq,
2268 	    &sc->vtscsi_request_intr_task);
2269 
2270 	return (1);
2271 }
2272 
2273 static void
2274 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2275 {
2276 
2277 	virtqueue_disable_intr(sc->vtscsi_control_vq);
2278 	virtqueue_disable_intr(sc->vtscsi_event_vq);
2279 	virtqueue_disable_intr(sc->vtscsi_request_vq);
2280 }
2281 
2282 static void
2283 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2284 {
2285 
2286 	virtqueue_enable_intr(sc->vtscsi_control_vq);
2287 	virtqueue_enable_intr(sc->vtscsi_event_vq);
2288 	virtqueue_enable_intr(sc->vtscsi_request_vq);
2289 }
2290 
2291 static void
2292 vtscsi_get_tunables(struct vtscsi_softc *sc)
2293 {
2294 	char tmpstr[64];
2295 
2296 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2297 
2298 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2299 	    device_get_unit(sc->vtscsi_dev));
2300 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2301 }
2302 
2303 static void
2304 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2305 {
2306 	device_t dev;
2307 	struct vtscsi_statistics *stats;
2308         struct sysctl_ctx_list *ctx;
2309 	struct sysctl_oid *tree;
2310 	struct sysctl_oid_list *child;
2311 
2312 	dev = sc->vtscsi_dev;
2313 	stats = &sc->vtscsi_stats;
2314 	ctx = device_get_sysctl_ctx(dev);
2315 	tree = device_get_sysctl_tree(dev);
2316 	child = SYSCTL_CHILDREN(tree);
2317 
2318 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2319 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2320 	    "Debug level");
2321 
2322 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2323 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2324 	    "SCSI command timeouts");
2325 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2326 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2327 	    "No available requests to dequeue");
2328 }
2329 
/*
 * printf-style debug helper that prefixes a message with the request's
 * CAM path (or a "noperiph" SIM identifier when no CCB is attached) and,
 * for SCSI I/O CCBs, the CDB and transfer length.  Silently returns if
 * req is NULL.  Formats into a fixed-length on-stack sbuf; output longer
 * than the buffer is truncated.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	/* Fixed-length sbuf over str[]; flags 0 means no auto-extend. */
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		/* No CCB: identify by SIM name/unit/bus instead of a path. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* Append the CDB text and the data transfer length. */
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2370