xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision abd6790c)
1 /*-
2  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /* Driver for VirtIO SCSI devices. */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/sglist.h>
39 #include <sys/sysctl.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/callout.h>
43 #include <sys/queue.h>
44 #include <sys/sbuf.h>
45 
46 #include <machine/stdarg.h>
47 
48 #include <machine/bus.h>
49 #include <machine/resource.h>
50 #include <sys/bus.h>
51 #include <sys/rman.h>
52 
53 #include <cam/cam.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_sim.h>
56 #include <cam/cam_periph.h>
57 #include <cam/cam_xpt_sim.h>
58 #include <cam/cam_debug.h>
59 #include <cam/scsi/scsi_all.h>
60 #include <cam/scsi/scsi_message.h>
61 
62 #include <dev/virtio/virtio.h>
63 #include <dev/virtio/virtqueue.h>
64 #include <dev/virtio/scsi/virtio_scsi.h>
65 #include <dev/virtio/scsi/virtio_scsivar.h>
66 
67 #include "virtio_if.h"
68 
69 static int	vtscsi_modevent(module_t, int, void *);
70 
71 static int	vtscsi_probe(device_t);
72 static int	vtscsi_attach(device_t);
73 static int	vtscsi_detach(device_t);
74 static int	vtscsi_suspend(device_t);
75 static int	vtscsi_resume(device_t);
76 
77 static void	vtscsi_negotiate_features(struct vtscsi_softc *);
78 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
79 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
80 static void	vtscsi_write_device_config(struct vtscsi_softc *);
81 static int	vtscsi_reinit(struct vtscsi_softc *);
82 
83 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
84 static int 	vtscsi_register_cam(struct vtscsi_softc *);
85 static void	vtscsi_free_cam(struct vtscsi_softc *);
86 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
87 static int	vtscsi_register_async(struct vtscsi_softc *);
88 static void	vtscsi_deregister_async(struct vtscsi_softc *);
89 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
90 static void	vtscsi_cam_poll(struct cam_sim *);
91 
92 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
93 		    union ccb *);
94 static void 	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
95 		    union ccb *);
96 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
97 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
98 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
99 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
100 		    struct cam_sim *, union ccb *);
101 
102 static int 	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
103 		    struct sglist *, struct ccb_scsiio *);
104 static int 	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
105 		    struct vtscsi_request *, int *, int *);
106 static int 	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
107 		    struct vtscsi_request *);
108 static int 	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
109 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
110 		    struct vtscsi_request *);
111 static int 	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
112 		    struct vtscsi_request *);
113 static void	vtscsi_timedout_scsi_cmd(void *);
114 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
115 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
116 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
117 static void 	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
118 		    struct vtscsi_request *);
119 
120 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
121 		    struct vtscsi_request *);
122 static int 	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
123 		    struct vtscsi_request *, struct sglist *, int, int, int);
124 static void 	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
125 		    struct vtscsi_request *);
126 static int 	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
127 		    struct vtscsi_request *);
128 static int 	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
129 		    struct vtscsi_request *);
130 
131 static void 	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
132 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
133 static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
134 		    struct virtio_scsi_cmd_req *);
135 static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
136 		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
137 
138 static void 	vtscsi_freeze_simq(struct vtscsi_softc *, int);
139 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
140 
141 static void 	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
142 		    lun_id_t);
143 static void 	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
144 		    lun_id_t);
145 static void 	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
146 
147 static void 	vtscsi_handle_event(struct vtscsi_softc *,
148 		    struct virtio_scsi_event *);
149 static int 	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
150 		    struct virtio_scsi_event *);
151 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
152 static void 	vtscsi_reinit_event_vq(struct vtscsi_softc *);
153 static void 	vtscsi_drain_event_vq(struct vtscsi_softc *);
154 
155 static void 	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
156 static void 	vtscsi_complete_vqs(struct vtscsi_softc *);
157 static void 	vtscsi_drain_vqs(struct vtscsi_softc *);
158 static void 	vtscsi_cancel_request(struct vtscsi_softc *,
159 		    struct vtscsi_request *);
160 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
161 static void	vtscsi_stop(struct vtscsi_softc *);
162 static int	vtscsi_reset_bus(struct vtscsi_softc *);
163 
164 static void 	vtscsi_init_request(struct vtscsi_softc *,
165 		    struct vtscsi_request *);
166 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
167 static void	vtscsi_free_requests(struct vtscsi_softc *);
168 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
169 		    struct vtscsi_request *);
170 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
171 
172 static void	vtscsi_complete_request(struct vtscsi_request *);
173 static void 	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
174 
175 static void	vtscsi_control_vq_intr(void *);
176 static void	vtscsi_event_vq_intr(void *);
177 static void	vtscsi_request_vq_intr(void *);
178 static void 	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
179 static void 	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
180 
181 static void 	vtscsi_get_tunables(struct vtscsi_softc *);
182 static void 	vtscsi_add_sysctl(struct vtscsi_softc *);
183 
184 static void 	vtscsi_printf_req(struct vtscsi_request *, const char *,
185 		    const char *, ...);
186 
/* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

/* Human-readable names for the VirtIO SCSI feature bits (for dmesg/sysctl). */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},

	{ 0, NULL }
};
207 
/* Newbus device interface methods for the vtscsi driver. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

/* Attach on the virtio_pci bus; depends on the virtio core and CAM. */
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
231 
232 static int
233 vtscsi_modevent(module_t mod, int type, void *unused)
234 {
235 	int error;
236 
237 	switch (type) {
238 	case MOD_LOAD:
239 	case MOD_QUIESCE:
240 	case MOD_UNLOAD:
241 	case MOD_SHUTDOWN:
242 		error = 0;
243 		break;
244 	default:
245 		error = EOPNOTSUPP;
246 		break;
247 	}
248 
249 	return (error);
250 }
251 
252 static int
253 vtscsi_probe(device_t dev)
254 {
255 
256 	if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
257 		return (ENXIO);
258 
259 	device_set_desc(dev, "VirtIO SCSI Adapter");
260 
261 	return (BUS_PROBE_DEFAULT);
262 }
263 
/*
 * Attach: negotiate features, size the driver from the device's config
 * space, then allocate the sglist, virtqueues, request pool, and CAM
 * state, hook up interrupts, and finally register with CAM. On any
 * failure the partially constructed softc is torn down by falling
 * through to vtscsi_detach().
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtscsi_feature_desc);
	vtscsi_negotiate_features(sc);

	/* Cache the negotiated features as softc flags for quick tests. */
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	/* Pull the device-advertised limits out of the config space. */
	virtio_read_device_config(dev, 0, &scsicfg,
	    sizeof(struct virtio_scsi_config));

	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	/* Unwind everything allocated above on any failure. */
	if (error)
		vtscsi_detach(dev);

	return (error);
}
356 
/*
 * Detach: mark the softc as detaching so incoming CCBs are rejected,
 * stop the device, then drain and free all resources. This is also
 * used by vtscsi_attach() to unwind a partial attach, so each teardown
 * step below must tolerate NULL/absent state.
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	/* Finish outstanding work, then discard whatever remains queued. */
	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
385 
static int
vtscsi_suspend(device_t dev)
{

	/* No driver-specific state needs to be saved. */
	return (0);
}
392 
static int
vtscsi_resume(device_t dev)
{

	/* No driver-specific state needs to be restored. */
	return (0);
}
399 
400 static void
401 vtscsi_negotiate_features(struct vtscsi_softc *sc)
402 {
403 	device_t dev;
404 	uint64_t features;
405 
406 	dev = sc->vtscsi_dev;
407 	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
408 	sc->vtscsi_features = features;
409 }
410 
411 static int
412 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
413 {
414 	int nsegs;
415 
416 	nsegs = VTSCSI_MIN_SEGMENTS;
417 
418 	if (seg_max > 0) {
419 		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
420 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
421 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
422 	} else
423 		nsegs += 1;
424 
425 	return (nsegs);
426 }
427 
/*
 * Allocate the three virtqueues: control, event, and request. Only the
 * request virtqueue is given a non-zero indirect descriptor limit
 * (vtscsi_max_nsegs); the control and event queues pass 0.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
450 
/*
 * Tell the device which sense buffer and CDB sizes this driver uses by
 * writing them into the device's config space.
 */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
467 
/*
 * Re-initialize the device after a reset (e.g. a bus reset): replay the
 * previously negotiated features, rewrite the device config, repopulate
 * the event virtqueue, and re-enable virtqueue interrupts.
 */
static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		vtscsi_reinit_event_vq(sc);
		virtio_reinit_complete(dev);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}
489 
/*
 * Allocate the CAM SIM and its device queue. The queue depth is the
 * request pool size minus VTSCSI_RESERVED_REQUESTS, keeping some
 * requests in reserve (e.g. for task management commands).
 */
static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	if (sc->vtscsi_sim == NULL) {
		/* cam_sim_alloc() did not take ownership of devq. */
		cam_simq_free(devq);
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}
517 
/*
 * Register the SIM with the CAM transport layer and create the wildcard
 * bus path. The async callback registration must happen with the softc
 * lock dropped, which briefly opens a window where CAM can call into
 * the driver before attach has fully completed.
 *
 * On failure any partial registration (path, bus) is undone and an
 * errno is returned.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	/* Wildcard path covering every target/LUN on this bus. */
	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	/*
	 * The async register apparently needs to be done without
	 * the lock held, otherwise it can recurse on the lock.
	 */
	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		/* Reacquire so the fail path's unlock stays balanced. */
		VTSCSI_LOCK(sc);
		goto fail;
	}

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
573 
/*
 * Undo vtscsi_register_cam()/vtscsi_alloc_cam(): deregister the async
 * callback, free the bus path, deregister the bus, and free the SIM.
 * Safe to call on a partially initialized softc (NULL checks below).
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
596 
597 static void
598 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
599 {
600 	struct cam_sim *sim;
601 	struct vtscsi_softc *sc;
602 
603 	sim = cb_arg;
604 	sc = cam_sim_softc(sim);
605 
606 	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
607 
608 	/*
609 	 * TODO Once QEMU supports event reporting, we should
610 	 *      (un)subscribe to events here.
611 	 */
612 	switch (code) {
613 	case AC_FOUND_DEVICE:
614 		break;
615 	case AC_LOST_DEVICE:
616 		break;
617 	}
618 }
619 
/*
 * Subscribe to AC_FOUND_DEVICE/AC_LOST_DEVICE async events, delivered
 * to vtscsi_cam_async() with the SIM as the callback argument. Must be
 * called without the softc lock held. Returns the CAM status of the
 * XPT_SASYNC_CB action.
 */
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	VTSCSI_LOCK_NOTOWNED(sc);

	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);

	return (csa.ccb_h.status);
}
637 
/*
 * Unsubscribe from async events by re-registering the callback with an
 * empty event mask.
 */
static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = vtscsi_cam_async;
	csa.callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *) &csa);
}
651 
/*
 * CAM action entry point: dispatch an incoming CCB by its function
 * code. Called by CAM with the SIM lock (VTSCSI_MTX) held.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings are fixed; nothing to set. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		/* Let CAM compute a default geometry. */
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
718 
/*
 * CAM polling entry point (e.g. for dumps): process any pending
 * completions on all virtqueues without relying on interrupts.
 */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
728 
729 static void
730 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
731     union ccb *ccb)
732 {
733 	struct ccb_hdr *ccbh;
734 	struct ccb_scsiio *csio;
735 	int error;
736 
737 	ccbh = &ccb->ccb_h;
738 	csio = &ccb->csio;
739 
740 	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
741 		error = EINVAL;
742 		ccbh->status = CAM_REQ_INVALID;
743 		goto done;
744 	}
745 
746 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
747 	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
748 		error = EINVAL;
749 		ccbh->status = CAM_REQ_INVALID;
750 		goto done;
751 	}
752 
753 	error = vtscsi_start_scsi_cmd(sc, ccb);
754 
755 done:
756 	if (error) {
757 		vtscsi_dprintf(sc, VTSCSI_ERROR,
758 		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
759 		xpt_done(ccb);
760 	}
761 }
762 
763 static void
764 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
765 {
766 	struct ccb_trans_settings *cts;
767 	struct ccb_trans_settings_scsi *scsi;
768 
769 	cts = &ccb->cts;
770 	scsi = &cts->proto_specific.scsi;
771 
772 	cts->protocol = PROTO_SCSI;
773 	cts->protocol_version = SCSI_REV_SPC3;
774 	cts->transport = XPORT_SAS;
775 	cts->transport_version = 0;
776 
777 	scsi->valid = CTS_SCSI_VALID_TQ;
778 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
779 
780 	ccb->ccb_h.status = CAM_REQ_CMP;
781 	xpt_done(ccb);
782 }
783 
784 static void
785 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
786 {
787 	int error;
788 
789 	error = vtscsi_reset_bus(sc);
790 	if (error == 0)
791 		ccb->ccb_h.status = CAM_REQ_CMP;
792 	else
793 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
794 
795 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
796 	    error, ccb, ccb->ccb_h.status);
797 
798 	xpt_done(ccb);
799 }
800 
/*
 * XPT_RESET_DEV: issue a device reset via vtscsi_execute_reset_dev_cmd().
 * If no request structure is free, freeze the simq and fail the CCB with
 * CAM_RESRC_UNAVAIL so CAM can retry later. On success the CCB completes
 * asynchronously through the control virtqueue.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
836 
/*
 * XPT_ABORT: issue an abort task command via
 * vtscsi_execute_abort_task_cmd(). Mirrors vtscsi_cam_reset_dev():
 * freeze the simq and return CAM_RESRC_UNAVAIL when no request
 * structure is available; on success the CCB completes asynchronously.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
872 
873 static void
874 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
875     union ccb *ccb)
876 {
877 	device_t dev;
878 	struct ccb_pathinq *cpi;
879 
880 	dev = sc->vtscsi_dev;
881 	cpi = &ccb->cpi;
882 
883 	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
884 
885 	cpi->version_num = 1;
886 	cpi->hba_inquiry = PI_TAG_ABLE;
887 	cpi->target_sprt = 0;
888 	cpi->hba_misc = PIM_SEQSCAN;
889 	if (vtscsi_bus_reset_disable != 0)
890 		cpi->hba_misc |= PIM_NOBUSRESET;
891 	cpi->hba_eng_cnt = 0;
892 
893 	cpi->max_target = sc->vtscsi_max_target;
894 	cpi->max_lun = sc->vtscsi_max_lun;
895 	cpi->initiator_id = VTSCSI_INITIATOR_ID;
896 
897 	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
898 	strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
899 	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
900 
901 	cpi->unit_number = cam_sim_unit(sim);
902 	cpi->bus_id = cam_sim_bus(sim);
903 
904 	cpi->base_transfer_speed = 300000;
905 
906 	cpi->protocol = PROTO_SCSI;
907 	cpi->protocol_version = SCSI_REV_SPC3;
908 	cpi->transport = XPORT_SAS;
909 	cpi->transport_version = 0;
910 
911 	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
912 	    PAGE_SIZE;
913 
914 	cpi->hba_vendor = virtio_get_vendor(dev);
915 	cpi->hba_device = virtio_get_device(dev);
916 	cpi->hba_subvendor = virtio_get_subvendor(dev);
917 	cpi->hba_subdevice = virtio_get_subdevice(dev);
918 
919 	ccb->ccb_h.status = CAM_REQ_CMP;
920 	xpt_done(ccb);
921 }
922 
/*
 * Append the CCB's data buffer(s) to the scatter/gather list, handling
 * each CAM data addressing style: single virtual buffer, single
 * physical buffer, or an S/G list of virtual or physical segments.
 * Returns 0, an errno from sglist_append*(), or EINVAL for an unknown
 * addressing mode.
 */
static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	switch ((ccbh->flags & CAM_DATA_MASK)) {
	case CAM_DATA_VADDR:
		/* Single virtually-addressed buffer. */
		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_PADDR:
		/* Single physically-addressed buffer. */
		error = sglist_append_phys(sg,
		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
		break;
	case CAM_DATA_SG:
		/* S/G list of virtual addresses; stop on first error. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append(sg,
			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	case CAM_DATA_SG_PADDR:
		/* S/G list of physical addresses; stop on first error. */
		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
			error = sglist_append_phys(sg,
			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
963 
/*
 * Build the request virtqueue sglist for a SCSI command in the order
 * the device expects:
 *
 *   [ cmd_req | data-out (CAM_DIR_OUT) | cmd_resp | data-in (CAM_DIR_IN) ]
 *
 * Segments before cmd_resp are device-readable, the rest are
 * device-writable; the split is returned through *readable and
 * *writable. Returns EFBIG (and sets CAM_REQ_TOO_BIG on the CCB) if
 * the buffers do not fit in the sglist.
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1019 
/*
 * Initialize the virtio command header, build the sglist, and enqueue
 * the request on the request virtqueue. On success a timeout callout
 * is armed (unless the CCB uses CAM_TIME_INFINITY) and completion is
 * delivered later through vtscsi_complete_scsi_cmd(). If the virtqueue
 * is full, the simq is frozen and the CCB is marked for requeue.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Sentinel so a response never written by the host is detectable. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* CCB timeout is in milliseconds; callout ticks in hz. */
		callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
		    vtscsi_timedout_scsi_cmd, req);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1073 
1074 static int
1075 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1076 {
1077 	struct vtscsi_request *req;
1078 	int error;
1079 
1080 	req = vtscsi_dequeue_request(sc);
1081 	if (req == NULL) {
1082 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1083 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1084 		return (ENOBUFS);
1085 	}
1086 
1087 	req->vsr_ccb = ccb;
1088 
1089 	error = vtscsi_execute_scsi_cmd(sc, req);
1090 	if (error)
1091 		vtscsi_enqueue_request(sc, req);
1092 
1093 	return (error);
1094 }
1095 
/*
 * Completion handler for the abort task command sent on behalf of a
 * timed out SCSI command. If the abort did not take effect and the
 * device is neither detaching nor already resetting, escalate to a
 * bus reset.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done; recycle it. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1134 
/*
 * Send an asynchronous ABORT TASK TMF for a request whose timeout has
 * fired. Returns 0 if the TMF was enqueued on the control virtqueue;
 * on failure the caller is expected to escalate (e.g. bus reset).
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	/* A separate request structure carries the TMF itself. */
	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The tag (the timedout CCB's address) names the task to abort. */
	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	/* Preset an invalid response so stale data is detectable. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1182 
/*
 * Callout handler for a SCSI command that exceeded its CCB timeout.
 * Runs with the softc mutex held (the callout was initialized with
 * callout_init_mtx() in vtscsi_init_request()). First tries to abort
 * just the offending command; failing that, resets the bus.
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1224 
1225 static cam_status
1226 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1227 {
1228 	cam_status status;
1229 
1230 	switch (cmd_resp->response) {
1231 	case VIRTIO_SCSI_S_OK:
1232 		status = CAM_REQ_CMP;
1233 		break;
1234 	case VIRTIO_SCSI_S_OVERRUN:
1235 		status = CAM_DATA_RUN_ERR;
1236 		break;
1237 	case VIRTIO_SCSI_S_ABORTED:
1238 		status = CAM_REQ_ABORTED;
1239 		break;
1240 	case VIRTIO_SCSI_S_BAD_TARGET:
1241 		status = CAM_TID_INVALID;
1242 		break;
1243 	case VIRTIO_SCSI_S_RESET:
1244 		status = CAM_SCSI_BUS_RESET;
1245 		break;
1246 	case VIRTIO_SCSI_S_BUSY:
1247 		status = CAM_SCSI_BUSY;
1248 		break;
1249 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1250 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1251 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1252 		status = CAM_SCSI_IT_NEXUS_LOST;
1253 		break;
1254 	default: /* VIRTIO_SCSI_S_FAILURE */
1255 		status = CAM_REQ_CMP_ERR;
1256 		break;
1257 	}
1258 
1259 	return (status);
1260 }
1261 
1262 static cam_status
1263 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1264     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1265 {
1266 	cam_status status;
1267 
1268 	csio->scsi_status = cmd_resp->status;
1269 	csio->resid = cmd_resp->resid;
1270 
1271 	if (csio->scsi_status == SCSI_STATUS_OK)
1272 		status = CAM_REQ_CMP;
1273 	else
1274 		status = CAM_SCSI_STATUS_ERROR;
1275 
1276 	if (cmd_resp->sense_len > 0) {
1277 		status |= CAM_AUTOSNS_VALID;
1278 
1279 		if (cmd_resp->sense_len < csio->sense_len)
1280 			csio->sense_resid = csio->sense_len -
1281 			    cmd_resp->sense_len;
1282 		else
1283 			csio->sense_resid = 0;
1284 
1285 		bzero(&csio->sense_data, sizeof(csio->sense_data));
1286 		memcpy(cmd_resp->sense, &csio->sense_data,
1287 		    csio->sense_len - csio->sense_resid);
1288 	}
1289 
1290 	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1291 	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1292 	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1293 
1294 	return (status);
1295 }
1296 
/*
 * Completion handler for a SCSI command on the request virtqueue:
 * translate the virtio response into a CAM status, thaw the SIMQ if
 * resources were freed, and hand the CCB back to CAM.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort we issued for a timeout is reported to CAM
		 * as a timeout, not as an abort. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* Completing a command frees a request and a virtqueue slot. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1337 
1338 static void
1339 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1340 {
1341 
1342 	/* XXX We probably shouldn't poll forever. */
1343 	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1344 	do
1345 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1346 	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1347 
1348 	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1349 }
1350 
1351 static int
1352 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1353     struct sglist *sg, int readable, int writable, int flag)
1354 {
1355 	struct virtqueue *vq;
1356 	int error;
1357 
1358 	vq = sc->vtscsi_control_vq;
1359 
1360 	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1361 
1362 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1363 	if (error) {
1364 		/*
1365 		 * Return EAGAIN when the virtqueue does not have enough
1366 		 * descriptors available.
1367 		 */
1368 		if (error == ENOSPC || error == EMSGSIZE)
1369 			error = EAGAIN;
1370 
1371 		return (error);
1372 	}
1373 
1374 	virtqueue_notify(vq);
1375 	if (flag == VTSCSI_EXECUTE_POLL)
1376 		vtscsi_poll_ctrl_req(sc, req);
1377 
1378 	return (0);
1379 }
1380 
1381 static void
1382 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1383     struct vtscsi_request *req)
1384 {
1385 	union ccb *ccb;
1386 	struct ccb_hdr *ccbh;
1387 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1388 
1389 	ccb = req->vsr_ccb;
1390 	ccbh = &ccb->ccb_h;
1391 	tmf_resp = &req->vsr_tmf_resp;
1392 
1393 	switch (tmf_resp->response) {
1394 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1395 		ccbh->status = CAM_REQ_CMP;
1396 		break;
1397 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1398 		ccbh->status = CAM_UA_ABORT;
1399 		break;
1400 	default:
1401 		ccbh->status = CAM_REQ_CMP_ERR;
1402 		break;
1403 	}
1404 
1405 	xpt_done(ccb);
1406 	vtscsi_enqueue_request(sc, req);
1407 }
1408 
/*
 * Handle an XPT_ABORT CCB: issue an asynchronous ABORT TASK TMF for
 * the CCB named by cab->abort_ccb. Returns 0 when the TMF has been
 * enqueued; otherwise an errno describing why the abort could not be
 * attempted (the target request completed, was never ours, etc.).
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Mark aborted and cancel any pending timeout on the victim. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The tag (the aborted CCB's address) names the task to abort. */
	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	/* Preset an invalid response so stale data is detectable. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1466 
1467 static void
1468 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1469     struct vtscsi_request *req)
1470 {
1471 	union ccb *ccb;
1472 	struct ccb_hdr *ccbh;
1473 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1474 
1475 	ccb = req->vsr_ccb;
1476 	ccbh = &ccb->ccb_h;
1477 	tmf_resp = &req->vsr_tmf_resp;
1478 
1479 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1480 	    req, ccb, tmf_resp->response);
1481 
1482 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1483 		ccbh->status = CAM_REQ_CMP;
1484 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1485 		    ccbh->target_lun);
1486 	} else
1487 		ccbh->status = CAM_REQ_CMP_ERR;
1488 
1489 	xpt_done(ccb);
1490 	vtscsi_enqueue_request(sc, req);
1491 }
1492 
/*
 * Handle an XPT_RESET_DEV CCB by sending the appropriate reset TMF:
 * an I_T nexus reset when the LUN is wildcarded, otherwise a logical
 * unit reset. Submitted asynchronously; completion is reported via
 * vtscsi_complete_reset_dev_cmd().
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	/* Preset an invalid response so stale data is detectable. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}
1533 
1534 static void
1535 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1536 {
1537 
1538 	*target_id = lun[1];
1539 	*lun_id = (lun[2] << 8) | lun[3];
1540 }
1541 
1542 static void
1543 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1544 {
1545 
1546 	lun[0] = 1;
1547 	lun[1] = ccbh->target_id;
1548 	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1549 	lun[3] = (ccbh->target_lun >> 8) & 0xFF;
1550 }
1551 
/*
 * Fill out a virtio SCSI command request from the CCB: LUN address,
 * tag (the CCB's address, echoed back on completion), task attribute
 * derived from the CAM tag action, and the CDB itself.
 */
static void
vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
	uint8_t attr;

	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		attr = VIRTIO_SCSI_S_HEAD;
		break;
	case MSG_ORDERED_Q_TAG:
		attr = VIRTIO_SCSI_S_ORDERED;
		break;
	case MSG_ACA_TASK:
		attr = VIRTIO_SCSI_S_ACA;
		break;
	default: /* MSG_SIMPLE_Q_TAG */
		attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}

	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
	cmd_req->tag = (uintptr_t) csio;
	cmd_req->task_attr = attr;

	/* The CDB may be inline in the CCB or referenced by pointer. */
	memcpy(cmd_req->cdb,
	    csio->ccb_h.flags & CAM_CDB_POINTER ?
	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
	    csio->cdb_len);
}
1582 
1583 static void
1584 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
1585     uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1586 {
1587 
1588 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
1589 
1590 	tmf_req->type = VIRTIO_SCSI_T_TMF;
1591 	tmf_req->subtype = subtype;
1592 	tmf_req->tag = tag;
1593 }
1594 
1595 static void
1596 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1597 {
1598 	int frozen;
1599 
1600 	frozen = sc->vtscsi_frozen;
1601 
1602 	if (reason & VTSCSI_REQUEST &&
1603 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1604 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1605 
1606 	if (reason & VTSCSI_REQUEST_VQ &&
1607 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1608 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1609 
1610 	/* Freeze the SIMQ if transitioned to frozen. */
1611 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1612 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1613 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1614 	}
1615 }
1616 
1617 static int
1618 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1619 {
1620 	int thawed;
1621 
1622 	if (sc->vtscsi_frozen == 0 || reason == 0)
1623 		return (0);
1624 
1625 	if (reason & VTSCSI_REQUEST &&
1626 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1627 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1628 
1629 	if (reason & VTSCSI_REQUEST_VQ &&
1630 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1631 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1632 
1633 	thawed = sc->vtscsi_frozen == 0;
1634 	if (thawed != 0)
1635 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1636 
1637 	return (thawed);
1638 }
1639 
1640 static void
1641 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1642     target_id_t target_id, lun_id_t lun_id)
1643 {
1644 	struct cam_path *path;
1645 
1646 	/* Use the wildcard path from our softc for bus announcements. */
1647 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1648 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1649 		return;
1650 	}
1651 
1652 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1653 	    target_id, lun_id) != CAM_REQ_CMP) {
1654 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1655 		return;
1656 	}
1657 
1658 	xpt_async(ac_code, path, NULL);
1659 	xpt_free_path(path);
1660 }
1661 
1662 static void
1663 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1664     lun_id_t lun_id)
1665 {
1666 	union ccb *ccb;
1667 	cam_status status;
1668 
1669 	ccb = xpt_alloc_ccb_nowait();
1670 	if (ccb == NULL) {
1671 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1672 		return;
1673 	}
1674 
1675 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1676 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1677 	if (status != CAM_REQ_CMP) {
1678 		xpt_free_ccb(ccb);
1679 		return;
1680 	}
1681 
1682 	xpt_rescan(ccb);
1683 }
1684 
/* Rescan every target and LUN on the bus via the wildcard path. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1691 
1692 static void
1693 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1694     struct virtio_scsi_event *event)
1695 {
1696 	target_id_t target_id;
1697 	lun_id_t lun_id;
1698 
1699 	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1700 
1701 	switch (event->reason) {
1702 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1703 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1704 		vtscsi_execute_rescan(sc, target_id, lun_id);
1705 		break;
1706 	default:
1707 		device_printf(sc->vtscsi_dev,
1708 		    "unhandled transport event reason: %d\n", event->reason);
1709 		break;
1710 	}
1711 }
1712 
/*
 * Dispatch one event buffer from the event virtqueue. If the host
 * flagged missed events, rescan the whole bus to resynchronize;
 * otherwise handle the specific event. The buffer is re-posted to
 * the virtqueue afterwards.
 */
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
	int error;

	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
		switch (event->event) {
		case VIRTIO_SCSI_T_TRANSPORT_RESET:
			vtscsi_transport_reset_event(sc, event);
			break;
		default:
			device_printf(sc->vtscsi_dev,
			    "unhandled event: %d\n", event->event);
			break;
		}
	} else
		vtscsi_execute_rescan_bus(sc);

	/*
	 * This should always be successful since the buffer
	 * was just dequeued.
	 */
	error = vtscsi_enqueue_event_buf(sc, event);
	KASSERT(error == 0,
	    ("cannot requeue event buffer: %d", error));
}
1739 
1740 static int
1741 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1742     struct virtio_scsi_event *event)
1743 {
1744 	struct sglist *sg;
1745 	struct virtqueue *vq;
1746 	int size, error;
1747 
1748 	sg = sc->vtscsi_sglist;
1749 	vq = sc->vtscsi_event_vq;
1750 	size = sc->vtscsi_event_buf_size;
1751 
1752 	bzero(event, size);
1753 
1754 	sglist_reset(sg);
1755 	error = sglist_append(sg, event, size);
1756 	if (error)
1757 		return (error);
1758 
1759 	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1760 	if (error)
1761 		return (error);
1762 
1763 	virtqueue_notify(vq);
1764 
1765 	return (0);
1766 }
1767 
/*
 * Post the initial set of event buffers to the event virtqueue.
 * Returns 0 (without posting anything) when hotplug was not
 * negotiated; see the QEMU note below.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1804 
/*
 * Re-post the event buffers after the virtqueues were reinitialized
 * (bus reset path). Mirrors vtscsi_init_event_vq() but asserts
 * instead of returning an error.
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/* As in vtscsi_init_event_vq(), one posted buffer suffices. */
	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1825 
1826 static void
1827 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1828 {
1829 	struct virtqueue *vq;
1830 	int last;
1831 
1832 	vq = sc->vtscsi_event_vq;
1833 	last = 0;
1834 
1835 	while (virtqueue_drain(vq, &last) != NULL)
1836 		;
1837 
1838 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1839 }
1840 
/*
 * Process completions on the request and control virtqueues. The
 * softc mutex must be held.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1852 
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1861 
/*
 * Complete a request that was pulled off a virtqueue by a drain:
 * fail its CCB with CAM_NO_HBA when detaching, or CAM_REQUEUE_REQ for
 * a bus reset so CAM retries it. The request is then returned to the
 * free list.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* xpt_done() is called with the lock held in both cases. */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1905 
1906 static void
1907 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1908 {
1909 	struct vtscsi_request *req;
1910 	int last;
1911 
1912 	last = 0;
1913 
1914 	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1915 
1916 	while ((req = virtqueue_drain(vq, &last)) != NULL)
1917 		vtscsi_cancel_request(sc, req);
1918 
1919 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1920 }
1921 
/* Drain all three virtqueues: control, request, then event. */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
1933 
/*
 * Quiesce the adapter: mask all virtqueue interrupts, then stop the
 * device through the virtio bus layer.
 */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
1941 
/*
 * Reset the (emulated) SCSI bus: stop the device, cancel everything
 * in flight so CAM retries it, reinitialize, and announce the reset.
 * Called with the softc mutex held. VTSCSI_FLAG_RESET suppresses
 * redundant resets from the timeout path while this is in progress.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
1987 
/*
 * One-time initialization of a request structure. With INVARIANTS,
 * verify that the request and response areas each map to a single
 * sglist segment (i.e. do not cross a page boundary).
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	/* The timeout callout runs with the softc mutex held. */
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2005 
/*
 * Allocate the pool of request structures, sized to the request
 * virtqueue. Returns ENOMEM on allocation failure; requests already
 * allocated remain on the free list (presumably reclaimed by the
 * caller via vtscsi_free_requests() — confirm against the attach
 * error path).
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}
2037 
2038 static void
2039 vtscsi_free_requests(struct vtscsi_softc *sc)
2040 {
2041 	struct vtscsi_request *req;
2042 
2043 	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2044 		KASSERT(callout_active(&req->vsr_callout) == 0,
2045 		    ("request callout still active"));
2046 
2047 		sc->vtscsi_nrequests--;
2048 		free(req, M_DEVBUF);
2049 	}
2050 
2051 	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2052 	    sc->vtscsi_nrequests));
2053 }
2054 
/*
 * Return a request to the free list, resetting its per-command state.
 * Thaws (releases) the SIMQ if it was frozen solely for lack of free
 * requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2084 
2085 static struct vtscsi_request *
2086 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2087 {
2088 	struct vtscsi_request *req;
2089 
2090 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2091 	if (req != NULL) {
2092 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2093 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2094 	} else
2095 		sc->vtscsi_stats.dequeue_no_requests++;
2096 
2097 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2098 
2099 	return (req);
2100 }
2101 
/*
 * Run a request's completion: flag polled requests as complete (so
 * vtscsi_poll_ctrl_req() can terminate) and invoke the completion
 * callback, if one was set.
 */
static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}
2112 
/*
 * Dequeue and complete every finished request on the given virtqueue.
 * The softc mutex must be held.
 */
static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}
2123 
/*
 * Control virtqueue interrupt handler. If virtqueue_enable_intr()
 * reports that entries arrived while interrupts were off, re-disable
 * and loop to process them, avoiding a lost-completion race.
 */
static void
vtscsi_control_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_control_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2146 
/*
 * Event virtqueue interrupt handler: dispatch each posted event
 * buffer. The enable/disable retry closes the race where an event
 * arrives between the final dequeue and re-enabling the interrupt.
 */
static void
vtscsi_event_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;
	struct virtio_scsi_event *event;

	sc = xsc;
	vq = sc->vtscsi_event_vq;

again:
	VTSCSI_LOCK(sc);

	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_handle_event(sc, event);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2171 
/*
 * Request virtqueue interrupt handler. Same enable/disable retry
 * pattern as the control and event queue handlers.
 */
static void
vtscsi_request_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_request_vq;

again:
	VTSCSI_LOCK(sc);

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

	VTSCSI_UNLOCK(sc);
}
2194 
/* Mask interrupts on all three virtqueues. */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2203 
/* Unmask interrupts on all three virtqueues. */
static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}
2212 
2213 static void
2214 vtscsi_get_tunables(struct vtscsi_softc *sc)
2215 {
2216 	char tmpstr[64];
2217 
2218 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2219 
2220 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2221 	    device_get_unit(sc->vtscsi_dev));
2222 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2223 }
2224 
2225 static void
2226 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2227 {
2228 	device_t dev;
2229 	struct vtscsi_statistics *stats;
2230         struct sysctl_ctx_list *ctx;
2231 	struct sysctl_oid *tree;
2232 	struct sysctl_oid_list *child;
2233 
2234 	dev = sc->vtscsi_dev;
2235 	stats = &sc->vtscsi_stats;
2236 	ctx = device_get_sysctl_ctx(dev);
2237 	tree = device_get_sysctl_tree(dev);
2238 	child = SYSCTL_CHILDREN(tree);
2239 
2240 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2241 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2242 	    "Debug level");
2243 
2244 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2245 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2246 	    "SCSI command timeouts");
2247 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2248 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2249 	    "No available requests to dequeue");
2250 }
2251 
2252 static void
2253 vtscsi_printf_req(struct vtscsi_request *req, const char *func,
2254     const char *fmt, ...)
2255 {
2256 	struct vtscsi_softc *sc;
2257 	union ccb *ccb;
2258 	struct sbuf sb;
2259 	va_list ap;
2260 	char str[192];
2261 	char path_str[64];
2262 
2263 	if (req == NULL)
2264 		return;
2265 
2266 	sc = req->vsr_softc;
2267 	ccb = req->vsr_ccb;
2268 
2269 	va_start(ap, fmt);
2270 	sbuf_new(&sb, str, sizeof(str), 0);
2271 
2272 	if (ccb == NULL) {
2273 		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
2274 		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
2275 		    cam_sim_bus(sc->vtscsi_sim));
2276 	} else {
2277 		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2278 		sbuf_cat(&sb, path_str);
2279 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2280 			scsi_command_string(&ccb->csio, &sb);
2281 			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
2282 		}
2283 	}
2284 
2285 	sbuf_vprintf(&sb, fmt, ap);
2286 	va_end(ap);
2287 
2288 	sbuf_finish(&sb);
2289 	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
2290 	    sbuf_data(&sb));
2291 }
2292