1 /*-
2  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * $FreeBSD: head/sys/dev/virtio/scsi/virtio_scsi.c 311305 2017-01-04 20:26:42Z asomers $
27  */
28 
29 /* Driver for VirtIO SCSI devices. */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/kthread.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/sglist.h>
38 #include <sys/sysctl.h>
39 #include <sys/lock.h>
40 #include <sys/callout.h>
41 #include <sys/queue.h>
42 #include <sys/sbuf.h>
43 
44 #include <machine/stdarg.h>
45 
46 #include <sys/bus.h>
47 #include <sys/rman.h>
48 
49 #include <bus/cam/cam.h>
50 #include <bus/cam/cam_ccb.h>
51 #include <bus/cam/cam_sim.h>
52 #include <bus/cam/cam_periph.h>
53 #include <bus/cam/cam_xpt_periph.h>
54 #include <bus/cam/cam_xpt_sim.h>
55 #include <bus/cam/cam_debug.h>
56 #include <bus/cam/scsi/scsi_all.h>
57 #include <bus/cam/scsi/scsi_message.h>
58 
59 #include <dev/virtual/virtio/virtio/virtio.h>
60 #include <dev/virtual/virtio/virtio/virtqueue.h>
61 #include <dev/virtual/virtio/scsi/virtio_scsi.h>
62 #include <dev/virtual/virtio/scsi/virtio_scsivar.h>
63 
64 #include "virtio_if.h"
65 
66 static int	vtscsi_modevent(module_t, int, void *);
67 
68 static int	vtscsi_probe(device_t);
69 static int	vtscsi_attach(device_t);
70 static int	vtscsi_detach(device_t);
71 static int	vtscsi_suspend(device_t);
72 static int	vtscsi_resume(device_t);
73 
74 static void	vtscsi_negotiate_features(struct vtscsi_softc *);
75 static void	vtscsi_read_config(struct vtscsi_softc *,
76 		    struct virtio_scsi_config *);
77 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
78 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
79 static void	vtscsi_write_device_config(struct vtscsi_softc *);
80 static int	vtscsi_reinit(struct vtscsi_softc *);
81 
82 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
83 static int	vtscsi_register_cam(struct vtscsi_softc *);
84 static void	vtscsi_free_cam(struct vtscsi_softc *);
85 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
86 static int	vtscsi_register_async(struct vtscsi_softc *);
87 static void	vtscsi_deregister_async(struct vtscsi_softc *);
88 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
89 static void	vtscsi_cam_poll(struct cam_sim *);
90 
91 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
92 		    union ccb *);
93 static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
94 		    union ccb *);
95 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
96 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
97 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
98 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
99 		    struct cam_sim *, union ccb *);
100 
101 static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
102 		    struct sglist *, struct ccb_scsiio *);
103 static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
104 		    struct vtscsi_request *, int *, int *);
105 static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
106 		    struct vtscsi_request *);
107 static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
108 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
109 		    struct vtscsi_request *);
110 static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
111 		    struct vtscsi_request *);
112 static void	vtscsi_timedout_scsi_cmd(void *);
113 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
114 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
115 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
116 static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
117 		    struct vtscsi_request *);
118 
119 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
120 		    struct vtscsi_request *);
121 static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
122 		    struct vtscsi_request *, struct sglist *, int, int, int);
123 static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
124 		    struct vtscsi_request *);
125 static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
126 		    struct vtscsi_request *);
127 static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
128 		    struct vtscsi_request *);
129 
130 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
131 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
132 static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
133 		    struct virtio_scsi_cmd_req *);
134 static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
135 		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
136 
137 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
138 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
139 
140 static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
141 		    lun_id_t);
142 static void	vtscsi_cam_rescan_callback(struct cam_periph *periph,
143 		    union ccb *ccb);
144 static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
145 		    lun_id_t);
146 static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
147 
148 static void	vtscsi_handle_event(struct vtscsi_softc *,
149 		    struct virtio_scsi_event *);
150 static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
151 		    struct virtio_scsi_event *);
152 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
153 static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
154 static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
155 
156 static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
157 static void	vtscsi_complete_vqs(struct vtscsi_softc *);
158 static void	vtscsi_drain_vqs(struct vtscsi_softc *);
159 static void	vtscsi_cancel_request(struct vtscsi_softc *,
160 		    struct vtscsi_request *);
161 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
162 static void	vtscsi_stop(struct vtscsi_softc *);
163 static int	vtscsi_reset_bus(struct vtscsi_softc *);
164 
165 static void	vtscsi_init_request(struct vtscsi_softc *,
166 		    struct vtscsi_request *);
167 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
168 static void	vtscsi_free_requests(struct vtscsi_softc *);
169 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
170 		    struct vtscsi_request *);
171 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
172 
173 static void	vtscsi_complete_request(struct vtscsi_request *);
174 static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
175 
176 static int	vtscsi_control_vq_intr(void *);
177 static int	vtscsi_event_vq_intr(void *);
178 static int	vtscsi_request_vq_intr(void *);
179 static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
180 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
181 
182 static void	vtscsi_get_tunables(struct vtscsi_softc *);
183 static void	vtscsi_add_sysctl(struct vtscsi_softc *);
184 
185 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
186 		    const char *, ...);
187 
188 /* Global tunables. */
189 /*
190  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
191  * IO during virtio_stop(). So in-flight requests still complete after the
192  * device reset. We would have to wait for all the in-flight IO to complete,
193  * which defeats the typical purpose of a bus reset. We could simulate the
194  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
195  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
196  * control virtqueue). But this isn't very useful if things really go off
197  * the rails, so default to disabled for now.
198  */
199 static int vtscsi_bus_reset_disable = 1;
200 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
201 
202 static struct virtio_feature_desc vtscsi_feature_desc[] = {
203 	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
204 	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
205 
206 	{ 0, NULL }
207 };
208 
209 static device_method_t vtscsi_methods[] = {
210 	/* Device methods. */
211 	DEVMETHOD(device_probe,		vtscsi_probe),
212 	DEVMETHOD(device_attach,	vtscsi_attach),
213 	DEVMETHOD(device_detach,	vtscsi_detach),
214 	DEVMETHOD(device_suspend,	vtscsi_suspend),
215 	DEVMETHOD(device_resume,	vtscsi_resume),
216 
217 	DEVMETHOD_END
218 };
219 
220 static driver_t vtscsi_driver = {
221 	"vtscsi",
222 	vtscsi_methods,
223 	sizeof(struct vtscsi_softc)
224 };
225 static devclass_t vtscsi_devclass;
226 
227 DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
228     vtscsi_modevent, NULL);
229 MODULE_VERSION(virtio_scsi, 1);
230 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
231 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
232 
233 static int
234 vtscsi_modevent(module_t mod, int type, void *unused)
235 {
236 	int error;
237 
238 	switch (type) {
239 	case MOD_LOAD:
240 	case MOD_UNLOAD:
241 	case MOD_SHUTDOWN:
242 		error = 0;
243 		break;
244 	default:
245 		error = EOPNOTSUPP;
246 		break;
247 	}
248 
249 	return (error);
250 }
251 
252 static int
253 vtscsi_probe(device_t dev)
254 {
255 
256 	if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
257 		return (ENXIO);
258 
259 	device_set_desc(dev, "VirtIO SCSI Adapter");
260 
261 	return (BUS_PROBE_DEFAULT);
262 }
263 
264 static int
265 vtscsi_attach(device_t dev)
266 {
267 	struct vtscsi_softc *sc;
268 	struct virtio_scsi_config scsicfg;
269 	int error;
270 
271 	sc = device_get_softc(dev);
272 	sc->vtscsi_dev = dev;
273 
274 	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
275 	TAILQ_INIT(&sc->vtscsi_req_free);
276 
277 	vtscsi_get_tunables(sc);
278 	vtscsi_add_sysctl(sc);
279 
280 	virtio_set_feature_desc(dev, vtscsi_feature_desc);
281 	vtscsi_negotiate_features(sc);
282 
283 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
284 		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
285 #ifndef __DragonFly__ /* XXX swildner */
286 	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
287 		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
288 #endif
289 	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
290 		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
291 
292 	vtscsi_read_config(sc, &scsicfg);
293 
294 	sc->vtscsi_max_channel = scsicfg.max_channel;
295 	sc->vtscsi_max_target = scsicfg.max_target;
296 	sc->vtscsi_max_lun = scsicfg.max_lun;
297 	sc->vtscsi_event_buf_size = scsicfg.event_info_size;
298 
299 	vtscsi_write_device_config(sc);
300 
301 	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
302 	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
303 	if (sc->vtscsi_sglist == NULL) {
304 		error = ENOMEM;
305 		device_printf(dev, "cannot allocate sglist\n");
306 		goto fail;
307 	}
308 
309 	error = vtscsi_alloc_virtqueues(sc);
310 	if (error) {
311 		device_printf(dev, "cannot allocate virtqueues\n");
312 		goto fail;
313 	}
314 
315 	error = vtscsi_init_event_vq(sc);
316 	if (error) {
317 		device_printf(dev, "cannot populate the eventvq\n");
318 		goto fail;
319 	}
320 
321 	error = vtscsi_alloc_requests(sc);
322 	if (error) {
323 		device_printf(dev, "cannot allocate requests\n");
324 		goto fail;
325 	}
326 
327 	error = vtscsi_alloc_cam(sc);
328 	if (error) {
329 		device_printf(dev, "cannot allocate CAM structures\n");
330 		goto fail;
331 	}
332 
333 	error = virtio_setup_intr(dev, NULL);
334 	if (error) {
335 		device_printf(dev, "cannot setup virtqueue interrupts\n");
336 		goto fail;
337 	}
338 
339 	vtscsi_enable_vqs_intr(sc);
340 
341 	/*
342 	 * Register with CAM after interrupts are enabled so we will get
343 	 * notified of the probe responses.
344 	 */
345 	error = vtscsi_register_cam(sc);
346 	if (error) {
347 		device_printf(dev, "cannot register with CAM\n");
348 		goto fail;
349 	}
350 
351 fail:
352 	if (error)
353 		vtscsi_detach(dev);
354 
355 	return (error);
356 }
357 
358 static int
359 vtscsi_detach(device_t dev)
360 {
361 	struct vtscsi_softc *sc;
362 
363 	sc = device_get_softc(dev);
364 
365 	VTSCSI_LOCK(sc);
366 	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
367 	if (device_is_attached(dev))
368 		vtscsi_stop(sc);
369 	VTSCSI_UNLOCK(sc);
370 
371 	vtscsi_complete_vqs(sc);
372 	vtscsi_drain_vqs(sc);
373 
374 	vtscsi_free_cam(sc);
375 	vtscsi_free_requests(sc);
376 
377 	if (sc->vtscsi_sglist != NULL) {
378 		sglist_free(sc->vtscsi_sglist);
379 		sc->vtscsi_sglist = NULL;
380 	}
381 
382 	VTSCSI_LOCK_DESTROY(sc);
383 
384 	return (0);
385 }
386 
387 static int
388 vtscsi_suspend(device_t dev)
389 {
390 
391 	return (0);
392 }
393 
394 static int
395 vtscsi_resume(device_t dev)
396 {
397 
398 	return (0);
399 }
400 
401 static void
402 vtscsi_negotiate_features(struct vtscsi_softc *sc)
403 {
404 	device_t dev;
405 	uint64_t features;
406 
407 	dev = sc->vtscsi_dev;
408 	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
409 	sc->vtscsi_features = features;
410 }
411 
412 #define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
413 	virtio_read_device_config(_dev,				\
414 	    offsetof(struct virtio_scsi_config, _field),	\
415 	    &(_cfg)->_field, sizeof((_cfg)->_field))		\
416 
417 static void
418 vtscsi_read_config(struct vtscsi_softc *sc,
419     struct virtio_scsi_config *scsicfg)
420 {
421 	device_t dev;
422 
423 	dev = sc->vtscsi_dev;
424 
425 	bzero(scsicfg, sizeof(struct virtio_scsi_config));
426 
427 	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
428 	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
429 	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
430 	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
431 	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
432 	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
433 	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
434 	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
435 	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
436 	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
437 }
438 
439 #undef VTSCSI_GET_CONFIG
440 
441 static int
442 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
443 {
444 	int nsegs;
445 
446 	nsegs = VTSCSI_MIN_SEGMENTS;
447 
448 	if (seg_max > 0) {
449 		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
450 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
451 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
452 	} else
453 		nsegs += 1;
454 
455 	return (nsegs);
456 }
457 
458 static int
459 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
460 {
461 	device_t dev;
462 	struct vq_alloc_info vq_info[3];
463 	int nvqs;
464 
465 	dev = sc->vtscsi_dev;
466 	nvqs = 3;
467 
468 	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
469 	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));
470 
471 	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
472 	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));
473 
474 	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
475 	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
476 	    "%s request", device_get_nameunit(dev));
477 
478 	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
479 }
480 
481 static void
482 vtscsi_write_device_config(struct vtscsi_softc *sc)
483 {
484 
485 	virtio_write_dev_config_4(sc->vtscsi_dev,
486 	    offsetof(struct virtio_scsi_config, sense_size),
487 	    VIRTIO_SCSI_SENSE_SIZE);
488 
489 	/*
490 	 * This is the size in the virtio_scsi_cmd_req structure. Note
491 	 * this value (32) is larger than the maximum CAM CDB size (16).
492 	 */
493 	virtio_write_dev_config_4(sc->vtscsi_dev,
494 	    offsetof(struct virtio_scsi_config, cdb_size),
495 	    VIRTIO_SCSI_CDB_SIZE);
496 }
497 
498 static int
499 vtscsi_reinit(struct vtscsi_softc *sc)
500 {
501 	device_t dev;
502 	int error;
503 
504 	dev = sc->vtscsi_dev;
505 
506 	error = virtio_reinit(dev, sc->vtscsi_features);
507 	if (error == 0) {
508 		vtscsi_write_device_config(sc);
509 		vtscsi_reinit_event_vq(sc);
510 		virtio_reinit_complete(dev);
511 
512 		vtscsi_enable_vqs_intr(sc);
513 	}
514 
515 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
516 
517 	return (error);
518 }
519 
520 static int
521 vtscsi_alloc_cam(struct vtscsi_softc *sc)
522 {
523 	device_t dev;
524 	struct cam_devq *devq;
525 	int openings;
526 
527 	dev = sc->vtscsi_dev;
528 	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
529 
530 	devq = cam_simq_alloc(openings);
531 	if (devq == NULL) {
532 		device_printf(dev, "cannot allocate SIM queue\n");
533 		return (ENOMEM);
534 	}
535 
536 	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
537 	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
538 	    openings, devq);
539 	cam_simq_release(devq);
540 	if (sc->vtscsi_sim == NULL) {
541 		device_printf(dev, "cannot allocate SIM\n");
542 		return (ENOMEM);
543 	}
544 
545 	return (0);
546 }
547 
548 static int
549 vtscsi_register_cam(struct vtscsi_softc *sc)
550 {
551 	device_t dev;
552 	int registered, error;
553 
554 	dev = sc->vtscsi_dev;
555 	registered = 0;
556 
557 	VTSCSI_LOCK(sc);
558 
559 	if (xpt_bus_register(sc->vtscsi_sim, 0) != CAM_SUCCESS) {
560 		error = ENOMEM;
561 		device_printf(dev, "cannot register XPT bus\n");
562 		goto fail;
563 	}
564 
565 	registered = 1;
566 
567 	if (xpt_create_path(&sc->vtscsi_path, NULL,
568 	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
569 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
570 		error = ENOMEM;
571 		device_printf(dev, "cannot create bus path\n");
572 		goto fail;
573 	}
574 
575 	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
576 		error = EIO;
577 		device_printf(dev, "cannot register async callback\n");
578 		goto fail;
579 	}
580 
581 	VTSCSI_UNLOCK(sc);
582 
583 	return (0);
584 
585 fail:
586 	if (sc->vtscsi_path != NULL) {
587 		xpt_free_path(sc->vtscsi_path);
588 		sc->vtscsi_path = NULL;
589 	}
590 
591 	if (registered != 0)
592 		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
593 
594 	VTSCSI_UNLOCK(sc);
595 
596 	return (error);
597 }
598 
599 static void
600 vtscsi_free_cam(struct vtscsi_softc *sc)
601 {
602 
603 	VTSCSI_LOCK(sc);
604 
605 	if (sc->vtscsi_path != NULL) {
606 		vtscsi_deregister_async(sc);
607 
608 		xpt_free_path(sc->vtscsi_path);
609 		sc->vtscsi_path = NULL;
610 
611 		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
612 	}
613 
614 	if (sc->vtscsi_sim != NULL) {
615 		cam_sim_free(sc->vtscsi_sim);
616 		sc->vtscsi_sim = NULL;
617 	}
618 
619 	VTSCSI_UNLOCK(sc);
620 }
621 
622 static void
623 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
624 {
625 	struct cam_sim *sim;
626 	struct vtscsi_softc *sc;
627 
628 	sim = cb_arg;
629 	sc = cam_sim_softc(sim);
630 
631 	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
632 
633 	/*
634 	 * TODO Once QEMU supports event reporting, we should
635 	 *      (un)subscribe to events here.
636 	 */
637 	switch (code) {
638 	case AC_FOUND_DEVICE:
639 		break;
640 	case AC_LOST_DEVICE:
641 		break;
642 	}
643 }
644 
645 static int
646 vtscsi_register_async(struct vtscsi_softc *sc)
647 {
648 	struct ccb_setasync csa;
649 
650 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
651 	csa.ccb_h.func_code = XPT_SASYNC_CB;
652 	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
653 	csa.callback = vtscsi_cam_async;
654 	csa.callback_arg = sc->vtscsi_sim;
655 
656 	xpt_action((union ccb *) &csa);
657 
658 	return (csa.ccb_h.status);
659 }
660 
661 static void
662 vtscsi_deregister_async(struct vtscsi_softc *sc)
663 {
664 	struct ccb_setasync csa;
665 
666 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
667 	csa.ccb_h.func_code = XPT_SASYNC_CB;
668 	csa.event_enable = 0;
669 	csa.callback = vtscsi_cam_async;
670 	csa.callback_arg = sc->vtscsi_sim;
671 
672 	xpt_action((union ccb *) &csa);
673 }
674 
675 static void
676 vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
677 {
678 	struct vtscsi_softc *sc;
679 	struct ccb_hdr *ccbh;
680 
681 	sc = cam_sim_softc(sim);
682 	ccbh = &ccb->ccb_h;
683 
684 	VTSCSI_LOCK_OWNED(sc);
685 
686 	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
687 		/*
688 		 * The VTSCSI_MTX is briefly dropped between setting
689 		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
690 		 * drop any CCBs that come in during that window.
691 		 */
692 		ccbh->status = CAM_NO_HBA;
693 		xpt_done(ccb);
694 		return;
695 	}
696 
697 	switch (ccbh->func_code) {
698 	case XPT_SCSI_IO:
699 		vtscsi_cam_scsi_io(sc, sim, ccb);
700 		break;
701 
702 	case XPT_SET_TRAN_SETTINGS:
703 		ccbh->status = CAM_FUNC_NOTAVAIL;
704 		xpt_done(ccb);
705 		break;
706 
707 	case XPT_GET_TRAN_SETTINGS:
708 		vtscsi_cam_get_tran_settings(sc, ccb);
709 		break;
710 
711 	case XPT_RESET_BUS:
712 		vtscsi_cam_reset_bus(sc, ccb);
713 		break;
714 
715 	case XPT_RESET_DEV:
716 		vtscsi_cam_reset_dev(sc, ccb);
717 		break;
718 
719 	case XPT_ABORT:
720 		vtscsi_cam_abort(sc, ccb);
721 		break;
722 
723 	case XPT_CALC_GEOMETRY:
724 		cam_calc_geometry(&ccb->ccg, 1);
725 		xpt_done(ccb);
726 		break;
727 
728 	case XPT_PATH_INQ:
729 		vtscsi_cam_path_inquiry(sc, sim, ccb);
730 		break;
731 
732 	default:
733 		vtscsi_dprintf(sc, VTSCSI_ERROR,
734 		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);
735 
736 		ccbh->status = CAM_REQ_INVALID;
737 		xpt_done(ccb);
738 		break;
739 	}
740 }
741 
742 static void
743 vtscsi_cam_poll(struct cam_sim *sim)
744 {
745 	struct vtscsi_softc *sc;
746 
747 	sc = cam_sim_softc(sim);
748 
749 	vtscsi_complete_vqs_locked(sc);
750 }
751 
752 static void
753 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
754     union ccb *ccb)
755 {
756 	struct ccb_hdr *ccbh;
757 	struct ccb_scsiio *csio;
758 	int error;
759 
760 	ccbh = &ccb->ccb_h;
761 	csio = &ccb->csio;
762 
763 	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
764 		error = EINVAL;
765 		ccbh->status = CAM_REQ_INVALID;
766 		goto done;
767 	}
768 
769 #ifndef __DragonFly__ /* XXX swildner */
770 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
771 	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
772 		error = EINVAL;
773 		ccbh->status = CAM_REQ_INVALID;
774 		goto done;
775 	}
776 #endif
777 
778 	error = vtscsi_start_scsi_cmd(sc, ccb);
779 
780 done:
781 	if (error) {
782 		vtscsi_dprintf(sc, VTSCSI_ERROR,
783 		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
784 		xpt_done(ccb);
785 	}
786 }
787 
788 static void
789 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
790 {
791 	struct ccb_trans_settings *cts;
792 	struct ccb_trans_settings_scsi *scsi;
793 
794 	cts = &ccb->cts;
795 	scsi = &cts->proto_specific.scsi;
796 
797 	cts->protocol = PROTO_SCSI;
798 	cts->protocol_version = SCSI_REV_SPC3;
799 	cts->transport = XPORT_SAS;
800 	cts->transport_version = 0;
801 
802 	scsi->valid = CTS_SCSI_VALID_TQ;
803 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
804 
805 	ccb->ccb_h.status = CAM_REQ_CMP;
806 	xpt_done(ccb);
807 }
808 
809 static void
810 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
811 {
812 	int error;
813 
814 	error = vtscsi_reset_bus(sc);
815 	if (error == 0)
816 		ccb->ccb_h.status = CAM_REQ_CMP;
817 	else
818 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
819 
820 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
821 	    error, ccb, ccb->ccb_h.status);
822 
823 	xpt_done(ccb);
824 }
825 
826 static void
827 vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
828 {
829 	struct ccb_hdr *ccbh;
830 	struct vtscsi_request *req;
831 	int error;
832 
833 	ccbh = &ccb->ccb_h;
834 
835 	req = vtscsi_dequeue_request(sc);
836 	if (req == NULL) {
837 		error = EAGAIN;
838 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
839 		goto fail;
840 	}
841 
842 	req->vsr_ccb = ccb;
843 
844 	error = vtscsi_execute_reset_dev_cmd(sc, req);
845 	if (error == 0)
846 		return;
847 
848 	vtscsi_enqueue_request(sc, req);
849 
850 fail:
851 	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
852 	    error, req, ccb);
853 
854 	if (error == EAGAIN)
855 		ccbh->status = CAM_RESRC_UNAVAIL;
856 	else
857 		ccbh->status = CAM_REQ_CMP_ERR;
858 
859 	xpt_done(ccb);
860 }
861 
862 static void
863 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
864 {
865 	struct vtscsi_request *req;
866 	struct ccb_hdr *ccbh;
867 	int error;
868 
869 	ccbh = &ccb->ccb_h;
870 
871 	req = vtscsi_dequeue_request(sc);
872 	if (req == NULL) {
873 		error = EAGAIN;
874 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
875 		goto fail;
876 	}
877 
878 	req->vsr_ccb = ccb;
879 
880 	error = vtscsi_execute_abort_task_cmd(sc, req);
881 	if (error == 0)
882 		return;
883 
884 	vtscsi_enqueue_request(sc, req);
885 
886 fail:
887 	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
888 	    error, req, ccb);
889 
890 	if (error == EAGAIN)
891 		ccbh->status = CAM_RESRC_UNAVAIL;
892 	else
893 		ccbh->status = CAM_REQ_CMP_ERR;
894 
895 	xpt_done(ccb);
896 }
897 
898 static void
899 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
900     union ccb *ccb)
901 {
902 	device_t dev;
903 	struct ccb_pathinq *cpi;
904 
905 	dev = sc->vtscsi_dev;
906 	cpi = &ccb->cpi;
907 
908 	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
909 
910 	cpi->version_num = 1;
911 	cpi->hba_inquiry = PI_TAG_ABLE;
912 	cpi->target_sprt = 0;
913 	cpi->hba_misc = PIM_SEQSCAN;
914 	if (vtscsi_bus_reset_disable != 0)
915 		cpi->hba_misc |= PIM_NOBUSRESET;
916 	cpi->hba_eng_cnt = 0;
917 
918 	cpi->max_target = sc->vtscsi_max_target;
919 	cpi->max_lun = sc->vtscsi_max_lun;
920 	cpi->initiator_id = VTSCSI_INITIATOR_ID;
921 
922 	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
923 	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
924 	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
925 
926 	cpi->unit_number = cam_sim_unit(sim);
927 	cpi->bus_id = cam_sim_bus(sim);
928 
929 	cpi->base_transfer_speed = 300000;
930 
931 	cpi->protocol = PROTO_SCSI;
932 	cpi->protocol_version = SCSI_REV_SPC3;
933 	cpi->transport = XPORT_SAS;
934 	cpi->transport_version = 0;
935 
936 	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
937 	    PAGE_SIZE;
938 
939 #if 0
940 	cpi->hba_vendor = virtio_get_vendor(dev);
941 	cpi->hba_device = virtio_get_device(dev);
942 	cpi->hba_subvendor = virtio_get_subvendor(dev);
943 	cpi->hba_subdevice = virtio_get_subdevice(dev);
944 #endif
945 
946 	ccb->ccb_h.status = CAM_REQ_CMP;
947 	xpt_done(ccb);
948 }
949 
950 static int
951 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
952     struct ccb_scsiio *csio)
953 {
954 	struct ccb_hdr *ccbh;
955 	struct bus_dma_segment *dseg;
956 	int i, error;
957 
958 	ccbh = &csio->ccb_h;
959 	error = 0;
960 
961 	if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
962 
963 		if ((ccbh->flags & CAM_DATA_PHYS) == 0)
964 			error = sglist_append(sg,
965 			    csio->data_ptr, csio->dxfer_len);
966 		else
967 			error = sglist_append_phys(sg,
968 			    (vm_paddr_t)(vm_offset_t) csio->data_ptr,
969 			    csio->dxfer_len);
970 	} else {
971 
972 		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
973 			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
974 
975 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
976 				error = sglist_append(sg,
977 				    (void *)(vm_offset_t) dseg->ds_addr,
978 				    dseg->ds_len);
979 			else
980 				error = sglist_append_phys(sg,
981 				    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
982 		}
983 	}
984 
985 	return (error);
986 }
987 
988 static int
989 vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
990     int *readable, int *writable)
991 {
992 	struct sglist *sg;
993 	struct ccb_hdr *ccbh;
994 	struct ccb_scsiio *csio;
995 	struct virtio_scsi_cmd_req *cmd_req;
996 	struct virtio_scsi_cmd_resp *cmd_resp;
997 	int error;
998 
999 	sg = sc->vtscsi_sglist;
1000 	csio = &req->vsr_ccb->csio;
1001 	ccbh = &csio->ccb_h;
1002 	cmd_req = &req->vsr_cmd_req;
1003 	cmd_resp = &req->vsr_cmd_resp;
1004 
1005 	sglist_reset(sg);
1006 
1007 	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
1008 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1009 		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1010 		/* At least one segment must be left for the response. */
1011 		if (error || sg->sg_nseg == sg->sg_maxseg)
1012 			goto fail;
1013 	}
1014 
1015 	*readable = sg->sg_nseg;
1016 
1017 	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
1018 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1019 		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1020 		if (error)
1021 			goto fail;
1022 	}
1023 
1024 	*writable = sg->sg_nseg - *readable;
1025 
1026 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
1027 	    "writable=%d\n", req, ccbh, *readable, *writable);
1028 
1029 	return (0);
1030 
1031 fail:
1032 	/*
1033 	 * This should never happen unless maxio was incorrectly set.
1034 	 */
1035 	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);
1036 
1037 	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
1038 	    "nseg=%d maxseg=%d\n",
1039 	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);
1040 
1041 	return (EFBIG);
1042 }
1043 
1044 static int
1045 vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1046 {
1047 	struct sglist *sg;
1048 	struct virtqueue *vq;
1049 	struct ccb_scsiio *csio;
1050 	struct ccb_hdr *ccbh;
1051 	struct virtio_scsi_cmd_req *cmd_req;
1052 	struct virtio_scsi_cmd_resp *cmd_resp;
1053 	int readable, writable, error;
1054 
1055 	sg = sc->vtscsi_sglist;
1056 	vq = sc->vtscsi_request_vq;
1057 	csio = &req->vsr_ccb->csio;
1058 	ccbh = &csio->ccb_h;
1059 	cmd_req = &req->vsr_cmd_req;
1060 	cmd_resp = &req->vsr_cmd_resp;
1061 
1062 	vtscsi_init_scsi_cmd_req(csio, cmd_req);
1063 
1064 	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
1065 	if (error)
1066 		return (error);
1067 
1068 	req->vsr_complete = vtscsi_complete_scsi_cmd;
1069 	cmd_resp->response = -1;
1070 
1071 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1072 	if (error) {
1073 		vtscsi_dprintf(sc, VTSCSI_ERROR,
1074 		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);
1075 
1076 		ccbh->status = CAM_REQUEUE_REQ;
1077 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
1078 		return (error);
1079 	}
1080 
1081 	ccbh->status |= CAM_SIM_QUEUED;
1082 	ccbh->ccbh_vtscsi_req = req;
1083 
1084 	virtqueue_notify(vq, NULL);
1085 
1086 	if (ccbh->timeout != CAM_TIME_INFINITY) {
1087 		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
1088 		callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
1089 		    vtscsi_timedout_scsi_cmd, req);
1090 	}
1091 
1092 	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
1093 	    req, ccbh);
1094 
1095 	return (0);
1096 }
1097 
1098 static int
1099 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1100 {
1101 	struct vtscsi_request *req;
1102 	int error;
1103 
1104 	req = vtscsi_dequeue_request(sc);
1105 	if (req == NULL) {
1106 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1107 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1108 		return (ENOBUFS);
1109 	}
1110 
1111 	req->vsr_ccb = ccb;
1112 
1113 	error = vtscsi_execute_scsi_cmd(sc, req);
1114 	if (error)
1115 		vtscsi_enqueue_request(sc, req);
1116 
1117 	return (error);
1118 }
1119 
1120 static void
1121 vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1122     struct vtscsi_request *req)
1123 {
1124 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1125 	struct vtscsi_request *to_req;
1126 	uint8_t response;
1127 
1128 	tmf_resp = &req->vsr_tmf_resp;
1129 	response = tmf_resp->response;
1130 	to_req = req->vsr_timedout_req;
1131 
1132 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
1133 	    req, to_req, response);
1134 
1135 	vtscsi_enqueue_request(sc, req);
1136 
1137 	/*
1138 	 * The timedout request could have completed between when the
1139 	 * abort task was sent and when the host processed it.
1140 	 */
1141 	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
1142 		return;
1143 
1144 	/* The timedout request was successfully aborted. */
1145 	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
1146 		return;
1147 
1148 	/* Don't bother if the device is going away. */
1149 	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1150 		return;
1151 
1152 	/* The timedout request will be aborted by the reset. */
1153 	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
1154 		return;
1155 
1156 	vtscsi_reset_bus(sc);
1157 }
1158 
1159 static int
1160 vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1161     struct vtscsi_request *to_req)
1162 {
1163 	struct sglist *sg;
1164 	struct ccb_hdr *to_ccbh;
1165 	struct vtscsi_request *req;
1166 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
1167 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1168 	int error;
1169 
1170 	sg = sc->vtscsi_sglist;
1171 	to_ccbh = &to_req->vsr_ccb->ccb_h;
1172 
1173 	req = vtscsi_dequeue_request(sc);
1174 	if (req == NULL) {
1175 		error = ENOBUFS;
1176 		goto fail;
1177 	}
1178 
1179 	tmf_req = &req->vsr_tmf_req;
1180 	tmf_resp = &req->vsr_tmf_resp;
1181 
1182 	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1183 	    (uintptr_t) to_ccbh, tmf_req);
1184 
1185 	sglist_reset(sg);
1186 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1187 	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1188 
1189 	req->vsr_timedout_req = to_req;
1190 	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
1191 	tmf_resp->response = -1;
1192 
1193 	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1194 	    VTSCSI_EXECUTE_ASYNC);
1195 	if (error == 0)
1196 		return (0);
1197 
1198 	vtscsi_enqueue_request(sc, req);
1199 
1200 fail:
1201 	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
1202 	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);
1203 
1204 	return (error);
1205 }
1206 
1207 static void
1208 vtscsi_timedout_scsi_cmd(void *xreq)
1209 {
1210 	struct vtscsi_softc *sc;
1211 	struct vtscsi_request *to_req;
1212 
1213 	to_req = xreq;
1214 	sc = to_req->vsr_softc;
1215 
1216 	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
1217 	    to_req, to_req->vsr_ccb, to_req->vsr_state);
1218 
1219 	/* Don't bother if the device is going away. */
1220 	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1221 		return;
1222 
1223 	/*
1224 	 * Bail if the request is not in use. We likely raced when
1225 	 * stopping the callout handler or it has already been aborted.
1226 	 */
1227 	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
1228 	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
1229 		return;
1230 
1231 	/*
1232 	 * Complete the request queue in case the timedout request is
1233 	 * actually just pending.
1234 	 */
1235 	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1236 	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
1237 		return;
1238 
1239 	sc->vtscsi_stats.scsi_cmd_timeouts++;
1240 	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;
1241 
1242 	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
1243 		return;
1244 
1245 	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
1246 	vtscsi_reset_bus(sc);
1247 }
1248 
1249 static cam_status
1250 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1251 {
1252 	cam_status status;
1253 
1254 	switch (cmd_resp->response) {
1255 	case VIRTIO_SCSI_S_OK:
1256 		status = CAM_REQ_CMP;
1257 		break;
1258 	case VIRTIO_SCSI_S_OVERRUN:
1259 		status = CAM_DATA_RUN_ERR;
1260 		break;
1261 	case VIRTIO_SCSI_S_ABORTED:
1262 		status = CAM_REQ_ABORTED;
1263 		break;
1264 	case VIRTIO_SCSI_S_BAD_TARGET:
1265 		status = CAM_SEL_TIMEOUT;
1266 		break;
1267 	case VIRTIO_SCSI_S_RESET:
1268 		status = CAM_SCSI_BUS_RESET;
1269 		break;
1270 	case VIRTIO_SCSI_S_BUSY:
1271 		status = CAM_SCSI_BUSY;
1272 		break;
1273 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1274 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1275 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1276 		status = CAM_SCSI_IT_NEXUS_LOST;
1277 		break;
1278 	default: /* VIRTIO_SCSI_S_FAILURE */
1279 		status = CAM_REQ_CMP_ERR;
1280 		break;
1281 	}
1282 
1283 	return (status);
1284 }
1285 
1286 static cam_status
1287 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1288     struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1289 {
1290 	cam_status status;
1291 
1292 	csio->scsi_status = cmd_resp->status;
1293 	csio->resid = cmd_resp->resid;
1294 
1295 	if (csio->scsi_status == SCSI_STATUS_OK)
1296 		status = CAM_REQ_CMP;
1297 	else
1298 		status = CAM_SCSI_STATUS_ERROR;
1299 
1300 	if (cmd_resp->sense_len > 0) {
1301 		status |= CAM_AUTOSNS_VALID;
1302 
1303 		if (cmd_resp->sense_len < csio->sense_len)
1304 			csio->sense_resid = csio->sense_len -
1305 			    cmd_resp->sense_len;
1306 		else
1307 			csio->sense_resid = 0;
1308 
1309 		bzero(&csio->sense_data, sizeof(csio->sense_data));
1310 		memcpy(cmd_resp->sense, &csio->sense_data,
1311 		    csio->sense_len - csio->sense_resid);
1312 	}
1313 
1314 	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1315 	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1316 	    csio, csio->scsi_status, csio->resid, csio->sense_resid);
1317 
1318 	return (status);
1319 }
1320 
1321 static void
1322 vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1323 {
1324 	struct ccb_hdr *ccbh;
1325 	struct ccb_scsiio *csio;
1326 	struct virtio_scsi_cmd_resp *cmd_resp;
1327 	cam_status status;
1328 
1329 	csio = &req->vsr_ccb->csio;
1330 	ccbh = &csio->ccb_h;
1331 	cmd_resp = &req->vsr_cmd_resp;
1332 
1333 	KASSERT(ccbh->ccbh_vtscsi_req == req,
1334 	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));
1335 
1336 	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1337 		callout_stop(&req->vsr_callout);
1338 
1339 	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
1340 	if (status == CAM_REQ_ABORTED) {
1341 		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
1342 			status = CAM_CMD_TIMEOUT;
1343 	} else if (status == CAM_REQ_CMP)
1344 		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);
1345 
1346 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1347 		status |= CAM_DEV_QFRZN;
1348 		xpt_freeze_devq(ccbh->path, 1);
1349 	}
1350 
1351 	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
1352 		status |= CAM_RELEASE_SIMQ;
1353 
1354 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
1355 	    req, ccbh, status);
1356 
1357 	ccbh->status = status;
1358 	xpt_done(req->vsr_ccb);
1359 	vtscsi_enqueue_request(sc, req);
1360 }
1361 
1362 static void
1363 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1364 {
1365 
1366 	/* XXX We probably shouldn't poll forever. */
1367 	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1368 	do
1369 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1370 	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1371 
1372 	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1373 }
1374 
1375 static int
1376 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1377     struct sglist *sg, int readable, int writable, int flag)
1378 {
1379 	struct virtqueue *vq;
1380 	int error;
1381 
1382 	vq = sc->vtscsi_control_vq;
1383 
1384 	KKASSERT(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1385 
1386 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1387 	if (error) {
1388 		/*
1389 		 * Return EAGAIN when the virtqueue does not have enough
1390 		 * descriptors available.
1391 		 */
1392 		if (error == ENOSPC || error == EMSGSIZE)
1393 			error = EAGAIN;
1394 
1395 		return (error);
1396 	}
1397 
1398 	virtqueue_notify(vq, NULL);
1399 	if (flag == VTSCSI_EXECUTE_POLL)
1400 		vtscsi_poll_ctrl_req(sc, req);
1401 
1402 	return (0);
1403 }
1404 
1405 static void
1406 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1407     struct vtscsi_request *req)
1408 {
1409 	union ccb *ccb;
1410 	struct ccb_hdr *ccbh;
1411 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1412 
1413 	ccb = req->vsr_ccb;
1414 	ccbh = &ccb->ccb_h;
1415 	tmf_resp = &req->vsr_tmf_resp;
1416 
1417 	switch (tmf_resp->response) {
1418 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1419 		ccbh->status = CAM_REQ_CMP;
1420 		break;
1421 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1422 		ccbh->status = CAM_UA_ABORT;
1423 		break;
1424 	default:
1425 		ccbh->status = CAM_REQ_CMP_ERR;
1426 		break;
1427 	}
1428 
1429 	xpt_done(ccb);
1430 	vtscsi_enqueue_request(sc, req);
1431 }
1432 
1433 static int
1434 vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
1435     struct vtscsi_request *req)
1436 {
1437 	struct sglist *sg;
1438 	struct ccb_abort *cab;
1439 	struct ccb_hdr *ccbh;
1440 	struct ccb_hdr *abort_ccbh;
1441 	struct vtscsi_request *abort_req;
1442 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
1443 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1444 	int error;
1445 
1446 	sg = sc->vtscsi_sglist;
1447 	cab = &req->vsr_ccb->cab;
1448 	ccbh = &cab->ccb_h;
1449 	tmf_req = &req->vsr_tmf_req;
1450 	tmf_resp = &req->vsr_tmf_resp;
1451 
1452 	/* CCB header and request that's to be aborted. */
1453 	abort_ccbh = &cab->abort_ccb->ccb_h;
1454 	abort_req = abort_ccbh->ccbh_vtscsi_req;
1455 
1456 	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
1457 		error = EINVAL;
1458 		goto fail;
1459 	}
1460 
1461 	/* Only attempt to abort requests that could be in-flight. */
1462 	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
1463 		error = EALREADY;
1464 		goto fail;
1465 	}
1466 
1467 	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
1468 	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1469 		callout_stop(&abort_req->vsr_callout);
1470 
1471 	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1472 	    (uintptr_t) abort_ccbh, tmf_req);
1473 
1474 	sglist_reset(sg);
1475 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1476 	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1477 
1478 	req->vsr_complete = vtscsi_complete_abort_task_cmd;
1479 	tmf_resp->response = -1;
1480 
1481 	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1482 	    VTSCSI_EXECUTE_ASYNC);
1483 
1484 fail:
1485 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
1486 	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);
1487 
1488 	return (error);
1489 }
1490 
1491 static void
1492 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1493     struct vtscsi_request *req)
1494 {
1495 	union ccb *ccb;
1496 	struct ccb_hdr *ccbh;
1497 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1498 
1499 	ccb = req->vsr_ccb;
1500 	ccbh = &ccb->ccb_h;
1501 	tmf_resp = &req->vsr_tmf_resp;
1502 
1503 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1504 	    req, ccb, tmf_resp->response);
1505 
1506 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1507 		ccbh->status = CAM_REQ_CMP;
1508 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1509 		    ccbh->target_lun);
1510 	} else
1511 		ccbh->status = CAM_REQ_CMP_ERR;
1512 
1513 	xpt_done(ccb);
1514 	vtscsi_enqueue_request(sc, req);
1515 }
1516 
1517 static int
1518 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1519     struct vtscsi_request *req)
1520 {
1521 	struct sglist *sg;
1522 	struct ccb_resetdev *crd;
1523 	struct ccb_hdr *ccbh;
1524 	struct virtio_scsi_ctrl_tmf_req *tmf_req;
1525 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1526 	uint32_t subtype;
1527 	int error;
1528 
1529 	sg = sc->vtscsi_sglist;
1530 	crd = &req->vsr_ccb->crd;
1531 	ccbh = &crd->ccb_h;
1532 	tmf_req = &req->vsr_tmf_req;
1533 	tmf_resp = &req->vsr_tmf_resp;
1534 
1535 	if (ccbh->target_lun == CAM_LUN_WILDCARD)
1536 		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1537 	else
1538 		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
1539 
1540 	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
1541 
1542 	sglist_reset(sg);
1543 	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1544 	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1545 
1546 	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
1547 	tmf_resp->response = -1;
1548 
1549 	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1550 	    VTSCSI_EXECUTE_ASYNC);
1551 
1552 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
1553 	    error, req, ccbh);
1554 
1555 	return (error);
1556 }
1557 
1558 static void
1559 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1560 {
1561 
1562 	*target_id = lun[1];
1563 	*lun_id = (lun[2] << 8) | lun[3];
1564 }
1565 
1566 static void
1567 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1568 {
1569 
1570 	lun[0] = 1;
1571 	lun[1] = ccbh->target_id;
1572 	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1573 	lun[3] = ccbh->target_lun & 0xFF;
1574 }
1575 
1576 static void
1577 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1578     struct virtio_scsi_cmd_req *cmd_req)
1579 {
1580 	uint8_t attr;
1581 
1582 	switch (csio->tag_action) {
1583 	case MSG_HEAD_OF_Q_TAG:
1584 		attr = VIRTIO_SCSI_S_HEAD;
1585 		break;
1586 	case MSG_ORDERED_Q_TAG:
1587 		attr = VIRTIO_SCSI_S_ORDERED;
1588 		break;
1589 	case MSG_ACA_TASK:
1590 		attr = VIRTIO_SCSI_S_ACA;
1591 		break;
1592 	default: /* MSG_SIMPLE_Q_TAG */
1593 		attr = VIRTIO_SCSI_S_SIMPLE;
1594 		break;
1595 	}
1596 
1597 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1598 	cmd_req->tag = (uintptr_t) csio;
1599 	cmd_req->task_attr = attr;
1600 
1601 	memcpy(cmd_req->cdb,
1602 	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1603 	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1604 	    csio->cdb_len);
1605 }
1606 
1607 static void
1608 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
1609     uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1610 {
1611 
1612 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
1613 
1614 	tmf_req->type = VIRTIO_SCSI_T_TMF;
1615 	tmf_req->subtype = subtype;
1616 	tmf_req->tag = tag;
1617 }
1618 
1619 static void
1620 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1621 {
1622 	int frozen;
1623 
1624 	frozen = sc->vtscsi_frozen;
1625 
1626 	if (reason & VTSCSI_REQUEST &&
1627 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1628 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1629 
1630 	if (reason & VTSCSI_REQUEST_VQ &&
1631 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1632 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1633 
1634 	/* Freeze the SIMQ if transitioned to frozen. */
1635 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1636 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1637 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1638 	}
1639 }
1640 
1641 static int
1642 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1643 {
1644 	int thawed;
1645 
1646 	if (sc->vtscsi_frozen == 0 || reason == 0)
1647 		return (0);
1648 
1649 	if (reason & VTSCSI_REQUEST &&
1650 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1651 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1652 
1653 	if (reason & VTSCSI_REQUEST_VQ &&
1654 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1655 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1656 
1657 	thawed = sc->vtscsi_frozen == 0;
1658 	if (thawed != 0)
1659 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1660 
1661 	return (thawed);
1662 }
1663 
1664 static void
1665 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1666     target_id_t target_id, lun_id_t lun_id)
1667 {
1668 	struct cam_path *path;
1669 
1670 	/* Use the wildcard path from our softc for bus announcements. */
1671 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1672 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1673 		return;
1674 	}
1675 
1676 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1677 	    target_id, lun_id) != CAM_REQ_CMP) {
1678 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1679 		return;
1680 	}
1681 
1682 	xpt_async(ac_code, path, NULL);
1683 	xpt_free_path(path);
1684 }
1685 
1686 static void
1687 vtscsi_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
1688 {
1689 	xpt_free_path(ccb->ccb_h.path);
1690 	xpt_free_ccb(ccb);
1691 }
1692 
1693 static void
1694 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1695     lun_id_t lun_id)
1696 {
1697 	union ccb *ccb;
1698 	cam_status status;
1699 
1700 	ccb = xpt_alloc_ccb();
1701 	if (ccb == NULL) {
1702 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1703 		return;
1704 	}
1705 
1706 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1707 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1708 	if (status != CAM_REQ_CMP) {
1709 		xpt_free_ccb(ccb);
1710 		return;
1711 	}
1712 
1713 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1714 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
1715 	ccb->ccb_h.cbfcnp = vtscsi_cam_rescan_callback;
1716 	ccb->crcn.flags = CAM_FLAG_NONE;
1717 	xpt_action(ccb);
1718 }
1719 
1720 static void
1721 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
1722 {
1723 	union ccb *ccb;
1724 	cam_status status;
1725 
1726 	ccb = xpt_alloc_ccb();
1727 	if (ccb == NULL) {
1728 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1729 		return;
1730 	}
1731 
1732 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1733 	    cam_sim_path(sc->vtscsi_sim),
1734 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
1735 	if (status != CAM_REQ_CMP) {
1736 		xpt_free_ccb(ccb);
1737 		return;
1738 	}
1739 
1740 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1741 	ccb->ccb_h.func_code = XPT_SCAN_BUS;
1742 	ccb->ccb_h.cbfcnp = vtscsi_cam_rescan_callback;
1743 	ccb->crcn.flags = CAM_FLAG_NONE;
1744 	xpt_action(ccb);
1745 }
1746 
1747 static void
1748 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1749     struct virtio_scsi_event *event)
1750 {
1751 	target_id_t target_id;
1752 	lun_id_t lun_id;
1753 
1754 	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1755 
1756 	switch (event->reason) {
1757 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1758 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1759 		vtscsi_execute_rescan(sc, target_id, lun_id);
1760 		break;
1761 	default:
1762 		device_printf(sc->vtscsi_dev,
1763 		    "unhandled transport event reason: %d\n", event->reason);
1764 		break;
1765 	}
1766 }
1767 
1768 static void
1769 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1770 {
1771 	int error;
1772 
1773 	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1774 		switch (event->event) {
1775 		case VIRTIO_SCSI_T_TRANSPORT_RESET:
1776 			vtscsi_transport_reset_event(sc, event);
1777 			break;
1778 		default:
1779 			device_printf(sc->vtscsi_dev,
1780 			    "unhandled event: %d\n", event->event);
1781 			break;
1782 		}
1783 	} else
1784 		vtscsi_execute_rescan_bus(sc);
1785 
1786 	/*
1787 	 * This should always be successful since the buffer
1788 	 * was just dequeued.
1789 	 */
1790 	error = vtscsi_enqueue_event_buf(sc, event);
1791 	KASSERT(error == 0,
1792 	    ("cannot requeue event buffer: %d", error));
1793 }
1794 
1795 static int
1796 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1797     struct virtio_scsi_event *event)
1798 {
1799 	struct sglist *sg;
1800 	struct virtqueue *vq;
1801 	int size, error;
1802 
1803 	sg = sc->vtscsi_sglist;
1804 	vq = sc->vtscsi_event_vq;
1805 	size = sc->vtscsi_event_buf_size;
1806 
1807 	bzero(event, size);
1808 
1809 	sglist_reset(sg);
1810 	error = sglist_append(sg, event, size);
1811 	if (error)
1812 		return (error);
1813 
1814 	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1815 	if (error)
1816 		return (error);
1817 
1818 	virtqueue_notify(vq, NULL);
1819 
1820 	return (0);
1821 }
1822 
1823 static int
1824 vtscsi_init_event_vq(struct vtscsi_softc *sc)
1825 {
1826 	struct virtio_scsi_event *event;
1827 	int i, size, error;
1828 
1829 	/*
1830 	 * The first release of QEMU with VirtIO SCSI support would crash
1831 	 * when attempting to notify the event virtqueue. This was fixed
1832 	 * when hotplug support was added.
1833 	 */
1834 	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
1835 		size = sc->vtscsi_event_buf_size;
1836 	else
1837 		size = 0;
1838 
1839 	if (size < sizeof(struct virtio_scsi_event))
1840 		return (0);
1841 
1842 	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1843 		event = &sc->vtscsi_event_bufs[i];
1844 
1845 		error = vtscsi_enqueue_event_buf(sc, event);
1846 		if (error)
1847 			break;
1848 	}
1849 
1850 	/*
1851 	 * Even just one buffer is enough. Missed events are
1852 	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
1853 	 */
1854 	if (i > 0)
1855 		error = 0;
1856 
1857 	return (error);
1858 }
1859 
1860 static void
1861 vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
1862 {
1863 	struct virtio_scsi_event *event;
1864 	int i, error;
1865 
1866 	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
1867 	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
1868 		return;
1869 
1870 	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1871 		event = &sc->vtscsi_event_bufs[i];
1872 
1873 		error = vtscsi_enqueue_event_buf(sc, event);
1874 		if (error)
1875 			break;
1876 	}
1877 
1878 	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
1879 }
1880 
1881 static void
1882 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1883 {
1884 	struct virtqueue *vq;
1885 	int last;
1886 
1887 	vq = sc->vtscsi_event_vq;
1888 	last = 0;
1889 
1890 	while (virtqueue_drain(vq, &last) != NULL)
1891 		;
1892 
1893 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1894 }
1895 
1896 static void
1897 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1898 {
1899 
1900 	VTSCSI_LOCK_OWNED(sc);
1901 
1902 	if (sc->vtscsi_request_vq != NULL)
1903 		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1904 	if (sc->vtscsi_control_vq != NULL)
1905 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1906 }
1907 
1908 static void
1909 vtscsi_complete_vqs(struct vtscsi_softc *sc)
1910 {
1911 
1912 	VTSCSI_LOCK(sc);
1913 	vtscsi_complete_vqs_locked(sc);
1914 	VTSCSI_UNLOCK(sc);
1915 }
1916 
1917 static void
1918 vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
1919 {
1920 	union ccb *ccb;
1921 	int detach;
1922 
1923 	ccb = req->vsr_ccb;
1924 
1925 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);
1926 
1927 	/*
1928 	 * The callout must be drained when detaching since the request is
1929 	 * about to be freed. The VTSCSI_MTX must not be held for this in
1930 	 * case the callout is pending because there is a deadlock potential.
1931 	 * Otherwise, the virtqueue is being drained because of a bus reset
1932 	 * so we only need to attempt to stop the callouts.
1933 	 */
1934 	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
1935 	if (detach != 0)
1936 		VTSCSI_LOCK_NOTOWNED(sc);
1937 	else
1938 		VTSCSI_LOCK_OWNED(sc);
1939 
1940 	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
1941 		if (detach != 0)
1942 			callout_drain(&req->vsr_callout);
1943 		else
1944 			callout_stop(&req->vsr_callout);
1945 	}
1946 
1947 	if (ccb != NULL) {
1948 		if (detach != 0) {
1949 			VTSCSI_LOCK(sc);
1950 			ccb->ccb_h.status = CAM_NO_HBA;
1951 		} else
1952 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
1953 		xpt_done(ccb);
1954 		if (detach != 0)
1955 			VTSCSI_UNLOCK(sc);
1956 	}
1957 
1958 	vtscsi_enqueue_request(sc, req);
1959 }
1960 
1961 static void
1962 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1963 {
1964 	struct vtscsi_request *req;
1965 	int last;
1966 
1967 	last = 0;
1968 
1969 	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1970 
1971 	while ((req = virtqueue_drain(vq, &last)) != NULL)
1972 		vtscsi_cancel_request(sc, req);
1973 
1974 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1975 }
1976 
1977 static void
1978 vtscsi_drain_vqs(struct vtscsi_softc *sc)
1979 {
1980 
1981 	if (sc->vtscsi_control_vq != NULL)
1982 		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
1983 	if (sc->vtscsi_request_vq != NULL)
1984 		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
1985 	if (sc->vtscsi_event_vq != NULL)
1986 		vtscsi_drain_event_vq(sc);
1987 }
1988 
1989 static void
1990 vtscsi_stop(struct vtscsi_softc *sc)
1991 {
1992 
1993 	vtscsi_disable_vqs_intr(sc);
1994 	virtio_stop(sc->vtscsi_dev);
1995 }
1996 
1997 static int
1998 vtscsi_reset_bus(struct vtscsi_softc *sc)
1999 {
2000 	int error;
2001 
2002 	VTSCSI_LOCK_OWNED(sc);
2003 
2004 	if (vtscsi_bus_reset_disable != 0) {
2005 		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
2006 		return (0);
2007 	}
2008 
2009 	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;
2010 
2011 	/*
2012 	 * vtscsi_stop() will cause the in-flight requests to be canceled.
2013 	 * Those requests are then completed here so CAM will retry them
2014 	 * after the reset is complete.
2015 	 */
2016 	vtscsi_stop(sc);
2017 	vtscsi_complete_vqs_locked(sc);
2018 
2019 	/* Rid the virtqueues of any remaining requests. */
2020 	vtscsi_drain_vqs(sc);
2021 
2022 	/*
2023 	 * Any resource shortage that froze the SIMQ cannot persist across
2024 	 * a bus reset so ensure it gets thawed here.
2025 	 */
2026 	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
2027 		xpt_release_simq(sc->vtscsi_sim, 0);
2028 
2029 	error = vtscsi_reinit(sc);
2030 	if (error) {
2031 		device_printf(sc->vtscsi_dev,
2032 		    "reinitialization failed, stopping device...\n");
2033 		vtscsi_stop(sc);
2034 	} else
2035 		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
2036 		    CAM_LUN_WILDCARD);
2037 
2038 	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;
2039 
2040 	return (error);
2041 }
2042 
2043 static void
2044 vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2045 {
2046 
2047 #ifdef INVARIANTS
2048 	int req_nsegs, resp_nsegs;
2049 
2050 	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
2051 	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));
2052 
2053 	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
2054 	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
2055 #endif
2056 
2057 	req->vsr_softc = sc;
2058 	callout_init_lk(&req->vsr_callout, VTSCSI_MTX(sc));
2059 }
2060 
2061 static int
2062 vtscsi_alloc_requests(struct vtscsi_softc *sc)
2063 {
2064 	struct vtscsi_request *req;
2065 	int i, nreqs;
2066 
2067 	/*
2068 	 * Commands destined for either the request or control queues come
2069 	 * from the same SIM queue. Use the size of the request virtqueue
2070 	 * as it (should) be much more frequently used. Some additional
2071 	 * requests are allocated for internal (TMF) use.
2072 	 */
2073 	nreqs = virtqueue_size(sc->vtscsi_request_vq);
2074 	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2075 		nreqs /= VTSCSI_MIN_SEGMENTS;
2076 	nreqs += VTSCSI_RESERVED_REQUESTS;
2077 
2078 	for (i = 0; i < nreqs; i++) {
2079 		req = contigmalloc(sizeof(struct vtscsi_request), M_DEVBUF,
2080 		    M_WAITOK, 0, BUS_SPACE_MAXADDR, 16, 0);
2081 		if (req == NULL)
2082 			return (ENOMEM);
2083 
2084 		vtscsi_init_request(sc, req);
2085 
2086 		sc->vtscsi_nrequests++;
2087 		vtscsi_enqueue_request(sc, req);
2088 	}
2089 
2090 	return (0);
2091 }
2092 
2093 static void
2094 vtscsi_free_requests(struct vtscsi_softc *sc)
2095 {
2096 	struct vtscsi_request *req;
2097 
2098 	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2099 		KASSERT(callout_active(&req->vsr_callout) == 0,
2100 		    ("request callout still active"));
2101 
2102 		sc->vtscsi_nrequests--;
2103 		contigfree(req, sizeof(struct vtscsi_request), M_DEVBUF);
2104 	}
2105 
2106 	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2107 	    sc->vtscsi_nrequests));
2108 }
2109 
2110 static void
2111 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2112 {
2113 
2114 	KASSERT(req->vsr_softc == sc,
2115 	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));
2116 
2117 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2118 
2119 	/* A request is available so the SIMQ could be released. */
2120 	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
2121 		xpt_release_simq(sc->vtscsi_sim, 1);
2122 
2123 	req->vsr_ccb = NULL;
2124 	req->vsr_complete = NULL;
2125 	req->vsr_ptr0 = NULL;
2126 	req->vsr_state = VTSCSI_REQ_STATE_FREE;
2127 	req->vsr_flags = 0;
2128 
2129 	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
2130 	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));
2131 
2132 	/*
2133 	 * We insert at the tail of the queue in order to make it
2134 	 * very unlikely a request will be reused if we race with
2135 	 * stopping its callout handler.
2136 	 */
2137 	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
2138 }
2139 
2140 static struct vtscsi_request *
2141 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2142 {
2143 	struct vtscsi_request *req;
2144 
2145 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2146 	if (req != NULL) {
2147 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2148 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2149 	} else
2150 		sc->vtscsi_stats.dequeue_no_requests++;
2151 
2152 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2153 
2154 	return (req);
2155 }
2156 
2157 static void
2158 vtscsi_complete_request(struct vtscsi_request *req)
2159 {
2160 
2161 	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2162 		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2163 
2164 	if (req->vsr_complete != NULL)
2165 		req->vsr_complete(req->vsr_softc, req);
2166 }
2167 
2168 static void
2169 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2170 {
2171 	struct vtscsi_request *req;
2172 
2173 	VTSCSI_LOCK_OWNED(sc);
2174 
2175 	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2176 		vtscsi_complete_request(req);
2177 }
2178 
2179 static int
2180 vtscsi_control_vq_intr(void *xsc)
2181 {
2182 	struct vtscsi_softc *sc;
2183 	struct virtqueue *vq;
2184 
2185 	sc = xsc;
2186 	vq = sc->vtscsi_control_vq;
2187 
2188 again:
2189 	VTSCSI_LOCK(sc);
2190 
2191 	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2192 
2193 	if (virtqueue_enable_intr(vq) != 0) {
2194 		virtqueue_disable_intr(vq);
2195 		VTSCSI_UNLOCK(sc);
2196 		goto again;
2197 	}
2198 
2199 	VTSCSI_UNLOCK(sc);
2200 
2201 	return (1);
2202 }
2203 
2204 static int
2205 vtscsi_event_vq_intr(void *xsc)
2206 {
2207 	struct vtscsi_softc *sc;
2208 	struct virtqueue *vq;
2209 	struct virtio_scsi_event *event;
2210 
2211 	sc = xsc;
2212 	vq = sc->vtscsi_event_vq;
2213 
2214 again:
2215 	VTSCSI_LOCK(sc);
2216 
2217 	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2218 		vtscsi_handle_event(sc, event);
2219 
2220 	if (virtqueue_enable_intr(vq) != 0) {
2221 		virtqueue_disable_intr(vq);
2222 		VTSCSI_UNLOCK(sc);
2223 		goto again;
2224 	}
2225 
2226 	VTSCSI_UNLOCK(sc);
2227 
2228 	return (1);
2229 }
2230 
2231 static int
2232 vtscsi_request_vq_intr(void *xsc)
2233 {
2234 	struct vtscsi_softc *sc;
2235 	struct virtqueue *vq;
2236 
2237 	sc = xsc;
2238 	vq = sc->vtscsi_request_vq;
2239 
2240 again:
2241 	VTSCSI_LOCK(sc);
2242 
2243 	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2244 
2245 	if (virtqueue_enable_intr(vq) != 0) {
2246 		virtqueue_disable_intr(vq);
2247 		VTSCSI_UNLOCK(sc);
2248 		goto again;
2249 	}
2250 
2251 	VTSCSI_UNLOCK(sc);
2252 
2253 	return (1);
2254 }
2255 
2256 static void
2257 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2258 {
2259 
2260 	virtqueue_disable_intr(sc->vtscsi_control_vq);
2261 	virtqueue_disable_intr(sc->vtscsi_event_vq);
2262 	virtqueue_disable_intr(sc->vtscsi_request_vq);
2263 }
2264 
2265 static void
2266 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2267 {
2268 
2269 	virtqueue_enable_intr(sc->vtscsi_control_vq);
2270 	virtqueue_enable_intr(sc->vtscsi_event_vq);
2271 	virtqueue_enable_intr(sc->vtscsi_request_vq);
2272 }
2273 
2274 static void
2275 vtscsi_get_tunables(struct vtscsi_softc *sc)
2276 {
2277 	char tmpstr[64];
2278 
2279 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2280 
2281 	ksnprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2282 	    device_get_unit(sc->vtscsi_dev));
2283 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2284 }
2285 
2286 static void
2287 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2288 {
2289 	device_t dev;
2290 	struct vtscsi_statistics *stats;
2291         struct sysctl_ctx_list *ctx;
2292 	struct sysctl_oid *tree;
2293 	struct sysctl_oid_list *child;
2294 
2295 	dev = sc->vtscsi_dev;
2296 	stats = &sc->vtscsi_stats;
2297 	ctx = device_get_sysctl_ctx(dev);
2298 	tree = device_get_sysctl_tree(dev);
2299 	child = SYSCTL_CHILDREN(tree);
2300 
2301 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2302 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2303 	    "Debug level");
2304 
2305 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2306 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2307 	    "SCSI command timeouts");
2308 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2309 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2310 	    "No available requests to dequeue");
2311 }
2312 
2313 static void
2314 vtscsi_printf_req(struct vtscsi_request *req, const char *func,
2315     const char *fmt, ...)
2316 {
2317 	struct vtscsi_softc *sc;
2318 	union ccb *ccb;
2319 	struct sbuf sb;
2320 	__va_list ap;
2321 	char str[192];
2322 	char path_str[64];
2323 
2324 	if (req == NULL)
2325 		return;
2326 
2327 	sc = req->vsr_softc;
2328 	ccb = req->vsr_ccb;
2329 
2330 	__va_start(ap, fmt);
2331 	sbuf_new(&sb, str, sizeof(str), 0);
2332 
2333 	if (ccb == NULL) {
2334 		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
2335 		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
2336 		    cam_sim_bus(sc->vtscsi_sim));
2337 	} else {
2338 		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2339 		sbuf_cat(&sb, path_str);
2340 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2341 			scsi_command_string(&ccb->csio, &sb);
2342 			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
2343 		}
2344 	}
2345 
2346 	sbuf_vprintf(&sb, fmt, ap);
2347 	__va_end(ap);
2348 
2349 	sbuf_finish(&sb);
2350 	kprintf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
2351 	    sbuf_data(&sb));
2352 }
2353