xref: /freebsd/sys/dev/virtio/scsi/virtio_scsi.c (revision e6cc42f1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* Driver for VirtIO SCSI devices. */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/sglist.h>
41 #include <sys/sysctl.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/callout.h>
45 #include <sys/queue.h>
46 #include <sys/sbuf.h>
47 
48 #include <machine/stdarg.h>
49 
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
54 
55 #include <cam/cam.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_sim.h>
60 #include <cam/cam_debug.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 
64 #include <dev/virtio/virtio.h>
65 #include <dev/virtio/virtqueue.h>
66 #include <dev/virtio/scsi/virtio_scsi.h>
67 #include <dev/virtio/scsi/virtio_scsivar.h>
68 
69 #include "virtio_if.h"
70 
71 static int	vtscsi_modevent(module_t, int, void *);
72 
73 static int	vtscsi_probe(device_t);
74 static int	vtscsi_attach(device_t);
75 static int	vtscsi_detach(device_t);
76 static int	vtscsi_suspend(device_t);
77 static int	vtscsi_resume(device_t);
78 
79 static int	vtscsi_negotiate_features(struct vtscsi_softc *);
80 static int	vtscsi_setup_features(struct vtscsi_softc *);
81 static void	vtscsi_read_config(struct vtscsi_softc *,
82 		    struct virtio_scsi_config *);
83 static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
84 static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
85 static void	vtscsi_check_sizes(struct vtscsi_softc *);
86 static void	vtscsi_write_device_config(struct vtscsi_softc *);
87 static int	vtscsi_reinit(struct vtscsi_softc *);
88 
89 static int	vtscsi_alloc_cam(struct vtscsi_softc *);
90 static int	vtscsi_register_cam(struct vtscsi_softc *);
91 static void	vtscsi_free_cam(struct vtscsi_softc *);
92 static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
93 static int	vtscsi_register_async(struct vtscsi_softc *);
94 static void	vtscsi_deregister_async(struct vtscsi_softc *);
95 static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
96 static void	vtscsi_cam_poll(struct cam_sim *);
97 
98 static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
99 		    union ccb *);
100 static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
101 		    union ccb *);
102 static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
103 static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
104 static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
105 static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
106 		    struct cam_sim *, union ccb *);
107 
108 static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
109 		    struct sglist *, struct ccb_scsiio *);
110 static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
111 		    struct vtscsi_request *, int *, int *);
112 static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
113 		    struct vtscsi_request *);
114 static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
115 static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
116 		    struct vtscsi_request *);
117 static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
118 		    struct vtscsi_request *);
119 static void	vtscsi_timedout_scsi_cmd(void *);
120 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
121 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
122 		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
123 static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
124 		    struct vtscsi_request *);
125 
126 static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
127 		    struct vtscsi_request *);
128 static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
129 		    struct vtscsi_request *, struct sglist *, int, int, int);
130 static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
131 		    struct vtscsi_request *);
132 static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
133 		    struct vtscsi_request *);
134 static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
135 		    struct vtscsi_request *);
136 
137 static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
138 static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
139 static void	vtscsi_init_scsi_cmd_req(struct vtscsi_softc *,
140 		    struct ccb_scsiio *, struct virtio_scsi_cmd_req *);
141 static void	vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *, struct ccb_hdr *,
142 		    uint32_t, uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
143 
144 static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
145 static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);
146 
147 static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
148 		    lun_id_t);
149 static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
150 		    lun_id_t);
151 static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);
152 
153 static void	vtscsi_handle_event(struct vtscsi_softc *,
154 		    struct virtio_scsi_event *);
155 static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
156 		    struct virtio_scsi_event *);
157 static int	vtscsi_init_event_vq(struct vtscsi_softc *);
158 static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
159 static void	vtscsi_drain_event_vq(struct vtscsi_softc *);
160 
161 static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
162 static void	vtscsi_complete_vqs(struct vtscsi_softc *);
163 static void	vtscsi_drain_vqs(struct vtscsi_softc *);
164 static void	vtscsi_cancel_request(struct vtscsi_softc *,
165 		    struct vtscsi_request *);
166 static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
167 static void	vtscsi_stop(struct vtscsi_softc *);
168 static int	vtscsi_reset_bus(struct vtscsi_softc *);
169 
170 static void	vtscsi_init_request(struct vtscsi_softc *,
171 		    struct vtscsi_request *);
172 static int	vtscsi_alloc_requests(struct vtscsi_softc *);
173 static void	vtscsi_free_requests(struct vtscsi_softc *);
174 static void	vtscsi_enqueue_request(struct vtscsi_softc *,
175 		    struct vtscsi_request *);
176 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
177 
178 static void	vtscsi_complete_request(struct vtscsi_request *);
179 static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
180 
181 static void	vtscsi_control_vq_intr(void *);
182 static void	vtscsi_event_vq_intr(void *);
183 static void	vtscsi_request_vq_intr(void *);
184 static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
185 static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);
186 
187 static void	vtscsi_get_tunables(struct vtscsi_softc *);
188 static void	vtscsi_setup_sysctl(struct vtscsi_softc *);
189 
190 static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
191 		    const char *, ...);
192 
193 #define vtscsi_modern(_sc) (((_sc)->vtscsi_features & VIRTIO_F_VERSION_1) != 0)
194 #define vtscsi_htog16(_sc, _val)	virtio_htog16(vtscsi_modern(_sc), _val)
195 #define vtscsi_htog32(_sc, _val)	virtio_htog32(vtscsi_modern(_sc), _val)
196 #define vtscsi_htog64(_sc, _val)	virtio_htog64(vtscsi_modern(_sc), _val)
197 #define vtscsi_gtoh16(_sc, _val)	virtio_gtoh16(vtscsi_modern(_sc), _val)
198 #define vtscsi_gtoh32(_sc, _val)	virtio_gtoh32(vtscsi_modern(_sc), _val)
199 #define vtscsi_gtoh64(_sc, _val)	virtio_gtoh64(vtscsi_modern(_sc), _val)
200 
201 /* Global tunables. */
202 /*
203  * The current QEMU VirtIO SCSI implementation does not cancel in-flight
204  * IO during virtio_stop(). So in-flight requests still complete after the
205  * device reset. We would have to wait for all the in-flight IO to complete,
206  * which defeats the typical purpose of a bus reset. We could simulate the
207  * bus reset with either I_T_NEXUS_RESET of all the targets, or with
208  * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
209  * control virtqueue). But this isn't very useful if things really go off
210  * the rails, so default to disabled for now.
211  */
/* Non-zero disables bus reset support; see rationale in the comment above. */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

/* Human-readable names for the VirtIO SCSI feature bits. */
static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},
	{ VIRTIO_SCSI_F_CHANGE,		"ChangeEvent"	},
	{ VIRTIO_SCSI_F_T10_PI, 	"T10PI"		},

	{ 0, NULL }
};

/* Newbus device method table. */
static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

/* The driver attaches via both the MMIO and PCI VirtIO transports. */
DRIVER_MODULE(virtio_scsi, virtio_mmio, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, 0);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_scsi);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_scsi);
253 
254 static int
255 vtscsi_modevent(module_t mod, int type, void *unused)
256 {
257 	int error;
258 
259 	switch (type) {
260 	case MOD_LOAD:
261 	case MOD_QUIESCE:
262 	case MOD_UNLOAD:
263 	case MOD_SHUTDOWN:
264 		error = 0;
265 		break;
266 	default:
267 		error = EOPNOTSUPP;
268 		break;
269 	}
270 
271 	return (error);
272 }
273 
/* Match any VirtIO device carrying the SCSI device ID (see PNP table). */
static int
vtscsi_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
}
279 
/*
 * Attach the device: negotiate features, size and allocate the
 * virtqueues and request pool, then hook up interrupts and register
 * with CAM. On any failure the partially initialized state is torn
 * down by falling through to vtscsi_detach().
 */
static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;
	virtio_set_feature_desc(dev, vtscsi_feature_desc);

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_setup_sysctl(sc);

	error = vtscsi_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	vtscsi_read_config(sc, &scsicfg);

	/* Cache the addressing limits the host advertised. */
	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	/* The sglist is sized to the largest request we will ever build. */
	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	/* May clamp vtscsi_max_nsegs against the request queue size. */
	vtscsi_check_sizes(sc);

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_CAM);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	/* Success also reaches here with error == 0; detach only on failure. */
	if (error)
		vtscsi_detach(dev);

	return (error);
}
370 
/*
 * Detach the device. Also used as the error-unwind path of
 * vtscsi_attach(), so it must tolerate partially initialized state
 * (the free routines below check for NULL/absent resources).
 */
static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;

	sc = device_get_softc(dev);

	/* Flag the detach first so vtscsi_cam_action() rejects new CCBs. */
	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	/* Complete and then drain anything still sitting in the virtqueues. */
	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}
399 
static int
vtscsi_suspend(device_t dev)
{

	/* No driver-specific state needs saving across a suspend. */
	return (0);
}
406 
static int
vtscsi_resume(device_t dev)
{

	/* No driver-specific state needs restoring after a resume. */
	return (0);
}
413 
414 static int
415 vtscsi_negotiate_features(struct vtscsi_softc *sc)
416 {
417 	device_t dev;
418 	uint64_t features;
419 
420 	dev = sc->vtscsi_dev;
421 	features = VTSCSI_FEATURES;
422 
423 	sc->vtscsi_features = virtio_negotiate_features(dev, features);
424 	return (virtio_finalize_features(dev));
425 }
426 
427 static int
428 vtscsi_setup_features(struct vtscsi_softc *sc)
429 {
430 	device_t dev;
431 	int error;
432 
433 	dev = sc->vtscsi_dev;
434 
435 	error = vtscsi_negotiate_features(sc);
436 	if (error)
437 		return (error);
438 
439 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
440 		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
441 	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
442 		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
443 	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
444 		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
445 
446 	return (0);
447 }
448 
/*
 * Read one field of struct virtio_scsi_config from the device's config
 * space into the matching field of the supplied structure.
 */
#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

/* Fetch the full device configuration, field by field. */
static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG
477 
478 static int
479 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
480 {
481 	int nsegs;
482 
483 	nsegs = VTSCSI_MIN_SEGMENTS;
484 
485 	if (seg_max > 0) {
486 		nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
487 		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
488 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
489 	} else
490 		nsegs += 1;
491 
492 	return (nsegs);
493 }
494 
/*
 * Allocate the three virtqueues: control (index 0), event (index 1),
 * and request (index 2). Only the request queue is sized for
 * multi-segment (indirect) transfers via vtscsi_max_nsegs.
 */
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtscsi_dev;
	nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
	    &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
	    &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
517 
518 static void
519 vtscsi_check_sizes(struct vtscsi_softc *sc)
520 {
521 	int rqsize;
522 
523 	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
524 		/*
525 		 * Ensure the assertions in virtqueue_enqueue(),
526 		 * even if the hypervisor reports a bad seg_max.
527 		 */
528 		rqsize = virtqueue_size(sc->vtscsi_request_vq);
529 		if (sc->vtscsi_max_nsegs > rqsize) {
530 			device_printf(sc->vtscsi_dev,
531 			    "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
532 			    rqsize);
533 			sc->vtscsi_max_nsegs = rqsize;
534 		}
535 	}
536 }
537 
/* Tell the device the sense and CDB sizes this driver uses. */
static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}
554 
555 static int
556 vtscsi_reinit(struct vtscsi_softc *sc)
557 {
558 	device_t dev;
559 	int error;
560 
561 	dev = sc->vtscsi_dev;
562 
563 	error = virtio_reinit(dev, sc->vtscsi_features);
564 	if (error == 0) {
565 		vtscsi_write_device_config(sc);
566 		virtio_reinit_complete(dev);
567 		vtscsi_reinit_event_vq(sc);
568 
569 		vtscsi_enable_vqs_intr(sc);
570 	}
571 
572 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
573 
574 	return (error);
575 }
576 
577 static int
578 vtscsi_alloc_cam(struct vtscsi_softc *sc)
579 {
580 	device_t dev;
581 	struct cam_devq *devq;
582 	int openings;
583 
584 	dev = sc->vtscsi_dev;
585 	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
586 
587 	devq = cam_simq_alloc(openings);
588 	if (devq == NULL) {
589 		device_printf(dev, "cannot allocate SIM queue\n");
590 		return (ENOMEM);
591 	}
592 
593 	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
594 	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
595 	    openings, devq);
596 	if (sc->vtscsi_sim == NULL) {
597 		cam_simq_free(devq);
598 		device_printf(dev, "cannot allocate SIM\n");
599 		return (ENOMEM);
600 	}
601 
602 	return (0);
603 }
604 
/*
 * Register the SIM with the XPT layer, create the wildcard bus path,
 * and subscribe to async events. On failure, undo whichever of those
 * steps already succeeded.
 */
static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	/* Remember the bus is registered so the fail path can undo it. */
	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}
655 
/*
 * Undo vtscsi_register_cam() and vtscsi_alloc_cam(). A non-NULL
 * vtscsi_path indicates the bus was successfully registered, so this
 * is safe to call on a partially initialized softc.
 */
static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim, 1);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}
678 
679 static void
680 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
681 {
682 	struct cam_sim *sim;
683 	struct vtscsi_softc *sc;
684 
685 	sim = cb_arg;
686 	sc = cam_sim_softc(sim);
687 
688 	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
689 
690 	/*
691 	 * TODO Once QEMU supports event reporting, we should
692 	 *      (un)subscribe to events here.
693 	 */
694 	switch (code) {
695 	case AC_FOUND_DEVICE:
696 		break;
697 	case AC_LOST_DEVICE:
698 		break;
699 	}
700 }
701 
702 static int
703 vtscsi_register_async(struct vtscsi_softc *sc)
704 {
705 	struct ccb_setasync csa;
706 
707 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
708 	csa.ccb_h.func_code = XPT_SASYNC_CB;
709 	csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
710 	csa.callback = vtscsi_cam_async;
711 	csa.callback_arg = sc->vtscsi_sim;
712 
713 	xpt_action((union ccb *) &csa);
714 
715 	return (csa.ccb_h.status);
716 }
717 
718 static void
719 vtscsi_deregister_async(struct vtscsi_softc *sc)
720 {
721 	struct ccb_setasync csa;
722 
723 	xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
724 	csa.ccb_h.func_code = XPT_SASYNC_CB;
725 	csa.event_enable = 0;
726 	csa.callback = vtscsi_cam_async;
727 	csa.callback_arg = sc->vtscsi_sim;
728 
729 	xpt_action((union ccb *) &csa);
730 }
731 
/*
 * CAM SIM action entry point: dispatch an incoming CCB to the handler
 * for its function code. Called with the SIM lock (VTSCSI_MTX) held.
 */
static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* Transport settings are fixed; see get_tran_settings. */
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
798 
/* CAM polling entry point: service completions on all virtqueues. */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{

	vtscsi_complete_vqs_locked(cam_sim_softc(sim));
}
808 
809 static void
810 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
811     union ccb *ccb)
812 {
813 	struct ccb_hdr *ccbh;
814 	struct ccb_scsiio *csio;
815 	int error;
816 
817 	ccbh = &ccb->ccb_h;
818 	csio = &ccb->csio;
819 
820 	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
821 		error = EINVAL;
822 		ccbh->status = CAM_REQ_INVALID;
823 		goto done;
824 	}
825 
826 	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
827 	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
828 		error = EINVAL;
829 		ccbh->status = CAM_REQ_INVALID;
830 		goto done;
831 	}
832 
833 	error = vtscsi_start_scsi_cmd(sc, ccb);
834 
835 done:
836 	if (error) {
837 		vtscsi_dprintf(sc, VTSCSI_ERROR,
838 		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
839 		xpt_done(ccb);
840 	}
841 }
842 
843 static void
844 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
845 {
846 	struct ccb_trans_settings *cts;
847 	struct ccb_trans_settings_scsi *scsi;
848 
849 	cts = &ccb->cts;
850 	scsi = &cts->proto_specific.scsi;
851 
852 	cts->protocol = PROTO_SCSI;
853 	cts->protocol_version = SCSI_REV_SPC3;
854 	cts->transport = XPORT_SAS;
855 	cts->transport_version = 0;
856 
857 	scsi->valid = CTS_SCSI_VALID_TQ;
858 	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
859 
860 	ccb->ccb_h.status = CAM_REQ_CMP;
861 	xpt_done(ccb);
862 }
863 
864 static void
865 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
866 {
867 	int error;
868 
869 	error = vtscsi_reset_bus(sc);
870 	if (error == 0)
871 		ccb->ccb_h.status = CAM_REQ_CMP;
872 	else
873 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
874 
875 	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
876 	    error, ccb, ccb->ccb_h.status);
877 
878 	xpt_done(ccb);
879 }
880 
/*
 * Handle XPT_RESET_DEV by issuing a LOGICAL_UNIT_RESET TMF through the
 * control virtqueue. On success the request's completion handler
 * finishes the CCB; otherwise the CCB is completed here.
 */
static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* Out of requests; freeze the SIM queue until one frees up. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
916 
/*
 * Handle XPT_ABORT by issuing an ABORT_TASK TMF through the control
 * virtqueue. Mirrors vtscsi_cam_reset_dev(): on success the request's
 * completion handler finishes the CCB; otherwise it is completed here.
 */
static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		/* Out of requests; freeze the SIM queue until one frees up. */
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	/* Submission failed; return the request to the free list. */
	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}
952 
/*
 * Handle XPT_PATH_INQ: describe the HBA's capabilities and limits to
 * the CAM layer.
 */
static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
	/* Advertise no bus-reset support when the tunable disables it. */
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	/* Limits taken from the device config at attach time. */
	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	cpi->initiator_id = cpi->max_target + 1;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	/* Largest I/O given the data segments left after header/response. */
	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}
1002 
1003 static int
1004 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
1005     struct ccb_scsiio *csio)
1006 {
1007 	struct ccb_hdr *ccbh;
1008 	struct bus_dma_segment *dseg;
1009 	int i, error;
1010 
1011 	ccbh = &csio->ccb_h;
1012 	error = 0;
1013 
1014 	switch ((ccbh->flags & CAM_DATA_MASK)) {
1015 	case CAM_DATA_VADDR:
1016 		error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
1017 		break;
1018 	case CAM_DATA_PADDR:
1019 		error = sglist_append_phys(sg,
1020 		    (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
1021 		break;
1022 	case CAM_DATA_SG:
1023 		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
1024 			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
1025 			error = sglist_append(sg,
1026 			    (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
1027 		}
1028 		break;
1029 	case CAM_DATA_SG_PADDR:
1030 		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
1031 			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
1032 			error = sglist_append_phys(sg,
1033 			    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
1034 		}
1035 		break;
1036 	case CAM_DATA_BIO:
1037 		error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
1038 		break;
1039 	default:
1040 		error = EINVAL;
1041 		break;
1042 	}
1043 
1044 	return (error);
1045 }
1046 
/*
 * Build the sglist for a SCSI command in the layout the device
 * expects: the readable (driver-to-device) part is the command header
 * plus any OUT data, followed by the writable (device-to-driver) part:
 * the response structure plus any IN data. The split is returned via
 * *readable and *writable for virtqueue_enqueue().
 */
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	/* Everything appended after the readable split is writable. */
	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}
1102 
/*
 * Initialize, enqueue, and notify a SCSI command on the request virtqueue.
 * On success a timeout callout is armed unless the CCB requested an
 * infinite timeout. On enqueue failure the CCB is flagged for requeue and
 * the SIMQ is frozen; the caller returns the request to the free list.
 */
static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(sc, csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	/* Preset to an out-of-band value; presumably overwritten on completion. */
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	/* Link the CCB back to its request for abort lookups. */
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		/* CCB timeout is in milliseconds. */
		callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
		    0, vtscsi_timedout_scsi_cmd, req, 0);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}
1156 
1157 static int
1158 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1159 {
1160 	struct vtscsi_request *req;
1161 	int error;
1162 
1163 	req = vtscsi_dequeue_request(sc);
1164 	if (req == NULL) {
1165 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1166 		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1167 		return (ENOBUFS);
1168 	}
1169 
1170 	req->vsr_ccb = ccb;
1171 
1172 	error = vtscsi_execute_scsi_cmd(sc, req);
1173 	if (error)
1174 		vtscsi_enqueue_request(sc, req);
1175 
1176 	return (error);
1177 }
1178 
/*
 * Completion handler for the ABORT_TASK TMF issued for a timed out SCSI
 * command. If the abort did not take effect and the device is neither
 * detaching nor already resetting, fall back to resetting the bus.
 */
static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	/* The TMF request itself is done; recycle it before deciding. */
	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}
1217 
/*
 * Issue an asynchronous ABORT_TASK TMF for a request whose timeout fired.
 * Returns 0 once the TMF has been enqueued on the control virtqueue;
 * otherwise any borrowed request is returned to the free list and an
 * error is returned so the caller can escalate to a bus reset.
 */
static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	/* A second request carries the TMF for the timed out one. */
	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* The timed out command is identified by its CCB pointer tag. */
	vtscsi_init_ctrl_tmf_req(sc, to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	/* Preset to an out-of-band value; filled in on completion. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}
1265 
/*
 * Callout handler for a SCSI command that exceeded its CCB timeout.
 * Tries to resolve benign races (completion already pending, callout
 * stop race) before attempting an ABORT_TASK; if that cannot be issued,
 * resets the bus. Runs with the softc mutex held (callout_init_mtx).
 */
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	/* Prefer a targeted abort; escalate to a bus reset on failure. */
	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}
1307 
1308 static cam_status
1309 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1310 {
1311 	cam_status status;
1312 
1313 	switch (cmd_resp->response) {
1314 	case VIRTIO_SCSI_S_OK:
1315 		status = CAM_REQ_CMP;
1316 		break;
1317 	case VIRTIO_SCSI_S_OVERRUN:
1318 		status = CAM_DATA_RUN_ERR;
1319 		break;
1320 	case VIRTIO_SCSI_S_ABORTED:
1321 		status = CAM_REQ_ABORTED;
1322 		break;
1323 	case VIRTIO_SCSI_S_BAD_TARGET:
1324 		status = CAM_SEL_TIMEOUT;
1325 		break;
1326 	case VIRTIO_SCSI_S_RESET:
1327 		status = CAM_SCSI_BUS_RESET;
1328 		break;
1329 	case VIRTIO_SCSI_S_BUSY:
1330 		status = CAM_SCSI_BUSY;
1331 		break;
1332 	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1333 	case VIRTIO_SCSI_S_TARGET_FAILURE:
1334 	case VIRTIO_SCSI_S_NEXUS_FAILURE:
1335 		status = CAM_SCSI_IT_NEXUS_LOST;
1336 		break;
1337 	default: /* VIRTIO_SCSI_S_FAILURE */
1338 		status = CAM_REQ_CMP_ERR;
1339 		break;
1340 	}
1341 
1342 	return (status);
1343 }
1344 
/*
 * Translate a completed command's response into the CCB's scsi_status,
 * residual, and autosense fields. Returns CAM_REQ_CMP for a clean SCSI
 * status, otherwise CAM_SCSI_STATUS_ERROR, with CAM_AUTOSNS_VALID OR'd
 * in when the host returned sense data.
 */
static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
	uint32_t resp_sense_length;
	cam_status status;

	csio->scsi_status = cmd_resp->status;
	csio->resid = vtscsi_htog32(sc, cmd_resp->resid);

	if (csio->scsi_status == SCSI_STATUS_OK)
		status = CAM_REQ_CMP;
	else
		status = CAM_SCSI_STATUS_ERROR;

	resp_sense_length = vtscsi_htog32(sc, cmd_resp->sense_len);

	if (resp_sense_length > 0) {
		status |= CAM_AUTOSNS_VALID;

		/* The copy below is capped at csio->sense_len via sense_resid. */
		if (resp_sense_length < csio->sense_len)
			csio->sense_resid = csio->sense_len - resp_sense_length;
		else
			csio->sense_resid = 0;

		memcpy(&csio->sense_data, cmd_resp->sense,
		    csio->sense_len - csio->sense_resid);
	}

	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
	    csio, csio->scsi_status, csio->resid, csio->sense_resid);

	return (status);
}
1380 
/*
 * Completion handler for a SCSI command dequeued from the request
 * virtqueue: stop any timeout callout, derive the final CAM status
 * (including device-queue freeze and SIMQ release bookkeeping), then
 * complete the CCB and recycle the request.
 */
static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		/* An abort triggered by our timeout reports as a timeout. */
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	/* Finishing a request may clear the resource shortage. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}
1421 
1422 static void
1423 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1424 {
1425 
1426 	/* XXX We probably shouldn't poll forever. */
1427 	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1428 	do
1429 		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1430 	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1431 
1432 	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
1433 }
1434 
1435 static int
1436 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1437     struct sglist *sg, int readable, int writable, int flag)
1438 {
1439 	struct virtqueue *vq;
1440 	int error;
1441 
1442 	vq = sc->vtscsi_control_vq;
1443 
1444 	MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1445 
1446 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1447 	if (error) {
1448 		/*
1449 		 * Return EAGAIN when the virtqueue does not have enough
1450 		 * descriptors available.
1451 		 */
1452 		if (error == ENOSPC || error == EMSGSIZE)
1453 			error = EAGAIN;
1454 
1455 		return (error);
1456 	}
1457 
1458 	virtqueue_notify(vq);
1459 	if (flag == VTSCSI_EXECUTE_POLL)
1460 		vtscsi_poll_ctrl_req(sc, req);
1461 
1462 	return (0);
1463 }
1464 
1465 static void
1466 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1467     struct vtscsi_request *req)
1468 {
1469 	union ccb *ccb;
1470 	struct ccb_hdr *ccbh;
1471 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1472 
1473 	ccb = req->vsr_ccb;
1474 	ccbh = &ccb->ccb_h;
1475 	tmf_resp = &req->vsr_tmf_resp;
1476 
1477 	switch (tmf_resp->response) {
1478 	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1479 		ccbh->status = CAM_REQ_CMP;
1480 		break;
1481 	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1482 		ccbh->status = CAM_UA_ABORT;
1483 		break;
1484 	default:
1485 		ccbh->status = CAM_REQ_CMP_ERR;
1486 		break;
1487 	}
1488 
1489 	xpt_done(ccb);
1490 	vtscsi_enqueue_request(sc, req);
1491 }
1492 
/*
 * Service an XPT_ABORT CCB by issuing an asynchronous ABORT_TASK TMF for
 * the referenced SCSI I/O CCB. Returns 0 when the TMF was enqueued, or
 * EINVAL/EALREADY/an enqueue error when the target CCB cannot be aborted.
 */
static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	/* Mark it aborted and disarm its timeout before sending the TMF. */
	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	/* The aborted command is identified by its CCB pointer tag. */
	vtscsi_init_ctrl_tmf_req(sc, ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	/* Preset to an out-of-band value; filled in on completion. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}
1550 
1551 static void
1552 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1553     struct vtscsi_request *req)
1554 {
1555 	union ccb *ccb;
1556 	struct ccb_hdr *ccbh;
1557 	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1558 
1559 	ccb = req->vsr_ccb;
1560 	ccbh = &ccb->ccb_h;
1561 	tmf_resp = &req->vsr_tmf_resp;
1562 
1563 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1564 	    req, ccb, tmf_resp->response);
1565 
1566 	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1567 		ccbh->status = CAM_REQ_CMP;
1568 		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1569 		    ccbh->target_lun);
1570 	} else
1571 		ccbh->status = CAM_REQ_CMP_ERR;
1572 
1573 	xpt_done(ccb);
1574 	vtscsi_enqueue_request(sc, req);
1575 }
1576 
/*
 * Service an XPT_RESET_DEV CCB with an asynchronous TMF: an I_T nexus
 * reset when the CCB targets the wildcard LUN, otherwise a logical unit
 * reset. Returns the control-queue enqueue result.
 */
static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	/* Reset TMFs carry no tag. */
	vtscsi_init_ctrl_tmf_req(sc, ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	/* Preset to an out-of-band value; filled in on completion. */
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}
1617 
/*
 * Decode the 8-byte LUN field of an event into a target and LUN id:
 * byte 1 is the target, bytes 2-3 the LUN.
 * NOTE(review): the 0x40 address-method bit placed in lun[2] by
 * vtscsi_set_request_lun() is not masked off here — confirm callers
 * expect that bit (0x4000) in the returned lun_id.
 */
static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{

	*target_id = lun[1];
	*lun_id = (lun[2] << 8) | lun[3];
}
1625 
/*
 * Encode a CCB's target and LUN into the 8-byte LUN field used by
 * VirtIO SCSI: flag byte, target id, then a 14-bit LUN with the 0x40
 * marker bit set in the high byte.
 */
static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}
1635 
1636 static void
1637 vtscsi_init_scsi_cmd_req(struct vtscsi_softc *sc, struct ccb_scsiio *csio,
1638     struct virtio_scsi_cmd_req *cmd_req)
1639 {
1640 	uint8_t attr;
1641 
1642 	switch (csio->tag_action) {
1643 	case MSG_HEAD_OF_Q_TAG:
1644 		attr = VIRTIO_SCSI_S_HEAD;
1645 		break;
1646 	case MSG_ORDERED_Q_TAG:
1647 		attr = VIRTIO_SCSI_S_ORDERED;
1648 		break;
1649 	case MSG_ACA_TASK:
1650 		attr = VIRTIO_SCSI_S_ACA;
1651 		break;
1652 	default: /* MSG_SIMPLE_Q_TAG */
1653 		attr = VIRTIO_SCSI_S_SIMPLE;
1654 		break;
1655 	}
1656 
1657 	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
1658 	cmd_req->tag = vtscsi_gtoh64(sc, (uintptr_t) csio);
1659 	cmd_req->task_attr = attr;
1660 
1661 	memcpy(cmd_req->cdb,
1662 	    csio->ccb_h.flags & CAM_CDB_POINTER ?
1663 	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
1664 	    csio->cdb_len);
1665 }
1666 
1667 static void
1668 vtscsi_init_ctrl_tmf_req(struct vtscsi_softc *sc, struct ccb_hdr *ccbh,
1669     uint32_t subtype, uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1670 {
1671 
1672 	vtscsi_set_request_lun(ccbh, tmf_req->lun);
1673 
1674 	tmf_req->type = vtscsi_gtoh32(sc, VIRTIO_SCSI_T_TMF);
1675 	tmf_req->subtype = vtscsi_gtoh32(sc, subtype);
1676 	tmf_req->tag = vtscsi_gtoh64(sc, tag);
1677 }
1678 
1679 static void
1680 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1681 {
1682 	int frozen;
1683 
1684 	frozen = sc->vtscsi_frozen;
1685 
1686 	if (reason & VTSCSI_REQUEST &&
1687 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1688 		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1689 
1690 	if (reason & VTSCSI_REQUEST_VQ &&
1691 	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1692 		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1693 
1694 	/* Freeze the SIMQ if transitioned to frozen. */
1695 	if (frozen == 0 && sc->vtscsi_frozen != 0) {
1696 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1697 		xpt_freeze_simq(sc->vtscsi_sim, 1);
1698 	}
1699 }
1700 
1701 static int
1702 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1703 {
1704 	int thawed;
1705 
1706 	if (sc->vtscsi_frozen == 0 || reason == 0)
1707 		return (0);
1708 
1709 	if (reason & VTSCSI_REQUEST &&
1710 	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1711 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1712 
1713 	if (reason & VTSCSI_REQUEST_VQ &&
1714 	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1715 		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1716 
1717 	thawed = sc->vtscsi_frozen == 0;
1718 	if (thawed != 0)
1719 		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
1720 
1721 	return (thawed);
1722 }
1723 
1724 static void
1725 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1726     target_id_t target_id, lun_id_t lun_id)
1727 {
1728 	struct cam_path *path;
1729 
1730 	/* Use the wildcard path from our softc for bus announcements. */
1731 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1732 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1733 		return;
1734 	}
1735 
1736 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1737 	    target_id, lun_id) != CAM_REQ_CMP) {
1738 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1739 		return;
1740 	}
1741 
1742 	xpt_async(ac_code, path, NULL);
1743 	xpt_free_path(path);
1744 }
1745 
1746 static void
1747 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1748     lun_id_t lun_id)
1749 {
1750 	union ccb *ccb;
1751 	cam_status status;
1752 
1753 	ccb = xpt_alloc_ccb_nowait();
1754 	if (ccb == NULL) {
1755 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1756 		return;
1757 	}
1758 
1759 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1760 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1761 	if (status != CAM_REQ_CMP) {
1762 		xpt_free_ccb(ccb);
1763 		return;
1764 	}
1765 
1766 	xpt_rescan(ccb);
1767 }
1768 
/* Rescan the entire bus using the wildcard target and LUN. */
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{

	vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
}
1775 
1776 static void
1777 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1778     struct virtio_scsi_event *event)
1779 {
1780 	target_id_t target_id;
1781 	lun_id_t lun_id;
1782 
1783 	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1784 
1785 	switch (event->reason) {
1786 	case VIRTIO_SCSI_EVT_RESET_RESCAN:
1787 	case VIRTIO_SCSI_EVT_RESET_REMOVED:
1788 		vtscsi_execute_rescan(sc, target_id, lun_id);
1789 		break;
1790 	default:
1791 		device_printf(sc->vtscsi_dev,
1792 		    "unhandled transport event reason: %d\n", event->reason);
1793 		break;
1794 	}
1795 }
1796 
1797 static void
1798 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1799 {
1800 	int error;
1801 
1802 	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1803 		switch (event->event) {
1804 		case VIRTIO_SCSI_T_TRANSPORT_RESET:
1805 			vtscsi_transport_reset_event(sc, event);
1806 			break;
1807 		default:
1808 			device_printf(sc->vtscsi_dev,
1809 			    "unhandled event: %d\n", event->event);
1810 			break;
1811 		}
1812 	} else
1813 		vtscsi_execute_rescan_bus(sc);
1814 
1815 	/*
1816 	 * This should always be successful since the buffer
1817 	 * was just dequeued.
1818 	 */
1819 	error = vtscsi_enqueue_event_buf(sc, event);
1820 	KASSERT(error == 0,
1821 	    ("cannot requeue event buffer: %d", error));
1822 }
1823 
1824 static int
1825 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1826     struct virtio_scsi_event *event)
1827 {
1828 	struct sglist *sg;
1829 	struct virtqueue *vq;
1830 	int size, error;
1831 
1832 	sg = sc->vtscsi_sglist;
1833 	vq = sc->vtscsi_event_vq;
1834 	size = sc->vtscsi_event_buf_size;
1835 
1836 	bzero(event, size);
1837 
1838 	sglist_reset(sg);
1839 	error = sglist_append(sg, event, size);
1840 	if (error)
1841 		return (error);
1842 
1843 	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1844 	if (error)
1845 		return (error);
1846 
1847 	virtqueue_notify(vq);
1848 
1849 	return (0);
1850 }
1851 
/*
 * Post the initial set of event buffers to the event virtqueue. Skipped
 * entirely (returning 0) when hotplug is not negotiated or the buffer
 * size is too small; partial success is tolerated since missed events
 * are flagged by the host.
 */
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}
1888 
/*
 * Repost the event buffers after the device has been reinitialized
 * (e.g. following a bus reset). No-op when hotplug was not negotiated
 * or the buffers are too small, mirroring vtscsi_init_event_vq().
 */
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/* At least one posted buffer is required for event delivery. */
	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}
1909 
1910 static void
1911 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1912 {
1913 	struct virtqueue *vq;
1914 	int last;
1915 
1916 	vq = sc->vtscsi_event_vq;
1917 	last = 0;
1918 
1919 	while (virtqueue_drain(vq, &last) != NULL)
1920 		;
1921 
1922 	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1923 }
1924 
/*
 * Run the completion handlers for everything finished on the request
 * and control virtqueues. Caller must hold the softc mutex.
 */
static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	/* The queues may be NULL during early attach/teardown. */
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}
1936 
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}
1945 
/*
 * Cancel a request pulled off a virtqueue during a drain: stop or drain
 * its timeout callout, complete any attached CCB as CAM_NO_HBA (detach)
 * or CAM_REQUEUE_REQ (bus reset), and return the request to the free
 * list. Lock expectations differ between the two paths — see below.
 */
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		/* xpt_done() is called with the mutex held in both paths. */
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}
1989 
1990 static void
1991 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1992 {
1993 	struct vtscsi_request *req;
1994 	int last;
1995 
1996 	last = 0;
1997 
1998 	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1999 
2000 	while ((req = virtqueue_drain(vq, &last)) != NULL)
2001 		vtscsi_cancel_request(sc, req);
2002 
2003 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
2004 }
2005 
/*
 * Drain all three virtqueues: control and request queues have their
 * outstanding requests canceled, the event queue is simply emptied.
 */
static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	/* The queues may be NULL during early attach/teardown. */
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}
2017 
/* Quiesce the device: mask virtqueue interrupts, then stop the device. */
static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}
2025 
/*
 * Perform a full bus reset: stop the device, complete and drain all
 * outstanding requests (so CAM retries them), thaw the SIMQ if needed,
 * reinitialize the device, and announce AC_BUS_RESET on success.
 * Caller must hold the softc mutex. Honors the bus-reset-disable knob.
 */
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	/* Tells completion/timeout paths a reset is in progress. */
	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}
2071 
/*
 * One-time initialization of a freshly allocated request: link it to the
 * softc and set up its timeout callout against the softc mutex. Under
 * INVARIANTS, verify the request/response unions each fit in a single
 * physical page so they occupy one sglist segment.
 */
static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
}
2089 
/*
 * Preallocate the request pool sized to the request virtqueue (divided
 * by the minimum segment count when indirect descriptors are not
 * available), plus a reserve for internal TMF use. Returns ENOMEM on
 * allocation failure; the caller is expected to free what was allocated.
 */
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		/* vtscsi_enqueue_request() zeroes the per-I/O state. */
		sc->vtscsi_nrequests++;
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}
2121 
2122 static void
2123 vtscsi_free_requests(struct vtscsi_softc *sc)
2124 {
2125 	struct vtscsi_request *req;
2126 
2127 	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2128 		KASSERT(callout_active(&req->vsr_callout) == 0,
2129 		    ("request callout still active"));
2130 
2131 		sc->vtscsi_nrequests--;
2132 		free(req, M_DEVBUF);
2133 	}
2134 
2135 	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2136 	    sc->vtscsi_nrequests));
2137 }
2138 
/*
 * Return a request to the free list, resetting all of its per-I/O state.
 * Thaws (and releases) the SIMQ if it was frozen for lack of requests.
 */
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	/* Clear the in-flight command/response unions for the next user. */
	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}
2168 
2169 static struct vtscsi_request *
2170 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2171 {
2172 	struct vtscsi_request *req;
2173 
2174 	req = TAILQ_FIRST(&sc->vtscsi_req_free);
2175 	if (req != NULL) {
2176 		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2177 		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2178 	} else
2179 		sc->vtscsi_stats.dequeue_no_requests++;
2180 
2181 	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2182 
2183 	return (req);
2184 }
2185 
/*
 * Finish a request dequeued from a virtqueue: mark polled requests
 * complete so the poll loop exits, then invoke the per-request
 * completion handler if one was set.
 */
static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}
2196 
2197 static void
2198 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2199 {
2200 	struct vtscsi_request *req;
2201 
2202 	VTSCSI_LOCK_OWNED(sc);
2203 
2204 	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2205 		vtscsi_complete_request(req);
2206 }
2207 
2208 static void
2209 vtscsi_control_vq_intr(void *xsc)
2210 {
2211 	struct vtscsi_softc *sc;
2212 	struct virtqueue *vq;
2213 
2214 	sc = xsc;
2215 	vq = sc->vtscsi_control_vq;
2216 
2217 again:
2218 	VTSCSI_LOCK(sc);
2219 
2220 	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2221 
2222 	if (virtqueue_enable_intr(vq) != 0) {
2223 		virtqueue_disable_intr(vq);
2224 		VTSCSI_UNLOCK(sc);
2225 		goto again;
2226 	}
2227 
2228 	VTSCSI_UNLOCK(sc);
2229 }
2230 
2231 static void
2232 vtscsi_event_vq_intr(void *xsc)
2233 {
2234 	struct vtscsi_softc *sc;
2235 	struct virtqueue *vq;
2236 	struct virtio_scsi_event *event;
2237 
2238 	sc = xsc;
2239 	vq = sc->vtscsi_event_vq;
2240 
2241 again:
2242 	VTSCSI_LOCK(sc);
2243 
2244 	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2245 		vtscsi_handle_event(sc, event);
2246 
2247 	if (virtqueue_enable_intr(vq) != 0) {
2248 		virtqueue_disable_intr(vq);
2249 		VTSCSI_UNLOCK(sc);
2250 		goto again;
2251 	}
2252 
2253 	VTSCSI_UNLOCK(sc);
2254 }
2255 
2256 static void
2257 vtscsi_request_vq_intr(void *xsc)
2258 {
2259 	struct vtscsi_softc *sc;
2260 	struct virtqueue *vq;
2261 
2262 	sc = xsc;
2263 	vq = sc->vtscsi_request_vq;
2264 
2265 again:
2266 	VTSCSI_LOCK(sc);
2267 
2268 	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2269 
2270 	if (virtqueue_enable_intr(vq) != 0) {
2271 		virtqueue_disable_intr(vq);
2272 		VTSCSI_UNLOCK(sc);
2273 		goto again;
2274 	}
2275 
2276 	VTSCSI_UNLOCK(sc);
2277 }
2278 
/* Mask interrupts on all three virtqueues. */
static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}
2287 
2288 static void
2289 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2290 {
2291 
2292 	virtqueue_enable_intr(sc->vtscsi_control_vq);
2293 	virtqueue_enable_intr(sc->vtscsi_event_vq);
2294 	virtqueue_enable_intr(sc->vtscsi_request_vq);
2295 }
2296 
2297 static void
2298 vtscsi_get_tunables(struct vtscsi_softc *sc)
2299 {
2300 	char tmpstr[64];
2301 
2302 	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2303 
2304 	snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2305 	    device_get_unit(sc->vtscsi_dev));
2306 	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
2307 }
2308 
2309 static void
2310 vtscsi_setup_sysctl(struct vtscsi_softc *sc)
2311 {
2312 	device_t dev;
2313 	struct vtscsi_statistics *stats;
2314         struct sysctl_ctx_list *ctx;
2315 	struct sysctl_oid *tree;
2316 	struct sysctl_oid_list *child;
2317 
2318 	dev = sc->vtscsi_dev;
2319 	stats = &sc->vtscsi_stats;
2320 	ctx = device_get_sysctl_ctx(dev);
2321 	tree = device_get_sysctl_tree(dev);
2322 	child = SYSCTL_CHILDREN(tree);
2323 
2324 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2325 	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
2326 	    "Debug level");
2327 
2328 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2329 	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2330 	    "SCSI command timeouts");
2331 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2332 	    CTLFLAG_RD, &stats->dequeue_no_requests,
2333 	    "No available requests to dequeue");
2334 }
2335 
/*
 * Log a formatted message for a request, prefixed with its CAM path
 * (or the SIM name when no CCB is attached) and, for SCSI I/O CCBs,
 * the command string and transfer length.
 */
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	va_start(ap, fmt);
	/* Fixed-length sbuf backed by the on-stack buffer. */
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		/* No CCB attached: identify by SIM name/unit/bus instead. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	/* Append the caller's message, then emit the finished buffer. */
	sbuf_vprintf(&sb, fmt, ap);
	va_end(ap);

	sbuf_finish(&sb);
	printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2376