/*-
 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/virtio/scsi/virtio_scsi.c 311305 2017-01-04 20:26:42Z asomers $
 */

/* Driver for VirtIO SCSI devices. */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/sbuf.h>

#include <machine/stdarg.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_periph.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>
#include <dev/virtual/virtio/scsi/virtio_scsi.h>
#include <dev/virtual/virtio/scsi/virtio_scsivar.h>

static int	vtscsi_modevent(module_t, int, void *);

static int	vtscsi_probe(device_t);
static int	vtscsi_attach(device_t);
static int	vtscsi_detach(device_t);
static int	vtscsi_suspend(device_t);
static int	vtscsi_resume(device_t);

static void	vtscsi_negotiate_features(struct vtscsi_softc *);
static void	vtscsi_read_config(struct vtscsi_softc *,
		    struct virtio_scsi_config *);
static int	vtscsi_maximum_segments(struct vtscsi_softc *, int);
static int	vtscsi_alloc_intrs(struct vtscsi_softc *);
static int	vtscsi_alloc_virtqueues(struct vtscsi_softc *);
static void	vtscsi_write_device_config(struct vtscsi_softc *);
static int	vtscsi_reinit(struct vtscsi_softc *);

static int	vtscsi_alloc_cam(struct vtscsi_softc *);
static int	vtscsi_register_cam(struct vtscsi_softc *);
static void	vtscsi_free_cam(struct vtscsi_softc *);
static void	vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
static int	vtscsi_register_async(struct vtscsi_softc *);
static void	vtscsi_deregister_async(struct vtscsi_softc *);
static void	vtscsi_cam_action(struct cam_sim *, union ccb *);
static void	vtscsi_cam_poll(struct cam_sim *);

static void	vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
		    union ccb *);
static void	vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
		    union ccb *);
static void	vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
static void	vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
static void	vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
static void	vtscsi_cam_path_inquiry(struct vtscsi_softc *,
		    struct cam_sim *, union ccb *);

static int	vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
		    struct sglist *, struct ccb_scsiio *);
static int	vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
		    struct vtscsi_request *, int *, int *);
static int	vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
static void	vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static void	vtscsi_timedout_scsi_cmd(void *);
static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
		    struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
static void	vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);

static void	vtscsi_poll_ctrl_req(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_execute_ctrl_req(struct vtscsi_softc *,
		    struct vtscsi_request *, struct sglist *, int, int, int);
static void	vtscsi_complete_abort_task_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
		    struct vtscsi_request *);

static void	vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
static void	vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
static void	vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
		    struct virtio_scsi_cmd_req *);
static void	vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
		    uintptr_t, struct virtio_scsi_ctrl_tmf_req *);

static void	vtscsi_freeze_simq(struct vtscsi_softc *, int);
static int	vtscsi_thaw_simq(struct vtscsi_softc *, int);

static void	vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
		    lun_id_t);
static void	vtscsi_cam_rescan_callback(struct cam_periph *periph,
		    union ccb *ccb);
static void	vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
		    lun_id_t);
static void	vtscsi_execute_rescan_bus(struct vtscsi_softc *);

static void	vtscsi_handle_event(struct vtscsi_softc *,
		    struct virtio_scsi_event *);
static int	vtscsi_enqueue_event_buf(struct vtscsi_softc *,
		    struct virtio_scsi_event *);
static int	vtscsi_init_event_vq(struct vtscsi_softc *);
static void	vtscsi_reinit_event_vq(struct vtscsi_softc *);
static void	vtscsi_drain_event_vq(struct vtscsi_softc *);

static void	vtscsi_complete_vqs_locked(struct vtscsi_softc *);
static void	vtscsi_complete_vqs(struct vtscsi_softc *);
static void	vtscsi_drain_vqs(struct vtscsi_softc *);
static void	vtscsi_cancel_request(struct vtscsi_softc *,
		    struct vtscsi_request *);
static void	vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
static void	vtscsi_stop(struct vtscsi_softc *);
static int	vtscsi_reset_bus(struct vtscsi_softc *);

static void	vtscsi_init_request(struct vtscsi_softc *,
		    struct vtscsi_request *);
static int	vtscsi_alloc_requests(struct vtscsi_softc *);
static void	vtscsi_free_requests(struct vtscsi_softc *);
static void	vtscsi_enqueue_request(struct vtscsi_softc *,
		    struct vtscsi_request *);
static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);

static void	vtscsi_complete_request(struct vtscsi_request *);
static void	vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);

static void	vtscsi_control_vq_intr(void *);
static void	vtscsi_event_vq_intr(void *);
static void	vtscsi_request_vq_intr(void *);
static void	vtscsi_disable_vqs_intr(struct vtscsi_softc *);
static void	vtscsi_enable_vqs_intr(struct vtscsi_softc *);

static void	vtscsi_get_tunables(struct vtscsi_softc *);
static void	vtscsi_add_sysctl(struct vtscsi_softc *);

static void	vtscsi_printf_req(struct vtscsi_request *, const char *,
		    const char *, ...) __printflike(3, 4);

/* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
static int vtscsi_bus_reset_disable = 1;
TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);

static struct virtio_feature_desc vtscsi_feature_desc[] = {
	{ VIRTIO_SCSI_F_INOUT,		"InOut"		},
	{ VIRTIO_SCSI_F_HOTPLUG,	"Hotplug"	},

	{ 0, NULL }
};

static device_method_t vtscsi_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtscsi_probe),
	DEVMETHOD(device_attach,	vtscsi_attach),
	DEVMETHOD(device_detach,	vtscsi_detach),
	DEVMETHOD(device_suspend,	vtscsi_suspend),
	DEVMETHOD(device_resume,	vtscsi_resume),

	DEVMETHOD_END
};

static driver_t vtscsi_driver = {
	"vtscsi",
	vtscsi_methods,
	sizeof(struct vtscsi_softc)
};
static devclass_t vtscsi_devclass;

DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
    vtscsi_modevent, NULL);
MODULE_VERSION(virtio_scsi, 1);
MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);

static int
vtscsi_modevent(module_t mod, int type, void *unused)
{
	int error;

	switch (type) {
	case MOD_LOAD:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtscsi_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
		return (ENXIO);

	device_set_desc(dev, "VirtIO SCSI Adapter");

	return (BUS_PROBE_DEFAULT);
}

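/*
 * Used by vtscsi_attach() to map each virtqueue (by index: control,
 * event, request) onto one of the allocated interrupt vectors.
 */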
struct irqmap {
	int irq;
	driver_intr_t *handler;
};

static int
vtscsi_attach(device_t dev)
{
	struct vtscsi_softc *sc;
	struct virtio_scsi_config scsicfg;
	int i, error;

	sc = device_get_softc(dev);
	sc->vtscsi_dev = dev;

	VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtscsi_req_free);

	vtscsi_get_tunables(sc);
	vtscsi_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtscsi_feature_desc);
	vtscsi_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
#ifndef __DragonFly__ /* XXX swildner */
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
		sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
#endif
	if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
		sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;

	vtscsi_read_config(sc, &scsicfg);

	sc->vtscsi_max_channel = scsicfg.max_channel;
	sc->vtscsi_max_target = scsicfg.max_target;
	sc->vtscsi_max_lun = scsicfg.max_lun;
	sc->vtscsi_event_buf_size = scsicfg.event_info_size;

	vtscsi_write_device_config(sc);

	sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
	sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
	if (sc->vtscsi_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtscsi_alloc_intrs(sc);
	if (error) {
		device_printf(dev, "cannot allocate interrupts\n");
		goto fail;
	}

	error = vtscsi_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	/* XXX Separate function */
	struct irqmap info[3];

	/* Possible "Virtqueue <-> IRQ" configurations */
	switch (sc->vtscsi_nintr) {
	case 1:
		info[0] = (struct irqmap){0, vtscsi_control_vq_intr};
		info[1] = (struct irqmap){0, vtscsi_event_vq_intr};
		info[2] = (struct irqmap){0, vtscsi_request_vq_intr};
		break;
	case 2:
		info[0] = (struct irqmap){0, vtscsi_control_vq_intr};
		info[1] = (struct irqmap){0, vtscsi_event_vq_intr};
		info[2] = (struct irqmap){1, vtscsi_request_vq_intr};
		break;
	case 3:
		info[0] = (struct irqmap){0, vtscsi_control_vq_intr};
		info[1] = (struct irqmap){1, vtscsi_event_vq_intr};
		info[2] = (struct irqmap){2, vtscsi_request_vq_intr};
		break;
	default:
		device_printf(dev, "Invalid interrupt vector count: %d\n",
		    sc->vtscsi_nintr);
		error = ENXIO;
		goto fail;
	}
	for (i = 0; i < 3; i++) {
		error = virtio_bind_intr(sc->vtscsi_dev, info[i].irq, i,
		    info[i].handler, sc);
		if (error) {
			device_printf(dev,
			    "cannot bind virtqueue IRQs\n");
			goto fail;
		}
	}

	error = vtscsi_init_event_vq(sc);
	if (error) {
		device_printf(dev, "cannot populate the eventvq\n");
		goto fail;
	}

	error = vtscsi_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot allocate requests\n");
		goto fail;
	}

	error = vtscsi_alloc_cam(sc);
	if (error) {
		device_printf(dev, "cannot allocate CAM structures\n");
		goto fail;
	}

	for (i = 0; i < sc->vtscsi_nintr; i++) {
		error = virtio_setup_intr(dev, i, NULL);
		if (error) {
			device_printf(dev, "cannot setup virtqueue "
			    "interrupts\n");
			goto fail;
		}
	}

	vtscsi_enable_vqs_intr(sc);

	/*
	 * Register with CAM after interrupts are enabled so we will get
	 * notified of the probe responses.
	 */
	error = vtscsi_register_cam(sc);
	if (error) {
		device_printf(dev, "cannot register with CAM\n");
		goto fail;
	}

fail:
	if (error)
		vtscsi_detach(dev);

	return (error);
}

static int
vtscsi_detach(device_t dev)
{
	struct vtscsi_softc *sc;
	int i;

	sc = device_get_softc(dev);

	for (i = 0; i < sc->vtscsi_nintr; i++)
		virtio_teardown_intr(dev, i);

	VTSCSI_LOCK(sc);
	sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
	if (device_is_attached(dev))
		vtscsi_stop(sc);
	VTSCSI_UNLOCK(sc);

	vtscsi_complete_vqs(sc);
	vtscsi_drain_vqs(sc);

	vtscsi_free_cam(sc);
	vtscsi_free_requests(sc);

	if (sc->vtscsi_sglist != NULL) {
		sglist_free(sc->vtscsi_sglist);
		sc->vtscsi_sglist = NULL;
	}

	VTSCSI_LOCK_DESTROY(sc);

	return (0);
}

static int
vtscsi_suspend(device_t dev)
{

	return (0);
}

static int
vtscsi_resume(device_t dev)
{

	return (0);
}

static void
vtscsi_negotiate_features(struct vtscsi_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtscsi_dev;
	features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
	sc->vtscsi_features = features;
}

#define VTSCSI_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_scsi_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))		\

static void
vtscsi_read_config(struct vtscsi_softc *sc,
    struct virtio_scsi_config *scsicfg)
{
	device_t dev;

	dev = sc->vtscsi_dev;

	bzero(scsicfg, sizeof(struct virtio_scsi_config));

	VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
	VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
	VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
	VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
	VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
}

#undef VTSCSI_GET_CONFIG

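/*
 * Compute the scatter/gather segment limit for a request: the
 * VTSCSI_MIN_SEGMENTS reserved for the command headers, plus enough
 * data segments for a maximum-sized transfer, capped by the indirect
 * descriptor limit when indirect descriptors were negotiated.
 */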
static int
vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
{
	int nsegs;

	nsegs = VTSCSI_MIN_SEGMENTS;

	if (seg_max > 0) {
		nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}

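/*
 * Allocate up to three interrupt vectors, one per virtqueue when
 * possible; with fewer vectors, vtscsi_attach() maps several
 * virtqueues onto the same vector.
 */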
static int
vtscsi_alloc_intrs(struct vtscsi_softc *sc)
{
	int intrcount = virtio_intr_count(sc->vtscsi_dev);
	int cnt, i, error;

	for (i = 0; i < NELEM(sc->vtscsi_cpus); i++)
		sc->vtscsi_cpus[i] = -1;

	intrcount = imin(intrcount, 3);
	if (intrcount < 1)
		return (ENXIO);

	cnt = intrcount;
	error = virtio_intr_alloc(sc->vtscsi_dev, &cnt, 0, sc->vtscsi_cpus);
	if (error != 0) {
		virtio_intr_release(sc->vtscsi_dev);
		return (error);
	}
	sc->vtscsi_nintr = cnt;
	return (0);
}

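/*
 * Allocate the control, event, and request virtqueues. Only the
 * request virtqueue carries scatter/gather data, so it alone is
 * configured for the full segment count.
 */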
static int
vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
{
	device_t dev = sc->vtscsi_dev;
	struct vq_alloc_info vq_info[3];
	int nvqs = 3;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, &sc->vtscsi_control_vq,
	    "%s control", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], 0, &sc->vtscsi_event_vq,
	    "%s event", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
	    &sc->vtscsi_request_vq, "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
}

static void
vtscsi_write_device_config(struct vtscsi_softc *sc)
{

	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, sense_size),
	    VIRTIO_SCSI_SENSE_SIZE);

	/*
	 * This is the size in the virtio_scsi_cmd_req structure. Note
	 * this value (32) is larger than the maximum CAM CDB size (16).
	 */
	virtio_write_dev_config_4(sc->vtscsi_dev,
	    offsetof(struct virtio_scsi_config, cdb_size),
	    VIRTIO_SCSI_CDB_SIZE);
}

static int
vtscsi_reinit(struct vtscsi_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtscsi_dev;

	error = virtio_reinit(dev, sc->vtscsi_features);
	if (error == 0) {
		vtscsi_write_device_config(sc);
		vtscsi_reinit_event_vq(sc);
		virtio_reinit_complete(dev);

		vtscsi_enable_vqs_intr(sc);
	}

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);

	return (error);
}

static int
vtscsi_alloc_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	struct cam_devq *devq;
	int openings;

	dev = sc->vtscsi_dev;
	openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;

	devq = cam_simq_alloc(openings);
	if (devq == NULL) {
		device_printf(dev, "cannot allocate SIM queue\n");
		return (ENOMEM);
	}

	sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
	    "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
	    openings, devq);
	cam_simq_release(devq);
	if (sc->vtscsi_sim == NULL) {
		device_printf(dev, "cannot allocate SIM\n");
		return (ENOMEM);
	}

	return (0);
}

static int
vtscsi_register_cam(struct vtscsi_softc *sc)
{
	device_t dev;
	int registered, error;

	dev = sc->vtscsi_dev;
	registered = 0;

	VTSCSI_LOCK(sc);

	if (xpt_bus_register(sc->vtscsi_sim, 0) != CAM_SUCCESS) {
		error = ENOMEM;
		device_printf(dev, "cannot register XPT bus\n");
		goto fail;
	}

	registered = 1;

	if (xpt_create_path(&sc->vtscsi_path, NULL,
	    cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		error = ENOMEM;
		device_printf(dev, "cannot create bus path\n");
		goto fail;
	}

	if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
		error = EIO;
		device_printf(dev, "cannot register async callback\n");
		goto fail;
	}

	VTSCSI_UNLOCK(sc);

	return (0);

fail:
	if (sc->vtscsi_path != NULL) {
		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;
	}

	if (registered != 0)
		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));

	VTSCSI_UNLOCK(sc);

	return (error);
}

static void
vtscsi_free_cam(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);

	if (sc->vtscsi_path != NULL) {
		vtscsi_deregister_async(sc);

		xpt_free_path(sc->vtscsi_path);
		sc->vtscsi_path = NULL;

		xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
	}

	if (sc->vtscsi_sim != NULL) {
		cam_sim_free(sc->vtscsi_sim);
		sc->vtscsi_sim = NULL;
	}

	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct cam_sim *sim;
	struct vtscsi_softc *sc;

	sim = cb_arg;
	sc = cam_sim_softc(sim);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);

	/*
	 * TODO Once QEMU supports event reporting, we should
	 *      (un)subscribe to events here.
	 */
	switch (code) {
	case AC_FOUND_DEVICE:
		break;
	case AC_LOST_DEVICE:
		break;
	}
}

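/*
 * Ask CAM to deliver AC_FOUND_DEVICE and AC_LOST_DEVICE notifications
 * for this SIM to vtscsi_cam_async().
 */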
static int
vtscsi_register_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync *csa;
	u_int32_t status;

	csa = &xpt_alloc_ccb()->csa;

	xpt_setup_ccb(&csa->ccb_h, sc->vtscsi_path, 5);
	csa->ccb_h.func_code = XPT_SASYNC_CB;
	csa->event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
	csa->callback = vtscsi_cam_async;
	csa->callback_arg = sc->vtscsi_sim;

	xpt_action((union ccb *)csa);
	status = csa->ccb_h.status;
	xpt_free_ccb(&csa->ccb_h);

	return (status);
}

static void
vtscsi_deregister_async(struct vtscsi_softc *sc)
{
	struct ccb_setasync *csa;

	csa = &xpt_alloc_ccb()->csa;
	xpt_setup_ccb(&csa->ccb_h, sc->vtscsi_path, 5);
	csa->ccb_h.func_code = XPT_SASYNC_CB;
	csa->event_enable = 0;
	csa->callback = vtscsi_cam_async;
	csa->callback_arg = sc->vtscsi_sim;
	xpt_action((union ccb *)csa);
	xpt_free_ccb(&csa->ccb_h);
}

static void
vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vtscsi_softc *sc;
	struct ccb_hdr *ccbh;

	sc = cam_sim_softc(sim);
	ccbh = &ccb->ccb_h;

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
		/*
		 * The VTSCSI_MTX is briefly dropped between setting
		 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
		 * drop any CCBs that come in during that window.
		 */
		ccbh->status = CAM_NO_HBA;
		xpt_done(ccb);
		return;
	}

	switch (ccbh->func_code) {
	case XPT_SCSI_IO:
		vtscsi_cam_scsi_io(sc, sim, ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		ccbh->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
		vtscsi_cam_get_tran_settings(sc, ccb);
		break;

	case XPT_RESET_BUS:
		vtscsi_cam_reset_bus(sc, ccb);
		break;

	case XPT_RESET_DEV:
		vtscsi_cam_reset_dev(sc, ccb);
		break;

	case XPT_ABORT:
		vtscsi_cam_abort(sc, ccb);
		break;

	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, 1);
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:
		vtscsi_cam_path_inquiry(sc, sim, ccb);
		break;

	default:
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);

		ccbh->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
vtscsi_cam_poll(struct cam_sim *sim)
{
	struct vtscsi_softc *sc;

	sc = cam_sim_softc(sim);

	vtscsi_complete_vqs_locked(sc);
}

static void
vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	int error;

	ccbh = &ccb->ccb_h;
	csio = &ccb->csio;

	if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}

#ifndef __DragonFly__ /* XXX swildner */
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
	    (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
		error = EINVAL;
		ccbh->status = CAM_REQ_INVALID;
		goto done;
	}
#endif

	error = vtscsi_start_scsi_cmd(sc, ccb);

done:
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
		xpt_done(ccb);
	}
}

static void
vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_trans_settings *cts;
	struct ccb_trans_settings_scsi *scsi;

	cts = &ccb->cts;
	scsi = &cts->proto_specific.scsi;

	cts->protocol = PROTO_SCSI;
	cts->protocol_version = SCSI_REV_SPC3;
	cts->transport = XPORT_SAS;
	cts->transport_version = 0;

	scsi->valid = CTS_SCSI_VALID_TQ;
	scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}

static void
vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
{
	int error;

	error = vtscsi_reset_bus(sc);
	if (error == 0)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
	    error, ccb, ccb->ccb_h.status);

	xpt_done(ccb);
}

static void
vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct ccb_hdr *ccbh;
	struct vtscsi_request *req;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_reset_dev_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}

static void
vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	struct ccb_hdr *ccbh;
	int error;

	ccbh = &ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = EAGAIN;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		goto fail;
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_abort_task_cmd(sc, req);
	if (error == 0)
		return;

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
	    error, req, ccb);

	if (error == EAGAIN)
		ccbh->status = CAM_RESRC_UNAVAIL;
	else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
}

static void
vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	device_t dev;
	struct ccb_pathinq *cpi;

	dev = sc->vtscsi_dev;
	cpi = &ccb->cpi;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);

	cpi->version_num = 1;
	cpi->hba_inquiry = PI_TAG_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN;
	if (vtscsi_bus_reset_disable != 0)
		cpi->hba_misc |= PIM_NOBUSRESET;
	cpi->hba_eng_cnt = 0;

	cpi->max_target = sc->vtscsi_max_target;
	cpi->max_lun = sc->vtscsi_max_lun;
	cpi->initiator_id = VTSCSI_INITIATOR_ID;

	strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
	strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);

	cpi->unit_number = cam_sim_unit(sim);
	cpi->bus_id = cam_sim_bus(sim);

	cpi->base_transfer_speed = 300000;

	cpi->protocol = PROTO_SCSI;
	cpi->protocol_version = SCSI_REV_SPC3;
	cpi->transport = XPORT_SAS;
	cpi->transport_version = 0;

	cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;

#if 0
	cpi->hba_vendor = virtio_get_vendor(dev);
	cpi->hba_device = virtio_get_device(dev);
	cpi->hba_subvendor = virtio_get_subvendor(dev);
	cpi->hba_subdevice = virtio_get_subdevice(dev);
#endif

	ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(ccb);
}

static int
vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
    struct ccb_scsiio *csio)
{
	struct ccb_hdr *ccbh;
	struct bus_dma_segment *dseg;
	int i, error;

	ccbh = &csio->ccb_h;
	error = 0;

	if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {

		if ((ccbh->flags & CAM_DATA_PHYS) == 0)
			error = sglist_append(sg,
			    csio->data_ptr, csio->dxfer_len);
		else
			error = sglist_append_phys(sg,
			    (vm_paddr_t)(vm_offset_t) csio->data_ptr,
			    csio->dxfer_len);
	} else {

		for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
			dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];

			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
				error = sglist_append(sg,
				    (void *)(vm_offset_t) dseg->ds_addr,
				    dseg->ds_len);
			else
				error = sglist_append_phys(sg,
				    (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
		}
	}

	return (error);
}

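/*
 * Build the scatter/gather list for a SCSI command: the device-readable
 * part is the virtio_scsi_cmd_req header plus any data-out buffers, and
 * the device-writable part is the virtio_scsi_cmd_resp header plus any
 * data-in buffers. The two segment counts are returned via *readable
 * and *writable.
 */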
static int
vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
    int *readable, int *writable)
{
	struct sglist *sg;
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int error;

	sg = sc->vtscsi_sglist;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	sglist_reset(sg);

	sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		/* At least one segment must be left for the response. */
		if (error || sg->sg_nseg == sg->sg_maxseg)
			goto fail;
	}

	*readable = sg->sg_nseg;

	sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
	if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
		if (error)
			goto fail;
	}

	*writable = sg->sg_nseg - *readable;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
	    "writable=%d\n", req, ccbh, *readable, *writable);

	return (0);

fail:
	/*
	 * This should never happen unless maxio was incorrectly set.
	 */
	vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);

	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
	    "nseg=%d maxseg=%d\n",
	    error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);

	return (EFBIG);
}

static int
vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct sglist *sg;
	struct virtqueue *vq;
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_cmd_req *cmd_req;
	struct virtio_scsi_cmd_resp *cmd_resp;
	int readable, writable, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_request_vq;
	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_req = &req->vsr_cmd_req;
	cmd_resp = &req->vsr_cmd_resp;

	vtscsi_init_scsi_cmd_req(csio, cmd_req);

	error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
	if (error)
		return (error);

	req->vsr_complete = vtscsi_complete_scsi_cmd;
	cmd_resp->response = -1;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		vtscsi_dprintf(sc, VTSCSI_ERROR,
		    "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);

		ccbh->status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
		return (error);
	}

	ccbh->status |= CAM_SIM_QUEUED;
	ccbh->ccbh_vtscsi_req = req;

	virtqueue_notify(vq, NULL);

	if (ccbh->timeout != CAM_TIME_INFINITY) {
		req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
		callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
		    vtscsi_timedout_scsi_cmd, req);
	}

	vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
	    req, ccbh);

	return (0);
}

static int
vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
{
	struct vtscsi_request *req;
	int error;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
		return (ENOBUFS);
	}

	req->vsr_ccb = ccb;

	error = vtscsi_execute_scsi_cmd(sc, req);
	if (error)
		vtscsi_enqueue_request(sc, req);

	return (error);
}

static void
vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	struct vtscsi_request *to_req;
	uint8_t response;

	tmf_resp = &req->vsr_tmf_resp;
	response = tmf_resp->response;
	to_req = req->vsr_timedout_req;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
	    req, to_req, response);

	vtscsi_enqueue_request(sc, req);

	/*
	 * The timedout request could have completed between when the
	 * abort task was sent and when the host processed it.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
		return;

	/* The timedout request was successfully aborted. */
	if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
		return;

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/* The timedout request will be aborted by the reset. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
		return;

	vtscsi_reset_bus(sc);
}

static int
vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *to_req)
{
	struct sglist *sg;
	struct ccb_hdr *to_ccbh;
	struct vtscsi_request *req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	to_ccbh = &to_req->vsr_ccb->ccb_h;

	req = vtscsi_dequeue_request(sc);
	if (req == NULL) {
		error = ENOBUFS;
		goto fail;
	}

	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) to_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_timedout_req = to_req;
	req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);
	if (error == 0)
		return (0);

	vtscsi_enqueue_request(sc, req);

fail:
	vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
	    "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);

	return (error);
}

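/*
 * Callout handler for a request whose timeout has expired. The request
 * may have actually completed, so first process the request virtqueue;
 * otherwise attempt an abort task TMF, and fall back to a bus reset if
 * the abort cannot be issued.
 */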
static void
vtscsi_timedout_scsi_cmd(void *xreq)
{
	struct vtscsi_softc *sc;
	struct vtscsi_request *to_req;

	to_req = xreq;
	sc = to_req->vsr_softc;

	vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
	    to_req, to_req->vsr_ccb, to_req->vsr_state);

	/* Don't bother if the device is going away. */
	if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
		return;

	/*
	 * Bail if the request is not in use. We likely raced when
	 * stopping the callout handler or it has already been aborted.
	 */
	if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
	    (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
		return;

	/*
	 * Complete the request queue in case the timedout request is
	 * actually just pending.
	 */
	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
		return;

	sc->vtscsi_stats.scsi_cmd_timeouts++;
	to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;

	if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
		return;

	vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
	vtscsi_reset_bus(sc);
}

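/* Map a virtio command response code to the closest CAM status. */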
static cam_status
vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
{
	cam_status status;

	switch (cmd_resp->response) {
	case VIRTIO_SCSI_S_OK:
		status = CAM_REQ_CMP;
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		status = CAM_DATA_RUN_ERR;
		break;
	case VIRTIO_SCSI_S_ABORTED:
		status = CAM_REQ_ABORTED;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		/*
		 * A CAM_SEL_TIMEOUT here will cause the entire device to
		 * be lost, which is not desirable when scanning LUNs.
		 * Use CAM_DEV_NOT_THERE instead.
		 */
		status = CAM_DEV_NOT_THERE;
		break;
	case VIRTIO_SCSI_S_RESET:
		status = CAM_SCSI_BUS_RESET;
		break;
	case VIRTIO_SCSI_S_BUSY:
		status = CAM_SCSI_BUSY;
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
	case VIRTIO_SCSI_S_TARGET_FAILURE:
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		status = CAM_SCSI_IT_NEXUS_LOST;
		break;
	default: /* VIRTIO_SCSI_S_FAILURE */
		status = CAM_REQ_CMP_ERR;
		break;
	}

	return (status);
}

static cam_status
vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
    struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
{
	cam_status status;

	csio->scsi_status = cmd_resp->status;
	csio->resid = cmd_resp->resid;

	if (csio->scsi_status == SCSI_STATUS_OK)
		status = CAM_REQ_CMP;
	else
		status = CAM_SCSI_STATUS_ERROR;

	if (cmd_resp->sense_len > 0) {
		status |= CAM_AUTOSNS_VALID;

		if (cmd_resp->sense_len < csio->sense_len)
			csio->sense_resid = csio->sense_len -
			    cmd_resp->sense_len;
		else
			csio->sense_resid = 0;

		bzero(&csio->sense_data, sizeof(csio->sense_data));
		memcpy(&csio->sense_data, cmd_resp->sense,
		    csio->sense_len - csio->sense_resid);
	}

	vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
	    "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
	    csio, csio->scsi_status, csio->resid, csio->sense_resid);

	return (status);
}

static void
vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	struct ccb_hdr *ccbh;
	struct ccb_scsiio *csio;
	struct virtio_scsi_cmd_resp *cmd_resp;
	cam_status status;

	csio = &req->vsr_ccb->csio;
	ccbh = &csio->ccb_h;
	cmd_resp = &req->vsr_cmd_resp;

	KASSERT(ccbh->ccbh_vtscsi_req == req,
	    ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&req->vsr_callout);

	status = vtscsi_scsi_cmd_cam_status(cmd_resp);
	if (status == CAM_REQ_ABORTED) {
		if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
			status = CAM_CMD_TIMEOUT;
	} else if (status == CAM_REQ_CMP)
		status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccbh->path, 1);
	}

	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		status |= CAM_RELEASE_SIMQ;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
	    req, ccbh, status);

	ccbh->status = status;
	xpt_done(req->vsr_ccb);
	vtscsi_enqueue_request(sc, req);
}

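/*
 * Spin on the control virtqueue until the given request completes;
 * used when a control request must be executed synchronously.
 */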
static void
vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	/* XXX We probably shouldn't poll forever. */
	req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
	do
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
	while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);

	req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
}

static int
vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
    struct sglist *sg, int readable, int writable, int flag)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtscsi_control_vq;

	KKASSERT(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error) {
		/*
		 * Return EAGAIN when the virtqueue does not have enough
		 * descriptors available.
		 */
		if (error == ENOSPC || error == EMSGSIZE)
			error = EAGAIN;

		return (error);
	}

	virtqueue_notify(vq, NULL);
	if (flag == VTSCSI_EXECUTE_POLL)
		vtscsi_poll_ctrl_req(sc, req);

	return (0);
}

static void
vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	union ccb *ccb;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

	ccb = req->vsr_ccb;
	ccbh = &ccb->ccb_h;
	tmf_resp = &req->vsr_tmf_resp;

	switch (tmf_resp->response) {
	case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
		ccbh->status = CAM_REQ_CMP;
		break;
	case VIRTIO_SCSI_S_FUNCTION_REJECTED:
		ccbh->status = CAM_UA_ABORT;
		break;
	default:
		ccbh->status = CAM_REQ_CMP_ERR;
		break;
	}

	xpt_done(ccb);
	vtscsi_enqueue_request(sc, req);
}

static int
vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_abort *cab;
	struct ccb_hdr *ccbh;
	struct ccb_hdr *abort_ccbh;
	struct vtscsi_request *abort_req;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	int error;

	sg = sc->vtscsi_sglist;
	cab = &req->vsr_ccb->cab;
	ccbh = &cab->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	/* CCB header and request that's to be aborted. */
	abort_ccbh = &cab->abort_ccb->ccb_h;
	abort_req = abort_ccbh->ccbh_vtscsi_req;

	if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
		error = EINVAL;
		goto fail;
	}

	/* Only attempt to abort requests that could be in-flight. */
	if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
		error = EALREADY;
		goto fail;
	}

	abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
	if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
		callout_stop(&abort_req->vsr_callout);

	vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
	    (uintptr_t) abort_ccbh, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_abort_task_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

fail:
	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
	    "abort_req=%p\n", error, req, abort_ccbh, abort_req);

	return (error);
}

static void
vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	union ccb *ccb;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;

	ccb = req->vsr_ccb;
	ccbh = &ccb->ccb_h;
	tmf_resp = &req->vsr_tmf_resp;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
	    req, ccb, tmf_resp->response);

	if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
		ccbh->status = CAM_REQ_CMP;
		vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
		    ccbh->target_lun);
	} else
		ccbh->status = CAM_REQ_CMP_ERR;

	xpt_done(ccb);
	vtscsi_enqueue_request(sc, req);
}

static int
vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
    struct vtscsi_request *req)
{
	struct sglist *sg;
	struct ccb_resetdev *crd;
	struct ccb_hdr *ccbh;
	struct virtio_scsi_ctrl_tmf_req *tmf_req;
	struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
	uint32_t subtype;
	int error;

	sg = sc->vtscsi_sglist;
	crd = &req->vsr_ccb->crd;
	ccbh = &crd->ccb_h;
	tmf_req = &req->vsr_tmf_req;
	tmf_resp = &req->vsr_tmf_resp;

	if (ccbh->target_lun == CAM_LUN_WILDCARD)
		subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
	else
		subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;

	vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);

	sglist_reset(sg);
	sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
	sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));

	req->vsr_complete = vtscsi_complete_reset_dev_cmd;
	tmf_resp->response = -1;

	error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
	    VTSCSI_EXECUTE_ASYNC);

	vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
	    error, req, ccbh);

	return (error);
}

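/*
 * The virtio LUN field addresses a single-level LUN: byte 0 is 1,
 * byte 1 is the target, and bytes 2-3 hold the LUN in flat format
 * (0x40 set in the upper byte). The helpers below convert between
 * this encoding and CAM's target and LUN ids.
 */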
static void
vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
{

	*target_id = lun[1];
	*lun_id = (lun[2] << 8) | lun[3];
}

static void
vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
{

	lun[0] = 1;
	lun[1] = ccbh->target_id;
	lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
	lun[3] = ccbh->target_lun & 0xFF;
}

static void
vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
    struct virtio_scsi_cmd_req *cmd_req)
{
	uint8_t attr;

	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		attr = VIRTIO_SCSI_S_HEAD;
		break;
	case MSG_ORDERED_Q_TAG:
		attr = VIRTIO_SCSI_S_ORDERED;
		break;
	case MSG_ACA_TASK:
		attr = VIRTIO_SCSI_S_ACA;
		break;
	default: /* MSG_SIMPLE_Q_TAG */
		attr = VIRTIO_SCSI_S_SIMPLE;
		break;
	}

	vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
	cmd_req->tag = (uintptr_t) csio;
	cmd_req->task_attr = attr;

	memcpy(cmd_req->cdb,
	    csio->ccb_h.flags & CAM_CDB_POINTER ?
	        csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
	    csio->cdb_len);
}

static void
vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
    uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
{

	vtscsi_set_request_lun(ccbh, tmf_req->lun);

	tmf_req->type = VIRTIO_SCSI_T_TMF;
	tmf_req->subtype = subtype;
	tmf_req->tag = tag;
}

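/*
 * The SIMQ is frozen when free requests (VTSCSI_REQUEST) or request
 * virtqueue descriptors (VTSCSI_REQUEST_VQ) are exhausted, and thawed
 * once both resources are available again.
 */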
static void
vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
{
	int frozen;

	frozen = sc->vtscsi_frozen;

	if (reason & VTSCSI_REQUEST &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
		sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;

	/* Freeze the SIMQ if transitioned to frozen. */
	if (frozen == 0 && sc->vtscsi_frozen != 0) {
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
		xpt_freeze_simq(sc->vtscsi_sim, 1);
	}
}

static int
vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
{
	int thawed;

	if (sc->vtscsi_frozen == 0 || reason == 0)
		return (0);

	if (reason & VTSCSI_REQUEST &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;

	if (reason & VTSCSI_REQUEST_VQ &&
	    sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
		sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;

	thawed = sc->vtscsi_frozen == 0;
	if (thawed != 0)
		vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");

	return (thawed);
}

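/* Send an async event, e.g. AC_SENT_BDR, for the given target and LUN. */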
1747 static void
vtscsi_announce(struct vtscsi_softc * sc,uint32_t ac_code,target_id_t target_id,lun_id_t lun_id)1748 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1749     target_id_t target_id, lun_id_t lun_id)
1750 {
1751 	struct cam_path *path;
1752 
1753 	/* Use the wildcard path from our softc for bus announcements. */
1754 	if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1755 		xpt_async(ac_code, sc->vtscsi_path, NULL);
1756 		return;
1757 	}
1758 
1759 	if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1760 	    target_id, lun_id) != CAM_REQ_CMP) {
1761 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1762 		return;
1763 	}
1764 
1765 	xpt_async(ac_code, path, NULL);
1766 	xpt_free_path(path);
1767 }
1768 
1769 static void
vtscsi_cam_rescan_callback(struct cam_periph * periph,union ccb * ccb)1770 vtscsi_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
1771 {
1772 	xpt_free_path(ccb->ccb_h.path);
1773 	xpt_free_ccb(&ccb->ccb_h);
1774 }
1775 
1776 static void
vtscsi_execute_rescan(struct vtscsi_softc * sc,target_id_t target_id,lun_id_t lun_id)1777 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1778     lun_id_t lun_id)
1779 {
1780 	union ccb *ccb;
1781 	cam_status status;
1782 
1783 	ccb = xpt_alloc_ccb();
1784 	if (ccb == NULL) {
1785 		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1786 		return;
1787 	}
1788 
1789 	status = xpt_create_path(&ccb->ccb_h.path, NULL,
1790 	    cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1791 	if (status != CAM_REQ_CMP) {
1792 		xpt_free_ccb(&ccb->ccb_h);
1793 		return;
1794 	}
1795 
1796 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
1797 	ccb->ccb_h.func_code = XPT_SCAN_LUN;
1798 	ccb->ccb_h.cbfcnp = vtscsi_cam_rescan_callback;
1799 	ccb->crcn.flags = CAM_FLAG_NONE;
1800 	xpt_action(ccb);
1801 }
1802 
static void
vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
{
	union ccb *ccb;
	cam_status status;

	ccb = xpt_alloc_ccb();
	if (ccb == NULL) {
		vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
		return;
	}

	status = xpt_create_path(&ccb->ccb_h.path, NULL,
	    cam_sim_path(sc->vtscsi_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		xpt_free_ccb(&ccb->ccb_h);
		return;
	}

	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = vtscsi_cam_rescan_callback;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(ccb);
}

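/*
 * Handle a transport reset event from the host by rescanning the
 * affected target/LUN so CAM picks up added or removed devices.
 */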
static void
vtscsi_transport_reset_event(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	target_id_t target_id;
	lun_id_t lun_id;

	vtscsi_get_request_lun(event->lun, &target_id, &lun_id);

	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		vtscsi_execute_rescan(sc, target_id, lun_id);
		break;
	default:
		device_printf(sc->vtscsi_dev,
		    "unhandled transport event reason: %d\n", event->reason);
		break;
	}
}

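/*
 * Dispatch one dequeued event and requeue its buffer. When the host
 * reports missed events, fall back to a full bus rescan.
 */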
static void
vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
{
	int error;

	if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
		switch (event->event) {
		case VIRTIO_SCSI_T_TRANSPORT_RESET:
			vtscsi_transport_reset_event(sc, event);
			break;
		default:
			device_printf(sc->vtscsi_dev,
			    "unhandled event: %d\n", event->event);
			break;
		}
	} else
		vtscsi_execute_rescan_bus(sc);

	/*
	 * This should always be successful since the buffer
	 * was just dequeued.
	 */
	error = vtscsi_enqueue_event_buf(sc, event);
	KASSERT(error == 0,
	    ("cannot requeue event buffer: %d", error));
}

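/*
 * Post an event buffer to the event virtqueue as a single
 * device-writable descriptor chain and notify the host.
 */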
static int
vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
    struct virtio_scsi_event *event)
{
	struct sglist *sg;
	struct virtqueue *vq;
	int size, error;

	sg = sc->vtscsi_sglist;
	vq = sc->vtscsi_event_vq;
	size = sc->vtscsi_event_buf_size;

	bzero(event, size);

	sglist_reset(sg);
	error = sglist_append(sg, event, size);
	if (error)
		return (error);

	error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
	if (error)
		return (error);

	virtqueue_notify(vq, NULL);

	return (0);
}

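/* Populate the event virtqueue with empty event buffers at attach. */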
static int
vtscsi_init_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, size, error;

	/*
	 * The first release of QEMU with VirtIO SCSI support would crash
	 * when attempting to notify the event virtqueue. This was fixed
	 * when hotplug support was added.
	 */
	if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
		size = sc->vtscsi_event_buf_size;
	else
		size = 0;

	if (size < sizeof(struct virtio_scsi_event))
		return (0);

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	/*
	 * Even just one buffer is enough. Missed events are
	 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
	 */
	if (i > 0)
		error = 0;

	return (error);
}

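/* Repost the event buffers after a device reinitialization. */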
static void
vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
{
	struct virtio_scsi_event *event;
	int i, error;

	if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
	    sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
		return;

	for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
		event = &sc->vtscsi_event_bufs[i];

		error = vtscsi_enqueue_event_buf(sc, event);
		if (error)
			break;
	}

	KASSERT(i > 0, ("cannot reinit event vq: %d", error));
}

static void
vtscsi_drain_event_vq(struct vtscsi_softc *sc)
{
	struct virtqueue *vq;
	int last;

	vq = sc->vtscsi_event_vq;
	last = 0;

	while (virtqueue_drain(vq, &last) != NULL)
		;

	KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
}

static void
vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK_OWNED(sc);

	if (sc->vtscsi_request_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_control_vq != NULL)
		vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
}

static void
vtscsi_complete_vqs(struct vtscsi_softc *sc)
{

	VTSCSI_LOCK(sc);
	vtscsi_complete_vqs_locked(sc);
	VTSCSI_UNLOCK(sc);
}

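/*
 * Abort an in-flight request, completing its CCB with CAM_NO_HBA on
 * detach or CAM_REQUEUE_REQ on a bus reset, and return the request
 * to the free list.
 */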
static void
vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{
	union ccb *ccb;
	int detach;

	ccb = req->vsr_ccb;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);

	/*
	 * The callout must be drained when detaching since the request is
	 * about to be freed. The VTSCSI_MTX must not be held for this in
	 * case the callout is pending because there is a deadlock potential.
	 * Otherwise, the virtqueue is being drained because of a bus reset
	 * so we only need to attempt to stop the callouts.
	 */
	detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
	if (detach != 0)
		VTSCSI_LOCK_NOTOWNED(sc);
	else
		VTSCSI_LOCK_OWNED(sc);

	if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
		if (detach != 0)
			callout_drain(&req->vsr_callout);
		else
			callout_stop(&req->vsr_callout);
	}

	if (ccb != NULL) {
		if (detach != 0) {
			VTSCSI_LOCK(sc);
			ccb->ccb_h.status = CAM_NO_HBA;
		} else
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		if (detach != 0)
			VTSCSI_UNLOCK(sc);
	}

	vtscsi_enqueue_request(sc, req);
}

static void
vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;
	int last;

	last = 0;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);

	while ((req = virtqueue_drain(vq, &last)) != NULL)
		vtscsi_cancel_request(sc, req);

	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtscsi_drain_vqs(struct vtscsi_softc *sc)
{

	if (sc->vtscsi_control_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
	if (sc->vtscsi_request_vq != NULL)
		vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
	if (sc->vtscsi_event_vq != NULL)
		vtscsi_drain_event_vq(sc);
}

static void
vtscsi_stop(struct vtscsi_softc *sc)
{

	vtscsi_disable_vqs_intr(sc);
	virtio_stop(sc->vtscsi_dev);
}

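/*
 * Reset the bus: stop the device, cancel and drain all outstanding
 * requests, thaw the SIMQ, and reinitialize the device. In-flight
 * commands are completed with CAM_REQUEUE_REQ so CAM retries them.
 */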
static int
vtscsi_reset_bus(struct vtscsi_softc *sc)
{
	int error;

	VTSCSI_LOCK_OWNED(sc);

	if (vtscsi_bus_reset_disable != 0) {
		device_printf(sc->vtscsi_dev, "bus reset disabled\n");
		return (0);
	}

	sc->vtscsi_flags |= VTSCSI_FLAG_RESET;

	/*
	 * vtscsi_stop() will cause the in-flight requests to be canceled.
	 * Those requests are then completed here so CAM will retry them
	 * after the reset is complete.
	 */
	vtscsi_stop(sc);
	vtscsi_complete_vqs_locked(sc);

	/* Rid the virtqueues of any remaining requests. */
	vtscsi_drain_vqs(sc);

	/*
	 * Any resource shortage that froze the SIMQ cannot persist across
	 * a bus reset, so ensure it gets thawed here.
	 */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
		xpt_release_simq(sc->vtscsi_sim, 0);

	error = vtscsi_reinit(sc);
	if (error) {
		device_printf(sc->vtscsi_dev,
		    "reinitialization failed, stopping device...\n");
		vtscsi_stop(sc);
	} else
		vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
		    CAM_LUN_WILDCARD);

	sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;

	return (error);
}

static void
vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

#ifdef INVARIANTS
	int req_nsegs, resp_nsegs;

	req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
	resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));

	KASSERT(req_nsegs == 1, ("request crossed page boundary"));
	KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
#endif

	req->vsr_softc = sc;
	callout_init_lk(&req->vsr_callout, VTSCSI_MTX(sc));
}

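/*
 * Preallocate the request pool, sized from the request virtqueue: one
 * request per descriptor when indirect descriptors were negotiated,
 * otherwise one per VTSCSI_MIN_SEGMENTS descriptors, plus
 * VTSCSI_RESERVED_REQUESTS extras for internal (TMF) use.
 */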
static int
vtscsi_alloc_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;
	int i, nreqs;

	/*
	 * Commands destined for either the request or control queues come
	 * from the same SIM queue. Use the size of the request virtqueue
	 * as it (should) be much more frequently used. Some additional
	 * requests are allocated for internal (TMF) use.
	 */
	nreqs = virtqueue_size(sc->vtscsi_request_vq);
	if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
		nreqs /= VTSCSI_MIN_SEGMENTS;
	nreqs += VTSCSI_RESERVED_REQUESTS;

	for (i = 0; i < nreqs; i++) {
		req = contigmalloc(sizeof(struct vtscsi_request), M_DEVBUF,
		    M_WAITOK, 0, BUS_SPACE_MAXADDR, 16, 0);
		if (req == NULL)
			return (ENOMEM);

		vtscsi_init_request(sc, req);

		sc->vtscsi_nrequests++;
		vtscsi_enqueue_request(sc, req);
	}

	return (0);
}

static void
vtscsi_free_requests(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	while ((req = vtscsi_dequeue_request(sc)) != NULL) {
		KASSERT(callout_active(&req->vsr_callout) == 0,
		    ("request callout still active"));

		sc->vtscsi_nrequests--;
		contigfree(req, sizeof(struct vtscsi_request), M_DEVBUF);
	}

	KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
	    sc->vtscsi_nrequests));
}

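/*
 * Return a request to the free list, resetting its state and thawing
 * the SIMQ if it was frozen waiting for a free request.
 */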
static void
vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
{

	KASSERT(req->vsr_softc == sc,
	    ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	/* A request is available so the SIMQ could be released. */
	if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
		xpt_release_simq(sc->vtscsi_sim, 1);

	req->vsr_ccb = NULL;
	req->vsr_complete = NULL;
	req->vsr_ptr0 = NULL;
	req->vsr_state = VTSCSI_REQ_STATE_FREE;
	req->vsr_flags = 0;

	bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
	bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));

	/*
	 * We insert at the tail of the queue in order to make it
	 * very unlikely a request will be reused if we race with
	 * stopping its callout handler.
	 */
	TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
}

static struct vtscsi_request *
vtscsi_dequeue_request(struct vtscsi_softc *sc)
{
	struct vtscsi_request *req;

	req = TAILQ_FIRST(&sc->vtscsi_req_free);
	if (req != NULL) {
		req->vsr_state = VTSCSI_REQ_STATE_INUSE;
		TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
	} else
		sc->vtscsi_stats.dequeue_no_requests++;

	vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);

	return (req);
}

static void
vtscsi_complete_request(struct vtscsi_request *req)
{

	if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
		req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;

	if (req->vsr_complete != NULL)
		req->vsr_complete(req->vsr_softc, req);
}

static void
vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
{
	struct vtscsi_request *req;

	VTSCSI_LOCK_OWNED(sc);

	while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_complete_request(req);
}

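/*
 * Virtqueue interrupt handlers. Each one completes all pending work,
 * then re-enables its interrupt; if virtqueue_enable_intr() reports
 * that more entries arrived in the meantime, the interrupt is disabled
 * again and the queue is reprocessed to close the race.
 */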
static void
vtscsi_control_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_control_vq;

again:
	VTSCSI_LOCK(sc);
	if (!virtqueue_pending(vq))
		goto done;

	vtscsi_complete_vq(sc, sc->vtscsi_control_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

done:
	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_event_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;
	struct virtio_scsi_event *event;

	sc = xsc;
	vq = sc->vtscsi_event_vq;

again:
	VTSCSI_LOCK(sc);
	if (!virtqueue_pending(vq))
		goto done;

	while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
		vtscsi_handle_event(sc, event);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

done:
	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_request_vq_intr(void *xsc)
{
	struct vtscsi_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtscsi_request_vq;

again:
	VTSCSI_LOCK(sc);
	if (!virtqueue_pending(vq))
		goto done;

	vtscsi_complete_vq(sc, sc->vtscsi_request_vq);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTSCSI_UNLOCK(sc);
		goto again;
	}

done:
	VTSCSI_UNLOCK(sc);
}

static void
vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_disable_intr(sc->vtscsi_control_vq);
	virtqueue_disable_intr(sc->vtscsi_event_vq);
	virtqueue_disable_intr(sc->vtscsi_request_vq);
}

static void
vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
{

	virtqueue_enable_intr(sc->vtscsi_control_vq);
	virtqueue_enable_intr(sc->vtscsi_event_vq);
	virtqueue_enable_intr(sc->vtscsi_request_vq);
}

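/*
 * Fetch the debug level from the global and per-unit tunables. These
 * can be set from the loader environment, e.g. (sketch):
 *
 *	hw.vtscsi.debug_level="1"
 *	dev.vtscsi.0.debug_level="1"
 */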
static void
vtscsi_get_tunables(struct vtscsi_softc *sc)
{
	char tmpstr[64];

	TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);

	ksnprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
	    device_get_unit(sc->vtscsi_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
}

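/*
 * Attach the per-device sysctl nodes. Once attached, the counters can
 * be inspected at runtime, e.g. (sketch, assuming unit 0):
 *
 *	sysctl dev.vtscsi.0.scsi_cmd_timeouts
 *	sysctl dev.vtscsi.0.dequeue_no_requests
 */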
static void
vtscsi_add_sysctl(struct vtscsi_softc *sc)
{
	device_t dev;
	struct vtscsi_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtscsi_dev;
	stats = &sc->vtscsi_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
	    CTLFLAG_RW, &sc->vtscsi_debug, 0,
	    "Debug level");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
	    CTLFLAG_RD, &stats->scsi_cmd_timeouts,
	    "SCSI command timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
	    CTLFLAG_RD, &stats->dequeue_no_requests,
	    "No available requests to dequeue");
}

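/*
 * Print a message prefixed with the request's CAM path and, for SCSI
 * I/O, the CDB and transfer length.
 */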
static void
vtscsi_printf_req(struct vtscsi_request *req, const char *func,
    const char *fmt, ...)
{
	struct vtscsi_softc *sc;
	union ccb *ccb;
	struct sbuf sb;
	__va_list ap;
	char str[192];
	char path_str[64];

	if (req == NULL)
		return;

	sc = req->vsr_softc;
	ccb = req->vsr_ccb;

	__va_start(ap, fmt);
	sbuf_new(&sb, str, sizeof(str), 0);

	if (ccb == NULL) {
		sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
		    cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
		    cam_sim_bus(sc->vtscsi_sim));
	} else {
		xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
		}
	}

	sbuf_vprintf(&sb, fmt, ap);
	__va_end(ap);

	sbuf_finish(&sb);
	kprintf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,
	    sbuf_data(&sb));
}
2439