/* /freebsd/usr.sbin/bhyve/pci_virtio_scsi.c (revision 42249ef2) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/sbuf.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <camlib.h>

#include "bhyverun.h"
#include "pci_emul.h"
#include "virtio.h"
#include "iov.h"

#define VTSCSI_RINGSZ		64
#define	VTSCSI_REQUESTQ		1
#define	VTSCSI_THR_PER_Q	16
#define	VTSCSI_MAXQ		(VTSCSI_REQUESTQ + 2)
#define	VTSCSI_MAXSEG		64

#define	VTSCSI_IN_HEADER_LEN(_sc)	\
	(sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)

#define	VTSCSI_OUT_HEADER_LEN(_sc) 	\
	(sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)

#define	VIRTIO_SCSI_MAX_CHANNEL	0
#define	VIRTIO_SCSI_MAX_TARGET	0
#define	VIRTIO_SCSI_MAX_LUN	16383

#define	VIRTIO_SCSI_F_INOUT	(1 << 0)
#define	VIRTIO_SCSI_F_HOTPLUG	(1 << 1)
#define	VIRTIO_SCSI_F_CHANGE	(1 << 2)

static int pci_vtscsi_debug = 0;
#define	DPRINTF(params) if (pci_vtscsi_debug) printf params
#define	WPRINTF(params) printf params

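/*
 * Device configuration space, exposed to the guest through
 * pci_vtscsi_cfgread().  The field order and widths mirror the
 * virtio-scsi configuration layout from the VirtIO specification;
 * the structure is packed so that guest reads at an arbitrary byte
 * offset hit the intended field.
 */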
struct pci_vtscsi_config {
	uint32_t num_queues;
	uint32_t seg_max;
	uint32_t max_sectors;
	uint32_t cmd_per_lun;
	uint32_t event_info_size;
	uint32_t sense_size;
	uint32_t cdb_size;
	uint16_t max_channel;
	uint16_t max_target;
	uint32_t max_lun;
} __attribute__((packed));

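/*
 * Each virtio request queue is backed by a pool of worker threads
 * (VTSCSI_THR_PER_Q per queue).  The notify handler turns descriptor
 * chains into pci_vtscsi_request entries on vsq_requests, protected by
 * vsq_mtx and signalled through vsq_cv; vsq_qmtx serializes the
 * vq_relchain()/vq_endchains() completion path.
 */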
struct pci_vtscsi_queue {
	struct pci_vtscsi_softc *         vsq_sc;
	struct vqueue_info *              vsq_vq;
	pthread_mutex_t                   vsq_mtx;
	pthread_mutex_t                   vsq_qmtx;
	pthread_cond_t                    vsq_cv;
	STAILQ_HEAD(, pci_vtscsi_request) vsq_requests;
	LIST_HEAD(, pci_vtscsi_worker)    vsq_workers;
};

struct pci_vtscsi_worker {
	struct pci_vtscsi_queue *     vsw_queue;
	pthread_t                     vsw_thread;
	bool                          vsw_exiting;
	LIST_ENTRY(pci_vtscsi_worker) vsw_link;
};

struct pci_vtscsi_request {
	struct pci_vtscsi_queue * vsr_queue;
	struct iovec              vsr_iov_in[VTSCSI_MAXSEG];
	int                       vsr_niov_in;
	struct iovec              vsr_iov_out[VTSCSI_MAXSEG];
	int                       vsr_niov_out;
	uint32_t                  vsr_idx;
	STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
};

/*
 * Per-device softc
 */
struct pci_vtscsi_softc {
	struct virtio_softc      vss_vs;
	struct vqueue_info       vss_vq[VTSCSI_MAXQ];
	struct pci_vtscsi_queue  vss_queues[VTSCSI_REQUESTQ];
	pthread_mutex_t          vss_mtx;
	int                      vss_iid;
	int                      vss_ctl_fd;
	uint32_t                 vss_features;
	struct pci_vtscsi_config vss_config;
};

#define	VIRTIO_SCSI_T_TMF			0
#define	VIRTIO_SCSI_T_TMF_ABORT_TASK		0
#define	VIRTIO_SCSI_T_TMF_ABORT_TASK_SET	1
#define	VIRTIO_SCSI_T_TMF_CLEAR_ACA		2
#define	VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET	3
#define	VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET	4
#define	VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET	5
#define	VIRTIO_SCSI_T_TMF_QUERY_TASK		6
#define	VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 	7

/* command-specific response values */
#define	VIRTIO_SCSI_S_FUNCTION_COMPLETE		0
#define	VIRTIO_SCSI_S_FUNCTION_SUCCEEDED	10
#define	VIRTIO_SCSI_S_FUNCTION_REJECTED		11

struct pci_vtscsi_ctrl_tmf {
	uint32_t type;
	uint32_t subtype;
	uint8_t lun[8];
	uint64_t id;
	uint8_t response;
} __attribute__((packed));

#define	VIRTIO_SCSI_T_AN_QUERY			1
#define	VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
#define	VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT	4
#define	VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST	8
#define	VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE	16
#define	VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST	32
#define	VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY	64

struct pci_vtscsi_ctrl_an {
	uint32_t type;
	uint8_t lun[8];
	uint32_t event_requested;
	uint32_t event_actual;
	uint8_t response;
} __attribute__((packed));

/* command-specific response values */
#define	VIRTIO_SCSI_S_OK 			0
#define	VIRTIO_SCSI_S_OVERRUN			1
#define	VIRTIO_SCSI_S_ABORTED			2
#define	VIRTIO_SCSI_S_BAD_TARGET		3
#define	VIRTIO_SCSI_S_RESET			4
#define	VIRTIO_SCSI_S_BUSY			5
#define	VIRTIO_SCSI_S_TRANSPORT_FAILURE		6
#define	VIRTIO_SCSI_S_TARGET_FAILURE		7
#define	VIRTIO_SCSI_S_NEXUS_FAILURE		8
#define	VIRTIO_SCSI_S_FAILURE			9
#define	VIRTIO_SCSI_S_INCORRECT_LUN		12

/* task_attr */
#define	VIRTIO_SCSI_S_SIMPLE			0
#define	VIRTIO_SCSI_S_ORDERED			1
#define	VIRTIO_SCSI_S_HEAD			2
#define	VIRTIO_SCSI_S_ACA			3

struct pci_vtscsi_event {
	uint32_t event;
	uint8_t lun[8];
	uint32_t reason;
} __attribute__((packed));

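/*
 * Device-readable and device-writable parts of a request.  Both carry a
 * variable-sized trailer (the CDB and the sense data, respectively) whose
 * length is taken from the cdb_size/sense_size fields of the configuration
 * space; see VTSCSI_IN_HEADER_LEN() and VTSCSI_OUT_HEADER_LEN().
 */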
struct pci_vtscsi_req_cmd_rd {
	uint8_t lun[8];
	uint64_t id;
	uint8_t task_attr;
	uint8_t prio;
	uint8_t crn;
	uint8_t cdb[];
} __attribute__((packed));

struct pci_vtscsi_req_cmd_wr {
	uint32_t sense_len;
	uint32_t residual;
	uint16_t status_qualifier;
	uint8_t status;
	uint8_t response;
	uint8_t sense[];
} __attribute__((packed));

static void *pci_vtscsi_proc(void *);
static void pci_vtscsi_reset(void *);
static void pci_vtscsi_neg_features(void *, uint64_t);
static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
static inline int pci_vtscsi_get_lun(uint8_t *);
static int pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t);
static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_tmf *);
static int pci_vtscsi_an_handle(struct pci_vtscsi_softc *,
    struct pci_vtscsi_ctrl_an *);
static int pci_vtscsi_request_handle(struct pci_vtscsi_queue *, struct iovec *,
    int, struct iovec *, int);
static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *);
static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *);
static int  pci_vtscsi_init_queue(struct pci_vtscsi_softc *,
    struct pci_vtscsi_queue *, int);
static int pci_vtscsi_init(struct vmctx *, struct pci_devinst *, char *);

static struct virtio_consts vtscsi_vi_consts = {
	"vtscsi",				/* our name */
	VTSCSI_MAXQ,				/* we support 2+n virtqueues */
	sizeof(struct pci_vtscsi_config),	/* config reg size */
	pci_vtscsi_reset,			/* reset */
	NULL,					/* device-wide qnotify */
	pci_vtscsi_cfgread,			/* read virtio config */
	pci_vtscsi_cfgwrite,			/* write virtio config */
	pci_vtscsi_neg_features,		/* apply negotiated features */
	0,					/* our capabilities */
};

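/*
 * Worker thread body: sleep until a request is queued (or the worker is
 * asked to exit), handle the request outside the queue lock, then return
 * the descriptor chain to the guest and free the request.
 */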
static void *
pci_vtscsi_proc(void *arg)
{
	struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg;
	struct pci_vtscsi_queue *q = worker->vsw_queue;
	struct pci_vtscsi_request *req;
	int iolen;

	for (;;) {
		pthread_mutex_lock(&q->vsq_mtx);

		while (STAILQ_EMPTY(&q->vsq_requests)
		    && !worker->vsw_exiting)
			pthread_cond_wait(&q->vsq_cv, &q->vsq_mtx);

		if (worker->vsw_exiting)
			break;

		req = STAILQ_FIRST(&q->vsq_requests);
		STAILQ_REMOVE_HEAD(&q->vsq_requests, vsr_link);

		pthread_mutex_unlock(&q->vsq_mtx);
		iolen = pci_vtscsi_request_handle(q, req->vsr_iov_in,
		    req->vsr_niov_in, req->vsr_iov_out, req->vsr_niov_out);

		pthread_mutex_lock(&q->vsq_qmtx);
		vq_relchain(q->vsq_vq, req->vsr_idx, iolen);
		vq_endchains(q->vsq_vq, 0);
		pthread_mutex_unlock(&q->vsq_qmtx);

		DPRINTF(("virtio-scsi: request <idx=%d> completed\n",
		    req->vsr_idx));
		free(req);
	}

	pthread_mutex_unlock(&q->vsq_mtx);
	return (NULL);
}

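/*
 * Device reset: reset the generic virtio state and reload the configuration
 * space with its defaults.  seg_max is advertised as VTSCSI_MAXSEG - 2 so
 * that a maximal data transfer still leaves room in the descriptor chain
 * for the request and response headers.
 */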
static void
pci_vtscsi_reset(void *vsc)
{
	struct pci_vtscsi_softc *sc;

	sc = vsc;

	DPRINTF(("vtscsi: device reset requested\n"));
	vi_reset_dev(&sc->vss_vs);

	/* initialize config structure */
	sc->vss_config = (struct pci_vtscsi_config){
		.num_queues = VTSCSI_REQUESTQ,
		/* Leave room for the request and the response. */
		.seg_max = VTSCSI_MAXSEG - 2,
		.max_sectors = 2,
		.cmd_per_lun = 1,
		.event_info_size = sizeof(struct pci_vtscsi_event),
		.sense_size = 96,
		.cdb_size = 32,
		.max_channel = VIRTIO_SCSI_MAX_CHANNEL,
		.max_target = VIRTIO_SCSI_MAX_TARGET,
		.max_lun = VIRTIO_SCSI_MAX_LUN
	};
}

static void
pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features)
{
	struct pci_vtscsi_softc *sc = vsc;

	sc->vss_features = negotiated_features;
}

static int
pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval)
{
	struct pci_vtscsi_softc *sc = vsc;
	void *ptr;

	ptr = (uint8_t *)&sc->vss_config + offset;
	memcpy(retval, ptr, size);
	return (0);
}

static int
pci_vtscsi_cfgwrite(void *vsc, int offset, int size, uint32_t val)
{

	return (0);
}

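/*
 * Extract the logical unit number from the 8-byte virtio-scsi LUN field.
 * Bytes 2 and 3 carry the LUN in the flat addressing format (the high bits
 * act as a format flag), so only the low 14 bits are kept.
 */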
static inline int
pci_vtscsi_get_lun(uint8_t *lun)
{

	return (((lun[2] << 8) | lun[3]) & 0x3fff);
}

static int
pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf,
    size_t bufsize)
{
	struct pci_vtscsi_ctrl_tmf *tmf;
	struct pci_vtscsi_ctrl_an *an;
	uint32_t type;

	type = *(uint32_t *)buf;

	if (type == VIRTIO_SCSI_T_TMF) {
		tmf = (struct pci_vtscsi_ctrl_tmf *)buf;
		return (pci_vtscsi_tmf_handle(sc, tmf));
	}

	if (type == VIRTIO_SCSI_T_AN_QUERY) {
		an = (struct pci_vtscsi_ctrl_an *)buf;
		return (pci_vtscsi_an_handle(sc, an));
	}

	return (0);
}

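/*
 * Task management function: translate the virtio-scsi TMF subtype into a
 * CTL task action and submit it to the CTL device with the CTL_IO ioctl.
 * The CTL task status is passed back to the guest in the response field.
 */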
static int
pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_ctrl_tmf *tmf)
{
	union ctl_io *io;
	int err;

	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.io_type = CTL_IO_TASK;
	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun);
	io->taskio.tag_type = CTL_TAG_SIMPLE;
	io->taskio.tag_num = (uint32_t)tmf->id;

	switch (tmf->subtype) {
	case VIRTIO_SCSI_T_TMF_ABORT_TASK:
		io->taskio.task_action = CTL_TASK_ABORT_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
		io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
		io->taskio.task_action = CTL_TASK_CLEAR_ACA;
		break;

	case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
		io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
		break;

	case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
		io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
		io->taskio.task_action = CTL_TASK_LUN_RESET;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK:
		io->taskio.task_action = CTL_TASK_QUERY_TASK;
		break;

	case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
		io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
		break;
	}

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb)));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0)
		WPRINTF(("CTL_IO: err=%d (%s)\n", errno, strerror(errno)));

	tmf->response = io->taskio.task_status;
	ctl_scsi_free_io(io);
	return (1);
}

static int
pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_ctrl_an *an)
{

	return (0);
}

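/*
 * Handle a single SCSI command.  The readable descriptors start with the
 * request header and CDB, the writable ones with the response header and
 * sense buffer; anything beyond those headers is data, which is passed to
 * CTL as an external scatter-gather list rather than copied.
 */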
static int
pci_vtscsi_request_handle(struct pci_vtscsi_queue *q, struct iovec *iov_in,
    int niov_in, struct iovec *iov_out, int niov_out)
{
	struct pci_vtscsi_softc *sc = q->vsq_sc;
	struct pci_vtscsi_req_cmd_rd *cmd_rd = NULL;
	struct pci_vtscsi_req_cmd_wr *cmd_wr;
	struct iovec data_iov_in[VTSCSI_MAXSEG], data_iov_out[VTSCSI_MAXSEG];
	union ctl_io *io;
	int data_niov_in, data_niov_out;
	void *ext_data_ptr = NULL;
	uint32_t ext_data_len = 0, ext_sg_entries = 0;
	int err, nxferred;

	seek_iov(iov_in, niov_in, data_iov_in, &data_niov_in,
	    VTSCSI_IN_HEADER_LEN(sc));
	seek_iov(iov_out, niov_out, data_iov_out, &data_niov_out,
	    VTSCSI_OUT_HEADER_LEN(sc));

	truncate_iov(iov_in, &niov_in, VTSCSI_IN_HEADER_LEN(sc));
	truncate_iov(iov_out, &niov_out, VTSCSI_OUT_HEADER_LEN(sc));
	iov_to_buf(iov_in, niov_in, (void **)&cmd_rd);

	cmd_wr = malloc(VTSCSI_OUT_HEADER_LEN(sc));
	io = ctl_scsi_alloc_io(sc->vss_iid);
	ctl_scsi_zero_io(io);

	io->io_hdr.nexus.initid = sc->vss_iid;
	io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(cmd_rd->lun);

	io->io_hdr.io_type = CTL_IO_SCSI;

	if (data_niov_in > 0) {
		ext_data_ptr = (void *)data_iov_in;
		ext_sg_entries = data_niov_in;
		ext_data_len = count_iov(data_iov_in, data_niov_in);
		io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
	} else if (data_niov_out > 0) {
		ext_data_ptr = (void *)data_iov_out;
		ext_sg_entries = data_niov_out;
		ext_data_len = count_iov(data_iov_out, data_niov_out);
		io->io_hdr.flags |= CTL_FLAG_DATA_IN;
	}

	io->scsiio.sense_len = sc->vss_config.sense_size;
	io->scsiio.tag_num = (uint32_t)cmd_rd->id;
	switch (cmd_rd->task_attr) {
	case VIRTIO_SCSI_S_ORDERED:
		io->scsiio.tag_type = CTL_TAG_ORDERED;
		break;
	case VIRTIO_SCSI_S_HEAD:
		io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
		break;
	case VIRTIO_SCSI_S_ACA:
		io->scsiio.tag_type = CTL_TAG_ACA;
		break;
	case VIRTIO_SCSI_S_SIMPLE:
	default:
		io->scsiio.tag_type = CTL_TAG_SIMPLE;
		break;
	}
	io->scsiio.ext_sg_entries = ext_sg_entries;
	io->scsiio.ext_data_ptr = ext_data_ptr;
	io->scsiio.ext_data_len = ext_data_len;
	io->scsiio.ext_data_filled = 0;
	io->scsiio.cdb_len = sc->vss_config.cdb_size;
	memcpy(io->scsiio.cdb, cmd_rd->cdb, sc->vss_config.cdb_size);

	if (pci_vtscsi_debug) {
		struct sbuf *sb = sbuf_new_auto();
		ctl_io_sbuf(io, sb);
		sbuf_finish(sb);
		DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb)));
		sbuf_delete(sb);
	}

	err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
	if (err != 0) {
		WPRINTF(("CTL_IO: err=%d (%s)\n", errno, strerror(errno)));
		cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
	} else {
		cmd_wr->sense_len = MIN(io->scsiio.sense_len,
		    sc->vss_config.sense_size);
		cmd_wr->residual = io->scsiio.residual;
		cmd_wr->status = io->scsiio.scsi_status;
		cmd_wr->response = VIRTIO_SCSI_S_OK;
		memcpy(&cmd_wr->sense, &io->scsiio.sense_data,
		    cmd_wr->sense_len);
	}

	buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(sc), iov_out, niov_out, 0);
	nxferred = VTSCSI_OUT_HEADER_LEN(sc) + io->scsiio.ext_data_filled;
	free(cmd_rd);
	free(cmd_wr);
	ctl_scsi_free_io(io);
	return (nxferred);
}

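/*
 * Control queue: flatten each descriptor chain into a buffer, handle the
 * request in place and copy the response back into the writable tail of
 * the chain before releasing it.
 */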
static void
pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct iovec iov[VTSCSI_MAXSEG];
	uint16_t idx, n;
	void *buf = NULL;
	size_t bufsize;
	int iolen;

	sc = vsc;

	while (vq_has_descs(vq)) {
		n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, NULL);
		bufsize = iov_to_buf(iov, n, &buf);
		iolen = pci_vtscsi_control_handle(sc, buf, bufsize);
		buf_to_iov(buf + bufsize - iolen, iolen, iov, n,
		    bufsize - iolen);

		/*
		 * Release this chain and handle more
		 */
		vq_relchain(vq, idx, iolen);
	}
	vq_endchains(vq, 1);	/* Generate interrupt if appropriate. */
	free(buf);
}

static void
pci_vtscsi_eventq_notify(void *vsc, struct vqueue_info *vq)
{

	vq_kick_disable(vq);
}

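/*
 * Request queue notify: split each chain into its readable and writable
 * parts, wrap them in a pci_vtscsi_request and hand it to the queue's
 * worker pool.  Completion happens asynchronously in pci_vtscsi_proc().
 */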
static void
pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq)
{
	struct pci_vtscsi_softc *sc;
	struct pci_vtscsi_queue *q;
	struct pci_vtscsi_request *req;
	struct iovec iov[VTSCSI_MAXSEG];
	uint16_t flags[VTSCSI_MAXSEG];
	uint16_t idx, n, i;
	int readable;

	sc = vsc;
	q = &sc->vss_queues[vq->vq_num - 2];

	while (vq_has_descs(vq)) {
		readable = 0;
		n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, flags);

		/* Count readable descriptors */
		for (i = 0; i < n; i++) {
			if (flags[i] & VRING_DESC_F_WRITE)
				break;

			readable++;
		}

		req = calloc(1, sizeof(struct pci_vtscsi_request));
		req->vsr_idx = idx;
		req->vsr_queue = q;
		req->vsr_niov_in = readable;
		req->vsr_niov_out = n - readable;
		memcpy(req->vsr_iov_in, iov,
		    req->vsr_niov_in * sizeof(struct iovec));
		memcpy(req->vsr_iov_out, iov + readable,
		    req->vsr_niov_out * sizeof(struct iovec));

		pthread_mutex_lock(&q->vsq_mtx);
		STAILQ_INSERT_TAIL(&q->vsq_requests, req, vsr_link);
		pthread_cond_signal(&q->vsq_cv);
		pthread_mutex_unlock(&q->vsq_mtx);

		DPRINTF(("virtio-scsi: request <idx=%d> enqueued\n", idx));
	}
}

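/*
 * Set up one request queue: initialize its locks and request list and
 * spawn VTSCSI_THR_PER_Q worker threads named "vtscsi:<queue>-<worker>".
 */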
static int
pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc,
    struct pci_vtscsi_queue *queue, int num)
{
	struct pci_vtscsi_worker *worker;
	char tname[MAXCOMLEN + 1];
	int i;

	queue->vsq_sc = sc;
	queue->vsq_vq = &sc->vss_vq[num + 2];

	pthread_mutex_init(&queue->vsq_mtx, NULL);
	pthread_mutex_init(&queue->vsq_qmtx, NULL);
	pthread_cond_init(&queue->vsq_cv, NULL);
	STAILQ_INIT(&queue->vsq_requests);
	LIST_INIT(&queue->vsq_workers);

	for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
		worker = calloc(1, sizeof(struct pci_vtscsi_worker));
		worker->vsw_queue = queue;

		pthread_create(&worker->vsw_thread, NULL, &pci_vtscsi_proc,
		    (void *)worker);

		snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i);
		pthread_set_name_np(worker->vsw_thread, tname);
		LIST_INSERT_HEAD(&queue->vsq_workers, worker, vsw_link);
	}

	return (0);
}

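/*
 * Device instantiation.  Options are comma-separated: a bare first value
 * (or "dev=<path>") selects the CTL device node, which defaults to
 * /dev/cam/ctl, and "iid=<n>" sets the initiator ID used for CTL I/O.
 * Virtqueue 0 is the control queue, virtqueue 1 the event queue, and
 * virtqueues 2 and up are request queues.
 */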
static int
pci_vtscsi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	struct pci_vtscsi_softc *sc;
	char *opt, *optname;
	const char *devname;
	int i, optidx = 0;

	sc = calloc(1, sizeof(struct pci_vtscsi_softc));
	devname = "/dev/cam/ctl";
	while ((opt = strsep(&opts, ",")) != NULL) {
		optname = strsep(&opt, "=");
		if (opt == NULL && optidx == 0) {
			if (optname[0] != 0)
				devname = optname;
		} else if (strcmp(optname, "dev") == 0 && opt != NULL) {
			devname = opt;
		} else if (strcmp(optname, "iid") == 0 && opt != NULL) {
			sc->vss_iid = strtoul(opt, NULL, 10);
		} else {
			fprintf(stderr, "Invalid option %s\n", optname);
			free(sc);
			return (1);
		}
		optidx++;
	}

	sc->vss_ctl_fd = open(devname, O_RDWR);
	if (sc->vss_ctl_fd < 0) {
		WPRINTF(("cannot open %s: %s\n", devname, strerror(errno)));
		free(sc);
		return (1);
	}

	vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
	sc->vss_vs.vs_mtx = &sc->vss_mtx;

	/* controlq */
	sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;

	/* eventq */
	sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
	sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;

	/* request queues */
	for (i = 2; i < VTSCSI_MAXQ; i++) {
		sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
		sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
		pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);
	}

	/* initialize config space */
	pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
	pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_SCSI);
	pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

	if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
		return (1);
	vi_set_io_bar(&sc->vss_vs, 0);

	return (0);
}

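/*
 * Example usage (illustrative, not taken from this file): attach the
 * emulation to a PCI slot with something like
 *
 *	bhyve ... -s 4,virtio-scsi,/dev/cam/ctl,iid=3 ... vmname
 *
 * The LUNs seen by the guest would typically be created beforehand
 * through CTL, e.g. with a command along the lines of
 * "ctladm create -b block -o file=/path/to/backing.img".
 */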
struct pci_devemu pci_de_vscsi = {
	.pe_emu =	"virtio-scsi",
	.pe_init =	pci_vtscsi_init,
	.pe_barwrite =	vi_pci_write,
	.pe_barread =	vi_pci_read
};
PCI_EMUL_SET(pci_de_vscsi);