xref: /openbsd/usr.sbin/vmd/vioscsi.c (revision 09467b48)
1 /*	$OpenBSD: vioscsi.c,v 1.12 2019/01/10 18:59:56 sf Exp $  */
2 
3 /*
4  * Copyright (c) 2017 Carlos Cardenas <ccardenas@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 #include <dev/pci/virtio_pcireg.h>
21 #include <dev/pv/vioscsireg.h>
22 #include <scsi/scsi_all.h>
23 #include <scsi/scsi_disk.h>
24 #include <scsi/scsiconf.h>
25 #include <scsi/cd.h>
26 
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <unistd.h>
31 
32 #include "vmd.h"
33 #include "vioscsi.h"
34 #include "virtio.h"
35 
36 extern char *__progname;
37 
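/*
 * Fill in the virtio-scsi response header: set the low byte of the
 * response and status fields and, for SCSI_CHECK, build fixed-format
 * sense data from the supplied sense key/flags and additional sense
 * code and qualifier.
 */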
38 static void
39 vioscsi_prepare_resp(struct virtio_scsi_res_hdr *resp, uint8_t vio_status,
40     uint8_t scsi_status, uint8_t err_flags, uint8_t add_sense_code,
41     uint8_t add_sense_code_qual)
42 {
43 	/* Set lower 8 bits of status and response fields */
44 	resp->response &= 0xFFFFFF00;
45 	resp->response |= vio_status;
46 	resp->status &= 0xFFFFFF00;
47 	resp->status |= scsi_status;
48 
49 	resp->sense_len = 0;
50 
51 	/* determine if we need to populate the sense field */
52 	if (scsi_status == SCSI_CHECK) {
53 		/*
54 		 * sense data is a 96 byte field.
55 		 * We only need to use the first 14 bytes
56 		 * - set the sense_len accordingly
57 		 * - set error_code to Current Command
58 		 * ref scsi/scsi_all.h:struct scsi_sense_data
59 		 */
60 		memset(resp->sense, 0, VIOSCSI_SENSE_LEN);
61 		resp->sense_len = RESP_SENSE_LEN;
62 		resp->sense[0] = SSD_ERRCODE_CURRENT;
63 		resp->sense[2] = err_flags;
64 		resp->sense[12] = add_sense_code;
65 		resp->sense[13] = add_sense_code_qual;
66 	}
67 }
68 
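/*
 * Follow the "next" link of the current descriptor, masking the index
 * to the queue size, and return a pointer to the next descriptor in
 * the chain along with its index.
 */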
69 static struct vring_desc*
70 vioscsi_next_ring_desc(struct vring_desc* desc, struct vring_desc* cur,
71     uint16_t *idx)
72 {
73 	*idx = cur->next & VIOSCSI_QUEUE_MASK;
74 	return &desc[*idx];
75 }
76 
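/*
 * Place the completed descriptor chain on the used ring, advance the
 * used index, and record the current avail index for the queue that
 * was notified.
 */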
77 static void
78 vioscsi_next_ring_item(struct vioscsi_dev *dev, struct vring_avail *avail,
79     struct vring_used *used, struct vring_desc *desc, uint16_t idx)
80 {
81 	used->ring[used->idx & VIOSCSI_QUEUE_MASK].id = idx;
82 	used->ring[used->idx & VIOSCSI_QUEUE_MASK].len = desc->len;
83 	used->idx++;
84 
85 	dev->vq[dev->cfg.queue_notify].last_avail =
86 	    avail->idx & VIOSCSI_QUEUE_MASK;
87 }
88 
89 static const char *
90 vioscsi_op_names(uint8_t type)
91 {
92 	switch (type) {
93 	/* defined in scsi_all.h */
94 	case TEST_UNIT_READY: return "TEST_UNIT_READY";
95 	case REQUEST_SENSE: return "REQUEST_SENSE";
96 	case INQUIRY: return "INQUIRY";
97 	case MODE_SELECT: return "MODE_SELECT";
98 	case RESERVE: return "RESERVE";
99 	case RELEASE: return "RELEASE";
100 	case MODE_SENSE: return "MODE_SENSE";
101 	case START_STOP: return "START_STOP";
102 	case RECEIVE_DIAGNOSTIC: return "RECEIVE_DIAGNOSTIC";
103 	case SEND_DIAGNOSTIC: return "SEND_DIAGNOSTIC";
104 	case PREVENT_ALLOW: return "PREVENT_ALLOW";
105 	case POSITION_TO_ELEMENT: return "POSITION_TO_ELEMENT";
106 	case WRITE_BUFFER: return "WRITE_BUFFER";
107 	case READ_BUFFER: return "READ_BUFFER";
108 	case CHANGE_DEFINITION: return "CHANGE_DEFINITION";
109 	case MODE_SELECT_BIG: return "MODE_SELECT_BIG";
110 	case MODE_SENSE_BIG: return "MODE_SENSE_BIG";
111 	case REPORT_LUNS: return "REPORT_LUNS";
112 	/* defined in scsi_disk.h */
113 	case REASSIGN_BLOCKS: return "REASSIGN_BLOCKS";
114 	case READ_COMMAND: return "READ_COMMAND";
115 	case WRITE_COMMAND: return "WRITE_COMMAND";
116 	case READ_CAPACITY: return "READ_CAPACITY";
117 	case READ_CAPACITY_16: return "READ_CAPACITY_16";
118 	case READ_BIG: return "READ_BIG";
119 	case WRITE_BIG: return "WRITE_BIG";
120 	case READ_12: return "READ_12";
121 	case WRITE_12: return "WRITE_12";
122 	case READ_16: return "READ_16";
123 	case WRITE_16: return "WRITE_16";
124 	case SYNCHRONIZE_CACHE: return "SYNCHRONIZE_CACHE";
125 	case WRITE_SAME_10: return "WRITE_SAME_10";
126 	case WRITE_SAME_16: return "WRITE_SAME_16";
127 	/* defined in cd.h */
128 	case READ_SUBCHANNEL: return "READ_SUBCHANNEL";
129 	case READ_TOC: return "READ_TOC";
130 	case READ_HEADER: return "READ_HEADER";
131 	case PLAY: return "PLAY";
132 	case PLAY_MSF: return "PLAY_MSF";
133 	case PLAY_TRACK: return "PLAY_TRACK";
134 	case PLAY_TRACK_REL: return "PLAY_TRACK_REL";
135 	case PAUSE: return "PAUSE";
136 	case READ_TRACK_INFO: return "READ_TRACK_INFO";
137 	case CLOSE_TRACK: return "CLOSE_TRACK";
138 	case BLANK: return "BLANK";
139 	case PLAY_BIG: return "PLAY_BIG";
140 	case LOAD_UNLOAD: return "LOAD_UNLOAD";
141 	case PLAY_TRACK_REL_BIG: return "PLAY_TRACK_REL_BIG";
142 	case SET_CD_SPEED: return "SET_CD_SPEED";
143 	/* defined locally */
144 	case READ_DISC_INFORMATION: return "READ_DISC_INFORMATION";
145 	case GET_CONFIGURATION: return "GET_CONFIGURATION";
146 	case MECHANISM_STATUS: return "MECHANISM_STATUS";
147 	case GET_EVENT_STATUS_NOTIFICATION:
148 	    return "GET_EVENT_STATUS_NOTIFICATION";
149 	default: return "UNKNOWN";
150 	}
151 }
152 
153 static const char *
154 vioscsi_reg_name(uint8_t reg)
155 {
156 	switch (reg) {
157 	case VIRTIO_CONFIG_DEVICE_FEATURES: return "device feature";
158 	case VIRTIO_CONFIG_GUEST_FEATURES: return "guest feature";
159 	case VIRTIO_CONFIG_QUEUE_ADDRESS: return "queue address";
160 	case VIRTIO_CONFIG_QUEUE_SIZE: return "queue size";
161 	case VIRTIO_CONFIG_QUEUE_SELECT: return "queue select";
162 	case VIRTIO_CONFIG_QUEUE_NOTIFY: return "queue notify";
163 	case VIRTIO_CONFIG_DEVICE_STATUS: return "device status";
164 	case VIRTIO_CONFIG_ISR_STATUS: return "isr status";
165 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI: return "num_queues";
166 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 4: return "seg_max";
167 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 8: return "max_sectors";
168 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 12: return "cmd_per_lun";
169 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 16: return "event_info_size";
170 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 20: return "sense_size";
171 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 24: return "cdb_size";
172 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 28: return "max_channel";
173 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 30: return "max_target";
174 	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 32: return "max_lun";
175 	default: return "unknown";
176 	}
177 }
178 
179 static void
180 vioscsi_free_info(struct ioinfo *info)
181 {
182 	if (!info)
183 		return;
184 	free(info->buf);
185 	free(info);
186 }
187 
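/*
 * Allocate an ioinfo describing a read of n_blocks blocks of
 * VIOSCSI_BLOCK_SIZE_CDROM bytes starting at the given block; the
 * actual read is performed later by vioscsi_finish_read().
 */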
188 static struct ioinfo *
189 vioscsi_start_read(struct vioscsi_dev *dev, off_t block, ssize_t n_blocks)
190 {
191 	struct ioinfo *info;
192 
193 	info = calloc(1, sizeof(*info));
194 	if (!info)
195 		goto nomem;
196 	info->buf = malloc(n_blocks * VIOSCSI_BLOCK_SIZE_CDROM);
197 	if (info->buf == NULL)
198 		goto nomem;
199 	info->len = n_blocks * VIOSCSI_BLOCK_SIZE_CDROM;
200 	info->offset = block * VIOSCSI_BLOCK_SIZE_CDROM;
201 	info->file = &dev->file;
202 
203 	return info;
204 
205 nomem:
206 	free(info);
207 	log_warn("malloc error vioscsi read");
208 	return (NULL);
209 }
210 
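/*
 * Perform the read described by the ioinfo using the backing image's
 * pread routine and return the filled buffer, or NULL on a short
 * read or error.
 */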
211 static const uint8_t *
212 vioscsi_finish_read(struct ioinfo *info)
213 {
214 	struct virtio_backing *f;
215 
216 	f = info->file;
217 	if (f->pread(f->p, info->buf, info->len, info->offset) != info->len) {
218 		info->error = errno;
219 		log_warn("vioscsi read error");
220 		return NULL;
221 	}
222 
223 	return info->buf;
224 }
225 
226 static int
227 vioscsi_handle_tur(struct vioscsi_dev *dev, struct virtio_scsi_req_hdr *req,
228     struct virtio_vq_acct *acct)
229 {
230 	int ret = 0;
231 	struct virtio_scsi_res_hdr resp;
232 
233 	memset(&resp, 0, sizeof(resp));
234 	/* Move index for response */
235 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
236 	    &(acct->resp_idx));
237 
238 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
239 
240 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
241 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
242 		    __func__, acct->resp_desc->addr);
243 	} else {
244 		ret = 1;
245 		dev->cfg.isr_status = 1;
246 		/* Move ring indexes */
247 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
248 		    acct->req_desc, acct->req_idx);
249 	}
250 
251 	return (ret);
252 }
253 
254 static int
255 vioscsi_handle_inquiry(struct vioscsi_dev *dev,
256     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
257 {
258 	int ret = 0;
259 	struct virtio_scsi_res_hdr resp;
260 	uint16_t inq_len;
261 	struct scsi_inquiry *inq;
262 	struct scsi_inquiry_data *inq_data;
263 
264 	memset(&resp, 0, sizeof(resp));
265 	inq = (struct scsi_inquiry *)(req->cdb);
266 	inq_len = (uint16_t)_2btol(inq->length);
267 
268 	DPRINTF("%s: INQ - EVPD %d PAGE_CODE 0x%08x LEN %d", __func__,
269 	    inq->flags & SI_EVPD, inq->pagecode, inq_len);
270 
271 	vioscsi_prepare_resp(&resp,
272 	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
273 
274 	inq_data = calloc(1, sizeof(struct scsi_inquiry_data));
275 
276 	if (inq_data == NULL) {
277 		log_warnx("%s: cannot alloc inq_data", __func__);
278 		goto inq_out;
279 	}
280 
281 	inq_data->device = T_CDROM;
282 	inq_data->dev_qual2 = SID_REMOVABLE;
283 	/* Leave version zero to say we don't comply */
284 	inq_data->response_format = INQUIRY_RESPONSE_FORMAT;
285 	inq_data->additional_length = SID_SCSI2_ALEN;
286 	memcpy(inq_data->vendor, INQUIRY_VENDOR, INQUIRY_VENDOR_LEN);
287 	memcpy(inq_data->product, INQUIRY_PRODUCT, INQUIRY_PRODUCT_LEN);
288 	memcpy(inq_data->revision, INQUIRY_REVISION, INQUIRY_REVISION_LEN);
289 
290 	/* Move index for response */
291 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
292 	    &(acct->resp_idx));
293 
294 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
295 	    "idx %d req_idx %d global_idx %d", __func__, acct->resp_desc->addr,
296 	    acct->resp_desc->len, acct->resp_idx, acct->req_idx, acct->idx);
297 
298 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
299 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
300 		    __func__, acct->resp_desc->addr);
301 		goto free_inq;
302 	}
303 
304 	/* Move index for inquiry_data */
305 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
306 	    &(acct->resp_idx));
307 
308 	DPRINTF("%s: writing inq_data to 0x%llx size %d at "
309 	    "local idx %d req_idx %d global_idx %d",
310 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
311 	    acct->resp_idx, acct->req_idx, acct->idx);
312 
313 	if (write_mem(acct->resp_desc->addr, inq_data, acct->resp_desc->len)) {
314 		log_warnx("%s: unable to write inquiry"
315 		    " response to gpa @ 0x%llx",
316 		    __func__, acct->resp_desc->addr);
317 	} else {
318 		ret = 1;
319 		dev->cfg.isr_status = 1;
320 		/* Move ring indexes */
321 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
322 		    acct->req_desc, acct->req_idx);
323 	}
324 
325 free_inq:
326 	free(inq_data);
327 inq_out:
328 	return (ret);
329 }
330 
331 static int
332 vioscsi_handle_mode_sense(struct vioscsi_dev *dev,
333     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
334 {
335 	int ret = 0;
336 	struct virtio_scsi_res_hdr resp;
337 	uint8_t mode_page_ctl;
338 	uint8_t mode_page_code;
339 	uint8_t *mode_reply;
340 	uint8_t mode_reply_len;
341 	struct scsi_mode_sense *mode_sense;
342 
343 	memset(&resp, 0, sizeof(resp));
344 	mode_sense = (struct scsi_mode_sense *)(req->cdb);
345 	mode_page_ctl = mode_sense->page & SMS_PAGE_CTRL;
346 	mode_page_code = mode_sense->page & SMS_PAGE_CODE;
347 
348 	DPRINTF("%s: M_SENSE - DBD %d Page Ctrl 0x%x Code 0x%x Len %u",
349 	    __func__, mode_sense->byte2 & SMS_DBD, mode_page_ctl,
350 	    mode_page_code, mode_sense->length);
351 
352 	if (mode_page_ctl == SMS_PAGE_CTRL_CURRENT &&
353 	    (mode_page_code == ERR_RECOVERY_PAGE ||
354 	    mode_page_code == CDVD_CAPABILITIES_PAGE)) {
355 		/*
356 		 * mode sense header is 4 bytes followed
357 		 * by a variable page
358 		 * ERR_RECOVERY_PAGE is 12 bytes
359 		 * CDVD_CAPABILITIES_PAGE is 27 bytes
360 		 */
361 		switch (mode_page_code) {
362 		case ERR_RECOVERY_PAGE:
363 			mode_reply_len = 16;
364 			mode_reply =
365 			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
366 			if (mode_reply == NULL)
367 				goto mode_sense_out;
368 
369 			/* set the page header */
370 			*mode_reply = mode_reply_len - 1;
371 			*(mode_reply + 1) = MODE_MEDIUM_TYPE_CODE;
372 
373 			/* set the page data, 7.3.2.1 mmc-5 */
374 			*(mode_reply + 4) = MODE_ERR_RECOVERY_PAGE_CODE;
375 			*(mode_reply + 5) = MODE_ERR_RECOVERY_PAGE_LEN;
376 			*(mode_reply + 7) = MODE_READ_RETRY_COUNT;
377 			break;
378 		case CDVD_CAPABILITIES_PAGE:
379 			mode_reply_len = 31;
380 			mode_reply =
381 			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
382 			if (mode_reply == NULL)
383 				goto mode_sense_out;
384 
385 			/* set the page header */
386 			*mode_reply = mode_reply_len - 1;
387 			*(mode_reply + 1) = MODE_MEDIUM_TYPE_CODE;
388 
389 			/* set the page data, 6.3.11 mmc-3 */
390 			*(mode_reply + 4) = MODE_CDVD_CAP_PAGE_CODE;
391 			*(mode_reply + 5) = mode_reply_len - 6;
392 			*(mode_reply + 6) = MODE_CDVD_CAP_READ_CODE;
393 			_lto2b(MODE_CDVD_CAP_NUM_LEVELS, mode_reply + 14);
394 			break;
395 		default:
396 			goto mode_sense_error;
397 			break;
398 		}
399 
400 		/* Move index for response */
401 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
402 		    acct->req_desc, &(acct->resp_idx));
403 
404 		DPRINTF("%s: writing resp to 0x%llx size %d "
405 		    "at local idx %d req_idx %d global_idx %d",
406 		    __func__, acct->resp_desc->addr, acct->resp_desc->len,
407 		    acct->resp_idx, acct->req_idx, acct->idx);
408 
409 		if (write_mem(acct->resp_desc->addr, &resp,
410 		    acct->resp_desc->len)) {
411 			log_warnx("%s: unable to write OK"
412 			    " resp status data @ 0x%llx",
413 			    __func__, acct->resp_desc->addr);
414 			free(mode_reply);
415 			goto mode_sense_out;
416 		}
417 
418 		/* Move index for mode_reply */
419 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
420 		    acct->resp_desc, &(acct->resp_idx));
421 
422 		DPRINTF("%s: writing mode_reply to 0x%llx "
423 		    "size %d at local idx %d req_idx %d "
424 		    "global_idx %d",__func__, acct->resp_desc->addr,
425 		    acct->resp_desc->len, acct->resp_idx, acct->req_idx,
426 		    acct->idx);
427 
428 		if (write_mem(acct->resp_desc->addr, mode_reply,
429 		    acct->resp_desc->len)) {
430 			log_warnx("%s: unable to write "
431 			    "mode_reply to gpa @ 0x%llx",
432 			    __func__, acct->resp_desc->addr);
433 			free(mode_reply);
434 			goto mode_sense_out;
435 		}
436 
437 		free(mode_reply);
438 
439 		ret = 1;
440 		dev->cfg.isr_status = 1;
441 		/* Move ring indexes */
442 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
443 		    acct->req_desc, acct->req_idx);
444 	} else {
445 mode_sense_error:
446 		/* send back unsupported */
447 		vioscsi_prepare_resp(&resp,
448 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
449 		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
450 
451 		/* Move index for response */
452 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
453 		    acct->req_desc, &(acct->resp_idx));
454 
455 		if (write_mem(acct->resp_desc->addr, &resp,
456 		    acct->resp_desc->len)) {
457 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
458 			    __func__, acct->resp_desc->addr);
459 			goto mode_sense_out;
460 		}
461 
462 		ret = 1;
463 		dev->cfg.isr_status = 1;
464 		/* Move ring indexes */
465 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
466 		    acct->req_desc, acct->req_idx);
467 	}
468 mode_sense_out:
469 	return (ret);
470 }
471 
472 static int
473 vioscsi_handle_mode_sense_big(struct vioscsi_dev *dev,
474     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
475 {
476 	int ret = 0;
477 	struct virtio_scsi_res_hdr resp;
478 	uint8_t mode_page_ctl;
479 	uint8_t mode_page_code;
480 	uint8_t *mode_reply;
481 	uint8_t mode_reply_len;
482 	uint16_t mode_sense_len;
483 	struct scsi_mode_sense_big *mode_sense_10;
484 
485 	memset(&resp, 0, sizeof(resp));
486 	mode_sense_10 = (struct scsi_mode_sense_big *)(req->cdb);
487 	mode_page_ctl = mode_sense_10->page & SMS_PAGE_CTRL;
488 	mode_page_code = mode_sense_10->page & SMS_PAGE_CODE;
489 	mode_sense_len = (uint16_t)_2btol(mode_sense_10->length);
490 
491 	DPRINTF("%s: M_SENSE_10 - DBD %d Page Ctrl 0x%x Code 0x%x Len %u",
492 	    __func__, mode_sense_10->byte2 & SMS_DBD, mode_page_ctl,
493 	    mode_page_code, mode_sense_len);
494 
495 	if (mode_page_ctl == SMS_PAGE_CTRL_CURRENT &&
496 	    (mode_page_code == ERR_RECOVERY_PAGE ||
497 	    mode_page_code == CDVD_CAPABILITIES_PAGE)) {
498 		/*
499 		 * mode sense header is 8 bytes followed
500 		 * by a variable page
501 		 * ERR_RECOVERY_PAGE is 12 bytes
502 		 * CDVD_CAPABILITIES_PAGE is 27 bytes
503 		 */
504 		switch (mode_page_code) {
505 		case ERR_RECOVERY_PAGE:
506 			mode_reply_len = 20;
507 			mode_reply =
508 			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
509 			if (mode_reply == NULL)
510 				goto mode_sense_big_out;
511 
512 			/* set the page header */
513 			_lto2b(mode_reply_len - 2, mode_reply);
514 			*(mode_reply + 2) = MODE_MEDIUM_TYPE_CODE;
515 
516 			/* set the page data, 7.3.2.1 mmc-5 */
517 			*(mode_reply + 8) = MODE_ERR_RECOVERY_PAGE_CODE;
518 			*(mode_reply + 9) = MODE_ERR_RECOVERY_PAGE_LEN;
519 			*(mode_reply + 11) = MODE_READ_RETRY_COUNT;
520 			break;
521 		case CDVD_CAPABILITIES_PAGE:
522 			mode_reply_len = 35;
523 			mode_reply =
524 			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
525 			if (mode_reply == NULL)
526 				goto mode_sense_big_out;
527 
528 			/* set the page header */
529 			_lto2b(mode_reply_len - 2, mode_reply);
530 			*(mode_reply + 2) = MODE_MEDIUM_TYPE_CODE;
531 
532 			/* set the page data, 6.3.11 mmc-3 */
533 			*(mode_reply + 8) = MODE_CDVD_CAP_PAGE_CODE;
534 			*(mode_reply + 9) = mode_reply_len - 6;
535 			*(mode_reply + 10) = MODE_CDVD_CAP_READ_CODE;
536 			_lto2b(MODE_CDVD_CAP_NUM_LEVELS, mode_reply + 18);
537 			break;
538 		default:
539 			goto mode_sense_big_error;
540 			break;
541 		}
542 
543 		/* Move index for response */
544 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
545 		    acct->req_desc, &(acct->resp_idx));
546 
547 		DPRINTF("%s: writing resp to 0x%llx size %d "
548 		    "at local idx %d req_idx %d global_idx %d",
549 		    __func__, acct->resp_desc->addr, acct->resp_desc->len,
550 		    acct->resp_idx, acct->req_idx, acct->idx);
551 
552 		if (write_mem(acct->resp_desc->addr, &resp,
553 		    acct->resp_desc->len)) {
554 			log_warnx("%s: unable to write OK"
555 			    " resp status data @ 0x%llx",
556 			    __func__, acct->resp_desc->addr);
557 			free(mode_reply);
558 			goto mode_sense_big_out;
559 		}
560 
561 		/* Move index for mode_reply */
562 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
563 		    acct->resp_desc, &(acct->resp_idx));
564 
565 		DPRINTF("%s: writing mode_reply to 0x%llx "
566 		    "size %d at local idx %d req_idx %d global_idx %d",
567 		    __func__, acct->resp_desc->addr, acct->resp_desc->len,
568 		    acct->resp_idx, acct->req_idx, acct->idx);
569 
570 		if (write_mem(acct->resp_desc->addr, mode_reply,
571 		    acct->resp_desc->len)) {
572 			log_warnx("%s: unable to write "
573 			    "mode_reply to gpa @ 0x%llx",
574 			    __func__, acct->resp_desc->addr);
575 			free(mode_reply);
576 			goto mode_sense_big_out;
577 		}
578 
579 		free(mode_reply);
580 
581 		ret = 1;
582 		dev->cfg.isr_status = 1;
583 		/* Move ring indexes */
584 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
585 		    acct->req_desc, acct->req_idx);
586 	} else {
587 mode_sense_big_error:
588 		/* send back unsupported */
589 		vioscsi_prepare_resp(&resp,
590 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
591 		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
592 
593 		/* Move index for response */
594 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
595 		    acct->req_desc, &(acct->resp_idx));
596 
597 		if (write_mem(acct->resp_desc->addr, &resp,
598 		    acct->resp_desc->len)) {
599 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
600 			    __func__, acct->resp_desc->addr);
601 			goto mode_sense_big_out;
602 		}
603 
604 		ret = 1;
605 		dev->cfg.isr_status = 1;
606 		/* Move ring indexes */
607 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
608 		    acct->req_desc, acct->req_idx);
609 	}
610 mode_sense_big_out:
611 	return (ret);
612 }
613 
614 static int
615 vioscsi_handle_read_capacity(struct vioscsi_dev *dev,
616     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
617 {
618 	int ret = 0;
619 	struct virtio_scsi_res_hdr resp;
620 	uint32_t r_cap_addr;
621 	struct scsi_read_capacity *r_cap;
622 	struct scsi_read_cap_data *r_cap_data;
623 
624 	memset(&resp, 0, sizeof(resp));
625 	r_cap = (struct scsi_read_capacity *)(req->cdb);
626 	r_cap_addr = _4btol(r_cap->addr);
627 	DPRINTF("%s: %s - Addr 0x%08x", __func__,
628 	    vioscsi_op_names(r_cap->opcode), r_cap_addr);
629 
630 	vioscsi_prepare_resp(&resp,
631 	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
632 
633 	r_cap_data = calloc(1, sizeof(struct scsi_read_cap_data));
634 
635 	if (r_cap_data == NULL) {
636 		log_warnx("%s: cannot alloc r_cap_data", __func__);
637 		goto read_capacity_out;
638 	}
639 
640 	DPRINTF("%s: ISO has %lld bytes and %lld blocks",
641 	    __func__, dev->sz, dev->n_blocks);
642 
643 	/*
644 	 * determine if the number of blocks of the ISO image is
645 	 * >= UINT32_MAX; if so, set addr to UINT32_MAX (0xffffffff),
646 	 * indicating to the host that READ_CAPACITY_16 should
647 	 * be called to retrieve the full size
648 	 */
649 	if (dev->n_blocks >= UINT32_MAX) {
650 		_lto4b(UINT32_MAX, r_cap_data->addr);
651 		_lto4b(VIOSCSI_BLOCK_SIZE_CDROM, r_cap_data->length);
652 		log_warnx("%s: ISO sz %lld is bigger than "
653 		    "UINT32_MAX %u, all data may not be read",
654 		    __func__, dev->sz, UINT32_MAX);
655 	} else {
656 		_lto4b(dev->n_blocks - 1, r_cap_data->addr);
657 		_lto4b(VIOSCSI_BLOCK_SIZE_CDROM, r_cap_data->length);
658 	}
659 
660 	/* Move index for response */
661 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
662 	    &(acct->resp_idx));
663 
664 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
665 	    "idx %d req_idx %d global_idx %d",
666 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
667 	    acct->resp_idx, acct->req_idx, acct->idx);
668 
669 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
670 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
671 		    __func__, acct->resp_desc->addr);
672 		goto free_read_capacity;
673 	}
674 
675 	/* Move index for r_cap_data */
676 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
677 	    &(acct->resp_idx));
678 
679 	DPRINTF("%s: writing r_cap_data to 0x%llx size %d at "
680 	    "local idx %d req_idx %d global_idx %d",
681 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
682 	    acct->resp_idx, acct->req_idx, acct->idx);
683 
684 	if (write_mem(acct->resp_desc->addr, r_cap_data,
685 	    acct->resp_desc->len)) {
686 		log_warnx("%s: unable to write read_cap_data"
687 		    " response to gpa @ 0x%llx",
688 		    __func__, acct->resp_desc->addr);
689 	} else {
690 		ret = 1;
691 		dev->cfg.isr_status = 1;
692 		/* Move ring indexes */
693 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
694 		    acct->req_desc, acct->req_idx);
695 	}
696 
697 free_read_capacity:
698 	free(r_cap_data);
699 read_capacity_out:
700 	return (ret);
701 }
702 
703 static int
704 vioscsi_handle_read_capacity_16(struct vioscsi_dev *dev,
705     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
706 {
707 	int ret = 0;
708 	struct virtio_scsi_res_hdr resp;
709 	uint64_t r_cap_addr_16;
710 	struct scsi_read_capacity_16 *r_cap_16;
711 	struct scsi_read_cap_data_16 *r_cap_data_16;
712 
713 	memset(&resp, 0, sizeof(resp));
714 	r_cap_16 = (struct scsi_read_capacity_16 *)(req->cdb);
715 	r_cap_addr_16 = _8btol(r_cap_16->addr);
716 	DPRINTF("%s: %s - Addr 0x%016llx", __func__,
717 	    vioscsi_op_names(r_cap_16->opcode), r_cap_addr_16);
718 
719 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
720 
721 	r_cap_data_16 = calloc(1, sizeof(struct scsi_read_cap_data_16));
722 
723 	if (r_cap_data_16 == NULL) {
724 		log_warnx("%s: cannot alloc r_cap_data_16",
725 		    __func__);
726 		goto read_capacity_16_out;
727 	}
728 
729 	DPRINTF("%s: ISO has %lld bytes and %lld blocks", __func__,
730 	    dev->sz, dev->n_blocks);
731 
732 	_lto8b(dev->n_blocks - 1, r_cap_data_16->addr);
733 	_lto4b(VIOSCSI_BLOCK_SIZE_CDROM, r_cap_data_16->length);
734 
735 	/* Move index for response */
736 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
737 	    &(acct->resp_idx));
738 
739 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
740 	    "idx %d req_idx %d global_idx %d",
741 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
742 	    acct->resp_idx, acct->req_idx, acct->idx);
743 
744 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
745 		log_warnx("%s: unable to write OK resp status "
746 		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
747 		goto free_read_capacity_16;
748 	}
749 
750 	/* Move index for r_cap_data_16 */
751 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
752 	    &(acct->resp_idx));
753 
754 	DPRINTF("%s: writing r_cap_data_16 to 0x%llx size %d "
755 	    "at local idx %d req_idx %d global_idx %d",
756 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
757 	    acct->resp_idx, acct->req_idx, acct->idx);
758 
759 	if (write_mem(acct->resp_desc->addr, r_cap_data_16,
760 	    acct->resp_desc->len)) {
761 		log_warnx("%s: unable to write read_cap_data_16"
762 		    " response to gpa @ 0x%llx",
763 		    __func__, acct->resp_desc->addr);
764 	} else {
765 		ret = 1;
766 		dev->cfg.isr_status = 1;
767 		/* Move ring indexes */
768 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
769 		    acct->req_desc, acct->req_idx);
770 	}
771 
772 free_read_capacity_16:
773 	free(r_cap_data_16);
774 read_capacity_16_out:
775 	return (ret);
776 }
777 
778 static int
779 vioscsi_handle_report_luns(struct vioscsi_dev *dev,
780     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
781 {
782 	int ret = 0;
783 	struct virtio_scsi_res_hdr resp;
784 	uint32_t rpl_length;
785 	struct scsi_report_luns *rpl;
786 	struct vioscsi_report_luns_data *reply_rpl;
787 
788 	memset(&resp, 0, sizeof(resp));
789 	rpl = (struct scsi_report_luns *)(req->cdb);
790 	rpl_length = _4btol(rpl->length);
791 
792 	DPRINTF("%s: REPORT_LUNS Report 0x%x Length %d", __func__,
793 	    rpl->selectreport, rpl_length);
794 
795 	if (rpl_length < RPL_MIN_SIZE) {
796 		DPRINTF("%s: RPL_Length %d < %d (RPL_MIN_SIZE)", __func__,
797 		    rpl_length, RPL_MIN_SIZE);
798 
799 		vioscsi_prepare_resp(&resp,
800 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
801 		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
802 
803 		/* Move index for response */
804 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
805 		    acct->req_desc, &(acct->resp_idx));
806 
807 		if (write_mem(acct->resp_desc->addr, &resp,
808 		    acct->resp_desc->len)) {
809 			log_warnx("%s: unable to set ERR "
810 			    "status data @ 0x%llx", __func__,
811 			    acct->resp_desc->addr);
812 		} else {
813 			ret = 1;
814 			dev->cfg.isr_status = 1;
815 			/* Move ring indexes */
816 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
817 			    acct->req_desc, acct->req_idx);
818 		}
819 		goto rpl_out;
820 
821 	}
822 
823 	reply_rpl = calloc(1, sizeof(*reply_rpl));
824 
825 	if (reply_rpl == NULL) {
826 		log_warnx("%s: cannot alloc reply_rpl", __func__);
827 		goto rpl_out;
828 	}
829 
830 	_lto4b(RPL_SINGLE_LUN, reply_rpl->length);
831 	memcpy(reply_rpl->lun, req->lun, RPL_SINGLE_LUN);
832 
833 	vioscsi_prepare_resp(&resp,
834 	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
835 
836 	/* Move index for response */
837 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
838 	    &(acct->resp_idx));
839 
840 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
841 	    "idx %d req_idx %d global_idx %d", __func__, acct->resp_desc->addr,
842 	    acct->resp_desc->len, acct->resp_idx, acct->req_idx, acct->idx);
843 
844 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
845 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
846 		    __func__, acct->resp_desc->addr);
847 		goto free_rpl;
848 	}
849 
850 	/* Move index for reply_rpl */
851 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
852 	    &(acct->resp_idx));
853 
854 	DPRINTF("%s: writing reply_rpl to 0x%llx size %d at "
855 	    "local idx %d req_idx %d global_idx %d",
856 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
857 	    acct->resp_idx, acct->req_idx, acct->idx);
858 
859 	if (write_mem(acct->resp_desc->addr, reply_rpl, acct->resp_desc->len)) {
860 		log_warnx("%s: unable to write reply_rpl"
861 		    " response to gpa @ 0x%llx",
862 		    __func__, acct->resp_desc->addr);
863 	} else {
864 		ret = 1;
865 		dev->cfg.isr_status = 1;
866 		/* Move ring indexes */
867 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
868 		    acct->req_desc, acct->req_idx);
869 	}
870 
871 free_rpl:
872 	free(reply_rpl);
873 rpl_out:
874 	return (ret);
875 }
876 
877 static int
878 vioscsi_handle_read_6(struct vioscsi_dev *dev,
879     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
880 {
881 	int ret = 0;
882 	struct virtio_scsi_res_hdr resp;
883 	const uint8_t *read_buf;
884 	uint32_t read_lba;
885 	struct ioinfo *info;
886 	struct scsi_rw *read_6;
887 
888 	memset(&resp, 0, sizeof(resp));
889 	read_6 = (struct scsi_rw *)(req->cdb);
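	/*
	 * READ(6) carries a 21-bit LBA split across addr[0..2]; mask the
	 * top byte with SRW_TOPADDR before assembling the block address.
	 */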
890 	read_lba = ((read_6->addr[0] & SRW_TOPADDR) << 16 ) |
891 	    (read_6->addr[1] << 8) | read_6->addr[2];
892 
893 	DPRINTF("%s: READ Addr 0x%08x Len %d (%d)",
894 	    __func__, read_lba, read_6->length, read_6->length * dev->max_xfer);
895 
896 	/* check if lba is in range */
897 	if (read_lba > dev->n_blocks - 1) {
898 		DPRINTF("%s: requested block out of range req: %u max: %lld",
899 		    __func__, read_lba, dev->n_blocks);
900 
901 		vioscsi_prepare_resp(&resp,
902 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
903 		    SENSE_LBA_OUT_OF_RANGE, SENSE_DEFAULT_ASCQ);
904 
905 		/* Move index for response */
906 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
907 		    acct->req_desc, &(acct->resp_idx));
908 
909 		if (write_mem(acct->resp_desc->addr, &resp,
910 		    acct->resp_desc->len)) {
911 			log_warnx("%s: unable to set ERR "
912 			    "status data @ 0x%llx", __func__,
913 			    acct->resp_desc->addr);
914 		} else {
915 			ret = 1;
916 			dev->cfg.isr_status = 1;
917 			/* Move ring indexes */
918 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
919 			    acct->req_desc, acct->req_idx);
920 		}
921 		goto read_6_out;
922 	}
923 
924 	info = vioscsi_start_read(dev, read_lba, read_6->length);
925 
926 	if (info == NULL) {
927 		log_warnx("%s: cannot alloc for read", __func__);
928 		goto read_6_out;
929 	}
930 
931 	/* read block */
932 	read_buf = vioscsi_finish_read(info);
933 
934 	if (read_buf == NULL) {
935 		log_warnx("%s: error reading position %u",
936 		    __func__, read_lba);
937 		vioscsi_prepare_resp(&resp,
938 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_MEDIUM_ERROR,
939 		    SENSE_MEDIUM_NOT_PRESENT, SENSE_DEFAULT_ASCQ);
940 
941 		/* Move index for response */
942 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
943 		    acct->req_desc, &(acct->resp_idx));
944 
945 		if (write_mem(acct->resp_desc->addr, &resp,
946 		    acct->resp_desc->len)) {
947 			log_warnx("%s: unable to set ERR "
948 			    "status data @ 0x%llx", __func__,
949 			    acct->resp_desc->addr);
950 		} else {
951 			ret = 1;
952 			dev->cfg.isr_status = 1;
953 			/* Move ring indexes */
954 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
955 			    acct->req_desc, acct->req_idx);
956 		}
957 
958 		goto free_read_6;
959 	}
960 
961 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
962 
963 	/* Move index for response */
964 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
965 	    &(acct->resp_idx));
966 
967 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
968 	    "idx %d req_idx %d global_idx %d",
969 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
970 	    acct->resp_idx, acct->req_idx, acct->idx);
971 
972 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
973 		log_warnx("%s: unable to write OK resp status "
974 		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
975 		goto free_read_6;
976 	}
977 
978 	/* Move index for read_buf */
979 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
980 	    &(acct->resp_idx));
981 
982 	DPRINTF("%s: writing read_buf to 0x%llx size %d at "
983 	    "local idx %d req_idx %d global_idx %d",
984 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
985 	    acct->resp_idx, acct->req_idx, acct->idx);
986 
987 	if (write_mem(acct->resp_desc->addr, read_buf,
988 	    acct->resp_desc->len)) {
989 		log_warnx("%s: unable to write read_buf to gpa @ 0x%llx",
990 		    __func__, acct->resp_desc->addr);
991 	} else {
992 		ret = 1;
993 		dev->cfg.isr_status = 1;
994 		/* Move ring indexes */
995 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
996 		    acct->req_desc, acct->req_idx);
997 	}
998 
999 free_read_6:
1000 	vioscsi_free_info(info);
1001 read_6_out:
1002 	return (ret);
1003 }
1004 
1005 static int
1006 vioscsi_handle_read_10(struct vioscsi_dev *dev,
1007     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1008 {
1009 	int ret = 0;
1010 	struct virtio_scsi_res_hdr resp;
1011 	const uint8_t *read_buf;
1012 	uint32_t read_lba;
1013 	uint16_t read_10_len;
1014 	off_t chunk_offset;
1015 	struct ioinfo *info;
1016 	struct scsi_rw_big *read_10;
1017 
1018 	memset(&resp, 0, sizeof(resp));
1019 	read_10 = (struct scsi_rw_big *)(req->cdb);
1020 	read_lba = _4btol(read_10->addr);
1021 	read_10_len = _2btol(read_10->length);
1022 	chunk_offset = 0;
1023 
1024 	DPRINTF("%s: READ_10 Addr 0x%08x Len %d (%d)",
1025 	    __func__, read_lba, read_10_len, read_10_len * dev->max_xfer);
1026 
1027 	/* check if lba is in range */
1028 	if (read_lba > dev->n_blocks - 1) {
1029 		DPRINTF("%s: requested block out of range req: %u max: %lld",
1030 		    __func__, read_lba, dev->n_blocks);
1031 
1032 		vioscsi_prepare_resp(&resp,
1033 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
1034 		    SENSE_LBA_OUT_OF_RANGE, SENSE_DEFAULT_ASCQ);
1035 
1036 		/* Move index for response */
1037 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1038 		    acct->req_desc, &(acct->resp_idx));
1039 
1040 		if (write_mem(acct->resp_desc->addr, &resp,
1041 		    acct->resp_desc->len)) {
1042 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
1043 			    __func__, acct->resp_desc->addr);
1044 		} else {
1045 			ret = 1;
1046 			dev->cfg.isr_status = 1;
1047 			/* Move ring indexes */
1048 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
1049 			    acct->req_desc, acct->req_idx);
1050 		}
1051 
1052 		goto read_10_out;
1053 	}
1054 
1055 	info = vioscsi_start_read(dev, read_lba, read_10_len);
1056 
1057 	if (info == NULL) {
1058 		log_warnx("%s: cannot alloc for read", __func__);
1059 		goto read_10_out;
1060 	}
1061 
1062 	/* read block */
1063 	read_buf = vioscsi_finish_read(info);
1064 
1065 	if (read_buf == NULL) {
1066 		log_warnx("%s: error reading position %u", __func__, read_lba);
1067 		vioscsi_prepare_resp(&resp,
1068 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_MEDIUM_ERROR,
1069 		    SENSE_MEDIUM_NOT_PRESENT, SENSE_DEFAULT_ASCQ);
1070 
1071 		/* Move index for response */
1072 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1073 		    acct->req_desc, &(acct->resp_idx));
1074 
1075 		if (write_mem(acct->resp_desc->addr, &resp,
1076 		    acct->resp_desc->len)) {
1077 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
1078 			    __func__, acct->resp_desc->addr);
1079 		} else {
1080 			ret = 1;
1081 			dev->cfg.isr_status = 1;
1082 			/* Move ring indexes */
1083 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
1084 			    acct->req_desc, acct->req_idx);
1085 		}
1086 
1087 		goto free_read_10;
1088 	}
1089 
1090 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1091 
1092 	/* Move index for response */
1093 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
1094 	    &(acct->resp_idx));
1095 
1096 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
1097 	    "idx %d req_idx %d global_idx %d",
1098 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1099 	    acct->resp_idx, acct->req_idx, acct->idx);
1100 
1101 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
1102 		log_warnx("%s: unable to write OK resp status "
1103 		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
1104 		goto free_read_10;
1105 	}
1106 
1107 	/*
1108 	 * Perform possible chunking of writes of read_buf
1109 	 * based on the segment length allocated by the host.
1110 	 * At least one write will be performed.
1111 	 * If chunk_offset == info->len, no more writes
1112 	 */
1113 	do {
1114 		/* Move index for read_buf */
1115 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1116 		    acct->resp_desc, &(acct->resp_idx));
1117 
1118 		DPRINTF("%s: writing read_buf to 0x%llx size "
1119 		    "%d at local idx %d req_idx %d global_idx %d",
1120 		    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1121 		    acct->resp_idx, acct->req_idx, acct->idx);
1122 
1123 		if (write_mem(acct->resp_desc->addr,
1124 		    read_buf + chunk_offset, acct->resp_desc->len)) {
1125 			log_warnx("%s: unable to write read_buf"
1126 			    " to gpa @ 0x%llx", __func__,
1127 			    acct->resp_desc->addr);
1128 			goto free_read_10;
1129 		}
1130 		chunk_offset += acct->resp_desc->len;
1131 	} while (chunk_offset < info->len);
1132 
1133 	ret = 1;
1134 	dev->cfg.isr_status = 1;
1135 	/* Move ring indexes */
1136 	vioscsi_next_ring_item(dev, acct->avail, acct->used, acct->req_desc,
1137 	    acct->req_idx);
1138 
1139 free_read_10:
1140 	vioscsi_free_info(info);
1141 read_10_out:
1142 	return (ret);
1143 }
1144 
1145 static int
1146 vioscsi_handle_prevent_allow(struct vioscsi_dev *dev,
1147     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1148 {
1149 	int ret = 0;
1150 	struct virtio_scsi_res_hdr resp;
1151 
1152 	memset(&resp, 0, sizeof(resp));
1153 	/* Move index for response */
1154 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
1155 	    &(acct->resp_idx));
1156 
1157 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1158 
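	/*
	 * Note: the lock state is simply toggled on every PREVENT/ALLOW
	 * request; the prevent bit in the CDB is not examined here.
	 */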
1159 	if (dev->locked) {
1160 		DPRINTF("%s: unlocking medium", __func__);
1161 	} else {
1162 		DPRINTF("%s: locking medium", __func__);
1163 	}
1164 
1165 	dev->locked = dev->locked ? 0 : 1;
1166 
1167 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
1168 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
1169 		    __func__, acct->resp_desc->addr);
1170 	} else {
1171 		ret = 1;
1172 		dev->cfg.isr_status = 1;
1173 		/* Move ring indexes */
1174 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1175 		    acct->req_desc, acct->req_idx);
1176 	}
1177 
1178 	return (ret);
1179 }
1180 
1181 static int
1182 vioscsi_handle_mechanism_status(struct vioscsi_dev *dev,
1183     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1184 {
1185 	int ret = 0;
1186 	struct virtio_scsi_res_hdr resp;
1187 	uint16_t mech_status_len;
1188 	struct scsi_mechanism_status *mech_status;
1189 	struct scsi_mechanism_status_header *mech_status_header;
1190 
1191 	memset(&resp, 0, sizeof(resp));
1192 	mech_status = (struct scsi_mechanism_status *)(req->cdb);
1193 	mech_status_len = (uint16_t)_2btol(mech_status->length);
1194 	DPRINTF("%s: MECH_STATUS Len %u", __func__, mech_status_len);
1195 
1196 	mech_status_header = calloc(1, sizeof(*mech_status_header));
1197 
1198 	if (mech_status_header == NULL)
1199 		goto mech_out;
1200 
1201 	/* return a 0 header since we are not a changer */
1202 	vioscsi_prepare_resp(&resp,
1203 	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1204 
1205 	/* Move index for response */
1206 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1207 	    acct->req_desc, &(acct->resp_idx));
1208 
1209 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
1210 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
1211 		    __func__, acct->resp_desc->addr);
1212 		goto free_mech;
1213 	}
1214 
1215 	/* Move index for mech_status_header */
1216 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
1217 	    &(acct->resp_idx));
1218 
1219 	if (write_mem(acct->resp_desc->addr, mech_status_header,
1220 	    acct->resp_desc->len)) {
1221 		log_warnx("%s: unable to write "
1222 		    "mech_status_header response to "
1223 		    "gpa @ 0x%llx",
1224 		    __func__, acct->resp_desc->addr);
1225 	} else {
1226 		ret = 1;
1227 		dev->cfg.isr_status = 1;
1228 		/* Move ring indexes */
1229 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1230 		    acct->req_desc, acct->req_idx);
1231 	}
1232 
1233 free_mech:
1234 	free(mech_status_header);
1235 mech_out:
1236 	return (ret);
1237 }
1238 
1239 static int
1240 vioscsi_handle_read_toc(struct vioscsi_dev *dev,
1241     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1242 {
1243 	int ret = 0;
1244 	struct virtio_scsi_res_hdr resp;
1245 	uint16_t toc_len;
1246 	uint16_t toc_data_len;
1247 	uint8_t toc_data[TOC_DATA_SIZE];
1248 	uint8_t *toc_data_p;
1249 	struct scsi_read_toc *toc;
1250 
1251 	memset(&resp, 0, sizeof(resp));
1252 	toc = (struct scsi_read_toc *)(req->cdb);
1253 	toc_len = (uint16_t)_2btol(toc->data_len);
1254 	DPRINTF("%s: %s - MSF %d Track 0x%02x Addr 0x%04x",
1255 	    __func__, vioscsi_op_names(toc->opcode),
1256 	    ((toc->byte2 >> 1) & 1), toc->from_track, toc_len);
1257 
1258 	memset(toc_data, 0, sizeof(toc_data));
1259 
1260 	/* Tracks should be 0, 1, or LEAD_OUT_TRACK, 0xaa */
1261 	if (toc->from_track > 1 &&
1262 	    toc->from_track != READ_TOC_LEAD_OUT_TRACK) {
1263 		/* illegal request */
1264 		log_warnx("%s: illegal request Track 0x%02x",
1265 		    __func__, toc->from_track);
1266 
1267 		vioscsi_prepare_resp(&resp,
1268 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
1269 		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
1270 
1271 		/* Move index for response */
1272 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1273 		    acct->req_desc, &(acct->resp_idx));
1274 
1275 		if (write_mem(acct->resp_desc->addr, &resp,
1276 		    acct->resp_desc->len)) {
1277 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
1278 			    __func__, acct->resp_desc->addr);
1279 			goto read_toc_out;
1280 		}
1281 
1282 		ret = 1;
1283 		dev->cfg.isr_status = 1;
1284 		/* Move ring indexes */
1285 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1286 		    acct->req_desc, acct->req_idx);
1287 
1288 		goto read_toc_out;
1289 	}
1290 
1291 	/*
1292 	 * toc_data is defined as:
1293 	 * [0-1]: TOC Data Length, typically 0x1a
1294 	 * [2]: First Track, 1
1295 	 * [3]: Last Track, 1
1296 	 *
1297 	 * Track 1 Descriptor
1298 	 * [0]: Reserved, 0
1299 	 * [1]: ADR,Control, 0x14
1300 	 * [2]: Track #, 1
1301 	 * [3]: Reserved, 0
1302 	 * [4-7]: Track Start Address, LBA
1303 	 *
1304 	 * Track 0xaa (Lead Out) Descriptor
1305 	 * [0]: Reserved, 0
1306 	 * [1]: ADR,Control, 0x14
1307 	 * [2]: Track #, 0xaa
1308 	 * [3]: Reserved, 0
1309 	 * [4-7]: Track Start Address, LBA
1310 	 */
1311 	toc_data_p = toc_data + 2;
1312 	*toc_data_p++ = READ_TOC_START_TRACK;
1313 	*toc_data_p++ = READ_TOC_LAST_TRACK;
1314 	if (toc->from_track <= 1) {
1315 		/* first track descriptor */
1316 		*toc_data_p++ = 0x0;
1317 		*toc_data_p++ = READ_TOC_ADR_CTL;
1318 		*toc_data_p++ = READ_TOC_START_TRACK;
1319 		*toc_data_p++ = 0x0;
1320 		/* start addr for first track is 0 */
1321 		*toc_data_p++ = 0x0;
1322 		*toc_data_p++ = 0x0;
1323 		*toc_data_p++ = 0x0;
1324 		*toc_data_p++ = 0x0;
1325 	}
1326 
1327 	/* last track descriptor */
1328 	*toc_data_p++ = 0x0;
1329 	*toc_data_p++ = READ_TOC_ADR_CTL;
1330 	*toc_data_p++ = READ_TOC_LEAD_OUT_TRACK;
1331 	*toc_data_p++ = 0x0;
1332 
1333 	_lto4b((uint32_t)dev->n_blocks, toc_data_p);
1334 	toc_data_p += 4;
1335 
1336 	toc_data_len = toc_data_p - toc_data;
1337 	_lto2b((uint32_t)toc_data_len - 2, toc_data);
1338 
1339 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1340 
1341 	/* Move index for response */
1342 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
1343 	    &(acct->resp_idx));
1344 
1345 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
1346 	    "idx %d req_idx %d global_idx %d",
1347 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1348 	    acct->resp_idx, acct->req_idx, acct->idx);
1349 
1350 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
1351 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
1352 		    __func__, acct->resp_desc->addr);
1353 		goto read_toc_out;
1354 	}
1355 
1356 	/* Move index for toc descriptor */
1357 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
1358 	    &(acct->resp_idx));
1359 
1360 	DPRINTF("%s: writing toc_data to 0x%llx size %d at "
1361 	    "local idx %d req_idx %d global_idx %d",
1362 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1363 	    acct->resp_idx, acct->req_idx, acct->idx);
1364 
1365 	if (write_mem(acct->resp_desc->addr, toc_data,
1366 	    acct->resp_desc->len)) {
1367 		log_warnx("%s: unable to write toc descriptor data @ 0x%llx",
1368 		    __func__, acct->resp_desc->addr);
1369 	} else {
1370 		ret = 1;
1371 		dev->cfg.isr_status = 1;
1372 		/* Move ring indexes */
1373 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1374 		    acct->req_desc, acct->req_idx);
1375 	}
1376 
1377 read_toc_out:
1378 	return (ret);
1379 }
1380 
1381 static int
1382 vioscsi_handle_read_disc_info(struct vioscsi_dev *dev,
1383     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1384 {
1385 	int ret = 0;
1386 	struct virtio_scsi_res_hdr resp;
1387 	struct scsi_read_disc_information *read_disc;
1388 
1389 	memset(&resp, 0, sizeof(resp));
1390 	read_disc =
1391 	    (struct scsi_read_disc_information *)(req->cdb);
1392 	DPRINTF("%s: Disc Info %x", __func__, read_disc->byte2);
1393 
1394 	/* send back unsupported */
1395 	vioscsi_prepare_resp(&resp,
1396 	    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
1397 	    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
1398 
1399 	/* Move index for response */
1400 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1401 	    acct->req_desc, &(acct->resp_idx));
1402 
1403 	if (write_mem(acct->resp_desc->addr, &resp,
1404 	    acct->resp_desc->len)) {
1405 		log_warnx("%s: unable to set ERR status data @ 0x%llx",
1406 		    __func__, acct->resp_desc->addr);
1407 	} else {
1408 		ret = 1;
1409 		dev->cfg.isr_status = 1;
1410 		/* Move ring indexes */
1411 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1412 		    acct->req_desc, acct->req_idx);
1413 	}
1414 
1415 	return (ret);
1416 }
1417 
1418 static int
1419 vioscsi_handle_gesn(struct vioscsi_dev *dev,
1420     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1421 {
1422 	int ret = 0;
1423 	struct virtio_scsi_res_hdr resp;
1424 	uint8_t gesn_reply[GESN_SIZE];
1425 	struct scsi_gesn *gesn;
1426 	struct scsi_gesn_event_header *gesn_event_header;
1427 	struct scsi_gesn_power_event *gesn_power_event;
1428 
1429 	memset(&resp, 0, sizeof(resp));
1430 	gesn = (struct scsi_gesn *)(req->cdb);
1431 	DPRINTF("%s: GESN Method %s", __func__,
1432 	    gesn->byte2 ? "Polling" : "Asynchronous");
1433 
1434 	if (gesn->byte2 == 0) {
1435 		/* we don't support asynchronous */
1436 		vioscsi_prepare_resp(&resp,
1437 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
1438 		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
1439 
1440 		/* Move index for response */
1441 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1442 		    acct->req_desc, &(acct->resp_idx));
1443 
1444 		if (write_mem(acct->resp_desc->addr, &resp,
1445 		    acct->resp_desc->len)) {
1446 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
1447 			    __func__, acct->resp_desc->addr);
1448 			goto gesn_out;
1449 		}
1450 
1451 		ret = 1;
1452 		dev->cfg.isr_status = 1;
1453 		/* Move ring indexes */
1454 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1455 		    acct->req_desc, acct->req_idx);
1456 
1457 		goto gesn_out;
1458 	}
1459 	memset(gesn_reply, 0, sizeof(gesn_reply));
1460 	gesn_event_header = (struct scsi_gesn_event_header *)(gesn_reply);
1461 	gesn_power_event = (struct scsi_gesn_power_event *)(gesn_reply + 4);
1462 	/* set event header length and notification */
1463 	_lto2b(GESN_HEADER_LEN, gesn_event_header->length);
1464 	gesn_event_header->notification = GESN_NOTIFY_POWER_MGMT;
1465 	gesn_event_header->supported_event = GESN_EVENT_POWER_MGMT;
1466 
1467 	/* set event descriptor */
1468 	gesn_power_event->event_code = GESN_CODE_NOCHG;
1469 	if (dev->locked)
1470 		gesn_power_event->status = GESN_STATUS_ACTIVE;
1471 	else
1472 		gesn_power_event->status = GESN_STATUS_IDLE;
1473 
1474 	/* Move index for response */
1475 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
1476 	    &(acct->resp_idx));
1477 
1478 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
1479 	    "idx %d req_idx %d global_idx %d",
1480 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1481 	    acct->resp_idx, acct->req_idx, acct->idx);
1482 
1483 	if (write_mem(acct->resp_desc->addr, &resp, acct->resp_desc->len)) {
1484 		log_warnx("%s: unable to write OK resp status "
1485 		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
1486 		goto gesn_out;
1487 	}
1488 
1489 	/* Move index for gesn_reply */
1490 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
1491 	    &(acct->resp_idx));
1492 
1493 	DPRINTF("%s: writing gesn_reply to 0x%llx size %d at "
1494 	    "local idx %d req_idx %d global_idx %d",
1495 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1496 	    acct->resp_idx, acct->req_idx, acct->idx);
1497 
1498 	if (write_mem(acct->resp_desc->addr, gesn_reply,
1499 	    acct->resp_desc->len)) {
1500 		log_warnx("%s: unable to write gesn_reply"
1501 		    " response to gpa @ 0x%llx",
1502 		    __func__, acct->resp_desc->addr);
1503 	} else {
1504 		ret = 1;
1505 		dev->cfg.isr_status = 1;
1506 		/* Move ring indexes */
1507 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1508 		    acct->req_desc, acct->req_idx);
1509 	}
1510 
1511 gesn_out:
1512 	return (ret);
1513 }
1514 
1515 static int
1516 vioscsi_handle_get_config(struct vioscsi_dev *dev,
1517     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1518 {
1519 	int ret = 0;
1520 	struct virtio_scsi_res_hdr resp;
1521 	uint16_t get_conf_feature;
1522 	uint16_t get_conf_len;
1523 	uint8_t *get_conf_reply;
1524 	struct scsi_get_configuration *get_configuration;
1525 	struct scsi_config_feature_header *config_feature_header;
1526 	struct scsi_config_generic_descriptor *config_generic_desc;
1527 	struct scsi_config_profile_descriptor *config_profile_desc;
1528 	struct scsi_config_core_descriptor *config_core_desc;
1529 	struct scsi_config_morphing_descriptor *config_morphing_desc;
1530 	struct scsi_config_remove_media_descriptor *config_remove_media_desc;
1531 	struct scsi_config_random_read_descriptor *config_random_read_desc;
1532 
1533 	memset(&resp, 0, sizeof(resp));
1534 	get_configuration = (struct scsi_get_configuration *)(req->cdb);
1535 	get_conf_feature = (uint16_t)_2btol(get_configuration->feature);
1536 	get_conf_len = (uint16_t)_2btol(get_configuration->length);
1537 	DPRINTF("%s: Conf RT %x Feature %d Len %d", __func__,
1538 	    get_configuration->byte2, get_conf_feature, get_conf_len);
1539 
1540 	get_conf_reply = (uint8_t*)calloc(G_CONFIG_REPLY_SIZE, sizeof(uint8_t));
1541 
1542 	if (get_conf_reply == NULL)
1543 		goto get_config_out;
1544 
1545 	/*
1546 	 * Use MMC-5 6.6 for structure and
1547 	 * MMC-5 5.2 to send back:
1548 	 * feature header - 8 bytes
1549 	 * feature descriptor for profile list - 8 bytes
1550 	 * feature descriptor for core feature - 12 bytes
1551 	 * feature descriptor for morphing feature - 8 bytes
1552 	 * feature descriptor for removable media - 8 bytes
1553 	 * feature descriptor for random read feature - 12 bytes
1554 	 */
1555 
1556 	config_feature_header =
1557 	    (struct scsi_config_feature_header *)(get_conf_reply);
1558 	config_generic_desc =
1559 	    (struct scsi_config_generic_descriptor *)(get_conf_reply + 8);
1560 	config_profile_desc =
1561 	    (struct scsi_config_profile_descriptor *)(get_conf_reply + 12);
1562 	config_core_desc =
1563 	    (struct scsi_config_core_descriptor *)(get_conf_reply + 16);
1564 	config_morphing_desc =
1565 	    (struct scsi_config_morphing_descriptor *)(get_conf_reply + 28);
1566 	config_remove_media_desc =
1567 	    (struct scsi_config_remove_media_descriptor *)(get_conf_reply + 36);
1568 	config_random_read_desc =
1569 	    (struct scsi_config_random_read_descriptor *)(get_conf_reply + 44);
1570 
1571 	/* set the data length to the size of get_conf_reply minus the length field */
1572 	_lto4b(G_CONFIG_REPLY_SIZE_HEX, config_feature_header->length);
1573 	/* set current profile to be non-conforming */
1574 	_lto2b(CONFIG_PROFILE_NON_CONFORM,
1575 	    config_feature_header->current_profile);
1576 
1577 	/* fill out profile list feature */
1578 	_lto2b(CONFIG_FEATURE_CODE_PROFILE, config_generic_desc->feature_code);
1579 	config_generic_desc->byte3 = CONFIG_PROFILELIST_BYTE3;
1580 	config_generic_desc->length = CONFIG_PROFILELIST_LENGTH;
1581 	/* fill out profile descriptor for NON_CONFORM */
1582 	_lto2b(CONFIG_PROFILE_NON_CONFORM, config_profile_desc->profile_number);
1583 	config_profile_desc->byte3 = CONFIG_PROFILE_BYTE3;
1584 
1585 	/* fill out core feature */
1586 	_lto2b(CONFIG_FEATURE_CODE_CORE, config_core_desc->feature_code);
1587 	config_core_desc->byte3 = CONFIG_CORE_BYTE3;
1588 	config_core_desc->length = CONFIG_CORE_LENGTH;
1589 	_lto4b(CONFIG_CORE_PHY_SCSI, config_core_desc->phy_std);
1590 
1591 	/* fill out morphing feature */
1592 	_lto2b(CONFIG_FEATURE_CODE_MORPHING,
1593 	    config_morphing_desc->feature_code);
1594 	config_morphing_desc->byte3 = CONFIG_MORPHING_BYTE3;
1595 	config_morphing_desc->length = CONFIG_MORPHING_LENGTH;
1596 	config_morphing_desc->byte5 = CONFIG_MORPHING_BYTE5;
1597 
1598 	/* fill out removable media feature */
1599 	_lto2b(CONFIG_FEATURE_CODE_REMOVE_MEDIA,
1600 	    config_remove_media_desc->feature_code);
1601 	config_remove_media_desc->byte3 = CONFIG_REMOVE_MEDIA_BYTE3;
1602 	config_remove_media_desc->length = CONFIG_REMOVE_MEDIA_LENGTH;
1603 	config_remove_media_desc->byte5 = CONFIG_REMOVE_MEDIA_BYTE5;
1604 
1605 	/* fill out random read feature */
1606 	_lto2b(CONFIG_FEATURE_CODE_RANDOM_READ,
1607 	    config_random_read_desc->feature_code);
1608 	config_random_read_desc->byte3 = CONFIG_RANDOM_READ_BYTE3;
1609 	config_random_read_desc->length = CONFIG_RANDOM_READ_LENGTH;
1610 	if (dev->n_blocks >= UINT32_MAX)
1611 		_lto4b(UINT32_MAX, config_random_read_desc->block_size);
1612 	else
1613 		_lto4b(dev->n_blocks - 1, config_random_read_desc->block_size);
1614 	_lto2b(CONFIG_RANDOM_READ_BLOCKING_TYPE,
1615 	    config_random_read_desc->blocking_type);
1616 
1617 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1618 
1619 	/* Move index for response */
1620 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1621 	    acct->req_desc, &(acct->resp_idx));
1622 
1623 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
1624 	    "idx %d req_idx %d global_idx %d",
1625 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1626 	    acct->resp_idx, acct->req_idx, acct->idx);
1627 
1628 	if (write_mem(acct->resp_desc->addr, &resp,
1629 	    acct->resp_desc->len)) {
1630 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
1631 		    __func__, acct->resp_desc->addr);
1632 		goto free_get_config;
1633 	}
1634 
1635 	/* Move index for get_conf_reply */
1636 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
1637 	    &(acct->resp_idx));
1638 
1639 	DPRINTF("%s: writing get_conf_reply to 0x%llx size %d "
1640 	    "at local idx %d req_idx %d global_idx %d",
1641 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1642 	    acct->resp_idx, acct->req_idx, acct->idx);
1643 
1644 	if (write_mem(acct->resp_desc->addr, get_conf_reply,
1645 	    acct->resp_desc->len)) {
1646 		log_warnx("%s: unable to write get_conf_reply"
1647 		    " response to gpa @ 0x%llx",
1648 		    __func__, acct->resp_desc->addr);
1649 	} else {
1650 		ret = 1;
1651 		dev->cfg.isr_status = 1;
1652 		/* Move ring indexes */
1653 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1654 		    acct->req_desc, acct->req_idx);
1655 	}
1656 
1657 free_get_config:
1658 	free(get_conf_reply);
1659 get_config_out:
1660 	return (ret);
1661 }
1662 
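/*
 * Register access handler for the vioscsi device.  A summary of the
 * calling convention as used below: dir == 0 means the guest wrote *data
 * to register 'reg', otherwise the guest is reading and *data must be
 * filled in; sz is the access width in bytes and *intr is set to 1 when
 * an interrupt should be asserted (it is preset to 0xFF otherwise).
 */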
1663 int
1664 vioscsi_io(int dir, uint16_t reg, uint32_t *data, uint8_t *intr,
1665     void *cookie, uint8_t sz)
1666 {
1667 	struct vioscsi_dev *dev = (struct vioscsi_dev *)cookie;
1668 
1669 	*intr = 0xFF;
1670 
1671 	DPRINTF("%s: request %s reg %u,%s sz %u", __func__,
1672 	    dir ? "READ" : "WRITE", reg, vioscsi_reg_name(reg), sz);
1673 
1674 	if (dir == 0) {
1675 		switch (reg) {
1676 		case VIRTIO_CONFIG_DEVICE_FEATURES:
1677 		case VIRTIO_CONFIG_QUEUE_SIZE:
1678 		case VIRTIO_CONFIG_ISR_STATUS:
1679 			log_warnx("%s: illegal write %x to %s",
1680 			    __progname, *data, vioscsi_reg_name(reg));
1681 			break;
1682 		case VIRTIO_CONFIG_GUEST_FEATURES:
1683 			dev->cfg.guest_feature = *data;
1684 			DPRINTF("%s: guest feature set to %u",
1685 			    __func__, dev->cfg.guest_feature);
1686 			break;
1687 		case VIRTIO_CONFIG_QUEUE_ADDRESS:
1688 			dev->cfg.queue_address = *data;
1689 			vioscsi_update_qa(dev);
1690 			break;
1691 		case VIRTIO_CONFIG_QUEUE_SELECT:
1692 			dev->cfg.queue_select = *data;
1693 			vioscsi_update_qs(dev);
1694 			break;
1695 		case VIRTIO_CONFIG_QUEUE_NOTIFY:
1696 			dev->cfg.queue_notify = *data;
1697 			if (vioscsi_notifyq(dev))
1698 				*intr = 1;
1699 			break;
1700 		case VIRTIO_CONFIG_DEVICE_STATUS:
1701 			dev->cfg.device_status = *data;
1702 			DPRINTF("%s: device status set to %u",
1703 			    __func__, dev->cfg.device_status);
1704 			if (dev->cfg.device_status == 0) {
1705 				log_debug("%s: device reset", __func__);
1706 				dev->cfg.guest_feature = 0;
1707 				dev->cfg.queue_address = 0;
1708 				vioscsi_update_qa(dev);
1709 				dev->cfg.queue_size = 0;
1710 				vioscsi_update_qs(dev);
1711 				dev->cfg.queue_select = 0;
1712 				dev->cfg.queue_notify = 0;
1713 				dev->cfg.isr_status = 0;
1714 				dev->vq[0].last_avail = 0;
1715 				dev->vq[1].last_avail = 0;
1716 				dev->vq[2].last_avail = 0;
1717 			}
1718 			break;
1719 		default:
1720 			break;
1721 		}
1722 	} else {
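		/*
		 * Guest read.  Device-specific config space starts at
		 * VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI; each "+ n" case below
		 * serves byte n of the virtio-scsi config so that 1-, 2-
		 * and 4-byte guest accesses all work.  Single-byte reads
		 * keep the upper bits of *data and splice in the requested
		 * byte, e.g. (a sketch, with 'val' standing for the 32-bit
		 * field and 'n' for the byte index):
		 *
		 *	*data = (*data & 0xFFFFFF00) | ((val >> (8 * n)) & 0xFF);
		 */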
1723 		switch (reg) {
1724 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI:
1725 			/* VIRTIO_SCSI_CONFIG_NUM_QUEUES, 32bit */
1726 			if (sz == 4)
1727 				*data = (uint32_t)VIOSCSI_NUM_QUEUES;
1728 			else if (sz == 1) {
1729 				/* read first byte of num_queues */
1730 				*data &= 0xFFFFFF00;
1731 				*data |= (uint32_t)(VIOSCSI_NUM_QUEUES) & 0xFF;
1732 			}
1733 			break;
1734 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 1:
1735 			if (sz == 1) {
1736 				/* read second byte of num_queues */
1737 				*data &= 0xFFFFFF00;
1738 				*data |=
1739 				    (uint32_t)(VIOSCSI_NUM_QUEUES >> 8) & 0xFF;
1740 			}
1741 			break;
1742 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 2:
1743 			if (sz == 1) {
1744 				/* read third byte of num_queues */
1745 				*data &= 0xFFFFFF00;
1746 				*data |=
1747 				    (uint32_t)(VIOSCSI_NUM_QUEUES >> 16) & 0xFF;
1748 			}
1749 			break;
1750 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 3:
1751 			if (sz == 1) {
1752 				/* read fourth byte of num_queues */
1753 				*data &= 0xFFFFFF00;
1754 				*data |=
1755 				    (uint32_t)(VIOSCSI_NUM_QUEUES >> 24) & 0xFF;
1756 			}
1757 			break;
1758 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 4:
1759 			/* VIRTIO_SCSI_CONFIG_SEG_MAX, 32bit */
1760 			if (sz == 4)
1761 				*data = (uint32_t)(VIOSCSI_SEG_MAX);
1762 			else if (sz == 1) {
1763 				/* read first byte of seg_max */
1764 				*data &= 0xFFFFFF00;
1765 				*data |= (uint32_t)(VIOSCSI_SEG_MAX) & 0xFF;
1766 			}
1767 			break;
1768 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 5:
1769 			if (sz == 1) {
1770 				/* read second byte of seg_max */
1771 				*data &= 0xFFFFFF00;
1772 				*data |=
1773 				    (uint32_t)(VIOSCSI_SEG_MAX >> 8) & 0xFF;
1774 			}
1775 			break;
1776 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 6:
1777 			if (sz == 1) {
1778 				/* read third byte of seg_max */
1779 				*data &= 0xFFFFFF00;
1780 				*data |=
1781 				    (uint32_t)(VIOSCSI_SEG_MAX >> 16) & 0xFF;
1782 			}
1783 			break;
1784 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 7:
1785 			if (sz == 1) {
1786 				/* read fourth byte of seg_max */
1787 				*data &= 0xFFFFFF00;
1788 				*data |=
1789 				    (uint32_t)(VIOSCSI_SEG_MAX >> 24) & 0xFF;
1790 			}
1791 			break;
1792 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 8:
1793 			/* VIRTIO_SCSI_CONFIG_MAX_SECTORS, 32bit */
1794 			if (sz == 4)
1795 				*data = (uint32_t)(dev->max_xfer);
1796 			else if (sz == 1) {
1797 				/* read first byte of max_xfer */
1798 				*data &= 0xFFFFFF00;
1799 				*data |= (uint32_t)(dev->max_xfer) & 0xFF;
1800 			}
1801 			break;
1802 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 9:
1803 			if (sz == 1) {
1804 				/* read second byte of max_xfer */
1805 				*data &= 0xFFFFFF00;
1806 				*data |=
1807 				    (uint32_t)(dev->max_xfer >> 8) & 0xFF;
1808 			}
1809 			break;
1810 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 10:
1811 			if (sz == 1) {
1812 				/* read third byte of max_xfer */
1813 				*data &= 0xFFFFFF00;
1814 				*data |=
1815 				    (uint32_t)(dev->max_xfer >> 16) & 0xFF;
1816 			}
1817 			break;
1818 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 11:
1819 			if (sz == 1) {
1820 				/* read fourth byte of max_xfer */
1821 				*data &= 0xFFFFFF00;
1822 				*data |=
1823 				    (uint32_t)(dev->max_xfer >> 24) & 0xFF;
1824 			}
1825 			break;
1826 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 12:
1827 			/* VIRTIO_SCSI_CONFIG_CMD_PER_LUN, 32bit */
1828 			if (sz == 4)
1829 				*data = (uint32_t)(VIOSCSI_CMD_PER_LUN);
1830 			else if (sz == 1) {
1831 				/* read first byte of cmd_per_lun */
1832 				*data &= 0xFFFFFF00;
1833 				*data |= (uint32_t)(VIOSCSI_CMD_PER_LUN) & 0xFF;
1834 			}
1835 			break;
1836 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 13:
1837 			if (sz == 1) {
1838 				/* read second byte of cmd_per_lun */
1839 				*data &= 0xFFFFFF00;
1840 				*data |=
1841 				    (uint32_t)(VIOSCSI_CMD_PER_LUN >> 8) & 0xFF;
1842 			}
1843 			break;
1844 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 14:
1845 			if (sz == 1) {
1846 				/* read third byte of cmd_per_lun */
1847 				*data &= 0xFFFFFF00;
1848 				*data |= (uint32_t)(VIOSCSI_CMD_PER_LUN >> 16)
1849 				    & 0xFF;
1850 			}
1851 			break;
1852 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 15:
1853 			if (sz == 1) {
1854 				/* read fourth byte of cmd_per_lun */
1855 				*data &= 0xFFFFFF00;
1856 				*data |= (uint32_t)(VIOSCSI_CMD_PER_LUN >> 24)
1857 				    & 0xFF;
1858 			}
1859 			break;
1860 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 16:
1861 			/* VIRTIO_SCSI_CONFIG_EVENT_INFO_SIZE, 32bit */
1862 			*data = 0x00;
1863 			break;
1864 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 20:
1865 			/* VIRTIO_SCSI_CONFIG_SENSE_SIZE, 32bit */
1866 			if (sz == 4)
1867 				*data = (uint32_t)(VIOSCSI_SENSE_LEN);
1868 			else if (sz == 1) {
1869 				/* read first byte of sense_size */
1870 				*data &= 0xFFFFFF00;
1871 				*data |= (uint32_t)(VIOSCSI_SENSE_LEN) & 0xFF;
1872 			}
1873 			break;
1874 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 21:
1875 			if (sz == 1) {
1876 				/* read second byte of sense_size */
1877 				*data &= 0xFFFFFF00;
1878 				*data |=
1879 				    (uint32_t)(VIOSCSI_SENSE_LEN >> 8) & 0xFF;
1880 			}
1881 			break;
1882 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 22:
1883 			if (sz == 1) {
1884 				/* read third byte of sense_size */
1885 				*data &= 0xFFFFFF00;
1886 				*data |=
1887 				    (uint32_t)(VIOSCSI_SENSE_LEN >> 16) & 0xFF;
1888 			}
1889 			break;
1890 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 23:
1891 			if (sz == 1) {
1892 				/* read fourth byte of sense_size */
1893 				*data &= 0xFFFFFF00;
1894 				*data |=
1895 				    (uint32_t)(VIOSCSI_SENSE_LEN >> 24) & 0xFF;
1896 			}
1897 			break;
1898 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 24:
1899 			/* VIRTIO_SCSI_CONFIG_CDB_SIZE, 32bit */
1900 			if (sz == 4)
1901 				*data = (uint32_t)(VIOSCSI_CDB_LEN);
1902 			else if (sz == 1) {
1903 				/* read first byte of cdb_len */
1904 				*data &= 0xFFFFFF00;
1905 				*data |= (uint32_t)(VIOSCSI_CDB_LEN) & 0xFF;
1906 			}
1907 			break;
1908 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 25:
1909 			if (sz == 1) {
1910 				/* read second byte of cdb_len */
1911 				*data &= 0xFFFFFF00;
1912 				*data |=
1913 				    (uint32_t)(VIOSCSI_CDB_LEN >> 8) & 0xFF;
1914 			}
1915 			break;
1916 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 26:
1917 			if (sz == 1) {
1918 				/* read third byte of cdb_len */
1919 				*data &= 0xFFFFFF00;
1920 				*data |=
1921 				    (uint32_t)(VIOSCSI_CDB_LEN >> 16) & 0xFF;
1922 			}
1923 			break;
1924 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 27:
1925 			if (sz == 1) {
1926 				/* read fourth byte of cdb_len */
1927 				*data &= 0xFFFFFF00;
1928 				*data |=
1929 				    (uint32_t)(VIOSCSI_CDB_LEN >> 24) & 0xFF;
1930 			}
1931 			break;
1932 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 28:
1933 			/* VIRTIO_SCSI_CONFIG_MAX_CHANNEL, 16bit */
1934 
1935 			/* defined by standard to be zero */
1936 			*data &= 0xFFFF0000;
1937 			break;
1938 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 29:
1939 			/* defined by standard to be zero */
1940 			*data &= 0xFFFF0000;
1941 			break;
1942 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 30:
1943 			/* VIRTIO_SCSI_CONFIG_MAX_TARGET, 16bit */
1944 			if (sz == 2) {
1945 				*data &= 0xFFFF0000;
1946 				*data |=
1947 				    (uint32_t)(VIOSCSI_MAX_TARGET) & 0xFFFF;
1948 			} else if (sz == 1) {
1949 				/* read first byte of max_target */
1950 				*data &= 0xFFFFFF00;
1951 				*data |=
1952 				    (uint32_t)(VIOSCSI_MAX_TARGET) & 0xFF;
1953 			}
1954 			break;
1955 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 31:
1956 			if (sz == 1) {
1957 				/* read second byte of max_target */
1958 				*data &= 0xFFFFFF00;
1959 				*data |=
1960 				    (uint32_t)(VIOSCSI_MAX_TARGET >> 8) & 0xFF;
1961 			}
1962 			break;
1963 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 32:
1964 			/* VIRTIO_SCSI_CONFIG_MAX_LUN, 32bit */
1965 			if (sz == 4)
1966 				*data = (uint32_t)(VIOSCSI_MAX_LUN);
1967 			else if (sz == 1) {
1968 				/* read first byte of max_lun */
1969 				*data &= 0xFFFFFF00;
1970 				*data |= (uint32_t)(VIOSCSI_MAX_LUN) & 0xFF;
1971 			}
1972 			break;
1973 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 33:
1974 			if (sz == 1) {
1975 				/* read second byte of max_lun */
1976 				*data &= 0xFFFFFF00;
1977 				*data |=
1978 				    (uint32_t)(VIOSCSI_MAX_LUN >> 8) & 0xFF;
1979 			}
1980 			break;
1981 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 34:
1982 			if (sz == 1) {
1983 				/* read third byte of max_lun */
1984 				*data &= 0xFFFFFF00;
1985 				*data |=
1986 				    (uint32_t)(VIOSCSI_MAX_LUN >> 16) & 0xFF;
1987 			}
1988 			break;
1989 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 35:
1990 			if (sz == 1) {
1991 				/* read fourth byte of max_lun */
1992 				*data &= 0xFFFFFF00;
1993 				*data |=
1994 				    (uint32_t)(VIOSCSI_MAX_LUN >> 24) & 0xFF;
1995 			}
1996 			break;
1997 		case VIRTIO_CONFIG_DEVICE_FEATURES:
1998 			*data = dev->cfg.device_feature;
1999 			break;
2000 		case VIRTIO_CONFIG_GUEST_FEATURES:
2001 			*data = dev->cfg.guest_feature;
2002 			break;
2003 		case VIRTIO_CONFIG_QUEUE_ADDRESS:
2004 			*data = dev->cfg.queue_address;
2005 			break;
2006 		case VIRTIO_CONFIG_QUEUE_SIZE:
2007 			if (sz == 4)
2008 				*data = dev->cfg.queue_size;
2009 			else if (sz == 2) {
2010 				*data &= 0xFFFF0000;
2011 				*data |= (uint16_t)dev->cfg.queue_size;
2012 			} else if (sz == 1) {
2013 				*data &= 0xFFFFFF00;
2014 				*data |= (uint8_t)dev->cfg.queue_size;
2015 			}
2016 			break;
2017 		case VIRTIO_CONFIG_QUEUE_SELECT:
2018 			*data = dev->cfg.queue_select;
2019 			break;
2020 		case VIRTIO_CONFIG_QUEUE_NOTIFY:
2021 			*data = dev->cfg.queue_notify;
2022 			break;
2023 		case VIRTIO_CONFIG_DEVICE_STATUS:
2024 			if (sz == 4)
2025 				*data = dev->cfg.device_status;
2026 			else if (sz == 2) {
2027 				*data &= 0xFFFF0000;
2028 				*data |= (uint16_t)dev->cfg.device_status;
2029 			} else if (sz == 1) {
2030 				*data &= 0xFFFFFF00;
2031 				*data |= (uint8_t)dev->cfg.device_status;
2032 			}
2033 			break;
2034 		case VIRTIO_CONFIG_ISR_STATUS:
2035 			*data = dev->cfg.isr_status;
2036 			dev->cfg.isr_status = 0;
2037 			break;
2038 		}
2039 	}
2040 
2041 
2042 	return (0);
2043 }
2044 
2045 void
2046 vioscsi_update_qs(struct vioscsi_dev *dev)
2047 {
2048 	/* Invalid queue? */
2049 	if (dev->cfg.queue_select >= VIRTIO_MAX_QUEUES) {
2050 		dev->cfg.queue_size = 0;
2051 		return;
2052 	}
2053 
2054 	/* Update queue address/size based on queue select */
2055 	dev->cfg.queue_address = dev->vq[dev->cfg.queue_select].qa;
2056 	dev->cfg.queue_size = dev->vq[dev->cfg.queue_select].qs;
2057 }
2058 
2059 void
2060 vioscsi_update_qa(struct vioscsi_dev *dev)
2061 {
2062 	/* Invalid queue? */
2063 	if (dev->cfg.queue_select >= VIRTIO_MAX_QUEUES)
2064 		return;
2065 
2066 	dev->vq[dev->cfg.queue_select].qa = dev->cfg.queue_address;
2067 }
2068 
2069 /*
2070  * Process message(s) in the queue(s)
2071  * For each request the vioscsi driver places a virtio_scsi_req_hdr
2072  * with a possible SCSI_DATA_OUT buffer, followed by a
2073  * virtio_scsi_res_hdr with a possible SCSI_DATA_IN buffer, in the
2074  * queue for consumption.
2075  *
2076  * Return 1 if an interrupt should be generated (response written)
2077  *        0 otherwise
2078  */
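/*
 * A sketch of the descriptor chain this expects: desc[req_idx] holds the
 * virtio_scsi_req_hdr (plus any SCSI_DATA_OUT data) and the chained
 * descriptors that follow receive the virtio_scsi_res_hdr and any
 * SCSI_DATA_IN data written by the handlers.
 */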
2079 int
2080 vioscsi_notifyq(struct vioscsi_dev *dev)
2081 {
2082 	uint64_t q_gpa;
2083 	uint32_t vr_sz;
2084 	int ret;
2085 	char *vr;
2086 	struct virtio_scsi_req_hdr req;
2087 	struct virtio_scsi_res_hdr resp;
2088 	struct virtio_vq_acct acct;
2089 
2090 	ret = 0;
2091 
2092 	/* Invalid queue? */
2093 	if (dev->cfg.queue_notify >= VIRTIO_MAX_QUEUES)
2094 		return (ret);
2095 
2096 	vr_sz = vring_size(VIOSCSI_QUEUE_SIZE);
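	/* the queue address register holds a guest page number */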
2097 	q_gpa = dev->vq[dev->cfg.queue_notify].qa;
2098 	q_gpa = q_gpa * VIRTIO_PAGE_SIZE;
2099 
2100 	vr = calloc(1, vr_sz);
2101 	if (vr == NULL) {
2102 		log_warn("%s: calloc error getting vioscsi ring", __func__);
2103 		return (ret);
2104 	}
2105 
2106 	if (read_mem(q_gpa, vr, vr_sz)) {
2107 		log_warnx("%s: error reading gpa 0x%llx", __func__, q_gpa);
2108 		goto out;
2109 	}
2110 
2111 	/* Compute offsets in ring of descriptors, avail ring, and used ring */
2112 	acct.desc = (struct vring_desc *)(vr);
2113 	acct.avail = (struct vring_avail *)(vr +
2114 	    dev->vq[dev->cfg.queue_notify].vq_availoffset);
2115 	acct.used = (struct vring_used *)(vr +
2116 	    dev->vq[dev->cfg.queue_notify].vq_usedoffset);
2117 
2118 	acct.idx =
2119 	    dev->vq[dev->cfg.queue_notify].last_avail & VIOSCSI_QUEUE_MASK;
2120 
2121 	if ((acct.avail->idx & VIOSCSI_QUEUE_MASK) == acct.idx) {
2122 		log_warnx("%s: nothing to do?", __func__);
2123 		goto out;
2124 	}
2125 
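	/*
	 * Walk the avail ring from our last position (last_avail) up to
	 * the index most recently published by the guest driver.
	 */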
2126 	while (acct.idx != (acct.avail->idx & VIOSCSI_QUEUE_MASK)) {
2127 
2128 		acct.req_idx = acct.avail->ring[acct.idx] & VIOSCSI_QUEUE_MASK;
2129 		acct.req_desc = &(acct.desc[acct.req_idx]);
2130 
2131 		/* Clear resp for next message */
2132 		memset(&resp, 0, sizeof(resp));
2133 
2134 		if ((acct.req_desc->flags & VRING_DESC_F_NEXT) == 0) {
2135 			log_warnx("%s: unchained req descriptor received "
2136 			    "(idx %d)", __func__, acct.req_idx);
2137 			goto out;
2138 		}
2139 
2140 		/* Read command from descriptor ring */
2141 		if (read_mem(acct.req_desc->addr, &req, acct.req_desc->len)) {
2142 			log_warnx("%s: command read_mem error @ 0x%llx",
2143 			    __func__, acct.req_desc->addr);
2144 			goto out;
2145 		}
2146 
2147 		/*
2148 		 * req.lun is defined by virtio as
2149 		 * lun[0] - Always set to 1
2150 		 * lun[1] - Target, negotiated as VIOSCSI_MAX_TARGET
2151 		 * lun[2-3] - represent single level LUN structure
2152 		 * lun[4-7] - Zero
2153 		 * At present we are only servicing one device per
2154 		 * bus (1:0:X:0).
2155 		 *
2156 		 * Various implementations will attempt to scan all possible
2157 		 * targets (256) looking for devices or scan for all possible
2158 		 * LUNs in a single level.  When the target is
2159 		 * VIOSCSI_MAX_TARGET or greater, or when lun[3] is nonzero,
2160 		 * respond with a BAD_TARGET response.
2161 		 */
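		/*
		 * e.g. a request for target 0, LUN 0 normally arrives as
		 * lun = { 1, 0, 0x40, 0, 0, 0, 0, 0 } (an illustrative
		 * value following the virtio-scsi single level LUN
		 * encoding).
		 */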
2162 		if (req.lun[1] >= VIOSCSI_MAX_TARGET || req.lun[3] > 0) {
2163 			DPRINTF("%s: Ignore CMD 0x%02x,%s on lun %u:%u:%u:%u",
2164 			    __func__, req.cdb[0], vioscsi_op_names(req.cdb[0]),
2165 			    req.lun[0], req.lun[1], req.lun[2], req.lun[3]);
2166 			/* Move index for response */
2167 			acct.resp_desc = vioscsi_next_ring_desc(acct.desc,
2168 			    acct.req_desc, &(acct.resp_idx));
2169 
2170 			vioscsi_prepare_resp(&resp,
2171 			    VIRTIO_SCSI_S_BAD_TARGET, SCSI_OK, 0, 0, 0);
2172 
2173 			if (write_mem(acct.resp_desc->addr, &resp,
2174 			    acct.resp_desc->len)) {
2175 				log_warnx("%s: unable to write BAD_TARGET"
2176 				    " resp status data @ 0x%llx",
2177 				    __func__, acct.resp_desc->addr);
2178 				goto out;
2179 			}
2180 
2181 			ret = 1;
2182 			dev->cfg.isr_status = 1;
2183 			/* Move ring indexes */
2184 			vioscsi_next_ring_item(dev, acct.avail, acct.used,
2185 			    acct.req_desc, acct.req_idx);
2186 
2187 			if (write_mem(q_gpa, vr, vr_sz)) {
2188 				log_warnx("%s: error writing vioring",
2189 				    __func__);
2190 			}
2191 			goto next_msg;
2192 		}
2193 
2194 		DPRINTF("%s: Queue %d id 0x%llx lun %u:%u:%u:%u"
2195 		    " cdb OP 0x%02x,%s",
2196 		    __func__, dev->cfg.queue_notify, req.id,
2197 		    req.lun[0], req.lun[1], req.lun[2], req.lun[3],
2198 		    req.cdb[0], vioscsi_op_names(req.cdb[0]));
2199 
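		/*
		 * Dispatch on the SCSI opcode.  Each handler returns 1 when
		 * it wrote a response, in which case the local copy of the
		 * ring (vr) is flushed back to guest memory so the guest
		 * sees the updated used ring.
		 */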
2200 		/* opcode is first byte */
2201 		switch (req.cdb[0]) {
2202 		case TEST_UNIT_READY:
2203 		case START_STOP:
2204 			ret = vioscsi_handle_tur(dev, &req, &acct);
2205 			if (ret) {
2206 				if (write_mem(q_gpa, vr, vr_sz)) {
2207 					log_warnx("%s: error writing vioring",
2208 					    __func__);
2209 				}
2210 			}
2211 			break;
2212 		case PREVENT_ALLOW:
2213 			ret = vioscsi_handle_prevent_allow(dev, &req, &acct);
2214 			if (ret) {
2215 				if (write_mem(q_gpa, vr, vr_sz)) {
2216 					log_warnx("%s: error writing vioring",
2217 					    __func__);
2218 				}
2219 			}
2220 			break;
2221 		case READ_TOC:
2222 			ret = vioscsi_handle_read_toc(dev, &req, &acct);
2223 			if (ret) {
2224 				if (write_mem(q_gpa, vr, vr_sz)) {
2225 					log_warnx("%s: error writing vioring",
2226 					    __func__);
2227 				}
2228 			}
2229 			break;
2230 		case READ_CAPACITY:
2231 			ret = vioscsi_handle_read_capacity(dev, &req, &acct);
2232 			if (ret) {
2233 				if (write_mem(q_gpa, vr, vr_sz)) {
2234 					log_warnx("%s: error writing vioring",
2235 					    __func__);
2236 				}
2237 			}
2238 			break;
2239 		case READ_CAPACITY_16:
2240 			ret = vioscsi_handle_read_capacity_16(dev, &req, &acct);
2241 			if (ret) {
2242 				if (write_mem(q_gpa, vr, vr_sz)) {
2243 					log_warnx("%s: error writing vioring",
2244 					    __func__);
2245 				}
2246 			}
2247 			break;
2248 		case READ_COMMAND:
2249 			ret = vioscsi_handle_read_6(dev, &req, &acct);
2250 			if (ret) {
2251 				if (write_mem(q_gpa, vr, vr_sz)) {
2252 					log_warnx("%s: error writing vioring",
2253 					    __func__);
2254 				}
2255 			}
2256 			break;
2257 		case READ_BIG:
2258 			ret = vioscsi_handle_read_10(dev, &req, &acct);
2259 			if (ret) {
2260 				if (write_mem(q_gpa, vr, vr_sz)) {
2261 					log_warnx("%s: error writing vioring",
2262 					    __func__);
2263 				}
2264 			}
2265 			break;
2266 		case INQUIRY:
2267 			ret = vioscsi_handle_inquiry(dev, &req, &acct);
2268 			if (ret) {
2269 				if (write_mem(q_gpa, vr, vr_sz)) {
2270 					log_warnx("%s: error writing vioring",
2271 					    __func__);
2272 				}
2273 			}
2274 			break;
2275 		case MODE_SENSE:
2276 			ret = vioscsi_handle_mode_sense(dev, &req, &acct);
2277 			if (ret) {
2278 				if (write_mem(q_gpa, vr, vr_sz)) {
2279 					log_warnx("%s: error writing vioring",
2280 					    __func__);
2281 				}
2282 			}
2283 			break;
2284 		case MODE_SENSE_BIG:
2285 			ret = vioscsi_handle_mode_sense_big(dev, &req, &acct);
2286 			if (ret) {
2287 				if (write_mem(q_gpa, vr, vr_sz)) {
2288 					log_warnx("%s: error writing vioring",
2289 					    __func__);
2290 				}
2291 			}
2292 			break;
2293 		case GET_EVENT_STATUS_NOTIFICATION:
2294 			ret = vioscsi_handle_gesn(dev, &req, &acct);
2295 			if (ret) {
2296 				if (write_mem(q_gpa, vr, vr_sz)) {
2297 					log_warnx("%s: error writing vioring",
2298 					    __func__);
2299 				}
2300 			}
2301 			break;
2302 		case READ_DISC_INFORMATION:
2303 			ret = vioscsi_handle_read_disc_info(dev, &req, &acct);
2304 			if (ret) {
2305 				if (write_mem(q_gpa, vr, vr_sz)) {
2306 					log_warnx("%s: error writing vioring",
2307 					    __func__);
2308 				}
2309 			}
2310 			break;
2311 		case GET_CONFIGURATION:
2312 			ret = vioscsi_handle_get_config(dev, &req, &acct);
2313 			if (ret) {
2314 				if (write_mem(q_gpa, vr, vr_sz)) {
2315 					log_warnx("%s: error writing vioring",
2316 					    __func__);
2317 				}
2318 			}
2319 			break;
2320 		case MECHANISM_STATUS:
2321 			ret = vioscsi_handle_mechanism_status(dev, &req, &acct);
2322 			if (ret) {
2323 				if (write_mem(q_gpa, vr, vr_sz)) {
2324 					log_warnx("%s: error writing vioring",
2325 					    __func__);
2326 				}
2327 			}
2328 			break;
2329 		case REPORT_LUNS:
2330 			ret = vioscsi_handle_report_luns(dev, &req, &acct);
2331 			if (ret) {
2332 				if (write_mem(q_gpa, vr, vr_sz)) {
2333 					log_warnx("%s: error writing vioring",
2334 					    __func__);
2335 				}
2336 			}
2337 			break;
2338 		default:
2339 			log_warnx("%s: unsupported opcode 0x%02x,%s",
2340 			    __func__, req.cdb[0], vioscsi_op_names(req.cdb[0]));
2341 			/* Move ring indexes */
2342 			vioscsi_next_ring_item(dev, acct.avail, acct.used,
2343 			    acct.req_desc, acct.req_idx);
2344 			break;
2345 		}
2346 next_msg:
2347 		/* Increment to the next queue slot */
2348 		acct.idx = (acct.idx + 1) & VIOSCSI_QUEUE_MASK;
2349 	}
2350 out:
2351 	free(vr);
2352 	return (ret);
2353 }
2354