xref: /openbsd/usr.sbin/vmd/vioscsi.c (revision 65bbee46)
1 /*	$OpenBSD: vioscsi.c,v 1.25 2024/09/26 01:45:13 jsg Exp $  */
2 
3 /*
4  * Copyright (c) 2017 Carlos Cardenas <ccardenas@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/types.h>
20 
21 #include <dev/pci/virtio_pcireg.h>
22 #include <dev/pv/vioscsireg.h>
23 #include <scsi/scsi_all.h>
24 #include <scsi/scsi_disk.h>
25 #include <scsi/scsiconf.h>
26 #include <scsi/cd.h>
27 
28 #include <stdlib.h>
29 #include <string.h>
30 
31 #include "vmd.h"
32 #include "vioscsi.h"
33 #include "virtio.h"
34 
35 extern char *__progname;
36 
37 static void
vioscsi_prepare_resp(struct virtio_scsi_res_hdr * resp,uint8_t vio_status,uint8_t scsi_status,uint8_t err_flags,uint8_t add_sense_code,uint8_t add_sense_code_qual)38 vioscsi_prepare_resp(struct virtio_scsi_res_hdr *resp, uint8_t vio_status,
39     uint8_t scsi_status, uint8_t err_flags, uint8_t add_sense_code,
40     uint8_t add_sense_code_qual)
41 {
42 	/* Set lower 8 bits of status and response fields */
43 	resp->response &= 0xFFFFFF00;
44 	resp->response |= vio_status;
45 	resp->status &= 0xFFFFFF00;
46 	resp->status |= scsi_status;
47 
48 	resp->sense_len = 0;
49 
50 	/* determine if we need to populate the sense field */
51 	if (scsi_status == SCSI_CHECK) {
52 		/*
53 		 * sense data is a 96 byte field.
54 		 * We only need to use the first 14 bytes
55 		 * - set the sense_len accordingly
56 		 * - set error_code to Current Command
57 		 * ref scsi/scsi_all.h:struct scsi_sense_data
58 		 */
59 		memset(resp->sense, 0, VIOSCSI_SENSE_LEN);
60 		resp->sense_len = RESP_SENSE_LEN;
61 		resp->sense[0] = SSD_ERRCODE_CURRENT;
62 		resp->sense[2] = err_flags;
63 		resp->sense[12] = add_sense_code;
64 		resp->sense[13] = add_sense_code_qual;
65 	}
66 }
67 
68 static struct vring_desc*
vioscsi_next_ring_desc(struct vring_desc * desc,struct vring_desc * cur,uint16_t * idx)69 vioscsi_next_ring_desc(struct vring_desc* desc, struct vring_desc* cur,
70     uint16_t *idx)
71 {
72 	*idx = cur->next & VIOSCSI_QUEUE_MASK;
73 	return &desc[*idx];
74 }
75 
/*
 * Publish a completed request on the used ring and record how far the
 * available ring has been consumed for the currently notified queue.
 */
static void
vioscsi_next_ring_item(struct vioscsi_dev *dev, struct vring_avail *avail,
    struct vring_used *used, struct vring_desc *desc, uint16_t idx)
{
	/* Fill the next used-ring slot with the head index and length. */
	used->ring[used->idx & VIOSCSI_QUEUE_MASK].id = idx;
	used->ring[used->idx & VIOSCSI_QUEUE_MASK].len = desc->len;
	/* Make the slot contents visible before the index moves. */
	__sync_synchronize();
	used->idx++;

	/* Remember the last available-ring position we have seen. */
	dev->vq[dev->cfg.queue_notify].last_avail =
	    avail->idx & VIOSCSI_QUEUE_MASK;
}
88 
/*
 * Map a SCSI CDB opcode to a printable name for debug logging.
 * Returns "UNKNOWN" for opcodes this emulation does not recognize.
 */
static const char *
vioscsi_op_names(uint8_t type)
{
	switch (type) {
	/* defined in scsi_all.h */
	case TEST_UNIT_READY: return "TEST_UNIT_READY";
	case REQUEST_SENSE: return "REQUEST_SENSE";
	case INQUIRY: return "INQUIRY";
	case MODE_SELECT: return "MODE_SELECT";
	case RESERVE: return "RESERVE";
	case RELEASE: return "RELEASE";
	case MODE_SENSE: return "MODE_SENSE";
	case START_STOP: return "START_STOP";
	case RECEIVE_DIAGNOSTIC: return "RECEIVE_DIAGNOSTIC";
	case SEND_DIAGNOSTIC: return "SEND_DIAGNOSTIC";
	case PREVENT_ALLOW: return "PREVENT_ALLOW";
	case POSITION_TO_ELEMENT: return "POSITION_TO_ELEMENT";
	case WRITE_BUFFER: return "WRITE_BUFFER";
	case READ_BUFFER: return "READ_BUFFER";
	case CHANGE_DEFINITION: return "CHANGE_DEFINITION";
	case MODE_SELECT_BIG: return "MODE_SELECT_BIG";
	case MODE_SENSE_BIG: return "MODE_SENSE_BIG";
	case REPORT_LUNS: return "REPORT_LUNS";
	/* defined in scsi_disk.h */
	case REASSIGN_BLOCKS: return "REASSIGN_BLOCKS";
	case READ_COMMAND: return "READ_COMMAND";
	case WRITE_COMMAND: return "WRITE_COMMAND";
	case READ_CAPACITY: return "READ_CAPACITY";
	case READ_CAPACITY_16: return "READ_CAPACITY_16";
	case READ_10: return "READ_10";
	case WRITE_10: return "WRITE_10";
	case READ_12: return "READ_12";
	case WRITE_12: return "WRITE_12";
	case READ_16: return "READ_16";
	case WRITE_16: return "WRITE_16";
	case SYNCHRONIZE_CACHE: return "SYNCHRONIZE_CACHE";
	case WRITE_SAME_10: return "WRITE_SAME_10";
	case WRITE_SAME_16: return "WRITE_SAME_16";
	/* defined in cd.h */
	case READ_SUBCHANNEL: return "READ_SUBCHANNEL";
	case READ_TOC: return "READ_TOC";
	case READ_HEADER: return "READ_HEADER";
	case PLAY: return "PLAY";
	case PLAY_MSF: return "PLAY_MSF";
	case PLAY_TRACK: return "PLAY_TRACK";
	case PLAY_TRACK_REL: return "PLAY_TRACK_REL";
	case PAUSE: return "PAUSE";
	case READ_TRACK_INFO: return "READ_TRACK_INFO";
	case CLOSE_TRACK: return "CLOSE_TRACK";
	case BLANK: return "BLANK";
	case PLAY_BIG: return "PLAY_BIG";
	case LOAD_UNLOAD: return "LOAD_UNLOAD";
	case PLAY_TRACK_REL_BIG: return "PLAY_TRACK_REL_BIG";
	case SET_CD_SPEED: return "SET_CD_SPEED";
	/* defined locally */
	case READ_DISC_INFORMATION: return "READ_DISC_INFORMATION";
	case GET_CONFIGURATION: return "GET_CONFIGURATION";
	case MECHANISM_STATUS: return "MECHANISM_STATUS";
	case GET_EVENT_STATUS_NOTIFICATION:
	    return "GET_EVENT_STATUS_NOTIFICATION";
	default: return "UNKNOWN";
	}
}
152 
/*
 * Map a virtio PCI config-space register offset to a printable name
 * for debug logging.  Offsets past the MSI-less device config base
 * correspond to the fields of the virtio-scsi device configuration.
 * Returns "unknown" for unrecognized offsets.
 */
static const char *
vioscsi_reg_name(uint8_t reg)
{
	switch (reg) {
	case VIRTIO_CONFIG_DEVICE_FEATURES: return "device feature";
	case VIRTIO_CONFIG_GUEST_FEATURES: return "guest feature";
	case VIRTIO_CONFIG_QUEUE_PFN: return "queue pfn";
	case VIRTIO_CONFIG_QUEUE_SIZE: return "queue size";
	case VIRTIO_CONFIG_QUEUE_SELECT: return "queue select";
	case VIRTIO_CONFIG_QUEUE_NOTIFY: return "queue notify";
	case VIRTIO_CONFIG_DEVICE_STATUS: return "device status";
	case VIRTIO_CONFIG_ISR_STATUS: return "isr status";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI: return "num_queues";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 4: return "seg_max";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 8: return "max_sectors";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 12: return "cmd_per_lun";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 16: return "event_info_size";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 20: return "sense_size";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 24: return "cdb_size";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 28: return "max_channel";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 30: return "max_target";
	case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 32: return "max_lun";
	default: return "unknown";
	}
}
178 
179 static void
vioscsi_free_info(struct ioinfo * info)180 vioscsi_free_info(struct ioinfo *info)
181 {
182 	if (!info)
183 		return;
184 	free(info->buf);
185 	free(info);
186 }
187 
188 static struct ioinfo *
vioscsi_start_read(struct vioscsi_dev * dev,off_t block,size_t n_blocks)189 vioscsi_start_read(struct vioscsi_dev *dev, off_t block, size_t n_blocks)
190 {
191 	struct ioinfo *info;
192 
193 	/* Limit to 64M for now */
194 	if (n_blocks * VIOSCSI_BLOCK_SIZE_CDROM > (1 << 26)) {
195 		log_warnx("%s: read size exceeded 64M", __func__);
196 		return (NULL);
197 	}
198 
199 	info = calloc(1, sizeof(*info));
200 	if (!info)
201 		goto nomem;
202 	info->buf = malloc(n_blocks * VIOSCSI_BLOCK_SIZE_CDROM);
203 	if (info->buf == NULL)
204 		goto nomem;
205 	info->len = n_blocks * VIOSCSI_BLOCK_SIZE_CDROM;
206 	info->offset = block * VIOSCSI_BLOCK_SIZE_CDROM;
207 
208 	return info;
209 
210 nomem:
211 	free(info);
212 	log_warn("malloc error vioscsi read");
213 	return (NULL);
214 }
215 
216 static const uint8_t *
vioscsi_finish_read(struct vioscsi_dev * dev,struct ioinfo * info)217 vioscsi_finish_read(struct vioscsi_dev *dev, struct ioinfo *info)
218 {
219 	struct virtio_backing *f = &dev->file;
220 
221 	if (f->pread(f->p, info->buf, info->len, info->offset) != info->len) {
222 		log_warn("vioscsi read error");
223 		return NULL;
224 	}
225 
226 	return info->buf;
227 }
228 
229 static int
vioscsi_handle_tur(struct vioscsi_dev * dev,struct virtio_scsi_req_hdr * req,struct virtio_vq_acct * acct)230 vioscsi_handle_tur(struct vioscsi_dev *dev, struct virtio_scsi_req_hdr *req,
231     struct virtio_vq_acct *acct)
232 {
233 	int ret = 0;
234 	struct virtio_scsi_res_hdr resp;
235 
236 	memset(&resp, 0, sizeof(resp));
237 	/* Move index for response */
238 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
239 	    &(acct->resp_idx));
240 
241 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
242 
243 	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
244 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
245 		    __func__, acct->resp_desc->addr);
246 	} else {
247 		ret = 1;
248 		dev->cfg.isr_status = 1;
249 		/* Move ring indexes */
250 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
251 		    acct->req_desc, acct->req_idx);
252 	}
253 
254 	return (ret);
255 }
256 
/*
 * INQUIRY: identify the emulated device as a removable CD-ROM with
 * fixed vendor/product/revision strings.  Writes a status header and
 * a scsi_inquiry_data payload into consecutive guest descriptors.
 * Returns 1 when the used ring advanced and the guest should be
 * interrupted, 0 on any failure.
 */
static int
vioscsi_handle_inquiry(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	struct scsi_inquiry_data *inq_data;

#if DEBUG
	struct scsi_inquiry *inq = (struct scsi_inquiry *)(req->cdb);
	log_debug("%s: INQ - EVPD %d PAGE_CODE 0x%08x LEN %d", __func__,
	    inq->flags & SI_EVPD, inq->pagecode, _2btol(inq->length));
#endif /* DEBUG */

	memset(&resp, 0, sizeof(resp));
	vioscsi_prepare_resp(&resp,
	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);

	inq_data = calloc(1, sizeof(struct scsi_inquiry_data));

	if (inq_data == NULL) {
		log_warnx("%s: cannot alloc inq_data", __func__);
		goto inq_out;
	}

	/* Report a removable CD-ROM. */
	inq_data->device = T_CDROM;
	inq_data->dev_qual2 = SID_REMOVABLE;
	/* Leave version zero to say we don't comply */
	inq_data->response_format = SID_SCSI2_RESPONSE;
	inq_data->additional_length = SID_SCSI2_ALEN;
	memcpy(inq_data->vendor, INQUIRY_VENDOR, INQUIRY_VENDOR_LEN);
	memcpy(inq_data->product, INQUIRY_PRODUCT, INQUIRY_PRODUCT_LEN);
	memcpy(inq_data->revision, INQUIRY_REVISION, INQUIRY_REVISION_LEN);

	/* Move index for response */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing resp to 0x%llx size %d at local "
	    "idx %d req_idx %d global_idx %d", __func__, acct->resp_desc->addr,
	    acct->resp_desc->len, acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
		    __func__, acct->resp_desc->addr);
		goto free_inq;
	}

	/* Move index for inquiry_data */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing inq_data to 0x%llx size %d at "
	    "local idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, inq_data,
		sizeof(struct scsi_inquiry_data))) {
		log_warnx("%s: unable to write inquiry"
		    " response to gpa @ 0x%llx",
		    __func__, acct->resp_desc->addr);
	} else {
		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}

free_inq:
	free(inq_data);
inq_out:
	return (ret);
}
332 
/*
 * MODE SENSE (6): report current values for the error recovery page
 * or the CD/DVD capabilities page.  Any other page control/code gets
 * CHECK CONDITION / ILLEGAL REQUEST.  On success the zeroed resp
 * header is written back as-is, followed by the mode page reply.
 * Returns 1 when the used ring advanced, 0 on failure.
 */
static int
vioscsi_handle_mode_sense(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	uint8_t mode_page_ctl;
	uint8_t mode_page_code;
	uint8_t *mode_reply;
	uint8_t mode_reply_len = 0;
	struct scsi_mode_sense *mode_sense;

	memset(&resp, 0, sizeof(resp));
	mode_sense = (struct scsi_mode_sense *)(req->cdb);
	mode_page_ctl = mode_sense->page & SMS_PAGE_CTRL;
	mode_page_code = mode_sense->page & SMS_PAGE_CODE;

	DPRINTF("%s: M_SENSE - DBD %d Page Ctrl 0x%x Code 0x%x Len %u",
	    __func__, mode_sense->byte2 & SMS_DBD, mode_page_ctl,
	    mode_page_code, mode_sense->length);

	if (mode_page_ctl == SMS_PAGE_CTRL_CURRENT &&
	    (mode_page_code == ERR_RECOVERY_PAGE ||
	    mode_page_code == CDVD_CAPABILITIES_PAGE)) {
		/*
		 * mode sense header is 4 bytes followed
		 * by a variable page
		 * ERR_RECOVERY_PAGE is 12 bytes
		 * CDVD_CAPABILITIES_PAGE is 32 bytes
		 */
		switch (mode_page_code) {
		case ERR_RECOVERY_PAGE:
			mode_reply_len = 16;
			mode_reply =
			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
			if (mode_reply == NULL)
				goto mode_sense_out;

			/* set the page header */
			*mode_reply = mode_reply_len - 1;
			*(mode_reply + 1) = MODE_MEDIUM_TYPE_CODE;

			/* set the page data, 7.3.2.1 mmc-5 */
			*(mode_reply + 4) = MODE_ERR_RECOVERY_PAGE_CODE;
			*(mode_reply + 5) = MODE_ERR_RECOVERY_PAGE_LEN;
			*(mode_reply + 7) = MODE_READ_RETRY_COUNT;
			break;
		case CDVD_CAPABILITIES_PAGE:
			mode_reply_len = 36;
			mode_reply =
			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
			if (mode_reply == NULL)
				goto mode_sense_out;

			/* set the page header */
			*mode_reply = mode_reply_len - 1;
			*(mode_reply + 1) = MODE_MEDIUM_TYPE_CODE;

			/* set the page data, 6.3.11 mmc-3 */
			*(mode_reply + 4) = MODE_CDVD_CAP_PAGE_CODE;
			*(mode_reply + 5) = mode_reply_len - 6;
			*(mode_reply + 6) = MODE_CDVD_CAP_READ_CODE;
			_lto2b(MODE_CDVD_CAP_NUM_LEVELS, mode_reply + 14);
			break;
		default:
			/* unreachable: page code checked above */
			goto mode_sense_error;
			break;
		}

		/* Move index for response */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->req_desc, &(acct->resp_idx));

		DPRINTF("%s: writing resp to 0x%llx size %d "
		    "at local idx %d req_idx %d global_idx %d",
		    __func__, acct->resp_desc->addr, mode_reply_len,
		    acct->resp_idx, acct->req_idx, acct->idx);

		/* resp is still all zeroes: success status */
		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
			log_warnx("%s: unable to write OK"
			    " resp status data @ 0x%llx",
			    __func__, acct->resp_desc->addr);
			free(mode_reply);
			goto mode_sense_out;
		}

		/* Move index for mode_reply */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->resp_desc, &(acct->resp_idx));

		DPRINTF("%s: writing mode_reply to 0x%llx "
		    "size %d at local idx %d req_idx %d "
		    "global_idx %d", __func__, acct->resp_desc->addr,
		    mode_reply_len, acct->resp_idx, acct->req_idx, acct->idx);

		if (write_mem(acct->resp_desc->addr, mode_reply,
			mode_reply_len)) {
			log_warnx("%s: unable to write "
			    "mode_reply to gpa @ 0x%llx",
			    __func__, acct->resp_desc->addr);
			free(mode_reply);
			goto mode_sense_out;
		}

		free(mode_reply);

		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	} else {
mode_sense_error:
		/* send back un-supported */
		vioscsi_prepare_resp(&resp,
		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);

		/* Move index for response */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->req_desc, &(acct->resp_idx));

		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
			log_warnx("%s: unable to set ERR status data @ 0x%llx",
			    __func__, acct->resp_desc->addr);
			goto mode_sense_out;
		}

		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}
mode_sense_out:
	return (ret);
}
470 
/*
 * MODE SENSE (10): same pages as the 6-byte variant but with the
 * 8-byte mode parameter header, so the reply buffers are 4 bytes
 * larger and the header length is a 2-byte big-endian value.
 * Returns 1 when the used ring advanced, 0 on failure.
 */
static int
vioscsi_handle_mode_sense_big(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	uint8_t mode_page_ctl;
	uint8_t mode_page_code;
	uint8_t *mode_reply;
	uint8_t mode_reply_len = 0;
	struct scsi_mode_sense_big *mode_sense_10;

	memset(&resp, 0, sizeof(resp));
	mode_sense_10 = (struct scsi_mode_sense_big *)(req->cdb);
	mode_page_ctl = mode_sense_10->page & SMS_PAGE_CTRL;
	mode_page_code = mode_sense_10->page & SMS_PAGE_CODE;

	DPRINTF("%s: M_SENSE_10 - DBD %d Page Ctrl 0x%x Code 0x%x Len %u",
	    __func__, mode_sense_10->byte2 & SMS_DBD, mode_page_ctl,
	    mode_page_code, (uint16_t)_2btol(mode_sense_10->length));

	if (mode_page_ctl == SMS_PAGE_CTRL_CURRENT &&
	    (mode_page_code == ERR_RECOVERY_PAGE ||
	    mode_page_code == CDVD_CAPABILITIES_PAGE)) {
		/*
		 * mode sense header is 8 bytes followed
		 * by a variable page
		 * ERR_RECOVERY_PAGE is 12 bytes
		 * CDVD_CAPABILITIES_PAGE is 32 bytes
		 */
		switch (mode_page_code) {
		case ERR_RECOVERY_PAGE:
			mode_reply_len = 20;
			mode_reply =
			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
			if (mode_reply == NULL)
				goto mode_sense_big_out;

			/* set the page header */
			_lto2b(mode_reply_len - 2, mode_reply);
			*(mode_reply + 2) = MODE_MEDIUM_TYPE_CODE;

			/* set the page data, 7.3.2.1 mmc-5 */
			*(mode_reply + 8) = MODE_ERR_RECOVERY_PAGE_CODE;
			*(mode_reply + 9) = MODE_ERR_RECOVERY_PAGE_LEN;
			*(mode_reply + 11) = MODE_READ_RETRY_COUNT;
			break;
		case CDVD_CAPABILITIES_PAGE:
			mode_reply_len = 40;
			mode_reply =
			    (uint8_t*)calloc(mode_reply_len, sizeof(uint8_t));
			if (mode_reply == NULL)
				goto mode_sense_big_out;

			/* set the page header */
			_lto2b(mode_reply_len - 2, mode_reply);
			*(mode_reply + 2) = MODE_MEDIUM_TYPE_CODE;

			/* set the page data, 6.3.11 mmc-3 */
			*(mode_reply + 8) = MODE_CDVD_CAP_PAGE_CODE;
			*(mode_reply + 9) = mode_reply_len - 6;
			*(mode_reply + 10) = MODE_CDVD_CAP_READ_CODE;
			_lto2b(MODE_CDVD_CAP_NUM_LEVELS, mode_reply + 18);
			break;
		default:
			/* unreachable: page code checked above */
			goto mode_sense_big_error;
			break;
		}

		/* Move index for response */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->req_desc, &(acct->resp_idx));

		DPRINTF("%s: writing resp to 0x%llx size %d "
		    "at local idx %d req_idx %d global_idx %d",
		    __func__, acct->resp_desc->addr, acct->resp_desc->len,
		    acct->resp_idx, acct->req_idx, acct->idx);

		/* resp is still all zeroes: success status */
		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
			log_warnx("%s: unable to write OK"
			    " resp status data @ 0x%llx",
			    __func__, acct->resp_desc->addr);
			free(mode_reply);
			goto mode_sense_big_out;
		}

		/* Move index for mode_reply */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->resp_desc, &(acct->resp_idx));

		DPRINTF("%s: writing mode_reply to 0x%llx "
		    "size %d at local idx %d req_idx %d global_idx %d",
		    __func__, acct->resp_desc->addr, mode_reply_len,
		    acct->resp_idx, acct->req_idx, acct->idx);

		if (write_mem(acct->resp_desc->addr, mode_reply,
			mode_reply_len)) {
			log_warnx("%s: unable to write "
			    "mode_reply to gpa @ 0x%llx",
			    __func__, acct->resp_desc->addr);
			free(mode_reply);
			goto mode_sense_big_out;
		}

		free(mode_reply);

		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	} else {
mode_sense_big_error:
		/* send back un-supported */
		vioscsi_prepare_resp(&resp,
		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);

		/* Move index for response */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->req_desc, &(acct->resp_idx));

		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
			log_warnx("%s: unable to set ERR status data @ 0x%llx",
			    __func__, acct->resp_desc->addr);
			goto mode_sense_big_out;
		}

		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}
mode_sense_big_out:
	return (ret);
}
608 
/*
 * READ CAPACITY (10): report the last block address and block size of
 * the backing ISO.  If the image has more blocks than fit in 32 bits,
 * report UINT32_MAX so the guest falls back to READ CAPACITY (16).
 * Returns 1 when the used ring advanced, 0 on failure.
 */
static int
vioscsi_handle_read_capacity(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	struct scsi_read_cap_data *r_cap_data;

#if DEBUG
	struct scsi_read_capacity *r_cap =
	    (struct scsi_read_capacity *)(req->cdb);
	log_debug("%s: %s - Addr 0x%08x", __func__,
	    vioscsi_op_names(r_cap->opcode), _4btol(r_cap->addr));
#endif /* DEBUG */

	memset(&resp, 0, sizeof(resp));
	vioscsi_prepare_resp(&resp,
	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);

	r_cap_data = calloc(1, sizeof(struct scsi_read_cap_data));

	if (r_cap_data == NULL) {
		log_warnx("%s: cannot alloc r_cap_data", __func__);
		goto read_capacity_out;
	}

	DPRINTF("%s: ISO has %lld bytes and %lld blocks",
	    __func__, dev->sz, dev->n_blocks);

	/*
	 * determine if num blocks of iso image > UINT32_MAX
	 * if it is, set addr to UINT32_MAX (0xffffffff)
	 * indicating to hosts that READ_CAPACITY_16 should
	 * be called to retrieve the full size
	 */
	if (dev->n_blocks >= UINT32_MAX) {
		_lto4b(UINT32_MAX, r_cap_data->addr);
		_lto4b(VIOSCSI_BLOCK_SIZE_CDROM, r_cap_data->length);
		log_warnx("%s: ISO sz %lld is bigger than "
		    "UINT32_MAX %u, all data may not be read",
		    __func__, dev->sz, UINT32_MAX);
	} else {
		/* addr holds the LAST block address, hence -1 */
		_lto4b(dev->n_blocks - 1, r_cap_data->addr);
		_lto4b(VIOSCSI_BLOCK_SIZE_CDROM, r_cap_data->length);
	}

	/* Move index for response */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing resp to 0x%llx size %d at local "
	    "idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
		    __func__, acct->resp_desc->addr);
		goto free_read_capacity;
	}

	/* Move index for r_cap_data */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing r_cap_data to 0x%llx size %d at "
	    "local idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, r_cap_data,
		sizeof(struct scsi_read_cap_data))) {
		log_warnx("%s: unable to write read_cap_data"
		    " response to gpa @ 0x%llx",
		    __func__, acct->resp_desc->addr);
	} else {
		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}

free_read_capacity:
	free(r_cap_data);
read_capacity_out:
	return (ret);
}
697 
/*
 * READ CAPACITY (16): report the 64-bit last block address and the
 * block size of the backing ISO.  Returns 1 when the used ring
 * advanced and the guest should be interrupted, 0 on failure.
 */
static int
vioscsi_handle_read_capacity_16(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	struct scsi_read_cap_data_16 *r_cap_data_16;

#if DEBUG
	struct scsi_read_capacity_16 *r_cap_16 =
	    (struct scsi_read_capacity_16 *)(req->cdb);
	log_debug("%s: %s - Addr 0x%016llx", __func__,
	    vioscsi_op_names(r_cap_16->opcode), _8btol(r_cap_16->addr));
#endif /* DEBUG */

	memset(&resp, 0, sizeof(resp));
	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);

	r_cap_data_16 = calloc(1, sizeof(struct scsi_read_cap_data_16));

	if (r_cap_data_16 == NULL) {
		log_warnx("%s: cannot alloc r_cap_data_16",
		    __func__);
		goto read_capacity_16_out;
	}

	DPRINTF("%s: ISO has %lld bytes and %lld blocks", __func__,
	    dev->sz, dev->n_blocks);

	/* addr holds the LAST block address, hence -1 */
	_lto8b(dev->n_blocks - 1, r_cap_data_16->addr);
	_lto4b(VIOSCSI_BLOCK_SIZE_CDROM, r_cap_data_16->length);

	/* Move index for response */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing resp to 0x%llx size %d at local "
	    "idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
		log_warnx("%s: unable to write OK resp status "
		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
		goto free_read_capacity_16;
	}

	/* Move index for r_cap_data_16 */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing r_cap_data_16 to 0x%llx size %d "
	    "at local idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, r_cap_data_16,
		sizeof(struct scsi_read_cap_data_16))) {
		log_warnx("%s: unable to write read_cap_data_16"
		    " response to gpa @ 0x%llx",
		    __func__, acct->resp_desc->addr);
	} else {
		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}

free_read_capacity_16:
	free(r_cap_data_16);
read_capacity_16_out:
	return (ret);
}
772 
/*
 * REPORT LUNS: report a single LUN (the one addressed by the
 * request).  An allocation length below the minimum gets CHECK
 * CONDITION / ILLEGAL REQUEST.  Returns 1 when the used ring
 * advanced and the guest should be interrupted, 0 on failure.
 */
static int
vioscsi_handle_report_luns(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	uint32_t rpl_length;
	struct scsi_report_luns *rpl;
	struct vioscsi_report_luns_data *reply_rpl;

	memset(&resp, 0, sizeof(resp));
	rpl = (struct scsi_report_luns *)(req->cdb);
	rpl_length = _4btol(rpl->length);

	DPRINTF("%s: REPORT_LUNS Report 0x%x Length %d", __func__,
	    rpl->selectreport, rpl_length);

	/* Reject allocation lengths too small to hold the reply. */
	if (rpl_length < RPL_MIN_SIZE) {
		DPRINTF("%s: RPL_Length %d < %d (RPL_MIN_SIZE)", __func__,
		    rpl_length, RPL_MIN_SIZE);

		vioscsi_prepare_resp(&resp,
		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);

		/* Move index for response */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->req_desc, &(acct->resp_idx));

		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
			log_warnx("%s: unable to set ERR "
			    "status data @ 0x%llx", __func__,
			    acct->resp_desc->addr);
		} else {
			ret = 1;
			dev->cfg.isr_status = 1;
			/* Move ring indexes */
			vioscsi_next_ring_item(dev, acct->avail, acct->used,
			    acct->req_desc, acct->req_idx);
		}
		goto rpl_out;

	}

	reply_rpl = calloc(1, sizeof(struct vioscsi_report_luns_data));

	if (reply_rpl == NULL) {
		log_warnx("%s: cannot alloc reply_rpl", __func__);
		goto rpl_out;
	}

	/* Single LUN: echo the requested LUN back in the list. */
	_lto4b(RPL_SINGLE_LUN, reply_rpl->length);
	memcpy(reply_rpl->lun, req->lun, RPL_SINGLE_LUN);

	vioscsi_prepare_resp(&resp,
	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);

	/* Move index for response */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing resp to 0x%llx size %d at local "
	    "idx %d req_idx %d global_idx %d", __func__, acct->resp_desc->addr,
	    acct->resp_desc->len, acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
		    __func__, acct->resp_desc->addr);
		goto free_rpl;
	}

	/* Move index for reply_rpl */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing reply_rpl to 0x%llx size %d at "
	    "local idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, reply_rpl,
		sizeof(struct vioscsi_report_luns_data))) {
		log_warnx("%s: unable to write reply_rpl"
		    " response to gpa @ 0x%llx",
		    __func__, acct->resp_desc->addr);
	} else {
		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}

free_rpl:
	free(reply_rpl);
rpl_out:
	return (ret);
}
871 
872 static int
vioscsi_handle_read_6(struct vioscsi_dev * dev,struct virtio_scsi_req_hdr * req,struct virtio_vq_acct * acct)873 vioscsi_handle_read_6(struct vioscsi_dev *dev,
874     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
875 {
876 	int ret = 0;
877 	struct virtio_scsi_res_hdr resp;
878 	const uint8_t *read_buf;
879 	uint32_t read_lba;
880 	struct ioinfo *info;
881 	struct scsi_rw *read_6;
882 
883 	memset(&resp, 0, sizeof(resp));
884 	read_6 = (struct scsi_rw *)(req->cdb);
885 	read_lba = ((read_6->addr[0] & SRW_TOPADDR) << 16 ) |
886 	    (read_6->addr[1] << 8) | read_6->addr[2];
887 
888 	DPRINTF("%s: READ Addr 0x%08x Len %d (%d)",
889 	    __func__, read_lba, read_6->length, read_6->length * dev->max_xfer);
890 
891 	/* check if lba is in range */
892 	if (read_lba > dev->n_blocks - 1) {
893 		DPRINTF("%s: requested block out of range req: %ud max: %lld",
894 		    __func__, read_lba, dev->n_blocks);
895 
896 		vioscsi_prepare_resp(&resp,
897 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
898 		    SENSE_LBA_OUT_OF_RANGE, SENSE_DEFAULT_ASCQ);
899 
900 		/* Move index for response */
901 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
902 		    acct->req_desc, &(acct->resp_idx));
903 
904 		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
905 			log_warnx("%s: unable to set ERR "
906 			    "status data @ 0x%llx", __func__,
907 			    acct->resp_desc->addr);
908 		} else {
909 			ret = 1;
910 			dev->cfg.isr_status = 1;
911 			/* Move ring indexes */
912 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
913 			    acct->req_desc, acct->req_idx);
914 		}
915 		goto read_6_out;
916 	}
917 
918 	info = vioscsi_start_read(dev, read_lba, read_6->length);
919 
920 	if (info == NULL) {
921 		log_warnx("%s: cannot alloc for read", __func__);
922 		goto read_6_out;
923 	}
924 
925 	/* read block */
926 	read_buf = vioscsi_finish_read(dev, info);
927 
928 	if (read_buf == NULL) {
929 		log_warnx("%s: error reading position %ud",
930 		    __func__, read_lba);
931 		vioscsi_prepare_resp(&resp,
932 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_MEDIUM_ERROR,
933 		    SENSE_MEDIUM_NOT_PRESENT, SENSE_DEFAULT_ASCQ);
934 
935 		/* Move index for response */
936 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
937 		    acct->req_desc, &(acct->resp_idx));
938 
939 		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
940 			log_warnx("%s: unable to set ERR "
941 			    "status data @ 0x%llx", __func__,
942 			    acct->resp_desc->addr);
943 		} else {
944 			ret = 1;
945 			dev->cfg.isr_status = 1;
946 			/* Move ring indexes */
947 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
948 			    acct->req_desc, acct->req_idx);
949 		}
950 
951 		goto free_read_6;
952 	}
953 
954 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
955 
956 	/* Move index for response */
957 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
958 	    &(acct->resp_idx));
959 
960 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
961 	    "idx %d req_idx %d global_idx %d",
962 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
963 	    acct->resp_idx, acct->req_idx, acct->idx);
964 
965 	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
966 		log_warnx("%s: unable to write OK resp status "
967 		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
968 		goto free_read_6;
969 	}
970 
971 	/* Move index for read_buf */
972 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
973 	    &(acct->resp_idx));
974 
975 	DPRINTF("%s: writing read_buf to 0x%llx size %d at "
976 	    "local idx %d req_idx %d global_idx %d",
977 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
978 	    acct->resp_idx, acct->req_idx, acct->idx);
979 
980 	if (write_mem(acct->resp_desc->addr, read_buf, info->len)) {
981 		log_warnx("%s: unable to write read_buf to gpa @ 0x%llx",
982 		    __func__, acct->resp_desc->addr);
983 	} else {
984 		ret = 1;
985 		dev->cfg.isr_status = 1;
986 		/* Move ring indexes */
987 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
988 		    acct->req_desc, acct->req_idx);
989 	}
990 
991 free_read_6:
992 	vioscsi_free_info(info);
993 read_6_out:
994 	return (ret);
995 }
996 
997 static int
vioscsi_handle_read_10(struct vioscsi_dev * dev,struct virtio_scsi_req_hdr * req,struct virtio_vq_acct * acct)998 vioscsi_handle_read_10(struct vioscsi_dev *dev,
999     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1000 {
1001 	int ret = 0;
1002 	struct virtio_scsi_res_hdr resp;
1003 	const uint8_t *read_buf;
1004 	uint32_t read_lba;
1005 	uint16_t read_10_len;
1006 	off_t chunk_offset;
1007 	struct ioinfo *info;
1008 	struct scsi_rw_10 *read_10;
1009 	size_t chunk_len = 0;
1010 
1011 	memset(&resp, 0, sizeof(resp));
1012 	read_10 = (struct scsi_rw_10 *)(req->cdb);
1013 	read_lba = _4btol(read_10->addr);
1014 	read_10_len = _2btol(read_10->length);
1015 	chunk_offset = 0;
1016 
1017 	DPRINTF("%s: READ_10 Addr 0x%08x Len %d (%d)",
1018 	    __func__, read_lba, read_10_len, read_10_len * dev->max_xfer);
1019 
1020 	/* check if lba is in range */
1021 	if (read_lba > dev->n_blocks - 1) {
1022 		DPRINTF("%s: requested block out of range req: %ud max: %lld",
1023 		    __func__, read_lba, dev->n_blocks);
1024 
1025 		vioscsi_prepare_resp(&resp,
1026 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
1027 		    SENSE_LBA_OUT_OF_RANGE, SENSE_DEFAULT_ASCQ);
1028 
1029 		/* Move index for response */
1030 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1031 		    acct->req_desc, &(acct->resp_idx));
1032 
1033 		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1034 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
1035 			    __func__, acct->resp_desc->addr);
1036 		} else {
1037 			ret = 1;
1038 			dev->cfg.isr_status = 1;
1039 			/* Move ring indexes */
1040 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
1041 			    acct->req_desc, acct->req_idx);
1042 		}
1043 
1044 		goto read_10_out;
1045 	}
1046 
1047 	info = vioscsi_start_read(dev, read_lba, read_10_len);
1048 
1049 	if (info == NULL) {
1050 		log_warnx("%s: cannot alloc for read", __func__);
1051 		goto read_10_out;
1052 	}
1053 
1054 	/* read block */
1055 	read_buf = vioscsi_finish_read(dev, info);
1056 
1057 	if (read_buf == NULL) {
1058 		log_warnx("%s: error reading position %ud", __func__, read_lba);
1059 		vioscsi_prepare_resp(&resp,
1060 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_MEDIUM_ERROR,
1061 		    SENSE_MEDIUM_NOT_PRESENT, SENSE_DEFAULT_ASCQ);
1062 
1063 		/* Move index for response */
1064 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1065 		    acct->req_desc, &(acct->resp_idx));
1066 
1067 		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1068 			log_warnx("%s: unable to set ERR status data @ 0x%llx",
1069 			    __func__, acct->resp_desc->addr);
1070 		} else {
1071 			ret = 1;
1072 			dev->cfg.isr_status = 1;
1073 			/* Move ring indexes */
1074 			vioscsi_next_ring_item(dev, acct->avail, acct->used,
1075 			    acct->req_desc, acct->req_idx);
1076 		}
1077 
1078 		goto free_read_10;
1079 	}
1080 
1081 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1082 
1083 	/* Move index for response */
1084 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
1085 	    &(acct->resp_idx));
1086 
1087 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
1088 	    "idx %d req_idx %d global_idx %d",
1089 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1090 	    acct->resp_idx, acct->req_idx, acct->idx);
1091 
1092 	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1093 		log_warnx("%s: unable to write OK resp status "
1094 		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
1095 		goto free_read_10;
1096 	}
1097 
1098 	/*
1099 	 * Perform possible chunking of writes of read_buf
1100 	 * based on the segment length allocated by the host.
1101 	 * At least one write will be performed.
1102 	 * If chunk_offset == info->len, no more writes
1103 	 */
1104 	do {
1105 		/* Move index for read_buf */
1106 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1107 		    acct->resp_desc, &(acct->resp_idx));
1108 
1109 		DPRINTF("%s: writing read_buf to 0x%llx size "
1110 		    "%d at local idx %d req_idx %d global_idx %d",
1111 		    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1112 		    acct->resp_idx, acct->req_idx, acct->idx);
1113 
1114 		/* Check we don't read beyond read_buf boundaries. */
1115 		if (acct->resp_desc->len > info->len - chunk_offset) {
1116 			log_warnx("%s: descriptor length beyond read_buf len",
1117 			    __func__);
1118 			chunk_len = info->len - chunk_offset;
1119 		} else
1120 			chunk_len = acct->resp_desc->len;
1121 
1122 		if (write_mem(acct->resp_desc->addr, read_buf + chunk_offset,
1123 			chunk_len)) {
1124 			log_warnx("%s: unable to write read_buf"
1125 			    " to gpa @ 0x%llx", __func__,
1126 			    acct->resp_desc->addr);
1127 			goto free_read_10;
1128 		}
1129 		chunk_offset += acct->resp_desc->len;
1130 	} while (chunk_offset < info->len);
1131 
1132 	ret = 1;
1133 	dev->cfg.isr_status = 1;
1134 	/* Move ring indexes */
1135 	vioscsi_next_ring_item(dev, acct->avail, acct->used, acct->req_desc,
1136 	    acct->req_idx);
1137 
1138 free_read_10:
1139 	vioscsi_free_info(info);
1140 read_10_out:
1141 	return (ret);
1142 }
1143 
1144 static int
vioscsi_handle_prevent_allow(struct vioscsi_dev * dev,struct virtio_scsi_req_hdr * req,struct virtio_vq_acct * acct)1145 vioscsi_handle_prevent_allow(struct vioscsi_dev *dev,
1146     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1147 {
1148 	int ret = 0;
1149 	struct virtio_scsi_res_hdr resp;
1150 
1151 	memset(&resp, 0, sizeof(resp));
1152 	/* Move index for response */
1153 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
1154 	    &(acct->resp_idx));
1155 
1156 	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1157 
1158 	if (dev->locked) {
1159 		DPRINTF("%s: unlocking medium", __func__);
1160 	} else {
1161 		DPRINTF("%s: locking medium", __func__);
1162 	}
1163 
1164 	dev->locked = dev->locked ? 0 : 1;
1165 
1166 	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1167 		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
1168 		    __func__, acct->resp_desc->addr);
1169 	} else {
1170 		ret = 1;
1171 		dev->cfg.isr_status = 1;
1172 		/* Move ring indexes */
1173 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1174 		    acct->req_desc, acct->req_idx);
1175 	}
1176 
1177 	return (ret);
1178 }
1179 
1180 static int
vioscsi_handle_mechanism_status(struct vioscsi_dev * dev,struct virtio_scsi_req_hdr * req,struct virtio_vq_acct * acct)1181 vioscsi_handle_mechanism_status(struct vioscsi_dev *dev,
1182     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1183 {
1184 	int ret = 0;
1185 	struct virtio_scsi_res_hdr resp;
1186 	struct scsi_mechanism_status_header *mech_status_header;
1187 
1188 	DPRINTF("%s: MECH_STATUS Len %u", __func__,
1189 	    _2btol(((struct scsi_mechanism_status *)(req->cdb))->length));
1190 
1191 	mech_status_header = calloc(1,
1192 	    sizeof(struct scsi_mechanism_status_header));
1193 
1194 	if (mech_status_header == NULL)
1195 		goto mech_out;
1196 
1197 	/* return a 0 header since we are not a changer */
1198 	memset(&resp, 0, sizeof(resp));
1199 	vioscsi_prepare_resp(&resp,
1200 	    VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);
1201 
1202 	/* Move index for response */
1203 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1204 	    acct->req_desc, &(acct->resp_idx));
1205 
1206 	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1207 		log_warnx("%s: unable to set ERR status data @ 0x%llx",
1208 		    __func__, acct->resp_desc->addr);
1209 		goto free_mech;
1210 	}
1211 
1212 	/* Move index for mech_status_header */
1213 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
1214 	    &(acct->resp_idx));
1215 
1216 	if (write_mem(acct->resp_desc->addr, mech_status_header,
1217 		sizeof(struct scsi_mechanism_status_header))) {
1218 		log_warnx("%s: unable to write "
1219 		    "mech_status_header response to "
1220 		    "gpa @ 0x%llx",
1221 		    __func__, acct->resp_desc->addr);
1222 	} else {
1223 		ret = 1;
1224 		dev->cfg.isr_status = 1;
1225 		/* Move ring indexes */
1226 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1227 		    acct->req_desc, acct->req_idx);
1228 	}
1229 
1230 free_mech:
1231 	free(mech_status_header);
1232 mech_out:
1233 	return (ret);
1234 }
1235 
/*
 * Handle a READ TOC command.
 *
 * Builds a minimal single-track TOC (track 1 plus the 0xaa lead-out
 * track, whose start address is the device's block count) and writes
 * the OK response header followed by the TOC data into the guest's
 * descriptors. Requests for any other track are rejected with
 * ILLEGAL REQUEST sense.
 *
 * Returns 1 if a response was delivered (caller should assert the
 * interrupt), 0 otherwise.
 */
static int
vioscsi_handle_read_toc(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	uint16_t toc_data_len;
	uint8_t toc_data[TOC_DATA_SIZE];
	uint8_t *toc_data_p;
	struct scsi_read_toc *toc = (struct scsi_read_toc *)(req->cdb);

	DPRINTF("%s: %s - MSF %d Track 0x%02x Addr 0x%04x",
	    __func__, vioscsi_op_names(toc->opcode), ((toc->byte2 >> 1) & 1),
	    toc->from_track, _2btol(toc->data_len));

	/* Tracks should be 0, 1, or LEAD_OUT_TRACK, 0xaa */
	if (toc->from_track > 1 &&
	    toc->from_track != READ_TOC_LEAD_OUT_TRACK) {
		/* illegal request */
		log_warnx("%s: illegal request Track 0x%02x",
		    __func__, toc->from_track);

		memset(&resp, 0, sizeof(resp));
		vioscsi_prepare_resp(&resp,
		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);

		/* Move index for response */
		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
		    acct->req_desc, &(acct->resp_idx));

		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
			log_warnx("%s: unable to set ERR status data @ 0x%llx",
			    __func__, acct->resp_desc->addr);
			goto read_toc_out;
		}

		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);

		goto read_toc_out;
	}

	/*
	 * toc_data is defined as:
	 * [0-1]: TOC Data Length, typically 0x1a
	 * [2]: First Track, 1
	 * [3]: Last Track, 1
	 *
	 * Track 1 Descriptor
	 * [0]: Reserved, 0
	 * [1]: ADR,Control, 0x14
	 * [2]: Track #, 1
	 * [3]: Reserved, 0
	 * [4-7]: Track Start Address, LBA
	 *
	 * Track 0xaa (Lead Out) Descriptor
	 * [0]: Reserved, 0
	 * [1]: ADR,Control, 0x14
	 * [2]: Track #, 0xaa
	 * [3]: Reserved, 0
	 * [4-7]: Track Start Address, LBA
	 */
	memset(toc_data, 0, sizeof(toc_data));
	toc_data_p = toc_data + 2;
	*toc_data_p++ = READ_TOC_START_TRACK;
	*toc_data_p++ = READ_TOC_LAST_TRACK;
	/* Track 1 descriptor is skipped when only the lead-out was asked. */
	if (toc->from_track <= 1) {
		/* first track descriptor */
		*toc_data_p++ = 0x0;
		*toc_data_p++ = READ_TOC_ADR_CTL;
		*toc_data_p++ = READ_TOC_START_TRACK;
		*toc_data_p++ = 0x0;
		/* start addr for first track is 0 */
		*toc_data_p++ = 0x0;
		*toc_data_p++ = 0x0;
		*toc_data_p++ = 0x0;
		*toc_data_p++ = 0x0;
	}

	/* last track descriptor */
	*toc_data_p++ = 0x0;
	*toc_data_p++ = READ_TOC_ADR_CTL;
	*toc_data_p++ = READ_TOC_LEAD_OUT_TRACK;
	*toc_data_p++ = 0x0;

	/* lead-out start address == total number of blocks on the medium */
	_lto4b((uint32_t)dev->n_blocks, toc_data_p);
	toc_data_p += 4;

	/* TOC Data Length field excludes its own two bytes */
	toc_data_len = toc_data_p - toc_data;
	_lto2b((uint32_t)toc_data_len - 2, toc_data);

	memset(&resp, 0, sizeof(resp));
	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);

	/* Move index for response */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing resp to 0x%llx size %d at local "
	    "idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
		log_warnx("%s: unable to write OK resp status data @ 0x%llx",
		    __func__, acct->resp_desc->addr);
		goto read_toc_out;
	}

	/* Move index for toc descriptor */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing toc_data to 0x%llx size %d at "
	    "local idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	/* note: the full TOC_DATA_SIZE buffer is written, not toc_data_len */
	if (write_mem(acct->resp_desc->addr, toc_data, sizeof(toc_data))) {
		log_warnx("%s: unable to write toc descriptor data @ 0x%llx",
		    __func__, acct->resp_desc->addr);
	} else {
		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}

read_toc_out:
	return (ret);
}
1372 
1373 static int
vioscsi_handle_read_disc_info(struct vioscsi_dev * dev,struct virtio_scsi_req_hdr * req,struct virtio_vq_acct * acct)1374 vioscsi_handle_read_disc_info(struct vioscsi_dev *dev,
1375     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1376 {
1377 	int ret = 0;
1378 	struct virtio_scsi_res_hdr resp;
1379 
1380 	DPRINTF("%s: Disc Info %x", __func__,
1381 		((struct scsi_read_disc_information *)(req->cdb))->byte2);
1382 
1383 	/* send back unsupported */
1384 	memset(&resp, 0, sizeof(resp));
1385 	vioscsi_prepare_resp(&resp,
1386 	    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
1387 	    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
1388 
1389 	/* Move index for response */
1390 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1391 	    acct->req_desc, &(acct->resp_idx));
1392 
1393 	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1394 		log_warnx("%s: unable to set ERR status data @ 0x%llx",
1395 		    __func__, acct->resp_desc->addr);
1396 	} else {
1397 		ret = 1;
1398 		dev->cfg.isr_status = 1;
1399 		/* Move ring indexes */
1400 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1401 		    acct->req_desc, acct->req_idx);
1402 	}
1403 
1404 	return (ret);
1405 }
1406 
1407 static int
vioscsi_handle_gesn(struct vioscsi_dev * dev,struct virtio_scsi_req_hdr * req,struct virtio_vq_acct * acct)1408 vioscsi_handle_gesn(struct vioscsi_dev *dev,
1409     struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
1410 {
1411 	int ret = 0;
1412 	struct virtio_scsi_res_hdr resp;
1413 	uint8_t gesn_reply[GESN_SIZE];
1414 	struct scsi_gesn *gesn;
1415 	struct scsi_gesn_event_header *gesn_event_header;
1416 	struct scsi_gesn_power_event *gesn_power_event;
1417 
1418 	memset(&resp, 0, sizeof(resp));
1419 	gesn = (struct scsi_gesn *)(req->cdb);
1420 	DPRINTF("%s: GESN Method %s", __func__,
1421 	    gesn->byte2 ? "Polling" : "Asynchronous");
1422 
1423 	if (gesn->byte2 == 0) {
1424 		/* we don't support asynchronous */
1425 		vioscsi_prepare_resp(&resp,
1426 		    VIRTIO_SCSI_S_OK, SCSI_CHECK, SKEY_ILLEGAL_REQUEST,
1427 		    SENSE_ILLEGAL_CDB_FIELD, SENSE_DEFAULT_ASCQ);
1428 
1429 		/* Move index for response */
1430 		acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
1431 		    acct->req_desc, &(acct->resp_idx));
1432 
1433 		if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1434 			log_warnx("%s: unable to set ERR status  data @ 0x%llx",
1435 			    __func__, acct->resp_desc->addr);
1436 			goto gesn_out;
1437 		}
1438 
1439 		ret = 1;
1440 		dev->cfg.isr_status = 1;
1441 		/* Move ring indexes */
1442 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1443 		    acct->req_desc, acct->req_idx);
1444 
1445 		goto gesn_out;
1446 	}
1447 	memset(gesn_reply, 0, sizeof(gesn_reply));
1448 	gesn_event_header = (struct scsi_gesn_event_header *)(gesn_reply);
1449 	gesn_power_event = (struct scsi_gesn_power_event *)(gesn_reply + 4);
1450 	/* set event header length and notification */
1451 	_lto2b(GESN_HEADER_LEN, gesn_event_header->length);
1452 	gesn_event_header->notification = GESN_NOTIFY_POWER_MGMT;
1453 	gesn_event_header->supported_event = GESN_EVENT_POWER_MGMT;
1454 
1455 	/* set event descriptor */
1456 	gesn_power_event->event_code = GESN_CODE_NOCHG;
1457 	if (dev->locked)
1458 		gesn_power_event->status = GESN_STATUS_ACTIVE;
1459 	else
1460 		gesn_power_event->status = GESN_STATUS_IDLE;
1461 
1462 	/* Move index for response */
1463 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->req_desc,
1464 	    &(acct->resp_idx));
1465 
1466 	DPRINTF("%s: writing resp to 0x%llx size %d at local "
1467 	    "idx %d req_idx %d global_idx %d",
1468 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1469 	    acct->resp_idx, acct->req_idx, acct->idx);
1470 
1471 	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
1472 		log_warnx("%s: unable to write OK resp status "
1473 		    "data @ 0x%llx", __func__, acct->resp_desc->addr);
1474 		goto gesn_out;
1475 	}
1476 
1477 	/* Move index for gesn_reply */
1478 	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
1479 	    &(acct->resp_idx));
1480 
1481 	DPRINTF("%s: writing gesn_reply to 0x%llx size %d at "
1482 	    "local idx %d req_idx %d global_idx %d",
1483 	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
1484 	    acct->resp_idx, acct->req_idx, acct->idx);
1485 
1486 	if (write_mem(acct->resp_desc->addr, gesn_reply, sizeof(gesn_reply))) {
1487 		log_warnx("%s: unable to write gesn_reply"
1488 		    " response to gpa @ 0x%llx",
1489 		    __func__, acct->resp_desc->addr);
1490 	} else {
1491 		ret = 1;
1492 		dev->cfg.isr_status = 1;
1493 		/* Move ring indexes */
1494 		vioscsi_next_ring_item(dev, acct->avail, acct->used,
1495 		    acct->req_desc, acct->req_idx);
1496 	}
1497 
1498 gesn_out:
1499 	return (ret);
1500 }
1501 
/*
 * Handle a GET CONFIGURATION command (MMC-5 6.6).
 *
 * Builds a fixed feature-table reply describing a non-conforming
 * profile with core, morphing, removable-media, and random-read
 * features, then writes the OK response header followed by the reply
 * into the guest's descriptors.
 *
 * Returns 1 if a response was delivered (caller should assert the
 * interrupt), 0 otherwise.
 */
static int
vioscsi_handle_get_config(struct vioscsi_dev *dev,
    struct virtio_scsi_req_hdr *req, struct virtio_vq_acct *acct)
{
	int ret = 0;
	struct virtio_scsi_res_hdr resp;
	uint8_t *get_conf_reply;
	struct scsi_config_feature_header *config_feature_header;
	struct scsi_config_generic_descriptor *config_generic_desc;
	struct scsi_config_profile_descriptor *config_profile_desc;
	struct scsi_config_core_descriptor *config_core_desc;
	struct scsi_config_morphing_descriptor *config_morphing_desc;
	struct scsi_config_remove_media_descriptor *config_remove_media_desc;
	struct scsi_config_random_read_descriptor *config_random_read_desc;

#if DEBUG
	struct scsi_get_configuration *get_configuration =
	    (struct scsi_get_configuration *)(req->cdb);
	log_debug("%s: Conf RT %x Feature %d Len %d", __func__,
	    get_configuration->byte2, _2btol(get_configuration->feature),
	    _2btol(get_configuration->length));
#endif /* DEBUG */

	get_conf_reply = (uint8_t*)calloc(G_CONFIG_REPLY_SIZE, sizeof(uint8_t));

	if (get_conf_reply == NULL)
		goto get_config_out;

	/*
	 * Use MMC-5 6.6 for structure and
	 * MMC-5 5.2 to send back:
	 * feature header - 8 bytes
	 * feature descriptor for profile list - 8 bytes
	 * feature descriptor for core feature - 12 bytes
	 * feature descriptor for morphing feature - 8 bytes
	 * feature descriptor for removable media - 8 bytes
	 * feature descriptor for random read feature - 12 bytes
	 */

	/* byte offsets below follow the descriptor sizes listed above */
	config_feature_header =
	    (struct scsi_config_feature_header *)(get_conf_reply);
	config_generic_desc =
	    (struct scsi_config_generic_descriptor *)(get_conf_reply + 8);
	config_profile_desc =
	    (struct scsi_config_profile_descriptor *)(get_conf_reply + 12);
	config_core_desc =
	    (struct scsi_config_core_descriptor *)(get_conf_reply + 16);
	config_morphing_desc =
	    (struct scsi_config_morphing_descriptor *)(get_conf_reply + 28);
	config_remove_media_desc =
	    (struct scsi_config_remove_media_descriptor *)(get_conf_reply + 36);
	config_random_read_desc =
	    (struct scsi_config_random_read_descriptor *)(get_conf_reply + 44);

	/* set size to be get_conf_reply - size field */
	_lto4b(G_CONFIG_REPLY_SIZE_HEX, config_feature_header->length);
	/* set current profile to be non-conforming */
	_lto2b(CONFIG_PROFILE_NON_CONFORM,
	    config_feature_header->current_profile);

	/* fill out profile list feature */
	_lto2b(CONFIG_FEATURE_CODE_PROFILE, config_generic_desc->feature_code);
	config_generic_desc->byte3 = CONFIG_PROFILELIST_BYTE3;
	config_generic_desc->length = CONFIG_PROFILELIST_LENGTH;
	/* fill out profile descriptor for NON_COFORM */
	_lto2b(CONFIG_PROFILE_NON_CONFORM, config_profile_desc->profile_number);
	config_profile_desc->byte3 = CONFIG_PROFILE_BYTE3;

	/* fill out core feature */
	_lto2b(CONFIG_FEATURE_CODE_CORE, config_core_desc->feature_code);
	config_core_desc->byte3 = CONFIG_CORE_BYTE3;
	config_core_desc->length = CONFIG_CORE_LENGTH;
	_lto4b(CONFIG_CORE_PHY_SCSI, config_core_desc->phy_std);

	/* fill out morphing feature */
	_lto2b(CONFIG_FEATURE_CODE_MORPHING,
	    config_morphing_desc->feature_code);
	config_morphing_desc->byte3 = CONFIG_MORPHING_BYTE3;
	config_morphing_desc->length = CONFIG_MORPHING_LENGTH;
	config_morphing_desc->byte5 = CONFIG_MORPHING_BYTE5;

	/* fill out removable media feature */
	_lto2b(CONFIG_FEATURE_CODE_REMOVE_MEDIA,
	    config_remove_media_desc->feature_code);
	config_remove_media_desc->byte3 = CONFIG_REMOVE_MEDIA_BYTE3;
	config_remove_media_desc->length = CONFIG_REMOVE_MEDIA_LENGTH;
	config_remove_media_desc->byte5 = CONFIG_REMOVE_MEDIA_BYTE5;

	/* fill out random read feature */
	_lto2b(CONFIG_FEATURE_CODE_RANDOM_READ,
	    config_random_read_desc->feature_code);
	config_random_read_desc->byte3 = CONFIG_RANDOM_READ_BYTE3;
	config_random_read_desc->length = CONFIG_RANDOM_READ_LENGTH;
	/* clamp reported last block: the field is only 32 bits wide */
	if (dev->n_blocks >= UINT32_MAX)
		_lto4b(UINT32_MAX, config_random_read_desc->block_size);
	else
		_lto4b(dev->n_blocks - 1, config_random_read_desc->block_size);
	_lto2b(CONFIG_RANDOM_READ_BLOCKING_TYPE,
	    config_random_read_desc->blocking_type);

	memset(&resp, 0, sizeof(resp));
	vioscsi_prepare_resp(&resp, VIRTIO_SCSI_S_OK, SCSI_OK, 0, 0, 0);

	/* Move index for response */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc,
	    acct->req_desc, &(acct->resp_idx));

	DPRINTF("%s: writing resp to 0x%llx size %d at local "
	    "idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, &resp, sizeof(resp))) {
		log_warnx("%s: unable to set Ok status data @ 0x%llx",
		    __func__, acct->resp_desc->addr);
		goto free_get_config;
	}

	/* Move index for get_conf_reply */
	acct->resp_desc = vioscsi_next_ring_desc(acct->desc, acct->resp_desc,
	    &(acct->resp_idx));

	DPRINTF("%s: writing get_conf_reply to 0x%llx size %d "
	    "at local idx %d req_idx %d global_idx %d",
	    __func__, acct->resp_desc->addr, acct->resp_desc->len,
	    acct->resp_idx, acct->req_idx, acct->idx);

	if (write_mem(acct->resp_desc->addr, get_conf_reply,
	    G_CONFIG_REPLY_SIZE)) {
		log_warnx("%s: unable to write get_conf_reply"
		    " response to gpa @ 0x%llx",
		    __func__, acct->resp_desc->addr);
	} else {
		ret = 1;
		dev->cfg.isr_status = 1;
		/* Move ring indexes */
		vioscsi_next_ring_item(dev, acct->avail, acct->used,
		    acct->req_desc, acct->req_idx);
	}

free_get_config:
	free(get_conf_reply);
get_config_out:
	return (ret);
}
1647 
1648 int
vioscsi_io(int dir,uint16_t reg,uint32_t * data,uint8_t * intr,void * cookie,uint8_t sz)1649 vioscsi_io(int dir, uint16_t reg, uint32_t *data, uint8_t *intr,
1650     void *cookie, uint8_t sz)
1651 {
1652 	struct vioscsi_dev *dev = (struct vioscsi_dev *)cookie;
1653 
1654 	*intr = 0xFF;
1655 
1656 	DPRINTF("%s: request %s reg %u, %s sz %u", __func__,
1657 	    dir ? "READ" : "WRITE", reg, vioscsi_reg_name(reg), sz);
1658 
1659 	if (dir == 0) {
1660 		switch (reg) {
1661 		case VIRTIO_CONFIG_DEVICE_FEATURES:
1662 		case VIRTIO_CONFIG_QUEUE_SIZE:
1663 		case VIRTIO_CONFIG_ISR_STATUS:
1664 			log_warnx("%s: illegal write %x to %s",
1665 			    __progname, *data, vioscsi_reg_name(reg));
1666 			break;
1667 		case VIRTIO_CONFIG_GUEST_FEATURES:
1668 			dev->cfg.guest_feature = *data;
1669 			DPRINTF("%s: guest feature set to %u",
1670 			    __func__, dev->cfg.guest_feature);
1671 			break;
1672 		case VIRTIO_CONFIG_QUEUE_PFN:
1673 			dev->cfg.queue_pfn = *data;
1674 			vioscsi_update_qa(dev);
1675 			break;
1676 		case VIRTIO_CONFIG_QUEUE_SELECT:
1677 			dev->cfg.queue_select = *data;
1678 			vioscsi_update_qs(dev);
1679 			break;
1680 		case VIRTIO_CONFIG_QUEUE_NOTIFY:
1681 			dev->cfg.queue_notify = *data;
1682 			if (vioscsi_notifyq(dev))
1683 				*intr = 1;
1684 			break;
1685 		case VIRTIO_CONFIG_DEVICE_STATUS:
1686 			dev->cfg.device_status = *data;
1687 			DPRINTF("%s: device status set to %u",
1688 			    __func__, dev->cfg.device_status);
1689 			if (dev->cfg.device_status == 0) {
1690 				log_debug("%s: device reset", __func__);
1691 				dev->cfg.guest_feature = 0;
1692 				dev->cfg.queue_pfn = 0;
1693 				vioscsi_update_qa(dev);
1694 				dev->cfg.queue_size = 0;
1695 				vioscsi_update_qs(dev);
1696 				dev->cfg.queue_select = 0;
1697 				dev->cfg.queue_notify = 0;
1698 				dev->cfg.isr_status = 0;
1699 				dev->vq[0].last_avail = 0;
1700 				dev->vq[1].last_avail = 0;
1701 				dev->vq[2].last_avail = 0;
1702 			}
1703 			break;
1704 		default:
1705 			break;
1706 		}
1707 	} else {
1708 		switch (reg) {
1709 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI:
1710 			/* VIRTIO_SCSI_CONFIG_NUM_QUEUES, 32bit */
1711 			if (sz == 4)
1712 				*data = (uint32_t)VIOSCSI_NUM_QUEUES;
1713 			else if (sz == 1) {
1714 				/* read first byte of num_queues */
1715 				*data &= 0xFFFFFF00;
1716 				*data |= (uint32_t)(VIOSCSI_NUM_QUEUES) & 0xFF;
1717 			}
1718 			break;
1719 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 1:
1720 			if (sz == 1) {
1721 				/* read second byte of num_queues */
1722 				*data &= 0xFFFFFF00;
1723 				*data |=
1724 				    (uint32_t)(VIOSCSI_NUM_QUEUES >> 8) & 0xFF;
1725 			}
1726 			break;
1727 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 2:
1728 			if (sz == 1) {
1729 				/* read third byte of num_queues */
1730 				*data &= 0xFFFFFF00;
1731 				*data |=
1732 				    (uint32_t)(VIOSCSI_NUM_QUEUES >> 16) & 0xFF;
1733 			}
1734 			break;
1735 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 3:
1736 			if (sz == 1) {
1737 				/* read fourth byte of num_queues */
1738 				*data &= 0xFFFFFF00;
1739 				*data |=
1740 				    (uint32_t)(VIOSCSI_NUM_QUEUES >> 24) & 0xFF;
1741 			}
1742 			break;
1743 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 4:
1744 			/* VIRTIO_SCSI_CONFIG_SEG_MAX, 32bit */
1745 			if (sz == 4)
1746 				*data = (uint32_t)(VIOSCSI_SEG_MAX);
1747 			else if (sz == 1) {
1748 				/* read first byte of seg_max */
1749 				*data &= 0xFFFFFF00;
1750 				*data |= (uint32_t)(VIOSCSI_SEG_MAX) & 0xFF;
1751 			}
1752 			break;
1753 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 5:
1754 			if (sz == 1) {
1755 				/* read second byte of seg_max */
1756 				*data &= 0xFFFFFF00;
1757 				*data |=
1758 				    (uint32_t)(VIOSCSI_SEG_MAX >> 8) & 0xFF;
1759 			}
1760 			break;
1761 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 6:
1762 			if (sz == 1) {
1763 				/* read third byte of seg_max */
1764 				*data &= 0xFFFFFF00;
1765 				*data |=
1766 				    (uint32_t)(VIOSCSI_SEG_MAX >> 16) & 0xFF;
1767 			}
1768 			break;
1769 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 7:
1770 			if (sz == 1) {
1771 				/* read fourth byte of seg_max */
1772 				*data &= 0xFFFFFF00;
1773 				*data |=
1774 				    (uint32_t)(VIOSCSI_SEG_MAX >> 24) & 0xFF;
1775 			}
1776 			break;
1777 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 8:
1778 			/* VIRTIO_SCSI_CONFIG_MAX_SECTORS, 32bit */
1779 			if (sz == 4)
1780 				*data = (uint32_t)(dev->max_xfer);
1781 			else if (sz == 1) {
1782 				/* read first byte of max_xfer */
1783 				*data &= 0xFFFFFF00;
1784 				*data |= (uint32_t)(dev->max_xfer) & 0xFF;
1785 			}
1786 			break;
1787 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 9:
1788 			if (sz == 1) {
1789 				/* read second byte of max_xfer */
1790 				*data &= 0xFFFFFF00;
1791 				*data |=
1792 				    (uint32_t)(dev->max_xfer >> 8) & 0xFF;
1793 			}
1794 			break;
1795 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 10:
1796 			if (sz == 1) {
1797 				/* read third byte of max_xfer */
1798 				*data &= 0xFFFFFF00;
1799 				*data |=
1800 				    (uint32_t)(dev->max_xfer >> 16) & 0xFF;
1801 			}
1802 			break;
1803 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 11:
1804 			if (sz == 1) {
1805 				/* read fourth byte of max_xfer */
1806 				*data &= 0xFFFFFF00;
1807 				*data |=
1808 				    (uint32_t)(dev->max_xfer >> 24) & 0xFF;
1809 			}
1810 			break;
1811 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 12:
1812 			/* VIRTIO_SCSI_CONFIG_CMD_PER_LUN, 32bit */
1813 			if (sz == 4)
1814 				*data = (uint32_t)(VIOSCSI_CMD_PER_LUN);
1815 			else if (sz == 1) {
1816 				/* read first byte of cmd_per_lun */
1817 				*data &= 0xFFFFFF00;
1818 				*data |= (uint32_t)(VIOSCSI_CMD_PER_LUN) & 0xFF;
1819 			}
1820 			break;
1821 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 13:
1822 			if (sz == 1) {
1823 				/* read second byte of cmd_per_lun */
1824 				*data &= 0xFFFFFF00;
1825 				*data |=
1826 				    (uint32_t)(VIOSCSI_CMD_PER_LUN >> 8) & 0xFF;
1827 			}
1828 			break;
1829 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 14:
1830 			if (sz == 1) {
1831 				/* read third byte of cmd_per_lun */
1832 				*data &= 0xFFFFFF00;
1833 				*data |= (uint32_t)(VIOSCSI_CMD_PER_LUN >> 16)
1834 				    & 0xFF;
1835 			}
1836 			break;
1837 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 15:
1838 			if (sz == 1) {
1839 				/* read fourth byte of cmd_per_lun */
1840 				*data &= 0xFFFFFF00;
1841 				*data |= (uint32_t)(VIOSCSI_CMD_PER_LUN >> 24)
1842 				    & 0xFF;
1843 			}
1844 			break;
1845 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 16:
1846 			/* VIRTIO_SCSI_CONFIG_EVENT_INFO_SIZE, 32bit */
1847 			*data = 0x00;
1848 			break;
1849 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 20:
1850 			/* VIRTIO_SCSI_CONFIG_SENSE_SIZE, 32bit */
1851 			if (sz == 4)
1852 				*data = (uint32_t)(VIOSCSI_SENSE_LEN);
1853 			else if (sz == 1) {
1854 				/* read first byte of sense_size */
1855 				*data &= 0xFFFFFF00;
1856 				*data |= (uint32_t)(VIOSCSI_SENSE_LEN) & 0xFF;
1857 			}
1858 			break;
1859 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 21:
1860 			if (sz == 1) {
1861 				/* read second byte of sense_size */
1862 				*data &= 0xFFFFFF00;
1863 				*data |=
1864 				    (uint32_t)(VIOSCSI_SENSE_LEN >> 8) & 0xFF;
1865 			}
1866 			break;
1867 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 22:
1868 			if (sz == 1) {
1869 				/* read third byte of sense_size */
1870 				*data &= 0xFFFFFF00;
1871 				*data |=
1872 				    (uint32_t)(VIOSCSI_SENSE_LEN >> 16) & 0xFF;
1873 			}
1874 			break;
1875 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 23:
1876 			if (sz == 1) {
1877 				/* read fourth byte of sense_size */
1878 				*data &= 0xFFFFFF00;
1879 				*data |=
1880 				    (uint32_t)(VIOSCSI_SENSE_LEN >> 24) & 0xFF;
1881 			}
1882 			break;
1883 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 24:
1884 			/* VIRTIO_SCSI_CONFIG_CDB_SIZE, 32bit */
1885 			if (sz == 4)
1886 				*data = (uint32_t)(VIOSCSI_CDB_LEN);
1887 			else if (sz == 1) {
1888 				/* read first byte of cdb_len */
1889 				*data &= 0xFFFFFF00;
1890 				*data |= (uint32_t)(VIOSCSI_CDB_LEN) & 0xFF;
1891 			}
1892 			break;
1893 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 25:
1894 			if (sz == 1) {
1895 				/* read second byte of cdb_len */
1896 				*data &= 0xFFFFFF00;
1897 				*data |=
1898 				    (uint32_t)(VIOSCSI_CDB_LEN >> 8) & 0xFF;
1899 			}
1900 			break;
1901 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 26:
1902 			if (sz == 1) {
1903 				/* read third byte of cdb_len */
1904 				*data &= 0xFFFFFF00;
1905 				*data |=
1906 				    (uint32_t)(VIOSCSI_CDB_LEN >> 16) & 0xFF;
1907 			}
1908 			break;
1909 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 27:
1910 			if (sz == 1) {
1911 				/* read fourth byte of cdb_len */
1912 				*data &= 0xFFFFFF00;
1913 				*data |=
1914 				    (uint32_t)(VIOSCSI_CDB_LEN >> 24) & 0xFF;
1915 			}
1916 			break;
1917 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 28:
1918 			/* VIRTIO_SCSI_CONFIG_MAX_CHANNEL, 16bit */
1919 
1920 			/* defined by standard to be zero */
1921 			*data &= 0xFFFF0000;
1922 			break;
1923 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 29:
1924 			/* defined by standard to be zero */
1925 			*data &= 0xFFFF0000;
1926 			break;
1927 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 30:
1928 			/* VIRTIO_SCSI_CONFIG_MAX_TARGET, 16bit */
1929 			if (sz == 2) {
1930 				*data &= 0xFFFF0000;
1931 				*data |=
1932 				    (uint32_t)(VIOSCSI_MAX_TARGET) & 0xFFFF;
1933 			} else if (sz == 1) {
1934 				/* read first byte of max_target */
1935 				*data &= 0xFFFFFF00;
1936 				*data |=
1937 				    (uint32_t)(VIOSCSI_MAX_TARGET) & 0xFF;
1938 			}
1939 			break;
1940 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 31:
1941 			if (sz == 1) {
1942 				/* read second byte of max_target */
1943 				*data &= 0xFFFFFF00;
1944 				*data |=
1945 				    (uint32_t)(VIOSCSI_MAX_TARGET >> 8) & 0xFF;
1946 			}
1947 			break;
1948 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 32:
1949 			/* VIRTIO_SCSI_CONFIG_MAX_LUN, 32bit */
1950 			if (sz == 4)
1951 				*data = (uint32_t)(VIOSCSI_MAX_LUN);
1952 			else if (sz == 1) {
1953 				/* read first byte of max_lun */
1954 				*data &= 0xFFFFFF00;
1955 				*data |= (uint32_t)(VIOSCSI_MAX_LUN) & 0xFF;
1956 			}
1957 			break;
1958 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 33:
1959 			if (sz == 1) {
1960 				/* read second byte of max_lun */
1961 				*data &= 0xFFFFFF00;
1962 				*data |=
1963 				    (uint32_t)(VIOSCSI_MAX_LUN >> 8) & 0xFF;
1964 			}
1965 			break;
1966 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 34:
1967 			if (sz == 1) {
1968 				/* read third byte of max_lun */
1969 				*data &= 0xFFFFFF00;
1970 				*data |=
1971 				    (uint32_t)(VIOSCSI_MAX_LUN >> 16) & 0xFF;
1972 			}
1973 			break;
1974 		case VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI + 35:
1975 			if (sz == 1) {
1976 				/* read fourth byte of max_lun */
1977 				*data &= 0xFFFFFF00;
1978 				*data |=
1979 				    (uint32_t)(VIOSCSI_MAX_LUN >> 24) & 0xFF;
1980 			}
1981 			break;
1982 		case VIRTIO_CONFIG_DEVICE_FEATURES:
1983 			*data = dev->cfg.device_feature;
1984 			break;
1985 		case VIRTIO_CONFIG_GUEST_FEATURES:
1986 			*data = dev->cfg.guest_feature;
1987 			break;
1988 		case VIRTIO_CONFIG_QUEUE_PFN:
1989 			*data = dev->cfg.queue_pfn;
1990 			break;
1991 		case VIRTIO_CONFIG_QUEUE_SIZE:
1992 			if (sz == 4)
1993 				*data = dev->cfg.queue_size;
1994 			else if (sz == 2) {
1995 				*data &= 0xFFFF0000;
1996 				*data |= (uint16_t)dev->cfg.queue_size;
1997 			} else if (sz == 1) {
1998 				*data &= 0xFFFFFF00;
1999 				*data |= (uint8_t)dev->cfg.queue_size;
2000 			}
2001 			break;
2002 		case VIRTIO_CONFIG_QUEUE_SELECT:
2003 			*data = dev->cfg.queue_select;
2004 			break;
2005 		case VIRTIO_CONFIG_QUEUE_NOTIFY:
2006 			*data = dev->cfg.queue_notify;
2007 			break;
2008 		case VIRTIO_CONFIG_DEVICE_STATUS:
2009 			if (sz == 4)
2010 				*data = dev->cfg.device_status;
2011 			else if (sz == 2) {
2012 				*data &= 0xFFFF0000;
2013 				*data |= (uint16_t)dev->cfg.device_status;
2014 			} else if (sz == 1) {
2015 				*data &= 0xFFFFFF00;
2016 				*data |= (uint8_t)dev->cfg.device_status;
2017 			}
2018 			break;
2019 		case VIRTIO_CONFIG_ISR_STATUS:
2020 			*data = dev->cfg.isr_status;
2021 			dev->cfg.isr_status = 0;
2022 			break;
2023 		}
2024 	}
2025 
2026 
2027 	return (0);
2028 }
2029 
2030 void
vioscsi_update_qs(struct vioscsi_dev * dev)2031 vioscsi_update_qs(struct vioscsi_dev *dev)
2032 {
2033 	struct virtio_vq_info *vq_info;
2034 
2035 	/* Invalid queue? */
2036 	if (dev->cfg.queue_select >= VIRTIO_MAX_QUEUES) {
2037 		dev->cfg.queue_size = 0;
2038 		return;
2039 	}
2040 
2041 	vq_info = &dev->vq[dev->cfg.queue_select];
2042 
2043 	/* Update queue pfn/size based on queue select */
2044 	dev->cfg.queue_pfn = vq_info->q_gpa >> 12;
2045 	dev->cfg.queue_size = vq_info->qs;
2046 }
2047 
2048 void
vioscsi_update_qa(struct vioscsi_dev * dev)2049 vioscsi_update_qa(struct vioscsi_dev *dev)
2050 {
2051 	struct virtio_vq_info *vq_info;
2052 	void *hva = NULL;
2053 
2054 	/* Invalid queue? */
2055 	if (dev->cfg.queue_select >= VIRTIO_MAX_QUEUES)
2056 		return;
2057 
2058 	vq_info = &dev->vq[dev->cfg.queue_select];
2059 	vq_info->q_gpa = (uint64_t)dev->cfg.queue_pfn * VIRTIO_PAGE_SIZE;
2060 
2061 	hva = hvaddr_mem(vq_info->q_gpa, vring_size(VIOSCSI_QUEUE_SIZE));
2062 	if (hva == NULL)
2063 		fatal("vioscsi_update_qa");
2064 	vq_info->q_hva = hva;
2065 }
2066 
2067 /*
2068  * Process message(s) in the queue(s)
2069  * vioscsi driver will be placing the following in the queue for each iteration
2070  * virtio_scsi_req_hdr with a possible SCSI_DATA_OUT buffer
2071  * along with a virtio_scsi_res_hdr with a possible SCSI_DATA_IN buffer
2072  * for consumption.
2073  *
2074  * Return 1 if an interrupt should be generated (response written)
2075  *        0 otherwise
2076  */
int
vioscsi_notifyq(struct vioscsi_dev *dev)
{
	int cnt, ret = 0;
	char *vr;
	struct virtio_scsi_req_hdr req;
	struct virtio_scsi_res_hdr resp;
	struct virtio_vq_acct acct;
	struct virtio_vq_info *vq_info;

	/* NOTE(review): redundant — ret is already initialized above. */
	ret = 0;

	/* Invalid queue? */
	if (dev->cfg.queue_notify >= VIRTIO_MAX_QUEUES)
		return (ret);

	vq_info = &dev->vq[dev->cfg.queue_notify];
	vr = vq_info->q_hva;
	if (vr == NULL)
		fatalx("%s: null vring", __func__);

	/* Compute offsets in ring of descriptors, avail ring, and used ring */
	acct.desc = (struct vring_desc *)(vr);
	acct.avail = (struct vring_avail *)(vr + vq_info->vq_availoffset);
	acct.used = (struct vring_used *)(vr + vq_info->vq_usedoffset);

	/* Resume scanning from where the previous notify left off. */
	acct.idx = vq_info->last_avail & VIOSCSI_QUEUE_MASK;

	/* Spurious notify: guest rang the doorbell with nothing queued. */
	if ((acct.avail->idx & VIOSCSI_QUEUE_MASK) == acct.idx) {
		log_debug("%s - nothing to do?", __func__);
		return (0);
	}

	cnt = 0;
	/* Drain every request the guest has made available. */
	while (acct.idx != (acct.avail->idx & VIOSCSI_QUEUE_MASK)) {

		/* Guard against infinite descriptor chains */
		if (++cnt >= VIOSCSI_QUEUE_SIZE) {
			log_warnx("%s: invalid descriptor table", __func__);
			goto out;
		}

		/* Head of this request's descriptor chain. */
		acct.req_idx = acct.avail->ring[acct.idx] & VIOSCSI_QUEUE_MASK;
		acct.req_desc = &(acct.desc[acct.req_idx]);

		/* Clear resp for next message */
		memset(&resp, 0, sizeof(resp));

		/*
		 * A request must chain to at least a response descriptor;
		 * an unchained head is malformed, so stop processing.
		 */
		if ((acct.req_desc->flags & VRING_DESC_F_NEXT) == 0) {
			log_warnx("%s: unchained req descriptor received "
			    "(idx %d)", __func__, acct.req_idx);
			goto out;
		}

		/* Read command from descriptor ring */
		if (read_mem(acct.req_desc->addr, &req, sizeof(req))) {
			log_warnx("%s: command read_mem error @ 0x%llx",
			    __func__, acct.req_desc->addr);
			goto out;
		}

		/*
		 * req.lun is defined by virtio as
		 * lun[0] - Always set to 1
		 * lun[1] - Target, negotiated as VIOSCSI_MAX_TARGET
		 * lun[2-3] - represent single level LUN structure
		 * lun[4-7] - Zero
		 * At this current time, we are only servicing one device per
		 * bus (1:0:X:0).
		 *
		 * Various implementations will attempt to scan all possible
		 * targets (256) looking for devices or scan for all possible
		 * LUNs in a single level.  When Target is greater than
		 * VIOSCSI_MAX_TARGET or when lun[3] is greater than zero,
		 * respond with a BAD_TARGET response.
		 */
		if (req.lun[1] >= VIOSCSI_MAX_TARGET || req.lun[3] > 0) {
			DPRINTF("%s: Ignore CMD 0x%02x,%s on lun %u:%u:%u:%u",
			    __func__, req.cdb[0], vioscsi_op_names(req.cdb[0]),
			    req.lun[0], req.lun[1], req.lun[2], req.lun[3]);
			/* Move index for response */
			acct.resp_desc = vioscsi_next_ring_desc(acct.desc,
			    acct.req_desc, &(acct.resp_idx));

			vioscsi_prepare_resp(&resp,
			    VIRTIO_SCSI_S_BAD_TARGET, SCSI_OK, 0, 0, 0);

			/* Don't overrun the guest's response buffer. */
			if (acct.resp_desc->len > sizeof(resp)) {
				log_warnx("%s: invalid descriptor length",
				    __func__);
				goto out;
			}
			if (write_mem(acct.resp_desc->addr, &resp,
				sizeof(resp))) {
				log_warnx("%s: unable to write BAD_TARGET"
				    " resp status data @ 0x%llx",
				    __func__, acct.resp_desc->addr);
				goto out;
			}

			/* Response written: request an interrupt. */
			ret = 1;
			dev->cfg.isr_status = 1;

			/* Move ring indexes (updates the used ring index) */
			vioscsi_next_ring_item(dev, acct.avail, acct.used,
			    acct.req_desc, acct.req_idx);
			goto next_msg;
		}

		DPRINTF("%s: Queue %d id 0x%llx lun %u:%u:%u:%u"
		    " cdb OP 0x%02x,%s",
		    __func__, dev->cfg.queue_notify, req.id,
		    req.lun[0], req.lun[1], req.lun[2], req.lun[3],
		    req.cdb[0], vioscsi_op_names(req.cdb[0]));

		/* opcode is first byte */
		switch (req.cdb[0]) {
		case TEST_UNIT_READY:
		case START_STOP:
			ret = vioscsi_handle_tur(dev, &req, &acct);
			break;
		case PREVENT_ALLOW:
			ret = vioscsi_handle_prevent_allow(dev, &req, &acct);
			break;
		case READ_TOC:
			ret = vioscsi_handle_read_toc(dev, &req, &acct);
			break;
		case READ_CAPACITY:
			ret = vioscsi_handle_read_capacity(dev, &req, &acct);
			break;
		case READ_CAPACITY_16:
			ret = vioscsi_handle_read_capacity_16(dev, &req, &acct);
			break;
		case READ_COMMAND:
			ret = vioscsi_handle_read_6(dev, &req, &acct);
			break;
		case READ_10:
			ret = vioscsi_handle_read_10(dev, &req, &acct);
			break;
		case INQUIRY:
			ret = vioscsi_handle_inquiry(dev, &req, &acct);
			break;
		case MODE_SENSE:
			ret = vioscsi_handle_mode_sense(dev, &req, &acct);
			break;
		case MODE_SENSE_BIG:
			ret = vioscsi_handle_mode_sense_big(dev, &req, &acct);
			break;
		case GET_EVENT_STATUS_NOTIFICATION:
			ret = vioscsi_handle_gesn(dev, &req, &acct);
			break;
		case READ_DISC_INFORMATION:
			ret = vioscsi_handle_read_disc_info(dev, &req, &acct);
			break;
		case GET_CONFIGURATION:
			ret = vioscsi_handle_get_config(dev, &req, &acct);
			break;
		case MECHANISM_STATUS:
			ret = vioscsi_handle_mechanism_status(dev, &req, &acct);
			break;
		case REPORT_LUNS:
			ret = vioscsi_handle_report_luns(dev, &req, &acct);
			break;
		default:
			/*
			 * NOTE(review): the descriptor is consumed (used ring
			 * advanced) but no response is written and no
			 * interrupt is requested for this request — presumably
			 * intentional best-effort handling; confirm against
			 * the virtio-scsi contract if guests stall here.
			 */
			log_warnx("%s: unsupported opcode 0x%02x,%s",
			    __func__, req.cdb[0], vioscsi_op_names(req.cdb[0]));
			/* Move ring indexes */
			vioscsi_next_ring_item(dev, acct.avail, acct.used,
			    acct.req_desc, acct.req_idx);
			break;
		}
next_msg:
		/* Increment to the next queue slot */
		acct.idx = (acct.idx + 1) & VIOSCSI_QUEUE_MASK;
	}
out:
	/*
	 * On an early bail-out, ret reflects whatever was decided for the
	 * requests completed so far (possibly 0).
	 */
	return (ret);
}
2255