1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 #include <sys/sdt.h>
38 
39 #include <stmf.h>
40 #include <lpif.h>
41 #include <portif.h>
42 #include <stmf_ioctl.h>
43 #include <stmf_sbd.h>
44 #include <sbd_impl.h>
45 
46 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
47 static void sbd_handle_sync_cache(struct scsi_task *task,
48     struct stmf_data_buf *initial_dbuf);
49 void sbd_handle_read_xfer_completion(struct scsi_task *task,
50     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
51 
52 /*
53  * IMPORTANT NOTE:
54  * =================
55  * The whole world here is based on the assumption that everything within
56  * a scsi task executes in a single threaded manner, even the aborts.
57  * Dont ever change that. There wont be any performance gain but there
58  * will be tons of race conditions.
59  */
60 
/*
 * Move the next chunk of READ data for 'scmd' out to the remote port.
 *
 * Fills 'dbuf' from the backing store starting at the command's current
 * relative offset, posts the transfer via stmf_xfer_data(), and — while
 * more data remains and we are under our per-task buffer budget —
 * allocates further dbufs and recurses so several transfers can be in
 * flight at once. Completion is driven by
 * sbd_handle_read_xfer_completion().
 */
void
sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
					struct stmf_data_buf *dbuf)
{
	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
	uint64_t laddr;
	uint32_t len, buflen, iolen;
	int ndx;
	int bufs_to_take;

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	/* Bytes to move this round: remaining length capped by dbuf size. */
	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
	/* Absolute byte address in the backing store for the current offset. */
	laddr = scmd->addr + scmd->current_ro + slu->sl_sli->sli_lu_data_offset;

	/* Fill each scatter/gather segment from the backing store. */
	for (buflen = 0, ndx = 0; (buflen < len) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sst->sst_data_read(sst, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			/* Do not need to do xfer anymore, just complete it */
			dbuf->db_data_size = 0;
			dbuf->db_xfer_status = STMF_SUCCESS;
			sbd_handle_read_xfer_completion(task, scmd, dbuf);
			return;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = buflen;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	/* Account for the bytes now in flight before looking for more work. */
	scmd->len -= buflen;
	scmd->current_ro += buflen;
	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
		uint32_t maxsize, minsize, old_minsize;

		/* Cap any single dbuf at 128k. */
		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/*
			 * A bad port implementation can keep on failing the
			 * request but keep on sending us a false minsize;
			 * only retry while minsize genuinely shrinks.
			 */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Transfers already in flight will finish the job. */
			return;
		}
		scmd->nbufs++;
		sbd_do_read_xfer(task, scmd, dbuf);
	}
}
125 
/*
 * Called when a READ data transfer posted by sbd_do_read_xfer() finishes.
 *
 * On transport failure the task is aborted. When the command is complete
 * (or a backing-store read failed earlier), final status is sent only once
 * the last outstanding buffer drains (nbufs reaches 0). Otherwise the dbuf
 * is reused — or replaced if the port marked it DB_DONT_REUSE — to move
 * the remaining data.
 */
void
sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
				struct stmf_data_buf *dbuf)
{
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE) {
		/* allocate new dbuf */
		uint32_t maxsize, minsize, old_minsize;
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/* Retry only while the port shrinks minsize. */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only if no other in-flight buffer remains. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_read_xfer(task, scmd, dbuf);
}
172 
/*
 * Entry point for the READ(6/10/12/16) commands.
 *
 * Decodes LBA and transfer length from the CDB, validates the byte range
 * against the LU data size, then either satisfies the whole request with a
 * single transfer out of one sglist segment (fast path) or sets up a
 * sbd_cmd_t and streams the data through sbd_do_read_xfer().
 */
void
sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0];
	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;
	int fast_path;

	if (op == SCMD_READ) {
		/* READ(6): 21-bit LBA, 1-byte length. */
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* Per SBC, a length of 0 in READ(6) means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_READ_G1) {
		/* READ(10) */
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_READ_G5) {
		/* READ(12) */
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_READ_G4) {
		/* READ(16) */
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to bytes. */
	laddr = lba << slu->sl_shift_count;
	len <<= slu->sl_shift_count;

	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/*
	 * If the initiator expects fewer bytes than the CDB asks for, clip
	 * the transfer and disable the fast path.
	 */
	if (len != task->task_expected_xfer_length) {
		fast_path = 0;
		len = (len > task->task_expected_xfer_length) ?
		    task->task_expected_xfer_length : len;
	} else {
		fast_path = 1;
	}

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
	}
	dbuf = initial_dbuf;

	/*
	 * Fast path: the whole request fits in one contiguous segment, so
	 * do a single read + single transfer and let the port send status.
	 */
	if ((dbuf->db_buf_size >= len) && fast_path &&
	    (dbuf->db_sglist_length == 1)) {
		if (sst->sst_data_read(sst,
		    laddr + slu->sl_sli->sli_lu_data_offset, (uint64_t)len,
		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
			dbuf->db_relative_offset = 0;
			dbuf->db_data_size = len;
			dbuf->db_flags = DB_SEND_STATUS_GOOD |
			    DB_DIRECTION_TO_RPORT;
			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
		} else {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		}
		return;
	}

	/* Slow path: track progress in a per-task sbd_cmd_t. */
	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_READ;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	sbd_do_read_xfer(task, scmd, dbuf);
}
284 
/*
 * Request the next chunk of WRITE data from the remote port.
 *
 * Posts 'dbuf' for a FROM_RPORT transfer covering the command's current
 * relative offset, then — while more data remains and we are under the
 * buffer budget — allocates additional dbufs and recurses so several
 * inbound transfers can be outstanding. The data itself is written to the
 * backing store in sbd_handle_write_xfer_completion().
 */
void
sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
					struct stmf_data_buf *dbuf)
{
	uint32_t len;
	int bufs_to_take;

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	/* Bytes to request this round: remaining length capped by dbuf size. */
	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;

	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = len;
	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	/* Account for the bytes now being fetched. */
	scmd->len -= len;
	scmd->current_ro += len;
	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/* Retry only while the port shrinks minsize. */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Outstanding transfers will drive the rest. */
			return;
		}
		scmd->nbufs++;
		sbd_do_write_xfer(task, scmd, dbuf);
	}
}
322 
/*
 * Called when a WRITE data transfer posted by sbd_do_write_xfer() (or the
 * initial burst from sbd_handle_write()) completes.
 *
 * Writes the received data to the backing store segment by segment. On
 * transport failure the task is aborted; on a store write failure the
 * command is flagged SBD_SCSI_CMD_XFER_FAIL and drained. Final status is
 * sent only after the last outstanding buffer completes; otherwise the
 * dbuf is reused (or replaced when DB_DONT_REUSE / !dbuf_reusable) to
 * fetch more data.
 */
void
sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
	uint64_t laddr;
	uint32_t buflen, iolen;
	int ndx;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* A previous buffer already failed; just drain this one. */
	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto WRITE_XFER_DONE;
	}

	/* Absolute byte address in the store for this buffer's offset. */
	laddr = scmd->addr + dbuf->db_relative_offset +
	    slu->sl_sli->sli_lu_data_offset;

	/* Commit each scatter/gather segment to the backing store. */
	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sst->sst_data_write(sst, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			break;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;
WRITE_XFER_DONE:
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE || dbuf_reusable == 0) {
		uint32_t maxsize, minsize, old_minsize;
		/* free current dbuf and allocate a new one */
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/* Retry only while the port shrinks minsize. */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only if no other in-flight buffer remains. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_write_xfer(task, scmd, dbuf);
}
398 
/*
 * Entry point for the WRITE(6/10/12/16) commands.
 *
 * Decodes LBA and transfer length from the CDB, validates the byte range
 * against the LU data size, then sets up a sbd_cmd_t and fetches the data
 * from the remote port via sbd_do_write_xfer(). If the port delivered an
 * initial burst of unsolicited data, that data is consumed first through
 * sbd_handle_write_xfer_completion().
 */
void
sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;

	if (op == SCMD_WRITE) {
		/* WRITE(6): 21-bit LBA, 1-byte length. */
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* Per SBC, a length of 0 in WRITE(6) means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_WRITE_G1) {
		/* WRITE(10) */
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_WRITE_G5) {
		/* WRITE(12) */
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_WRITE_G4) {
		/* WRITE(16) */
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to bytes. */
	laddr = lba << slu->sl_shift_count;
	len <<= slu->sl_shift_count;

	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Never fetch more than the initiator expects to send. */
	len = (len > task->task_expected_xfer_length) ?
	    task->task_expected_xfer_length : len;

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
	} else if (task->task_flags & TF_INITIAL_BURST) {
		/* Port already delivered unsolicited data with the command. */
		if (initial_dbuf->db_data_size > len) {
			if (initial_dbuf->db_data_size >
			    task->task_expected_xfer_length) {
				/* protocol error */
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_INVALID_ARG, NULL);
				return;
			}
			/* Clip the burst to what this command will use. */
			initial_dbuf->db_data_size = len;
		}
		do_immediate_data = 1;
	}
	dbuf = initial_dbuf;

	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	if (do_immediate_data) {
		/* Consume the burst as if its transfer just completed. */
		scmd->len -= dbuf->db_data_size;
		scmd->current_ro += dbuf->db_data_size;
		dbuf->db_xfer_status = STMF_SUCCESS;
		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
	} else {
		sbd_do_write_xfer(task, scmd, dbuf);
	}
}
507 
508 /*
509  * Utility routine to handle small non performance data transfers to the
510  * initiators. dbuf is an initial data buf (if any), 'p' points to a data
511  * buffer which is source of data for transfer, cdb_xfer_size is the
512  * transfer size based on CDB, cmd_xfer_size is the actual amount of data
513  * which this command would transfer (the size of data pointed to by 'p').
514  */
void
sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
    uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
{
	uint32_t bufsize, ndx;
	sbd_cmd_t *scmd;

	/* Never send more than the CDB allocation length permits. */
	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);

	task->task_cmd_xfer_length = cmd_xfer_size;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = cmd_xfer_size;
	} else {
		/* ... nor more than the initiator expects. */
		cmd_xfer_size = min(cmd_xfer_size,
		    task->task_expected_xfer_length);
	}

	if (cmd_xfer_size == 0) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}
	if (dbuf == NULL) {
		/* minsize == full size: all-or-nothing allocation. */
		uint32_t minsize = cmd_xfer_size;

		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
	}
	if (dbuf == NULL) {
		stmf_scsilib_send_status(task, STATUS_QFULL, 0);
		return;
	}

	/*
	 * NOTE(review): this loop bounds only on cmd_xfer_size and assumes
	 * the dbuf's sglist covers the whole transfer — confirm that
	 * db_sglist_length always suffices for short transfers.
	 */
	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
		uint8_t *d;
		uint32_t s;

		d = dbuf->db_sglist[ndx].seg_addr;
		s = min((cmd_xfer_size - bufsize),
		    dbuf->db_sglist[ndx].seg_length);
		bcopy(p+bufsize, d, s);
		bufsize += s;
	}
	dbuf->db_relative_offset = 0;
	dbuf->db_data_size = cmd_xfer_size;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;

	if (task->task_lu_private == NULL) {
		task->task_lu_private =
		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
	}
	scmd = (sbd_cmd_t *)task->task_lu_private;

	/* Completion is handled by sbd_handle_short_read_xfer_completion(). */
	scmd->cmd_type = SBD_CMD_SMALL_READ;
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	(void) stmf_xfer_data(task, dbuf, 0);
}
571 
572 void
573 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
574 				struct stmf_data_buf *dbuf)
575 {
576 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
577 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
578 		    dbuf->db_xfer_status, NULL);
579 		return;
580 	}
581 	task->task_nbytes_transferred = dbuf->db_data_size;
582 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
583 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
584 }
585 
586 void
587 sbd_handle_read_capacity(struct scsi_task *task,
588     struct stmf_data_buf *initial_dbuf)
589 {
590 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
591 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
592 	sbd_lu_info_t *sli = slu->sl_sli;
593 	uint32_t cdb_len;
594 	uint8_t p[32];
595 	uint64_t s;
596 
597 	s = sli->sli_lu_data_size >> slu->sl_shift_count;
598 	s--;
599 	switch (task->task_cdb[0]) {
600 	case SCMD_READ_CAPACITY:
601 		if (s & 0xffffffff00000000ull) {
602 			p[0] = p[1] = p[2] = p[3] = 0xFF;
603 		} else {
604 			p[0] = (s >> 24) & 0xff;
605 			p[1] = (s >> 16) & 0xff;
606 			p[2] = (s >> 8) & 0xff;
607 			p[3] = s & 0xff;
608 		}
609 		p[4] = 0; p[5] = 0;
610 		p[6] = (sli->sli_blocksize >> 8) & 0xff;
611 		p[7] = sli->sli_blocksize & 0xff;
612 		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
613 		return;
614 
615 	case SCMD_SVC_ACTION_IN_G4:
616 		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
617 		bzero(p, 32);
618 		p[0] = (s >> 56) & 0xff;
619 		p[1] = (s >> 48) & 0xff;
620 		p[2] = (s >> 40) & 0xff;
621 		p[3] = (s >> 32) & 0xff;
622 		p[4] = (s >> 24) & 0xff;
623 		p[5] = (s >> 16) & 0xff;
624 		p[6] = (s >> 8) & 0xff;
625 		p[7] = s & 0xff;
626 		p[10] = (sli->sli_blocksize >> 8) & 0xff;
627 		p[11] = sli->sli_blocksize & 0xff;
628 		sbd_handle_short_read_transfers(task, initial_dbuf, p,
629 		    cdb_len, 32);
630 		return;
631 	}
632 }
633 
/*
 * Canned MODE SENSE data. sbd_p3 is mode page 0x03 (format device),
 * sbd_p4 is page 0x04 (rigid disk geometry) and sbd_pa is page 0x0A
 * (control mode); each starts with its page code and a 0x16/0x0a page
 * length. sbd_bd is an 8-byte block descriptor whose last three bytes
 * (0x000200) advertise a 512-byte block length.
 */
static uint8_t sbd_p3[] =
	{3, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 2, 0, 0, 0,
	    0, 0, 0, 0, 0x80, 0, 0, 0};
static uint8_t sbd_p4[] =
	{4, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	    0, 0, 0, 0, 0x15, 0x18, 0, 0};
static uint8_t sbd_pa[] = {0xa, 0xa, 0, 0x10, 0, 0, 0, 0, 0, 0, 0, 0};
static uint8_t sbd_bd[] = {0, 0, 0, 0, 0, 0, 0x02, 0};
642 
/*
 * Handle MODE SENSE(6) and MODE SENSE(10).
 *
 * Supports pages 0x03 (format), 0x04 (geometry), 0x0A (control) and
 * MODEPAGE_ALLPAGES; anything else gets INVALID FIELD IN CDB. The reply
 * is assembled in payload_buf (header, optional block descriptor for the
 * all-pages case, then the page data) and sent through
 * sbd_handle_short_read_transfers().
 */
void
sbd_handle_mode_sense(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf)
{
	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
	sbd_lu_info_t *sli = slu->sl_sli;
	uint32_t cmd_size, hdrsize, xfer_size, ncyl;
	/* Big enough for header (8) + block descriptor (8) + p3+p4+pa. */
	uint8_t payload_buf[8 + 8 + 24 + 24 + 12];
	uint8_t *payload, *p;
	uint8_t ctrl, page;
	uint16_t ps;
	uint64_t s = sli->sli_lu_data_size;
	uint8_t dbd;

	p = &task->task_cdb[0];
	page = p[2] & 0x3F;
	ctrl = (p[2] >> 6) & 3;	/* PC field: page control */
	dbd = p[1] & 0x08;	/* DBD: disable block descriptors */

	/* MODE SENSE(6) has a 4-byte header, MODE SENSE(10) an 8-byte one. */
	hdrsize = (p[0] == SCMD_MODE_SENSE) ? 4 : 8;

	cmd_size = (p[0] == SCMD_MODE_SENSE) ? p[4] :
	    READ_SCSI16(&p[7], uint32_t);

	/* ps = total size of the reply we could produce, header included. */
	switch (page) {
	case 0x03:
		ps = hdrsize + sizeof (sbd_p3);
		break;
	case 0x04:
		ps = hdrsize + sizeof (sbd_p4);
		break;
	case 0x0A:
		ps = hdrsize + sizeof (sbd_pa);
		break;
	case MODEPAGE_ALLPAGES:
		ps = hdrsize + sizeof (sbd_p3) + sizeof (sbd_p4)
		    + sizeof (sbd_pa);

		/*
		 * If the buffer is big enough, include the block
		 * descriptor; otherwise, leave it out.
		 */
		if (cmd_size < ps) {
			dbd = 1;
		}

		if (dbd == 0) {
			ps += 8;
		}

		break;
	default:
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	xfer_size = min(cmd_size, ps);

	/* Reject truncated headers, unsupported PC=1, or over-ask. */
	if ((xfer_size < hdrsize) || (ctrl == 1) ||
	    (((task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) == 0) &&
	    (xfer_size > task->task_expected_xfer_length))) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	bzero(payload_buf, xfer_size);

	/* Mode data length field excludes itself (1 or 2 bytes). */
	if (p[0] == SCMD_MODE_SENSE) {
		payload_buf[0] = ps - 1;
	} else {
		ps -= 2;
		*((uint16_t *)payload_buf) = BE_16(ps);
	}

	payload = payload_buf + hdrsize;

	switch (page) {
	case 0x03:
		bcopy(sbd_p3, payload, sizeof (sbd_p3));
		break;

	case 0x0A:
		bcopy(sbd_pa, payload, sizeof (sbd_pa));
		break;

	case MODEPAGE_ALLPAGES:
		if (dbd == 0) {
			/* Block descriptor length goes in the header. */
			payload_buf[3] = sizeof (sbd_bd);
			bcopy(sbd_bd, payload, sizeof (sbd_bd));
			payload += sizeof (sbd_bd);
		}

		bcopy(sbd_p3, payload, sizeof (sbd_p3));
		payload += sizeof (sbd_p3);
		bcopy(sbd_pa, payload, sizeof (sbd_pa));
		payload += sizeof (sbd_pa);
		/* FALLTHROUGH */

	case 0x04:
		bcopy(sbd_p4, payload, sizeof (sbd_p4));

		/*
		 * Fake a geometry: pick a head count, then derive the
		 * cylinder count from capacity assuming 32 sectors/track
		 * and 512-byte sectors.
		 */
		if (s > 1024 * 1024 * 1024) {
			payload[5] = 16;
		} else {
			payload[5] = 2;
		}
		ncyl = (uint32_t)((s/(((uint64_t)payload[5]) * 32 * 512)) + 1);
		payload[4] = (uchar_t)ncyl;
		payload[3] = (uchar_t)(ncyl >> 8);
		payload[2] = (uchar_t)(ncyl >> 16);
		break;

	}

	sbd_handle_short_read_transfers(task, initial_dbuf, payload_buf,
	    cmd_size, xfer_size);
}
764 
765 
/*
 * Handle the INQUIRY command.
 *
 * 'p' is a caller-supplied scratch buffer of 'bsize' bytes used to build
 * the response. Serves standard INQUIRY data plus EVPD pages 0x00
 * (supported pages), 0x83 (device identification, built by stmf) and
 * 0x86 (extended inquiry); everything else is INVALID FIELD IN CDB.
 */
void
sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf,
			uint8_t *p, int bsize)
{
	uint8_t		*cdbp = (uint8_t *)&task->task_cdb[0];
	uint32_t	 cmd_size;
	uint8_t		 page_length;

	/*
	 * Basic protocol checks.
	 */

	/* Page code without EVPD set, or a non-zero control byte, is bad. */
	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/*
	 * Zero byte allocation length is not an error.  Just
	 * return success.
	 */

	cmd_size = (((uint32_t)cdbp[3]) << 8) | cdbp[4];

	if (cmd_size == 0) {
		task->task_cmd_xfer_length = 0;
		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length = 0;
		}
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/*
	 * Standard inquiry
	 */

	if ((cdbp[1] & 1) == 0) {
		struct scsi_inquiry *inq = (struct scsi_inquiry *)p;

		/* 36-byte standard response: 5-byte header + 31 bytes. */
		page_length = 31;
		bzero(inq, page_length + 5);

		inq->inq_dtype = 0;	/* direct-access block device */
		inq->inq_ansi = 5;	/* SPC-3 */
		inq->inq_hisup = 1;
		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
		inq->inq_len = page_length;

		inq->inq_tpgs = 1;	/* implicit ALUA supported */

		inq->inq_cmdque = 1;	/* command queuing supported */

		(void) strncpy((char *)inq->inq_vid, "SUN     ", 8);
		(void) strncpy((char *)inq->inq_pid, "COMSTAR         ", 16);
		(void) strncpy((char *)inq->inq_revision, "1.0 ", 4);

		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
		    min(cmd_size, page_length + 5));

		return;
	}

	/*
	 * EVPD handling
	 */

	switch (cdbp[2]) {
	case 0x00:
		page_length = 3;

		bzero(p, page_length + 4);

		p[0] = 0;
		p[3] = page_length;	/* we support 3 pages, 0, 0x83, 0x86 */
		p[5] = 0x83;
		p[6] = 0x86;

		break;

	case 0x83:

		/* stmf returns the full page size; subtract the header. */
		page_length = stmf_scsilib_prepare_vpd_page83(task, p,
		    bsize, 0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID) - 4;
		break;

	case 0x86:
		page_length = 0x3c;

		bzero(p, page_length + 4);

		p[0] = 0;
		p[1] = 0x86;		/* Page 86 response */
		p[3] = page_length;

		/*
		 * Bits 0, 1, and 2 will need to be updated
		 * to reflect the queue tag handling if/when
		 * that is implemented.  For now, we're going
		 * to claim support only for Simple TA.
		 */
		p[5] = 1;

		break;

	default:
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
	    min(cmd_size, page_length + 4));
}
883 
884 stmf_status_t
885 sbd_task_alloc(struct scsi_task *task)
886 {
887 	if ((task->task_lu_private =
888 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
889 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
890 		scmd->flags = 0;
891 		return (STMF_SUCCESS);
892 	}
893 	return (STMF_ALLOC_FAILURE);
894 }
895 
896 void
897 sbd_remove_it_handle(sbd_lu_t *slu, sbd_it_data_t *it)
898 {
899 	sbd_it_data_t **ppit;
900 
901 	mutex_enter(&slu->sl_it_list_lock);
902 	for (ppit = &slu->sl_it_list; *ppit != NULL;
903 	    ppit = &((*ppit)->sbd_it_next)) {
904 		if ((*ppit) == it) {
905 			*ppit = it->sbd_it_next;
906 			break;
907 		}
908 	}
909 	mutex_exit(&slu->sl_it_list_lock);
910 
911 	DTRACE_PROBE2(itl__nexus__end, stmf_lu_t *, slu->sl_lu,
912 	    sbd_it_data_t *, it);
913 
914 	kmem_free(it, sizeof (*it));
915 }
916 
917 void
918 sbd_check_and_clear_scsi2_reservation(sbd_lu_t *slu, sbd_it_data_t *it)
919 {
920 	mutex_enter(&slu->sl_it_list_lock);
921 	if ((slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) == 0) {
922 		/* If we dont have any reservations, just get out. */
923 		mutex_exit(&slu->sl_it_list_lock);
924 		return;
925 	}
926 
927 	if (it == NULL) {
928 		/* Find the I_T nexus which is holding the reservation. */
929 		for (it = slu->sl_it_list; it != NULL; it = it->sbd_it_next) {
930 			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
931 				ASSERT(it->sbd_it_session_id ==
932 				    slu->sl_rs_owner_session_id);
933 				break;
934 			}
935 		}
936 		ASSERT(it != NULL);
937 	} else {
938 		/*
939 		 * We were passed an I_T nexus. If this nexus does not hold
940 		 * the reservation, do nothing. This is why this function is
941 		 * called "check_and_clear".
942 		 */
943 		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
944 			mutex_exit(&slu->sl_it_list_lock);
945 			return;
946 		}
947 	}
948 	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
949 	slu->sl_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
950 	mutex_exit(&slu->sl_it_list_lock);
951 }
952 
953 /*
954  * returns non-zero, if this command can be allowed to run even if the
955  * lu has been reserved by another initiator.
956  */
957 int
958 sbd_reserve_allow(scsi_task_t *task)
959 {
960 	uint8_t cdb0 = task->task_cdb[0];
961 	uint8_t cdb1 = task->task_cdb[1];
962 
963 	if ((cdb0 == SCMD_INQUIRY) || (cdb0 == SCMD_READ_CAPACITY) ||
964 	    ((cdb0 == SCMD_SVC_ACTION_IN_G4) &&
965 	    (cdb1 == SSVC_ACTION_READ_CAPACITY_G4))) {
966 		return (1);
967 	}
968 	return (0);
969 }
970 
971 void
972 sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
973 {
974 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
975 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
976 	sbd_it_data_t *it;
977 	uint8_t cdb0, cdb1;
978 
979 	if ((it = task->task_lu_itl_handle) == NULL) {
980 		mutex_enter(&slu->sl_it_list_lock);
981 		for (it = slu->sl_it_list; it != NULL; it = it->sbd_it_next) {
982 			if (it->sbd_it_session_id ==
983 			    task->task_session->ss_session_id) {
984 				mutex_exit(&slu->sl_it_list_lock);
985 				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
986 				return;
987 			}
988 		}
989 		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
990 		if (it == NULL) {
991 			mutex_exit(&slu->sl_it_list_lock);
992 			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
993 			return;
994 		}
995 		it->sbd_it_session_id = task->task_session->ss_session_id;
996 		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
997 		it->sbd_it_next = slu->sl_it_list;
998 		slu->sl_it_list = it;
999 		mutex_exit(&slu->sl_it_list_lock);
1000 
1001 		DTRACE_PROBE1(itl__nexus__start, scsi_task *, task);
1002 
1003 		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
1004 		    task->task_session, it->sbd_it_session_id, it)
1005 		    != STMF_SUCCESS) {
1006 			sbd_remove_it_handle(slu, it);
1007 			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
1008 			return;
1009 		}
1010 		task->task_lu_itl_handle = it;
1011 		it->sbd_it_ua_conditions = SBD_UA_POR;
1012 	}
1013 
1014 	if (task->task_mgmt_function) {
1015 		stmf_scsilib_handle_task_mgmt(task);
1016 		return;
1017 	}
1018 
1019 	if ((slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) &&
1020 	    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
1021 		if (!sbd_reserve_allow(task)) {
1022 			stmf_scsilib_send_status(task,
1023 			    STATUS_RESERVATION_CONFLICT, 0);
1024 			return;
1025 		}
1026 	}
1027 
1028 	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
1029 		uint32_t saa = 0;
1030 
1031 		mutex_enter(&slu->sl_it_list_lock);
1032 		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
1033 			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
1034 			saa = STMF_SAA_POR;
1035 		} else if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
1036 			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
1037 			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
1038 			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
1039 			    (task->task_cdb[1] ==
1040 			    SSVC_ACTION_READ_CAPACITY_G4))) {
1041 				saa = 0;
1042 			} else {
1043 				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
1044 			}
1045 		} else {
1046 			it->sbd_it_ua_conditions = 0;
1047 			saa = 0;
1048 		}
1049 		mutex_exit(&slu->sl_it_list_lock);
1050 		if (saa) {
1051 			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
1052 			return;
1053 		}
1054 	}
1055 
1056 
1057 	cdb0 = task->task_cdb[0] & 0x1F;
1058 
1059 	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
1060 		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
1061 			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
1062 			return;
1063 		}
1064 		if (cdb0 == SCMD_READ) {
1065 			sbd_handle_read(task, initial_dbuf);
1066 			return;
1067 		}
1068 		sbd_handle_write(task, initial_dbuf);
1069 		return;
1070 	}
1071 
1072 	cdb0 = task->task_cdb[0];
1073 	cdb1 = task->task_cdb[1];
1074 
1075 	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
1076 		task->task_cmd_xfer_length = 0;
1077 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1078 		return;
1079 	}
1080 
1081 	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
1082 		sbd_handle_read_capacity(task, initial_dbuf);
1083 		return;
1084 	}
1085 
1086 	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
1087 		uint8_t *p;
1088 
1089 		p = (uint8_t *)kmem_zalloc(512, KM_SLEEP);
1090 		sbd_handle_inquiry(task, initial_dbuf, p, 512);
1091 		kmem_free(p, 512);
1092 		return;
1093 	}
1094 
1095 	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
1096 		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
1097 			sbd_handle_read_capacity(task, initial_dbuf);
1098 			return;
1099 		/*
1100 		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
1101 		 * 	sbd_handle_read(task, initial_dbuf);
1102 		 * 	return;
1103 		 */
1104 		}
1105 	}
1106 
1107 	/*
1108 	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
1109 	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
1110 	 *		 sbd_handle_write(task, initial_dbuf);
1111 	 * 		return;
1112 	 *	}
1113 	 * }
1114 	 */
1115 
1116 	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
1117 		/* XXX Implement power management */
1118 		task->task_cmd_xfer_length = 0;
1119 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1120 		return;
1121 	}
1122 #if 0
1123 	/* XXX Remove #if 0 above */
1124 	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
1125 		sbd_handle_mode_select(task, initial_dbuf);
1126 		return;
1127 	}
1128 #endif
1129 	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
1130 		sbd_handle_mode_sense(task, initial_dbuf);
1131 		return;
1132 	}
1133 
1134 	if (cdb0 == SCMD_REQUEST_SENSE) {
1135 		/*
1136 		 * LU provider needs to store unretrieved sense data
1137 		 * (e.g. after power-on/reset).  For now, we'll just
1138 		 * return good status with no sense.
1139 		 */
1140 
1141 		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
1142 		    task->task_cdb[5]) {
1143 			stmf_scsilib_send_status(task, STATUS_CHECK,
1144 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1145 		} else {
1146 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1147 		}
1148 
1149 		return;
1150 	}
1151 
1152 	if (cdb0 == SCMD_VERIFY) {
1153 		/*
1154 		 * Something more likely needs to be done here.
1155 		 */
1156 		task->task_cmd_xfer_length = 0;
1157 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1158 		return;
1159 	}
1160 
1161 	if ((cdb0 == SCMD_RESERVE) || (cdb0 == SCMD_RELEASE)) {
1162 		if (cdb1) {
1163 			stmf_scsilib_send_status(task, STATUS_CHECK,
1164 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1165 			return;
1166 		}
1167 		mutex_enter(&slu->sl_it_list_lock);
1168 		if (slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) {
1169 			if (it->sbd_it_session_id !=
1170 			    slu->sl_rs_owner_session_id) {
1171 				/*
1172 				 * This can only happen if things were in
1173 				 * flux.
1174 				 */
1175 				mutex_exit(&slu->sl_it_list_lock);
1176 				stmf_scsilib_send_status(task,
1177 				    STATUS_RESERVATION_CONFLICT, 0);
1178 				return;
1179 			}
1180 		}
1181 	}
1182 
1183 	if (cdb0 == SCMD_RELEASE) {
1184 		slu->sl_flags &= ~SBD_LU_HAS_SCSI2_RESERVATION;
1185 		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
1186 		mutex_exit(&slu->sl_it_list_lock);
1187 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1188 		return;
1189 	}
1190 	if (cdb0 == SCMD_RESERVE) {
1191 		slu->sl_flags |= SBD_LU_HAS_SCSI2_RESERVATION;
1192 		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
1193 		slu->sl_rs_owner_session_id = it->sbd_it_session_id;
1194 		mutex_exit(&slu->sl_it_list_lock);
1195 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1196 		return;
1197 	}
1198 
1199 	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
1200 	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
1201 		sbd_handle_sync_cache(task, initial_dbuf);
1202 		return;
1203 	}
1204 
1205 	/* Report Target Port Groups */
1206 	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
1207 	    ((cdb1 & 0x1F) == 0x0A)) {
1208 		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
1209 		return;
1210 	}
1211 
1212 	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
1213 }
1214 
1215 void
1216 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1217 {
1218 	sbd_cmd_t *scmd = NULL;
1219 
1220 	scmd = (sbd_cmd_t *)task->task_lu_private;
1221 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1222 		return;
1223 
1224 	if (scmd->cmd_type == SBD_CMD_SCSI_READ) {
1225 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1226 	} else if (scmd->cmd_type == SBD_CMD_SCSI_WRITE) {
1227 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1228 	} else if (scmd->cmd_type == SBD_CMD_SMALL_READ) {
1229 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1230 	} else {
1231 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1232 	}
1233 }
1234 
/*
 * Status-sent completion callback.  The visible sbd code paths complete
 * status exclusively via stmf_scsilib_send_status(), which does not use
 * this callback, so being called here indicates a framework/provider
 * inconsistency and is treated as fatal.
 */
/* ARGSUSED */
void
sbd_send_status_done(struct scsi_task *task)
{
	cmn_err(CE_PANIC,
	    "sbd_send_status_done: this should not have been called");
}
1242 
1243 void
1244 sbd_task_free(struct scsi_task *task)
1245 {
1246 	if (task->task_lu_private) {
1247 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1248 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1249 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1250 			    (void *)task);
1251 		}
1252 		kmem_free(scmd, sizeof (sbd_cmd_t));
1253 	}
1254 }
1255 
1256 /*
1257  * Aborts are synchronus w.r.t. I/O AND
1258  * All the I/O which SBD does is synchronous AND
1259  * Everything within a task is single threaded.
1260  *   IT MEANS
1261  * If this function is called, we are doing nothing with this task
1262  * inside of sbd module.
1263  */
1264 /* ARGSUSED */
1265 stmf_status_t
1266 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1267 {
1268 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1269 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1270 	scsi_task_t *task;
1271 
1272 	if (abort_cmd == STMF_LU_RESET_STATE) {
1273 		return (sbd_lu_reset_state(lu));
1274 	}
1275 
1276 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1277 		sbd_check_and_clear_scsi2_reservation(slu,
1278 		    (sbd_it_data_t *)arg);
1279 		sbd_remove_it_handle(slu, (sbd_it_data_t *)arg);
1280 		return (STMF_SUCCESS);
1281 	}
1282 
1283 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1284 	task = (scsi_task_t *)arg;
1285 	if (task->task_lu_private) {
1286 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1287 
1288 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1289 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1290 			return (STMF_ABORT_SUCCESS);
1291 		}
1292 	}
1293 
1294 	return (STMF_NOT_FOUND);
1295 }
1296 
1297 /* ARGSUSED */
1298 void
1299 sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
1300 {
1301 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1302 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1303 	stmf_change_status_t st;
1304 
1305 	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
1306 	    (cmd == STMF_CMD_LU_OFFLINE) ||
1307 	    (cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
1308 	    (cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
1309 
1310 	st.st_completion_status = STMF_SUCCESS;
1311 	st.st_additional_info = NULL;
1312 
1313 	switch (cmd) {
1314 	case STMF_CMD_LU_ONLINE:
1315 		if (slu->sl_state == STMF_STATE_ONLINE)
1316 			st.st_completion_status = STMF_ALREADY;
1317 		else if (slu->sl_state != STMF_STATE_OFFLINE)
1318 			st.st_completion_status = STMF_FAILURE;
1319 		if (st.st_completion_status == STMF_SUCCESS) {
1320 			slu->sl_state = STMF_STATE_ONLINING;
1321 			slu->sl_state_not_acked = 1;
1322 			st.st_completion_status = sst->sst_online(sst);
1323 			if (st.st_completion_status != STMF_SUCCESS) {
1324 				slu->sl_state = STMF_STATE_OFFLINE;
1325 				slu->sl_state_not_acked = 0;
1326 			} else {
1327 				slu->sl_state = STMF_STATE_ONLINE;
1328 			}
1329 		}
1330 		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
1331 		break;
1332 
1333 	case STMF_CMD_LU_OFFLINE:
1334 		if (slu->sl_state == STMF_STATE_OFFLINE)
1335 			st.st_completion_status = STMF_ALREADY;
1336 		else if (slu->sl_state != STMF_STATE_ONLINE)
1337 			st.st_completion_status = STMF_FAILURE;
1338 		if (st.st_completion_status == STMF_SUCCESS) {
1339 			slu->sl_state = STMF_STATE_OFFLINING;
1340 			slu->sl_state_not_acked = 1;
1341 			st.st_completion_status = sst->sst_offline(sst);
1342 			if (st.st_completion_status != STMF_SUCCESS) {
1343 				slu->sl_state = STMF_STATE_ONLINE;
1344 				slu->sl_state_not_acked = 0;
1345 			} else {
1346 				slu->sl_state = STMF_STATE_OFFLINE;
1347 			}
1348 		}
1349 		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
1350 		break;
1351 
1352 	case STMF_ACK_LU_ONLINE_COMPLETE:
1353 		/* Fallthrough */
1354 	case STMF_ACK_LU_OFFLINE_COMPLETE:
1355 		slu->sl_state_not_acked = 0;
1356 		break;
1357 
1358 	}
1359 }
1360 
1361 /* ARGSUSED */
1362 stmf_status_t
1363 sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
1364 						uint32_t *bufsizep)
1365 {
1366 	return (STMF_NOT_SUPPORTED);
1367 }
1368 
1369 stmf_status_t
1370 sbd_lu_reset_state(stmf_lu_t *lu)
1371 {
1372 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1373 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1374 
1375 	sbd_check_and_clear_scsi2_reservation(slu, NULL);
1376 	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
1377 		return (STMF_FAILURE);
1378 	}
1379 	return (STMF_SUCCESS);
1380 }
1381 
/*
 * Handle SYNCHRONIZE CACHE (10-byte, SCMD_SYNCHRONIZE_CACHE) and
 * SYNCHRONIZE CACHE (16) (SCMD_SYNCHRONIZE_CACHE_G4).  Validates the
 * CDB, range-checks the requested LBA/length against the LU size, and
 * flushes the backing store via sst_data_flush().  The IMMED bit is
 * rejected; the range arguments are otherwise only validated, as the
 * flush acts on the whole store.
 */
/* ARGSUSED */
static void
sbd_handle_sync_cache(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf)
{
	sbd_store_t	*sst =
	    (sbd_store_t *)task->task_lu->lu_provider_private;
	sbd_lu_t	*slu = (sbd_lu_t *)sst->sst_sbd_private;
	uint64_t	lba, laddr;
	uint32_t	len;
	int		is_g4 = 0;	/* nonzero for the 16-byte CDB form */
	int		immed;

	/*
	 * Determine if this is a 10 or 16 byte CDB
	 */

	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
		is_g4 = 1;

	/*
	 * Determine other requested parameters
	 *
	 * We don't have a non-volatile cache, so don't care about SYNC_NV.
	 * Do not support the IMMED bit.
	 */

	/* IMMED is bit 1 of CDB byte 1 in both CDB forms. */
	immed = (task->task_cdb[1] & 0x02);

	if (immed) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/*
	 * Check to be sure we're not being asked to sync an LBA
	 * that is out of range.  While checking, verify reserved fields.
	 */

	if (is_g4) {
		/*
		 * Mask 0xf9 passes only IMMED (0x02, already rejected) and
		 * SYNC_NV (0x04, ignored); any other byte-1 bit, or a
		 * nonzero group/control tail (bytes 14-15), is invalid.
		 */
		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
		    task->task_cdb[15]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* 16-byte form: 64-bit LBA at bytes 2-9, 32-bit count at 10-13. */
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		/* Same byte-1 rule; bytes 6 and 9 are reserved/control here. */
		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
		    task->task_cdb[9]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* 10-byte form: 32-bit LBA at bytes 2-5, 16-bit count at 7-8. */
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	}

	/*
	 * Convert blocks to bytes; sl_shift_count is presumably log2 of
	 * the LU block size — confirm against the LU setup code.
	 */
	laddr = lba << slu->sl_shift_count;
	len <<= slu->sl_shift_count;

	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	/* Flush the backing store; report a write error if it fails. */
	if (sst->sst_data_flush(sst) != STMF_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
1461