1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 #include <sys/sdt.h>
38 #include <sys/dkio.h>
39 
40 #include <stmf.h>
41 #include <lpif.h>
42 #include <portif.h>
43 #include <stmf_ioctl.h>
44 #include <stmf_sbd.h>
45 #include <stmf_sbd_ioctl.h>
46 #include <sbd_impl.h>
47 
/*
 * SCSI2_CONFLICT_FREE_CMDS(cdb) evaluates to non-zero if the command in
 * 'cdb' may be executed even while another initiator holds a SCSI-2
 * reservation on the LU.  The allowed opcodes (and, for some opcodes,
 * the qualifying service-action / parameter bits) are taken from the
 * SPC/SBC tables cited below.
 */
#define	SCSI2_CONFLICT_FREE_CMDS(cdb)	( \
	/* ----------------------- */                                      \
	/* Refer Both		   */                                      \
	/* SPC-2 (rev 20) Table 10 */                                      \
	/* SPC-3 (rev 23) Table 31 */                                      \
	/* ----------------------- */                                      \
	((cdb[0]) == SCMD_INQUIRY)					|| \
	((cdb[0]) == SCMD_LOG_SENSE_G1)					|| \
	((cdb[0]) == SCMD_RELEASE)					|| \
	((cdb[0]) == SCMD_RELEASE_G1)					|| \
	((cdb[0]) == SCMD_REPORT_LUNS)					|| \
	((cdb[0]) == SCMD_REQUEST_SENSE)				|| \
	/* PREVENT ALLOW MEDIUM REMOVAL with prevent == 0 */               \
	((((cdb[0]) == SCMD_DOORLOCK) && (((cdb[4]) & 0x3) == 0)))	|| \
	/* SERVICE ACTION IN with READ MEDIA SERIAL NUMBER (0x01) */       \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G5) && (                          \
	    ((cdb[1]) & 0x1F) == 0x01))					|| \
	/* MAINTENANCE IN with service actions REPORT ALIASES (0x0Bh) */   \
	/* REPORT DEVICE IDENTIFIER (0x05)  REPORT PRIORITY (0x0Eh) */     \
	/* REPORT TARGET PORT GROUPS (0x0A) REPORT TIMESTAMP (0x0F) */     \
	(((cdb[0]) == SCMD_MAINTENANCE_IN) && (                            \
	    (((cdb[1]) & 0x1F) == 0x0B) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x05) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0E) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0A) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0F)))				|| \
	/* ----------------------- */                                      \
	/* SBC-3 (rev 17) Table 3  */                                      \
	/* ----------------------- */                                      \
	/* READ CAPACITY(10) */                                            \
	((cdb[0]) == SCMD_READ_CAPACITY)				|| \
	/* READ CAPACITY(16) */                                            \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G4) && (                          \
	    ((cdb[1]) & 0x1F) == 0x10))					|| \
	/* START STOP UNIT with START bit 0 and POWER CONDITION 0  */      \
	(((cdb[0]) == SCMD_START_STOP) && (                                \
	    (((cdb[4]) & 0xF0) == 0) && (((cdb[4]) & 0x01) == 0))))
/* End of SCSI2_CONFLICT_FREE_CMDS */
86 
87 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
88 static void sbd_handle_sync_cache(struct scsi_task *task,
89     struct stmf_data_buf *initial_dbuf);
90 void sbd_handle_read_xfer_completion(struct scsi_task *task,
91     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
92 void sbd_handle_short_write_xfer_completion(scsi_task_t *task,
93     stmf_data_buf_t *dbuf);
94 void sbd_handle_short_write_transfers(scsi_task_t *task,
95     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size);
96 static void sbd_handle_sync_cache(struct scsi_task *task,
97     struct stmf_data_buf *initial_dbuf);
98 void sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf,
99     uint32_t buflen);
100 void sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf);
101 
102 extern void sbd_pgr_initialize_it(scsi_task_t *);
103 extern int sbd_pgr_reservation_conflict(scsi_task_t *);
104 extern void sbd_pgr_reset(sbd_lu_t *);
105 extern void sbd_pgr_remove_it_handle(sbd_lu_t *, sbd_it_data_t *);
106 extern void sbd_handle_pgr_in_cmd(scsi_task_t *, stmf_data_buf_t *);
107 extern void sbd_handle_pgr_out_cmd(scsi_task_t *, stmf_data_buf_t *);
108 extern void sbd_handle_pgr_out_data(scsi_task_t *, stmf_data_buf_t *);
109 /*
110  * IMPORTANT NOTE:
111  * =================
112  * The whole world here is based on the assumption that everything within
113  * a scsi task executes in a single threaded manner, even the aborts.
114  * Dont ever change that. There wont be any performance gain but there
115  * will be tons of race conditions.
116  */
117 
/*
 * Read the next chunk of data for a SCSI READ from the backing store
 * into 'dbuf' and hand the buffer to the port for transfer to the
 * initiator.  While more data remains and the port can accept more
 * buffers, allocate additional dbufs and recurse so that several
 * transfers stay in flight at once.
 */
void
sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
					struct stmf_data_buf *dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;
	uint32_t len, buflen, iolen;
	int ndx;
	int bufs_to_take;

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	/* Move at most what fits in this dbuf. */
	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
	laddr = scmd->addr + scmd->current_ro;

	/* Fill each scatter/gather segment of the dbuf from the LU. */
	for (buflen = 0, ndx = 0; (buflen < len) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_read(sl, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			/* Do not need to do xfer anymore, just complete it */
			dbuf->db_data_size = 0;
			dbuf->db_xfer_status = STMF_SUCCESS;
			sbd_handle_read_xfer_completion(task, scmd, dbuf);
			return;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = buflen;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	scmd->len -= buflen;
	scmd->current_ro += buflen;
	/* Pipeline another buffer if data remains and the port allows it. */
	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
		uint32_t maxsize, minsize, old_minsize;

		/* Cap each allocation at 128k. */
		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/*
			 * A bad port implementation can keep on failing the
			 * the request but keep on sending us a false
			 * minsize.
			 */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* No more buffers; the outstanding xfers drive on. */
			return;
		}
		scmd->nbufs++;
		sbd_do_read_xfer(task, scmd, dbuf);
	}
}
181 
/*
 * Called when a data-in transfer for a SCSI READ completes.  Accounts
 * the transferred bytes and either finishes the command (success or
 * read error), waits for other outstanding buffers, or queues the next
 * chunk via sbd_do_read_xfer() — reusing 'dbuf' unless the port marked
 * it DB_DONT_REUSE, in which case a replacement is allocated.
 */
void
sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
				struct stmf_data_buf *dbuf)
{
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	/* All data moved (or a backing-store read failed earlier)? */
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE) {
		/* allocate new dbuf */
		uint32_t maxsize, minsize, old_minsize;
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only when no other buffer is in flight. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_read_xfer(task, scmd, dbuf);
}
228 
/*
 * Handle the SCSI READ family (READ(6)/(10)/(12)/(16)).  Decodes the
 * LBA and transfer length from the CDB, validates the range against
 * the LU size, and either services the whole request from a single
 * contiguous dbuf (fast path) or sets up a sbd_cmd_t in
 * task_lu_private and streams the data via sbd_do_read_xfer().
 */
void
sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0];
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;
	int fast_path;

	/* Decode LBA and length; the layout depends on the CDB group. */
	if (op == SCMD_READ) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* In READ(6) a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_READ_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_READ_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_READ_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to bytes. */
	laddr = lba << sl->sl_data_blocksize_shift;
	/*
	 * NOTE(review): 'len' is 32 bits, so a very large READ(12)/(16)
	 * transfer length could overflow this shift — confirm upper
	 * layers bound the request size.
	 */
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Fast path only when the CDB length matches what the port expects. */
	if (len != task->task_expected_xfer_length) {
		fast_path = 0;
		len = (len > task->task_expected_xfer_length) ?
		    task->task_expected_xfer_length : len;
	} else {
		fast_path = 1;
	}

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* No buffer supplied by the port: allocate one, capped at 128k. */
	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
	}
	dbuf = initial_dbuf;

	/*
	 * Fast path: the whole read fits in one contiguous segment, so
	 * issue a single backing-store read and let the port piggyback
	 * the GOOD status on the data transfer.
	 */
	if ((dbuf->db_buf_size >= len) && fast_path &&
	    (dbuf->db_sglist_length == 1)) {
		if (sbd_data_read(sl, laddr, (uint64_t)len,
		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
			dbuf->db_relative_offset = 0;
			dbuf->db_data_size = len;
			dbuf->db_flags = DB_SEND_STATUS_GOOD |
			    DB_DIRECTION_TO_RPORT;
			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
		} else {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		}
		return;
	}

	/* Slow path: track progress in task_lu_private across transfers. */
	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_READ;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	sbd_do_read_xfer(task, scmd, dbuf);
}
338 
339 void
340 sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
341 					struct stmf_data_buf *dbuf)
342 {
343 	uint32_t len;
344 	int bufs_to_take;
345 
346 	/* Lets try not to hog all the buffers the port has. */
347 	bufs_to_take = ((task->task_max_nbufs > 2) &&
348 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
349 	    task->task_max_nbufs;
350 
351 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
352 
353 	dbuf->db_relative_offset = scmd->current_ro;
354 	dbuf->db_data_size = len;
355 	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
356 	(void) stmf_xfer_data(task, dbuf, 0);
357 	scmd->len -= len;
358 	scmd->current_ro += len;
359 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
360 		uint32_t maxsize, minsize, old_minsize;
361 
362 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
363 		minsize = maxsize >> 2;
364 		do {
365 			old_minsize = minsize;
366 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
367 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
368 		    (minsize >= 512));
369 		if (dbuf == NULL) {
370 			return;
371 		}
372 		scmd->nbufs++;
373 		sbd_do_write_xfer(task, scmd, dbuf);
374 	}
375 }
376 
/*
 * Called when a data-out transfer for a SCSI WRITE completes.  Commits
 * the received data to the backing store and either sends final status,
 * waits for other outstanding buffers, or queues the next transfer via
 * sbd_do_write_xfer().  'dbuf_reusable' is 0 when the buffer must not
 * be recycled (sbd_handle_write passes 0 for the immediate-data dbuf).
 */
void
sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;
	uint32_t buflen, iolen;
	int ndx;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* An earlier write already failed; just drain remaining buffers. */
	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto WRITE_XFER_DONE;
	}

	laddr = scmd->addr + dbuf->db_relative_offset;

	/* Commit each scatter/gather segment to the backing store. */
	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_write(sl, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			break;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;
WRITE_XFER_DONE:
	/* All data moved (or the write failed)? */
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE || dbuf_reusable == 0) {
		uint32_t maxsize, minsize, old_minsize;
		/* free current dbuf and allocate a new one */
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only when no other buffer is in flight. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_write_xfer(task, scmd, dbuf);
}
450 
/*
 * Handle the SCSI WRITE family (WRITE(6)/(10)/(12)/(16)).  Decodes the
 * LBA and transfer length from the CDB, validates write-protect state
 * and range, sets up a sbd_cmd_t in task_lu_private, and starts the
 * data-out phase — either consuming immediate (initial burst) data the
 * port already delivered, or requesting transfers via
 * sbd_do_write_xfer().
 */
void
sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;

	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_PROTECTED);
		return;
	}
	/* Decode LBA and length; the layout depends on the CDB group. */
	if (op == SCMD_WRITE) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* In WRITE(6) a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_WRITE_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_WRITE_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_WRITE_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to bytes. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Never transfer more than the port expects. */
	len = (len > task->task_expected_xfer_length) ?
	    task->task_expected_xfer_length : len;

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* No buffer supplied by the port: allocate one, capped at 128k. */
	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
	} else if (task->task_flags & TF_INITIAL_BURST) {
		/* The port already delivered some (or all) of the data. */
		if (initial_dbuf->db_data_size > len) {
			if (initial_dbuf->db_data_size >
			    task->task_expected_xfer_length) {
				/* protocol error */
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_INVALID_ARG, NULL);
				return;
			}
			/* Clip the burst to the command's transfer length. */
			initial_dbuf->db_data_size = len;
		}
		do_immediate_data = 1;
	}
	dbuf = initial_dbuf;

	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	if (do_immediate_data) {
		/*
		 * Account the burst data as already transferred and run
		 * it through the normal completion path (dbuf_reusable
		 * is 0: the burst buffer belongs to the port).
		 */
		scmd->len -= dbuf->db_data_size;
		scmd->current_ro += dbuf->db_data_size;
		dbuf->db_xfer_status = STMF_SUCCESS;
		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
	} else {
		sbd_do_write_xfer(task, scmd, dbuf);
	}
}
563 
564 /*
565  * Utility routine to handle small non performance data transfers to the
566  * initiators. dbuf is an initial data buf (if any), 'p' points to a data
567  * buffer which is source of data for transfer, cdb_xfer_size is the
568  * transfer size based on CDB, cmd_xfer_size is the actual amount of data
569  * which this command would transfer (the size of data pointed to by 'p').
570  */
void
sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
    uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
{
	uint32_t bufsize, ndx;
	sbd_cmd_t *scmd;

	/* Never return more than the CDB allocation length allows. */
	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);

	task->task_cmd_xfer_length = cmd_xfer_size;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = cmd_xfer_size;
	} else {
		cmd_xfer_size = min(cmd_xfer_size,
		    task->task_expected_xfer_length);
	}

	if (cmd_xfer_size == 0) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}
	if (dbuf == NULL) {
		uint32_t minsize = cmd_xfer_size;

		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
	}
	if (dbuf == NULL) {
		stmf_scsilib_send_status(task, STATUS_QFULL, 0);
		return;
	}

	/*
	 * Copy the source data into the dbuf's scatter/gather segments.
	 * NOTE(review): 'ndx' is not bounded by db_sglist_length here;
	 * this assumes the dbuf's sglist always covers cmd_xfer_size —
	 * confirm that holds for port-supplied buffers.
	 */
	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
		uint8_t *d;
		uint32_t s;

		d = dbuf->db_sglist[ndx].seg_addr;
		s = min((cmd_xfer_size - bufsize),
		    dbuf->db_sglist[ndx].seg_length);
		bcopy(p+bufsize, d, s);
		bufsize += s;
	}
	dbuf->db_relative_offset = 0;
	dbuf->db_data_size = cmd_xfer_size;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;

	if (task->task_lu_private == NULL) {
		task->task_lu_private =
		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
	}
	scmd = (sbd_cmd_t *)task->task_lu_private;

	/* Single transfer; the completion handler sends final status. */
	scmd->cmd_type = SBD_CMD_SMALL_READ;
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	(void) stmf_xfer_data(task, dbuf, 0);
}
627 
628 void
629 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
630 				struct stmf_data_buf *dbuf)
631 {
632 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
633 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
634 		    dbuf->db_xfer_status, NULL);
635 		return;
636 	}
637 	task->task_nbytes_transferred = dbuf->db_data_size;
638 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
639 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
640 }
641 
642 void
643 sbd_handle_short_write_transfers(scsi_task_t *task,
644     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size)
645 {
646 	sbd_cmd_t *scmd;
647 
648 	task->task_cmd_xfer_length = cdb_xfer_size;
649 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
650 		task->task_expected_xfer_length = cdb_xfer_size;
651 	} else {
652 		cdb_xfer_size = min(cdb_xfer_size,
653 		    task->task_expected_xfer_length);
654 	}
655 
656 	if (cdb_xfer_size == 0) {
657 		stmf_scsilib_send_status(task, STATUS_CHECK,
658 		    STMF_SAA_INVALID_FIELD_IN_CDB);
659 		return;
660 	}
661 	if (task->task_lu_private == NULL) {
662 		task->task_lu_private = kmem_zalloc(sizeof (sbd_cmd_t),
663 		    KM_SLEEP);
664 	} else {
665 		bzero(task->task_lu_private, sizeof (sbd_cmd_t));
666 	}
667 	scmd = (sbd_cmd_t *)task->task_lu_private;
668 	scmd->cmd_type = SBD_CMD_SMALL_WRITE;
669 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
670 	scmd->len = cdb_xfer_size;
671 	if (dbuf == NULL) {
672 		uint32_t minsize = cdb_xfer_size;
673 
674 		dbuf = stmf_alloc_dbuf(task, cdb_xfer_size, &minsize, 0);
675 		if (dbuf == NULL) {
676 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
677 			    STMF_ALLOC_FAILURE, NULL);
678 			return;
679 		}
680 		dbuf->db_data_size = cdb_xfer_size;
681 		dbuf->db_relative_offset = 0;
682 		dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
683 		stmf_xfer_data(task, dbuf, 0);
684 	} else {
685 		if (dbuf->db_data_size < cdb_xfer_size) {
686 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
687 			    STMF_ABORTED, NULL);
688 			return;
689 		}
690 		dbuf->db_data_size = cdb_xfer_size;
691 		sbd_handle_short_write_xfer_completion(task, dbuf);
692 	}
693 }
694 
/*
 * Completion handler for SBD_CMD_SMALL_WRITE transfers.  Once the
 * parameter data has arrived, dispatch it to the opcode-specific
 * handler (MODE SELECT or PERSISTENT RESERVE OUT); on a standby LU the
 * command is proxied to the active node instead.
 */
void
sbd_handle_short_write_xfer_completion(scsi_task_t *task,
    stmf_data_buf_t *dbuf)
{
	sbd_cmd_t *scmd;
	stmf_status_t st_ret;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;

	/*
	 * For now lets assume we will get only one sglist element
	 * for short writes. If that ever changes, we should allocate
	 * a local buffer and copy all the sg elements to one linear space.
	 */
	if ((dbuf->db_xfer_status != STMF_SUCCESS) ||
	    (dbuf->db_sglist_length > 1)) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	task->task_nbytes_transferred = dbuf->db_data_size;
	scmd = (sbd_cmd_t *)task->task_lu_private;
	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;

	/* Lets find out who to call */
	switch (task->task_cdb[0]) {
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		if (sl->sl_access_state == SBD_LU_STANDBY) {
			/* Standby path: forward to the active node. */
			st_ret = stmf_proxy_scsi_cmd(task, dbuf);
			if (st_ret != STMF_SUCCESS) {
				stmf_scsilib_send_status(task, STATUS_CHECK,
				    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
			}
		} else {
			sbd_handle_mode_select_xfer(task,
			    dbuf->db_sglist[0].seg_addr, dbuf->db_data_size);
		}
		break;
	case SCMD_PERSISTENT_RESERVE_OUT:
		if (sl->sl_access_state == SBD_LU_STANDBY) {
			/* Standby path: forward to the active node. */
			st_ret = stmf_proxy_scsi_cmd(task, dbuf);
			if (st_ret != STMF_SUCCESS) {
				stmf_scsilib_send_status(task, STATUS_CHECK,
				    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
			}
		} else {
			sbd_handle_pgr_out_data(task, dbuf);
		}
		break;
	default:
		/* This should never happen */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ABORTED, NULL);
	}
}
751 
752 void
753 sbd_handle_read_capacity(struct scsi_task *task,
754     struct stmf_data_buf *initial_dbuf)
755 {
756 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
757 	uint32_t cdb_len;
758 	uint8_t p[32];
759 	uint64_t s;
760 	uint16_t blksize;
761 
762 	s = sl->sl_lu_size >> sl->sl_data_blocksize_shift;
763 	s--;
764 	blksize = ((uint16_t)1) << sl->sl_data_blocksize_shift;
765 
766 	switch (task->task_cdb[0]) {
767 	case SCMD_READ_CAPACITY:
768 		if (s & 0xffffffff00000000ull) {
769 			p[0] = p[1] = p[2] = p[3] = 0xFF;
770 		} else {
771 			p[0] = (s >> 24) & 0xff;
772 			p[1] = (s >> 16) & 0xff;
773 			p[2] = (s >> 8) & 0xff;
774 			p[3] = s & 0xff;
775 		}
776 		p[4] = 0; p[5] = 0;
777 		p[6] = (blksize >> 8) & 0xff;
778 		p[7] = blksize & 0xff;
779 		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
780 		break;
781 
782 	case SCMD_SVC_ACTION_IN_G4:
783 		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
784 		bzero(p, 32);
785 		p[0] = (s >> 56) & 0xff;
786 		p[1] = (s >> 48) & 0xff;
787 		p[2] = (s >> 40) & 0xff;
788 		p[3] = (s >> 32) & 0xff;
789 		p[4] = (s >> 24) & 0xff;
790 		p[5] = (s >> 16) & 0xff;
791 		p[6] = (s >> 8) & 0xff;
792 		p[7] = s & 0xff;
793 		p[10] = (blksize >> 8) & 0xff;
794 		p[11] = blksize & 0xff;
795 		sbd_handle_short_read_transfers(task, initial_dbuf, p,
796 		    cdb_len, 32);
797 		break;
798 	}
799 }
800 
/*
 * Derive a synthetic disk geometry for a LU of 's' bytes with block
 * size 'blksize'.  LUs under 4GB get 32 sectors/track and 8 heads;
 * larger LUs get 254 of each.  Cylinder count is whatever fills the
 * remaining capacity.
 */
void
sbd_calc_geometry(uint64_t s, uint16_t blksize, uint8_t *nsectors,
    uint8_t *nheads, uint32_t *ncyl)
{
	uint64_t cyl_bytes;

	if (s < (4ull * 1024ull * 1024ull * 1024ull)) {
		*nsectors = 32;
		*nheads = 8;
	} else {
		*nsectors = 254;
		*nheads = 254;
	}

	/* Bytes per cylinder = block size * sectors/track * heads. */
	cyl_bytes = (uint64_t)blksize * (uint64_t)(*nsectors) *
	    (uint64_t)(*nheads);
	*ncyl = s / cyl_bytes;
}
815 
/*
 * Handle MODE SENSE(6) and MODE SENSE(10).  'buf' is a caller-supplied
 * scratch area, assumed zeroed and large enough.  Builds the mode
 * parameter header followed by the requested page(s) — 0x03, 0x04,
 * MODEPAGE_CACHING, MODEPAGE_CTRL_MODE, or all of them for
 * MODEPAGE_ALLPAGES — and returns the result to the initiator via
 * sbd_handle_short_read_transfers().  No block descriptors are
 * returned.
 */
void
sbd_handle_mode_sense(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf, uint8_t *buf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint32_t cmd_size, n;
	uint8_t *cdb;
	uint32_t ncyl;
	uint8_t nsectors, nheads;
	uint8_t page, ctrl, header_size, pc_valid;
	uint16_t nbytes;
	uint8_t *p;
	uint64_t s = sl->sl_lu_size;
	uint32_t dev_spec_param_offset;

	p = buf;	/* buf is assumed to be zeroed out and large enough */
	n = 0;		/* running length of the response being built */
	cdb = &task->task_cdb[0];
	page = cdb[2] & 0x3F;		/* page code */
	ctrl = (cdb[2] >> 6) & 3;	/* page control: 0=current,
					 * 1=changeable, else saved */
	cmd_size = (cdb[0] == SCMD_MODE_SENSE) ? cdb[4] :
	    READ_SCSI16(&cdb[7], uint32_t);

	/* Header layout differs between the 6- and 10-byte CDB forms. */
	if (cdb[0] == SCMD_MODE_SENSE) {
		header_size = 4;
		dev_spec_param_offset = 2;
	} else {
		header_size = 8;
		dev_spec_param_offset = 3;
	}

	/* Now validate the command */
	if ((cdb[2] == 0) || (page == MODEPAGE_ALLPAGES) || (page == 0x08) ||
	    (page == 0x0A) || (page == 0x03) || (page == 0x04)) {
		pc_valid = 1;
	} else {
		pc_valid = 0;
	}
	if ((cmd_size < header_size) || (pc_valid == 0)) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/* We will update the length in the mode header at the end */

	/* Block dev device specific param in mode param header has wp bit */
	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		p[n + dev_spec_param_offset] = BIT_7;
	}
	n += header_size;
	/* We are not going to return any block descriptor */

	nbytes = ((uint16_t)1) << sl->sl_data_blocksize_shift;
	sbd_calc_geometry(s, nbytes, &nsectors, &nheads, &ncyl);

	/* Page 0x03 (24 bytes): sectors/track and bytes/sector fields. */
	if ((page == 0x03) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x03;
		p[n+1] = 0x16;
		/* For "changeable" (ctrl == 1) all fields stay zero. */
		if (ctrl != 1) {
			p[n + 11] = nsectors;
			p[n + 12] = nbytes >> 8;
			p[n + 13] = nbytes & 0xff;
			p[n + 20] = 0x80;
		}
		n += 24;
	}
	/* Page 0x04 (24 bytes): cylinder and head counts. */
	if ((page == 0x04) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x04;
		p[n + 1] = 0x16;
		if (ctrl != 1) {
			p[n + 2] = ncyl >> 16;
			p[n + 3] = ncyl >> 8;
			p[n + 4] = ncyl & 0xff;
			p[n + 5] = nheads;
			p[n + 20] = 0x15;
			p[n + 21] = 0x18;
		}
		n += 24;
	}
	/* Caching page: report the WCE bit per the requested page control. */
	if ((page == MODEPAGE_CACHING) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_caching *mode_caching_page;

		mode_caching_page = (struct mode_caching *)&p[n];

		mode_caching_page->mode_page.code = MODEPAGE_CACHING;
		mode_caching_page->mode_page.ps = 1; /* A saveable page */
		mode_caching_page->mode_page.length = 0x12;

		switch (ctrl) {
		case (0):
			/* Current */
			if ((sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		case (1):
			/* Changeable */
			if ((sl->sl_flags &
			    SL_WRITEBACK_CACHE_SET_UNSUPPORTED) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		default:
			/* Saved */
			if ((sl->sl_flags &
			    SL_SAVED_WRITE_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;
		}
		n += (sizeof (struct mode_page) +
		    mode_caching_page->mode_page.length);
	}
	/* Control mode page. */
	if ((page == MODEPAGE_CTRL_MODE) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_control_scsi3 *mode_control_page;

		mode_control_page = (struct mode_control_scsi3 *)&p[n];

		mode_control_page->mode_page.code = MODEPAGE_CTRL_MODE;
		mode_control_page->mode_page.length =
		    PAGELENGTH_MODE_CONTROL_SCSI3;
		if (ctrl != 1) {
			/* If not looking for changeable values, report this. */
			mode_control_page->que_mod = CTRL_QMOD_UNRESTRICT;
		}
		n += (sizeof (struct mode_page) +
		    mode_control_page->mode_page.length);
	}

	/* Fill in the mode data length field of the header. */
	if (cdb[0] == SCMD_MODE_SENSE) {
		/* MODE SENSE(6) has a one-byte length field. */
		if (n > 255) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/*
		 * Mode parameter header length doesn't include the number
		 * of bytes in the length field, so adjust the count.
		 * Byte count minus header length field size.
		 */
		buf[0] = (n - 1) & 0xff;
	} else {
		/* Byte count minus header length field size. */
		buf[1] = (n - 2) & 0xff;
		buf[0] = ((n - 2) >> 8) & 0xff;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, buf,
	    cmd_size, n);
}
968 
969 void
970 sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf)
971 {
972 	uint32_t cmd_xfer_len;
973 
974 	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
975 		cmd_xfer_len = (uint32_t)task->task_cdb[4];
976 	} else {
977 		cmd_xfer_len = READ_SCSI16(&task->task_cdb[7], uint32_t);
978 	}
979 
980 	if ((task->task_cdb[1] & 0xFE) != 0x10) {
981 		stmf_scsilib_send_status(task, STATUS_CHECK,
982 		    STMF_SAA_INVALID_FIELD_IN_CDB);
983 		return;
984 	}
985 
986 	if (cmd_xfer_len == 0) {
987 		/* zero byte mode selects are allowed */
988 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
989 		return;
990 	}
991 
992 	sbd_handle_short_write_transfers(task, dbuf, cmd_xfer_len);
993 }
994 
/*
 * Completion handler for the MODE SELECT parameter list transfer.
 *
 * Parses the mode parameter header and block descriptors, then accepts only
 * a Caching mode page (page code 8) whose sole requested change is the WCE
 * (write cache enable) bit; every other changeable field must be zero.
 * On success the write-cache setting is pushed to the backing store, all
 * other I_T nexuses get a MODE PARAMETERS CHANGED unit attention, and if the
 * SP bit was set in the CDB the setting is also persisted via
 * sbd_write_lu_info().
 */
void
sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf, uint32_t buflen)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	int hdr_len, bd_len;
	sbd_status_t sret;
	int i;

	/* Mode parameter header is 4 bytes for SELECT(6), 8 for SELECT(10) */
	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
		hdr_len = 4;
	} else {
		hdr_len = 8;
	}

	if (buflen < hdr_len)
		goto mode_sel_param_len_err;

	/* Block descriptor length field position differs between the forms */
	bd_len = hdr_len == 4 ? buf[3] : READ_SCSI16(&buf[6], int);

	/* Need at least the page code and page length bytes after the BDs */
	if (buflen < (hdr_len + bd_len + 2))
		goto mode_sel_param_len_err;

	/* Skip header and block descriptors; buf now points at the page */
	buf += hdr_len + bd_len;
	buflen -= hdr_len + bd_len;

	/* Only the Caching page (8) is supported, and its length must match */
	if ((buf[0] != 8) || (buflen != ((uint32_t)buf[1] + 2))) {
		goto mode_sel_param_len_err;
	}

	/* In byte 2, only WCE (BIT_2) may be set; 0xFB masks everything else */
	if (buf[2] & 0xFB) {
		goto mode_sel_param_field_err;
	}

	/* All remaining page bytes must be zero (no other changeable fields) */
	for (i = 3; i < (buf[1] + 2); i++) {
		if (buf[i]) {
			goto mode_sel_param_field_err;
		}
	}

	sret = SBD_SUCCESS;

	/* All good. Lets handle the write cache change, if any */
	if (buf[2] & BIT_2) {
		sret = sbd_wcd_set(0, sl);
	} else {
		sret = sbd_wcd_set(1, sl);
	}

	if (sret != SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
		return;
	}

	/* set on the device passed, now set the flags */
	mutex_enter(&sl->sl_lock);
	if (buf[2] & BIT_2) {
		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
	} else {
		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
	}

	/* Everyone except the initiator that made the change gets a UA */
	for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
		if (it == task->task_lu_itl_handle)
			continue;
		it->sbd_it_ua_conditions |= SBD_UA_MODE_PARAMETERS_CHANGED;
	}

	/* SP bit set: also update the saved value and persist it */
	if (task->task_cdb[1] & 1) {
		if (buf[2] & BIT_2) {
			sl->sl_flags &= ~SL_SAVED_WRITE_CACHE_DISABLE;
		} else {
			sl->sl_flags |= SL_SAVED_WRITE_CACHE_DISABLE;
		}
		mutex_exit(&sl->sl_lock);
		sret = sbd_write_lu_info(sl);
	} else {
		mutex_exit(&sl->sl_lock);
	}
	if (sret == SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
	}
	return;

mode_sel_param_len_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_PARAM_LIST_LENGTH_ERROR);
	return;
mode_sel_param_field_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_INVALID_FIELD_IN_PARAM_LIST);
}
1091 
1092 /*
1093  * This function parse through a string, passed to it as a pointer to a string,
1094  * by adjusting the pointer to the first non-space character and returns
1095  * the count/length of the first bunch of non-space characters. Multiple
1096  * Management URLs are stored as a space delimited string in sl_mgmt_url
1097  * field of sbd_lu_t. This function is used to retrieve one url at a time.
1098  *
1099  * i/p : pointer to pointer to a url string
1100  * o/p : Adjust the pointer to the url to the first non white character
1101  *       and returns the length of the URL
1102  */
/*
 * This function parses through a string, passed to it as a pointer to a
 * string, by adjusting the pointer to the first non-space character and
 * returns the count/length of the first bunch of non-space characters.
 * Multiple Management URLs are stored as a space delimited string in the
 * sl_mgmt_url field of sbd_lu_t. This function is used to retrieve one url
 * at a time.
 *
 * i/p : pointer to pointer to a url string
 * o/p : Adjusts the pointer to the url to the first non-white character
 *       and returns the length of the URL (0 when no URL remains)
 */
uint16_t
sbd_parse_mgmt_url(char **url_addr)
{
	uint16_t url_length = 0;
	char *url = *url_addr;

	/* Skip leading whitespace */
	while (*url == ' ' || *url == '\t' || *url == '\n')
		url++;
	*url_addr = url;

	/*
	 * Measure the URL up to the next whitespace or the end of the
	 * string.  (The previous version also tested for '\0' inside the
	 * loop body, which the loop condition already excludes.)
	 */
	while (*url != '\0' && *url != ' ' && *url != '\t' && *url != '\n') {
		url++;
		url_length++;
	}
	return (url_length);
}
1128 
/*
 * Handler for the INQUIRY command.
 *
 * Builds either a standard inquiry response or one of the supported EVPD
 * pages (0x00 supported pages, 0x80 unit serial number, 0x83 device
 * identification, 0x85 management network addresses, 0x86 extended inquiry)
 * and sends it via a short read transfer.
 */
void
sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	uint8_t *p;
	uint8_t byte0;
	uint8_t page_length;
	uint16_t bsize = 512;
	uint16_t cmd_size;
	uint32_t xfer_size = 4;
	uint32_t mgmt_url_size = 0;


	byte0 = DTYPE_DIRECT;
	/*
	 * Basic protocol checks.
	 */

	/* Page code must be zero unless EVPD is set; cdb[5] is reserved */
	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/*
	 * Zero byte allocation length is not an error.  Just
	 * return success.
	 */

	cmd_size = (((uint16_t)cdbp[3]) << 8) | cdbp[4];

	if (cmd_size == 0) {
		task->task_cmd_xfer_length = 0;
		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length = 0;
		}
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (sl->sl_mgmt_url) {
		mgmt_url_size = strlen(sl->sl_mgmt_url);
	}

	/*
	 * Standard inquiry
	 */

	if ((cdbp[1] & 1) == 0) {
		int	i;
		struct scsi_inquiry *inq;

		p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);
		inq = (struct scsi_inquiry *)p;

		/* ADDITIONAL LENGTH; total response is page_length + 5 */
		page_length = 69;
		xfer_size = page_length + 5;

		inq->inq_dtype = DTYPE_DIRECT;
		inq->inq_ansi = 5;	/* SPC-3 */
		inq->inq_hisup = 1;
		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
		inq->inq_len = page_length;

		inq->inq_tpgs = TPGS_FAILOVER_IMPLICIT;
		inq->inq_cmdque = 1;

		/* Use LU-specific vendor/product/revision when configured */
		if (sl->sl_flags & SL_VID_VALID) {
			bcopy(sl->sl_vendor_id, inq->inq_vid, 8);
		} else {
			bcopy(sbd_vendor_id, inq->inq_vid, 8);
		}

		if (sl->sl_flags & SL_PID_VALID) {
			bcopy(sl->sl_product_id, inq->inq_pid, 16);
		} else {
			bcopy(sbd_product_id, inq->inq_pid, 16);
		}

		if (sl->sl_flags & SL_REV_VALID) {
			bcopy(sl->sl_revision, inq->inq_revision, 4);
		} else {
			bcopy(sbd_revision, inq->inq_revision, 4);
		}

		/* Adding Version Descriptors */
		i = 0;
		/* SAM-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x00;
		inq->inq_vd[i].inq_vd_lsb = 0x60;
		i++;

		/* transport */
		switch (task->task_lport->lport_id->protocol_id) {
		case PROTOCOL_FIBRE_CHANNEL:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x00;
			i++;
			break;

		case PROTOCOL_PARALLEL_SCSI:
		case PROTOCOL_SSA:
		case PROTOCOL_IEEE_1394:
			/* Currently no claims of conformance */
			break;

		case PROTOCOL_SRP:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x40;
			i++;
			break;

		case PROTOCOL_iSCSI:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x60;
			i++;
			break;

		case PROTOCOL_SAS:
		case PROTOCOL_ADT:
		case PROTOCOL_ATAPI:
		default:
			/* Currently no claims of conformance */
			break;
		}

		/* SPC-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x00;
		i++;

		/* SBC-2 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x20;

		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
		    min(cmd_size, xfer_size));
		kmem_free(p, bsize);

		return;
	}

	/*
	 * EVPD handling
	 */

	/* Default 512 bytes may not be enough, increase bsize if necessary */
	if (cdbp[2] == 0x83 || cdbp[2] == 0x85) {
		if (bsize <  cmd_size)
			bsize = cmd_size;
	}
	p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);

	switch (cdbp[2]) {
	case 0x00:
		/*
		 * Supported VPD pages.  Page 0x00 itself is listed implicitly:
		 * kmem_zalloc left p[4] == 0x00, which is why the explicit
		 * list below starts at index 5.
		 */
		page_length = 4 + (mgmt_url_size ? 1 : 0);

		p[0] = byte0;
		p[3] = page_length;
		/* Supported VPD pages in ascending order */
		{
			uint8_t i = 5;

			p[i++] = 0x80;
			p[i++] = 0x83;
			if (mgmt_url_size != 0)
				p[i++] = 0x85;
			p[i++] = 0x86;
		}
		xfer_size = page_length + 4;
		break;

	case 0x80:
		/*
		 * Unit serial number page.  NOTE(review): sl_serial_no_size
		 * is assigned to the one-byte p[3] field below — presumably
		 * it is bounded elsewhere; verify against the LU setup code.
		 */
		if (sl->sl_serial_no_size) {
			page_length = sl->sl_serial_no_size;
			bcopy(sl->sl_serial_no, p + 4, sl->sl_serial_no_size);
		} else {
			/* if no serial num is specified set 4 spaces */
			page_length = 4;
			bcopy("    ", p + 4, 4);
		}
		p[0] = byte0;
		p[1] = 0x80;
		p[3] = page_length;
		xfer_size = page_length + 4;
		break;

	case 0x83:
		/* Device identification page is built by the STMF library */
		xfer_size = stmf_scsilib_prepare_vpd_page83(task, p,
		    bsize, byte0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID);
		break;

	case 0x85:
		if (mgmt_url_size == 0) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			kmem_free(p, bsize);
			return;
		}
		{
			uint16_t idx, newidx, sz, url_size;
			char *url;

			p[0] = byte0;
			p[1] = 0x85;

			idx = 4;
			url = sl->sl_mgmt_url;
			url_size = sbd_parse_mgmt_url(&url);
			/* Creating Network Service Descriptors */
			while (url_size != 0) {
				/* Null terminated and 4 Byte aligned */
				sz = url_size + 1;
				sz += (sz % 4) ? 4 - (sz % 4) : 0;
				newidx = idx + sz + 4;

				/* Descriptors that don't fit are skipped */
				if (newidx < bsize) {
					/*
					 * SPC-3r23 : Table 320  (Sec 7.6.5)
					 * (Network service descriptor format)
					 *
					 * Note: Hard coding service type as
					 * "Storage Configuration Service".
					 */
					p[idx] = 1;
					SCSI_WRITE16(p + idx + 2, sz);
					bcopy(url, p + idx + 4, url_size);
					xfer_size = newidx + 4;
				}
				idx = newidx;

				/* skip to next mgmt url if any */
				url += url_size;
				url_size = sbd_parse_mgmt_url(&url);
			}

			/* Total descriptor length */
			SCSI_WRITE16(p + 2, idx - 4);
			break;
		}

	case 0x86:
		/* Extended INQUIRY data page */
		page_length = 0x3c;

		p[0] = byte0;
		p[1] = 0x86;		/* Page 86 response */
		p[3] = page_length;

		/*
		 * Bits 0, 1, and 2 will need to be updated
		 * to reflect the queue tag handling if/when
		 * that is implemented.  For now, we're going
		 * to claim support only for Simple TA.
		 */
		p[5] = 1;
		xfer_size = page_length + 4;
		break;

	default:
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		kmem_free(p, bsize);
		return;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
	    min(cmd_size, xfer_size));
	kmem_free(p, bsize);
}
1401 
1402 stmf_status_t
1403 sbd_task_alloc(struct scsi_task *task)
1404 {
1405 	if ((task->task_lu_private =
1406 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
1407 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1408 		scmd->flags = 0;
1409 		return (STMF_SUCCESS);
1410 	}
1411 	return (STMF_ALLOC_FAILURE);
1412 }
1413 
1414 void
1415 sbd_remove_it_handle(sbd_lu_t *sl, sbd_it_data_t *it)
1416 {
1417 	sbd_it_data_t **ppit;
1418 
1419 	sbd_pgr_remove_it_handle(sl, it);
1420 	mutex_enter(&sl->sl_lock);
1421 	for (ppit = &sl->sl_it_list; *ppit != NULL;
1422 	    ppit = &((*ppit)->sbd_it_next)) {
1423 		if ((*ppit) == it) {
1424 			*ppit = it->sbd_it_next;
1425 			break;
1426 		}
1427 	}
1428 	mutex_exit(&sl->sl_lock);
1429 
1430 	DTRACE_PROBE2(itl__nexus__end, stmf_lu_t *, sl->sl_lu,
1431 	    sbd_it_data_t *, it);
1432 
1433 	kmem_free(it, sizeof (*it));
1434 }
1435 
/*
 * Clears a SCSI-2 reservation on the LU, if one exists.
 *
 * If 'it' is NULL the holder is looked up in the nexus list; otherwise the
 * reservation is cleared only when 'it' is the holder (hence
 * "check_and_clear").  All state is manipulated under sl_lock.
 */
void
sbd_check_and_clear_scsi2_reservation(sbd_lu_t *sl, sbd_it_data_t *it)
{
	mutex_enter(&sl->sl_lock);
	if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) == 0) {
		/* If we dont have any reservations, just get out. */
		mutex_exit(&sl->sl_lock);
		return;
	}

	if (it == NULL) {
		/* Find the I_T nexus which is holding the reservation. */
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
				/* Holder must match the recorded owner */
				ASSERT(it->sbd_it_session_id ==
				    sl->sl_rs_owner_session_id);
				break;
			}
		}
		/* LU flag said reserved, so a holder must exist */
		ASSERT(it != NULL);
	} else {
		/*
		 * We were passed an I_T nexus. If this nexus does not hold
		 * the reservation, do nothing. This is why this function is
		 * called "check_and_clear".
		 */
		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
			mutex_exit(&sl->sl_lock);
			return;
		}
	}
	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
	sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
	mutex_exit(&sl->sl_lock);
}
1471 
1472 
1473 
/*
 * Main entry point for new SCSI tasks on an sbd LU.
 *
 * Establishes the I_T nexus on first use, then performs, in order:
 * task-management dispatch, access-state gating, unit-attention reporting
 * (POR first, per SAM-3), reservation-conflict checks, proxying to the
 * active node when this LU is in standby, and finally dispatch of the
 * individual SCSI commands.  The ordering of the UA and reservation checks
 * is protocol-mandated — do not reorder.
 */
void
sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	uint8_t cdb0, cdb1;
	stmf_status_t st_ret;

	/* First command on this nexus: create and register the IT handle */
	if ((it = task->task_lu_itl_handle) == NULL) {
		mutex_enter(&sl->sl_lock);
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			/* A racing task already registered this session */
			if (it->sbd_it_session_id ==
			    task->task_session->ss_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
				return;
			}
		}
		/* KM_NOSLEEP: allocation happens with sl_lock held */
		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
		if (it == NULL) {
			mutex_exit(&sl->sl_lock);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		it->sbd_it_session_id = task->task_session->ss_session_id;
		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
		it->sbd_it_next = sl->sl_it_list;
		sl->sl_it_list = it;
		mutex_exit(&sl->sl_lock);

		DTRACE_PROBE1(itl__nexus__start, scsi_task *, task);

		sbd_pgr_initialize_it(task);
		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
		    task->task_session, it->sbd_it_session_id, it)
		    != STMF_SUCCESS) {
			sbd_remove_it_handle(sl, it);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		task->task_lu_itl_handle = it;
		/* New nexus gets a power-on/reset unit attention */
		if (sl->sl_access_state != SBD_LU_STANDBY) {
			it->sbd_it_ua_conditions = SBD_UA_POR;
		}
	} else if (it->sbd_it_flags & SBD_IT_PGR_CHECK_FLAG) {
		/* PGR state changed since last command; refresh it */
		sbd_pgr_initialize_it(task);
		mutex_enter(&sl->sl_lock);
		it->sbd_it_flags &= ~SBD_IT_PGR_CHECK_FLAG;
		mutex_exit(&sl->sl_lock);
	}

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	/*
	 * if we're transitioning between access
	 * states, return NOT READY
	 */
	if (sl->sl_access_state == SBD_LU_TRANSITION_TO_STANDBY ||
	    sl->sl_access_state == SBD_LU_TRANSITION_TO_ACTIVE) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
		return;
	}

	/* Checking ua conditions as per SAM3R14 5.3.2 specified order */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
			saa = STMF_SAA_POR;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	/* Reservation conflict checks */
	if (sl->sl_access_state != SBD_LU_STANDBY) {
		/* Persistent reservations take precedence over SCSI-2 ones */
		if (SBD_PGR_RSVD(sl->sl_pgr)) {
			if (sbd_pgr_reservation_conflict(task)) {
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		} else if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) &&
		    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
			if (!(SCSI2_CONFLICT_FREE_CMDS(task->task_cdb))) {
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
	}

	/* Rest of the ua conndition checks */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
			/* READ CAPACITY itself need not see this UA */
			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
			    (task->task_cdb[1] ==
			    SSVC_ACTION_READ_CAPACITY_G4))) {
				saa = 0;
			} else {
				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
			}
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_MODE_PARAMETERS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_MODE_PARAMETERS_CHANGED;
			saa = STMF_SAA_MODE_PARAMETERS_CHANGED;
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_ASYMMETRIC_ACCESS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_ASYMMETRIC_ACCESS_CHANGED;
			saa = STMF_SAA_ASYMMETRIC_ACCESS_CHANGED;
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_ACCESS_STATE_TRANSITION) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_ACCESS_STATE_TRANSITION;
			saa = STMF_SAA_LU_NO_ACCESS_TRANSITION;
		} else {
			/* Unknown leftover bits: discard without reporting */
			it->sbd_it_ua_conditions = 0;
			saa = 0;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	/* Standby node: only a small command set is handled locally */
	if (sl->sl_access_state == SBD_LU_STANDBY) {
		if (cdb0 != SCMD_INQUIRY &&
		    cdb0 != SCMD_MODE_SENSE &&
		    cdb0 != SCMD_MODE_SENSE_G1 &&
		    cdb0 != SCMD_MODE_SELECT &&
		    cdb0 != SCMD_MODE_SELECT_G1 &&
		    cdb0 != SCMD_RESERVE &&
		    cdb0 != SCMD_RELEASE &&
		    cdb0 != SCMD_PERSISTENT_RESERVE_OUT &&
		    cdb0 != SCMD_PERSISTENT_RESERVE_IN &&
		    cdb0 != SCMD_REQUEST_SENSE &&
		    !(cdb0 == SCMD_MAINTENANCE_IN &&
		    (cdb1 & 0x1F) == 0x0A)) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_LU_NO_ACCESS_STANDBY);
			return;
		}

		/*
		 * is this a short write?
		 * if so, we'll need to wait until we have the buffer
		 * before proxying the command
		 */
		switch (cdb0) {
			case SCMD_MODE_SELECT:
			case SCMD_MODE_SELECT_G1:
			case SCMD_PERSISTENT_RESERVE_OUT:
				break;
			default:
				st_ret = stmf_proxy_scsi_cmd(task,
				    initial_dbuf);
				if (st_ret != STMF_SUCCESS) {
					stmf_scsilib_send_status(task,
					    STATUS_CHECK,
					    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
				}
				return;
		}
	}

	/* Group bits masked off: SCMD_READ matches READ(6/10/12/16) etc. */
	cdb0 = task->task_cdb[0] & 0x1F;

	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
		if (cdb0 == SCMD_READ) {
			sbd_handle_read(task, initial_dbuf);
			return;
		}
		sbd_handle_write(task, initial_dbuf);
		return;
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
		sbd_handle_inquiry(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_OUT) {
		sbd_handle_pgr_out_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_IN) {
		sbd_handle_pgr_in_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_RELEASE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner don't release it, just return good */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
				return;
			}
		}
		sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_RESERVE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner, return conflict status */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
		sl->sl_flags |= SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
		sl->sl_rs_owner_session_id = it->sbd_it_session_id;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_REQUEST_SENSE) {
		/*
		 * LU provider needs to store unretrieved sense data
		 * (e.g. after power-on/reset).  For now, we'll just
		 * return good status with no sense.
		 */

		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
		    task->task_cdb[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}

		return;
	}

	/* Report Target Port Groups */
	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
	    ((cdb1 & 0x1F) == 0x0A)) {
		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
		task->task_cmd_xfer_length = 0;
		if (task->task_cdb[4] & 0xFC) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/* LOEJ (eject) is not supported on a direct-access LU */
		if (task->task_cdb[4] & 2) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}
		return;

	}

	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
		uint8_t *p;
		p = kmem_zalloc(512, KM_SLEEP);
		sbd_handle_mode_sense(task, initial_dbuf, p);
		kmem_free(p, 512);
		return;
	}

	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
		sbd_handle_mode_select(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
		sbd_handle_read_capacity(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
			sbd_handle_read_capacity(task, initial_dbuf);
			return;
		/*
		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
		 * 	sbd_handle_read(task, initial_dbuf);
		 * 	return;
		 */
		}
	}

	/*
	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
	 *		 sbd_handle_write(task, initial_dbuf);
	 * 		return;
	 *	}
	 * }
	 */

	if (cdb0 == SCMD_VERIFY) {
		/*
		 * Something more likely needs to be done here.
		 */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
		sbd_handle_sync_cache(task, initial_dbuf);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
1846 
1847 void
1848 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1849 {
1850 	sbd_cmd_t *scmd = NULL;
1851 
1852 	scmd = (sbd_cmd_t *)task->task_lu_private;
1853 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1854 		return;
1855 
1856 	switch (scmd->cmd_type) {
1857 	case (SBD_CMD_SCSI_READ):
1858 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1859 		break;
1860 
1861 	case (SBD_CMD_SCSI_WRITE):
1862 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1863 		break;
1864 
1865 	case (SBD_CMD_SMALL_READ):
1866 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1867 		break;
1868 
1869 	case (SBD_CMD_SMALL_WRITE):
1870 		sbd_handle_short_write_xfer_completion(task, dbuf);
1871 		break;
1872 
1873 	default:
1874 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1875 		break;
1876 	}
1877 }
1878 
/* ARGSUSED */
/*
 * Status-sent callback.  sbd never requests this notification, so reaching
 * this function indicates a framework/provider mismatch and is fatal.
 */
void
sbd_send_status_done(struct scsi_task *task)
{
	cmn_err(CE_PANIC,
	    "sbd_send_status_done: this should not have been called");
}
1886 
1887 void
1888 sbd_task_free(struct scsi_task *task)
1889 {
1890 	if (task->task_lu_private) {
1891 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1892 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1893 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1894 			    (void *)task);
1895 		}
1896 		kmem_free(scmd, sizeof (sbd_cmd_t));
1897 	}
1898 }
1899 
1900 /*
1901  * Aborts are synchronus w.r.t. I/O AND
1902  * All the I/O which SBD does is synchronous AND
1903  * Everything within a task is single threaded.
1904  *   IT MEANS
1905  * If this function is called, we are doing nothing with this task
1906  * inside of sbd module.
1907  */
1908 /* ARGSUSED */
1909 stmf_status_t
1910 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1911 {
1912 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1913 	scsi_task_t *task;
1914 
1915 	if (abort_cmd == STMF_LU_RESET_STATE) {
1916 		return (sbd_lu_reset_state(lu));
1917 	}
1918 
1919 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1920 		sbd_check_and_clear_scsi2_reservation(sl, (sbd_it_data_t *)arg);
1921 		sbd_remove_it_handle(sl, (sbd_it_data_t *)arg);
1922 		return (STMF_SUCCESS);
1923 	}
1924 
1925 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1926 	task = (scsi_task_t *)arg;
1927 	if (task->task_lu_private) {
1928 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1929 
1930 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1931 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1932 			return (STMF_ABORT_SUCCESS);
1933 		}
1934 	}
1935 
1936 	return (STMF_NOT_FOUND);
1937 }
1938 
1939 /* ARGSUSED */
1940 void
1941 sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
1942 {
1943 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1944 	stmf_change_status_t st;
1945 
1946 	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
1947 	    (cmd == STMF_CMD_LU_OFFLINE) ||
1948 	    (cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
1949 	    (cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
1950 
1951 	st.st_completion_status = STMF_SUCCESS;
1952 	st.st_additional_info = NULL;
1953 
1954 	switch (cmd) {
1955 	case STMF_CMD_LU_ONLINE:
1956 		if (sl->sl_state == STMF_STATE_ONLINE)
1957 			st.st_completion_status = STMF_ALREADY;
1958 		else if (sl->sl_state != STMF_STATE_OFFLINE)
1959 			st.st_completion_status = STMF_FAILURE;
1960 		if (st.st_completion_status == STMF_SUCCESS) {
1961 			sl->sl_state = STMF_STATE_ONLINE;
1962 			sl->sl_state_not_acked = 1;
1963 		}
1964 		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
1965 		break;
1966 
1967 	case STMF_CMD_LU_OFFLINE:
1968 		if (sl->sl_state == STMF_STATE_OFFLINE)
1969 			st.st_completion_status = STMF_ALREADY;
1970 		else if (sl->sl_state != STMF_STATE_ONLINE)
1971 			st.st_completion_status = STMF_FAILURE;
1972 		if (st.st_completion_status == STMF_SUCCESS) {
1973 			sl->sl_flags &= ~(SL_MEDIUM_REMOVAL_PREVENTED |
1974 			    SL_LU_HAS_SCSI2_RESERVATION);
1975 			sl->sl_state = STMF_STATE_OFFLINE;
1976 			sl->sl_state_not_acked = 1;
1977 			sbd_pgr_reset(sl);
1978 		}
1979 		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
1980 		break;
1981 
1982 	case STMF_ACK_LU_ONLINE_COMPLETE:
1983 		/* Fallthrough */
1984 	case STMF_ACK_LU_OFFLINE_COMPLETE:
1985 		sl->sl_state_not_acked = 0;
1986 		break;
1987 
1988 	}
1989 }
1990 
/* ARGSUSED */
/*
 * STMF info entry point.  sbd exposes no provider-specific info, so every
 * query is rejected.
 */
stmf_status_t
sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
    uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
1998 
1999 stmf_status_t
2000 sbd_lu_reset_state(stmf_lu_t *lu)
2001 {
2002 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
2003 
2004 	mutex_enter(&sl->sl_lock);
2005 	if (sl->sl_flags & SL_SAVED_WRITE_CACHE_DISABLE) {
2006 		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
2007 		mutex_exit(&sl->sl_lock);
2008 		if (sl->sl_access_state == SBD_LU_ACTIVE) {
2009 			(void) sbd_wcd_set(1, sl);
2010 		}
2011 	} else {
2012 		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
2013 		mutex_exit(&sl->sl_lock);
2014 		if (sl->sl_access_state == SBD_LU_ACTIVE) {
2015 			(void) sbd_wcd_set(0, sl);
2016 		}
2017 	}
2018 	sbd_pgr_reset(sl);
2019 	sbd_check_and_clear_scsi2_reservation(sl, NULL);
2020 	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
2021 		return (STMF_FAILURE);
2022 	}
2023 	return (STMF_SUCCESS);
2024 }
2025 
2026 sbd_status_t
2027 sbd_flush_data_cache(sbd_lu_t *sl, int fsync_done)
2028 {
2029 	int r = 0;
2030 	int ret;
2031 
2032 	if (fsync_done)
2033 		goto over_fsync;
2034 	if ((sl->sl_data_vtype == VREG) || (sl->sl_data_vtype == VBLK)) {
2035 		if (VOP_FSYNC(sl->sl_data_vp, FSYNC, kcred, NULL))
2036 			return (SBD_FAILURE);
2037 	}
2038 over_fsync:
2039 	if (((sl->sl_data_vtype == VCHR) || (sl->sl_data_vtype == VBLK)) &&
2040 	    ((sl->sl_flags & SL_NO_DATA_DKIOFLUSH) == 0)) {
2041 		ret = VOP_IOCTL(sl->sl_data_vp, DKIOCFLUSHWRITECACHE, NULL,
2042 		    FKIOCTL, kcred, &r, NULL);
2043 		if ((ret == ENOTTY) || (ret == ENOTSUP)) {
2044 			mutex_enter(&sl->sl_lock);
2045 			sl->sl_flags |= SL_NO_DATA_DKIOFLUSH;
2046 			mutex_exit(&sl->sl_lock);
2047 		} else if (ret != 0) {
2048 			return (SBD_FAILURE);
2049 		}
2050 	}
2051 
2052 	return (SBD_SUCCESS);
2053 }
2054 
2055 /* ARGSUSED */
2056 static void
2057 sbd_handle_sync_cache(struct scsi_task *task,
2058     struct stmf_data_buf *initial_dbuf)
2059 {
2060 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
2061 	uint64_t	lba, laddr;
2062 	sbd_status_t	sret;
2063 	uint32_t	len;
2064 	int		is_g4 = 0;
2065 	int		immed;
2066 
2067 	task->task_cmd_xfer_length = 0;
2068 	/*
2069 	 * Determine if this is a 10 or 16 byte CDB
2070 	 */
2071 
2072 	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
2073 		is_g4 = 1;
2074 
2075 	/*
2076 	 * Determine other requested parameters
2077 	 *
2078 	 * We don't have a non-volatile cache, so don't care about SYNC_NV.
2079 	 * Do not support the IMMED bit.
2080 	 */
2081 
2082 	immed = (task->task_cdb[1] & 0x02);
2083 
2084 	if (immed) {
2085 		stmf_scsilib_send_status(task, STATUS_CHECK,
2086 		    STMF_SAA_INVALID_FIELD_IN_CDB);
2087 		return;
2088 	}
2089 
2090 	/*
2091 	 * Check to be sure we're not being asked to sync an LBA
2092 	 * that is out of range.  While checking, verify reserved fields.
2093 	 */
2094 
2095 	if (is_g4) {
2096 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
2097 		    task->task_cdb[15]) {
2098 			stmf_scsilib_send_status(task, STATUS_CHECK,
2099 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2100 			return;
2101 		}
2102 
2103 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
2104 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
2105 	} else {
2106 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
2107 		    task->task_cdb[9]) {
2108 			stmf_scsilib_send_status(task, STATUS_CHECK,
2109 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2110 			return;
2111 		}
2112 
2113 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
2114 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
2115 	}
2116 
2117 	laddr = lba << sl->sl_data_blocksize_shift;
2118 	len <<= sl->sl_data_blocksize_shift;
2119 
2120 	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
2121 		stmf_scsilib_send_status(task, STATUS_CHECK,
2122 		    STMF_SAA_LBA_OUT_OF_RANGE);
2123 		return;
2124 	}
2125 
2126 	sret = sbd_flush_data_cache(sl, 0);
2127 	if (sret != SBD_SUCCESS) {
2128 		stmf_scsilib_send_status(task, STATUS_CHECK,
2129 		    STMF_SAA_WRITE_ERROR);
2130 		return;
2131 	}
2132 
2133 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
2134 }
2135