1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 #include <sys/sdt.h>
38 #include <sys/dkio.h>
39 
40 #include <stmf.h>
41 #include <lpif.h>
42 #include <portif.h>
43 #include <stmf_ioctl.h>
44 #include <stmf_sbd.h>
45 #include <stmf_sbd_ioctl.h>
46 #include <sbd_impl.h>
47 
/*
 * SCSI2_CONFLICT_FREE_CMDS(cdb) evaluates to non-zero when the command
 * encoded in the CDB is one that may be executed by an initiator even
 * while another initiator holds a SCSI-2 reservation on the logical
 * unit.  The allowed set is taken from the conflict tables cited in the
 * comments below (SPC-2/SPC-3 plus the SBC-3 additions).  The argument
 * must be an array-like expression indexable as cdb[0], cdb[1], cdb[4].
 */
#define	SCSI2_CONFLICT_FREE_CMDS(cdb)	( \
	/* ----------------------- */                                      \
	/* Refer Both		   */                                      \
	/* SPC-2 (rev 20) Table 10 */                                      \
	/* SPC-3 (rev 23) Table 31 */                                      \
	/* ----------------------- */                                      \
	((cdb[0]) == SCMD_INQUIRY)					|| \
	((cdb[0]) == SCMD_LOG_SENSE_G1)					|| \
	((cdb[0]) == SCMD_RELEASE)					|| \
	((cdb[0]) == SCMD_RELEASE_G1)					|| \
	((cdb[0]) == SCMD_REPORT_LUNS)					|| \
	((cdb[0]) == SCMD_REQUEST_SENSE)				|| \
	/* PREVENT ALLOW MEDIUM REMOVAL with prevent == 0 */               \
	((((cdb[0]) == SCMD_DOORLOCK) && (((cdb[4]) & 0x3) == 0)))	|| \
	/* SERVICE ACTION IN with READ MEDIA SERIAL NUMBER (0x01) */       \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G5) && (                          \
	    ((cdb[1]) & 0x1F) == 0x01))					|| \
	/* MAINTENANCE IN with service actions REPORT ALIASES (0x0Bh) */   \
	/* REPORT DEVICE IDENTIFIER (0x05)  REPORT PRIORITY (0x0Eh) */     \
	/* REPORT TARGET PORT GROUPS (0x0A) REPORT TIMESTAMP (0x0F) */     \
	(((cdb[0]) == SCMD_MAINTENANCE_IN) && (                            \
	    (((cdb[1]) & 0x1F) == 0x0B) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x05) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0E) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0A) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0F)))				|| \
	/* ----------------------- */                                      \
	/* SBC-3 (rev 17) Table 3  */                                      \
	/* ----------------------- */                                      \
	/* READ CAPACITY(10) */                                            \
	((cdb[0]) == SCMD_READ_CAPACITY)				|| \
	/* READ CAPACITY(16) */                                            \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G4) && (                          \
	    ((cdb[1]) & 0x1F) == 0x10))					|| \
	/* START STOP UNIT with START bit 0 and POWER CONDITION 0  */      \
	(((cdb[0]) == SCMD_START_STOP) && (                                \
	    (((cdb[4]) & 0xF0) == 0) && (((cdb[4]) & 0x01) == 0))))
/* End of SCSI2_CONFLICT_FREE_CMDS */
86 
87 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
88 static void sbd_handle_sync_cache(struct scsi_task *task,
89     struct stmf_data_buf *initial_dbuf);
90 void sbd_handle_read_xfer_completion(struct scsi_task *task,
91     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
92 void sbd_handle_short_write_xfer_completion(scsi_task_t *task,
93     stmf_data_buf_t *dbuf);
94 void sbd_handle_short_write_transfers(scsi_task_t *task,
95     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size);
96 static void sbd_handle_sync_cache(struct scsi_task *task,
97     struct stmf_data_buf *initial_dbuf);
98 void sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf,
99     uint32_t buflen);
100 void sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf);
101 
102 extern void sbd_pgr_initialize_it(scsi_task_t *);
103 extern int sbd_pgr_reservation_conflict(scsi_task_t *);
104 extern void sbd_pgr_remove_it_handle(sbd_lu_t *, sbd_it_data_t *);
105 extern void sbd_handle_pgr_in_cmd(scsi_task_t *, stmf_data_buf_t *);
106 extern void sbd_handle_pgr_out_cmd(scsi_task_t *, stmf_data_buf_t *);
107 extern void sbd_handle_pgr_out_data(scsi_task_t *, stmf_data_buf_t *);
108 /*
109  * IMPORTANT NOTE:
110  * =================
111  * The whole world here is based on the assumption that everything within
112  * a scsi task executes in a single threaded manner, even the aborts.
113  * Dont ever change that. There wont be any performance gain but there
114  * will be tons of race conditions.
115  */
116 
117 void
118 sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
119 					struct stmf_data_buf *dbuf)
120 {
121 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
122 	uint64_t laddr;
123 	uint32_t len, buflen, iolen;
124 	int ndx;
125 	int bufs_to_take;
126 
127 	/* Lets try not to hog all the buffers the port has. */
128 	bufs_to_take = ((task->task_max_nbufs > 2) &&
129 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
130 	    task->task_max_nbufs;
131 
132 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
133 	laddr = scmd->addr + scmd->current_ro;
134 
135 	for (buflen = 0, ndx = 0; (buflen < len) &&
136 	    (ndx < dbuf->db_sglist_length); ndx++) {
137 		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
138 		if (iolen == 0)
139 			break;
140 		if (sbd_data_read(sl, laddr, (uint64_t)iolen,
141 		    dbuf->db_sglist[0].seg_addr) != STMF_SUCCESS) {
142 			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
143 			/* Do not need to do xfer anymore, just complete it */
144 			dbuf->db_data_size = 0;
145 			dbuf->db_xfer_status = STMF_SUCCESS;
146 			sbd_handle_read_xfer_completion(task, scmd, dbuf);
147 			return;
148 		}
149 		buflen += iolen;
150 		laddr += (uint64_t)iolen;
151 	}
152 	dbuf->db_relative_offset = scmd->current_ro;
153 	dbuf->db_data_size = buflen;
154 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
155 	(void) stmf_xfer_data(task, dbuf, 0);
156 	scmd->len -= buflen;
157 	scmd->current_ro += buflen;
158 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
159 		uint32_t maxsize, minsize, old_minsize;
160 
161 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
162 		minsize = maxsize >> 2;
163 		do {
164 			/*
165 			 * A bad port implementation can keep on failing the
166 			 * the request but keep on sending us a false
167 			 * minsize.
168 			 */
169 			old_minsize = minsize;
170 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
171 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
172 		    (minsize >= 512));
173 		if (dbuf == NULL) {
174 			return;
175 		}
176 		scmd->nbufs++;
177 		sbd_do_read_xfer(task, scmd, dbuf);
178 	}
179 }
180 
/*
 * Completion callback for one READ data buffer.  Aborts the task on a
 * failed transfer; otherwise accounts the transferred bytes and either
 * finishes the command (when all data has been queued, after the last
 * outstanding buffer completes) or continues the transfer, replacing
 * the buffer first if the port marked it DB_DONT_REUSE.
 */
void
sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
				struct stmf_data_buf *dbuf)
{
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	/* Done queueing data, or an earlier backing-store read failed. */
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE) {
		/* allocate new dbuf */
		uint32_t maxsize, minsize, old_minsize;
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/* Retry while the port keeps lowering minsize. */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only when no other buffer is in flight. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_read_xfer(task, scmd, dbuf);
}
227 
/*
 * Entry point for READ(6/10/12/16).  Decodes the LBA and transfer
 * length from the CDB, range-checks the request against the LU size,
 * and either services the whole request with a single transfer (fast
 * path: a single-segment buffer large enough for the entire request)
 * or sets up an sbd_cmd_t and streams the data via sbd_do_read_xfer().
 */
void
sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0];
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;
	int fast_path;

	/* Decode LBA/length per CDB format; reject unknown opcodes. */
	if (op == SCMD_READ) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* READ(6): a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_READ_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_READ_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_READ_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert block address and count to byte offset and length. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/*
	 * The fast path is only taken when the initiator expects exactly
	 * the amount of data the CDB asks for; otherwise clamp to the
	 * expected transfer length.
	 */
	if (len != task->task_expected_xfer_length) {
		fast_path = 0;
		len = (len > task->task_expected_xfer_length) ?
		    task->task_expected_xfer_length : len;
	} else {
		fast_path = 1;
	}

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
	}
	dbuf = initial_dbuf;

	/* Fast path: whole request fits in one single-segment buffer. */
	if ((dbuf->db_buf_size >= len) && fast_path &&
	    (dbuf->db_sglist_length == 1)) {
		if (sbd_data_read(sl, laddr, (uint64_t)len,
		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
			dbuf->db_relative_offset = 0;
			dbuf->db_data_size = len;
			/* Port sends GOOD status after the data. */
			dbuf->db_flags = DB_SEND_STATUS_GOOD |
			    DB_DIRECTION_TO_RPORT;
			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
		} else {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		}
		return;
	}

	/* Slow path: track command state and stream the data. */
	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_READ;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	sbd_do_read_xfer(task, scmd, dbuf);
}
337 
338 void
339 sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
340 					struct stmf_data_buf *dbuf)
341 {
342 	uint32_t len;
343 	int bufs_to_take;
344 
345 	/* Lets try not to hog all the buffers the port has. */
346 	bufs_to_take = ((task->task_max_nbufs > 2) &&
347 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
348 	    task->task_max_nbufs;
349 
350 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
351 
352 	dbuf->db_relative_offset = scmd->current_ro;
353 	dbuf->db_data_size = len;
354 	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
355 	(void) stmf_xfer_data(task, dbuf, 0);
356 	scmd->len -= len;
357 	scmd->current_ro += len;
358 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
359 		uint32_t maxsize, minsize, old_minsize;
360 
361 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
362 		minsize = maxsize >> 2;
363 		do {
364 			old_minsize = minsize;
365 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
366 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
367 		    (minsize >= 512));
368 		if (dbuf == NULL) {
369 			return;
370 		}
371 		scmd->nbufs++;
372 		sbd_do_write_xfer(task, scmd, dbuf);
373 	}
374 }
375 
376 void
377 sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
378     struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
379 {
380 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
381 	uint64_t laddr;
382 	uint32_t buflen, iolen;
383 	int ndx;
384 
385 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
386 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
387 		    dbuf->db_xfer_status, NULL);
388 		return;
389 	}
390 
391 	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
392 		goto WRITE_XFER_DONE;
393 	}
394 
395 	laddr = scmd->addr + dbuf->db_relative_offset;
396 
397 	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
398 	    (ndx < dbuf->db_sglist_length); ndx++) {
399 		iolen = min(dbuf->db_data_size - buflen,
400 		    dbuf->db_sglist[ndx].seg_length);
401 		if (iolen == 0)
402 			break;
403 		if (sbd_data_write(sl, laddr, (uint64_t)iolen,
404 		    dbuf->db_sglist[0].seg_addr) != STMF_SUCCESS) {
405 			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
406 			break;
407 		}
408 		buflen += iolen;
409 		laddr += (uint64_t)iolen;
410 	}
411 	task->task_nbytes_transferred += buflen;
412 WRITE_XFER_DONE:
413 	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
414 		stmf_free_dbuf(task, dbuf);
415 		scmd->nbufs--;
416 		if (scmd->nbufs)
417 			return;	/* wait for all buffers to complete */
418 		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
419 		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
420 			stmf_scsilib_send_status(task, STATUS_CHECK,
421 			    STMF_SAA_WRITE_ERROR);
422 		else
423 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
424 		return;
425 	}
426 	if (dbuf->db_flags & DB_DONT_REUSE || dbuf_reusable == 0) {
427 		uint32_t maxsize, minsize, old_minsize;
428 		/* free current dbuf and allocate a new one */
429 		stmf_free_dbuf(task, dbuf);
430 
431 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
432 		minsize = maxsize >> 2;
433 		do {
434 			old_minsize = minsize;
435 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
436 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
437 		    (minsize >= 512));
438 		if (dbuf == NULL) {
439 			scmd->nbufs --;
440 			if (scmd->nbufs == 0) {
441 				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
442 				    STMF_ALLOC_FAILURE, NULL);
443 			}
444 			return;
445 		}
446 	}
447 	sbd_do_write_xfer(task, scmd, dbuf);
448 }
449 
/*
 * Entry point for WRITE(6/10/12/16).  Rejects writes to write-protected
 * LUs, decodes the LBA and transfer length from the CDB, range-checks
 * the request, then sets up an sbd_cmd_t and pulls the data from the
 * remote port via sbd_do_write_xfer().  Data that arrived with the
 * command itself (initial burst / immediate data) is consumed directly
 * through sbd_handle_write_xfer_completion().
 */
void
sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;

	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_PROTECTED);
		return;
	}
	/* Decode LBA/length per CDB format; reject unknown opcodes. */
	if (op == SCMD_WRITE) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* WRITE(6): a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_WRITE_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_WRITE_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_WRITE_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert block address and count to byte offset and length. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Never transfer more than the initiator expects. */
	len = (len > task->task_expected_xfer_length) ?
	    task->task_expected_xfer_length : len;

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
	} else if (task->task_flags & TF_INITIAL_BURST) {
		/* Data already arrived with the command (immediate data). */
		if (initial_dbuf->db_data_size > len) {
			if (initial_dbuf->db_data_size >
			    task->task_expected_xfer_length) {
				/* protocol error */
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_INVALID_ARG, NULL);
				return;
			}
			initial_dbuf->db_data_size = len;
		}
		do_immediate_data = 1;
	}
	dbuf = initial_dbuf;

	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	if (do_immediate_data) {
		/* Consume the immediate data as an already-done transfer. */
		scmd->len -= dbuf->db_data_size;
		scmd->current_ro += dbuf->db_data_size;
		dbuf->db_xfer_status = STMF_SUCCESS;
		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
	} else {
		sbd_do_write_xfer(task, scmd, dbuf);
	}
}
562 
563 /*
564  * Utility routine to handle small non performance data transfers to the
565  * initiators. dbuf is an initial data buf (if any), 'p' points to a data
566  * buffer which is source of data for transfer, cdb_xfer_size is the
567  * transfer size based on CDB, cmd_xfer_size is the actual amount of data
568  * which this command would transfer (the size of data pointed to by 'p').
569  */
570 void
571 sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
572     uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
573 {
574 	uint32_t bufsize, ndx;
575 	sbd_cmd_t *scmd;
576 
577 	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);
578 
579 	task->task_cmd_xfer_length = cmd_xfer_size;
580 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
581 		task->task_expected_xfer_length = cmd_xfer_size;
582 	} else {
583 		cmd_xfer_size = min(cmd_xfer_size,
584 		    task->task_expected_xfer_length);
585 	}
586 
587 	if (cmd_xfer_size == 0) {
588 		stmf_scsilib_send_status(task, STATUS_CHECK,
589 		    STMF_SAA_INVALID_FIELD_IN_CDB);
590 		return;
591 	}
592 	if (dbuf == NULL) {
593 		uint32_t minsize = cmd_xfer_size;
594 
595 		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
596 	}
597 	if (dbuf == NULL) {
598 		stmf_scsilib_send_status(task, STATUS_QFULL, 0);
599 		return;
600 	}
601 
602 	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
603 		uint8_t *d;
604 		uint32_t s;
605 
606 		d = dbuf->db_sglist[ndx].seg_addr;
607 		s = min((cmd_xfer_size - bufsize),
608 		    dbuf->db_sglist[ndx].seg_length);
609 		bcopy(p+bufsize, d, s);
610 		bufsize += s;
611 	}
612 	dbuf->db_relative_offset = 0;
613 	dbuf->db_data_size = cmd_xfer_size;
614 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
615 
616 	if (task->task_lu_private == NULL) {
617 		task->task_lu_private =
618 		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
619 	}
620 	scmd = (sbd_cmd_t *)task->task_lu_private;
621 
622 	scmd->cmd_type = SBD_CMD_SMALL_READ;
623 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
624 	(void) stmf_xfer_data(task, dbuf, 0);
625 }
626 
627 void
628 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
629 				struct stmf_data_buf *dbuf)
630 {
631 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
632 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
633 		    dbuf->db_xfer_status, NULL);
634 		return;
635 	}
636 	task->task_nbytes_transferred = dbuf->db_data_size;
637 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
638 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
639 }
640 
641 void
642 sbd_handle_short_write_transfers(scsi_task_t *task,
643     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size)
644 {
645 	sbd_cmd_t *scmd;
646 
647 	task->task_cmd_xfer_length = cdb_xfer_size;
648 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
649 		task->task_expected_xfer_length = cdb_xfer_size;
650 	} else {
651 		cdb_xfer_size = min(cdb_xfer_size,
652 		    task->task_expected_xfer_length);
653 	}
654 
655 	if (cdb_xfer_size == 0) {
656 		stmf_scsilib_send_status(task, STATUS_CHECK,
657 		    STMF_SAA_INVALID_FIELD_IN_CDB);
658 		return;
659 	}
660 	if (task->task_lu_private == NULL) {
661 		task->task_lu_private = kmem_zalloc(sizeof (sbd_cmd_t),
662 		    KM_SLEEP);
663 	} else {
664 		bzero(task->task_lu_private, sizeof (sbd_cmd_t));
665 	}
666 	scmd = (sbd_cmd_t *)task->task_lu_private;
667 	scmd->cmd_type = SBD_CMD_SMALL_WRITE;
668 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
669 	scmd->len = cdb_xfer_size;
670 	if (dbuf == NULL) {
671 		uint32_t minsize = cdb_xfer_size;
672 
673 		dbuf = stmf_alloc_dbuf(task, cdb_xfer_size, &minsize, 0);
674 		if (dbuf == NULL) {
675 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
676 			    STMF_ALLOC_FAILURE, NULL);
677 			return;
678 		}
679 		dbuf->db_data_size = cdb_xfer_size;
680 		dbuf->db_relative_offset = 0;
681 		dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
682 		stmf_xfer_data(task, dbuf, 0);
683 	} else {
684 		if (dbuf->db_data_size < cdb_xfer_size) {
685 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
686 			    STMF_ABORTED, NULL);
687 			return;
688 		}
689 		dbuf->db_data_size = cdb_xfer_size;
690 		sbd_handle_short_write_xfer_completion(task, dbuf);
691 	}
692 }
693 
694 void
695 sbd_handle_short_write_xfer_completion(scsi_task_t *task,
696     stmf_data_buf_t *dbuf)
697 {
698 	sbd_cmd_t *scmd;
699 
700 	/*
701 	 * For now lets assume we will get only one sglist element
702 	 * for short writes. If that ever changes, we should allocate
703 	 * a local buffer and copy all the sg elements to one linear space.
704 	 */
705 	if ((dbuf->db_xfer_status != STMF_SUCCESS) ||
706 	    (dbuf->db_sglist_length > 1)) {
707 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
708 		    dbuf->db_xfer_status, NULL);
709 		return;
710 	}
711 
712 	task->task_nbytes_transferred = dbuf->db_data_size;
713 	scmd = (sbd_cmd_t *)task->task_lu_private;
714 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
715 
716 	/* Lets find out who to call */
717 	switch (task->task_cdb[0]) {
718 	case SCMD_MODE_SELECT:
719 	case SCMD_MODE_SELECT_G1:
720 		sbd_handle_mode_select_xfer(task,
721 		    dbuf->db_sglist[0].seg_addr, dbuf->db_data_size);
722 		break;
723 	case SCMD_PERSISTENT_RESERVE_OUT:
724 		sbd_handle_pgr_out_data(task, dbuf);
725 		break;
726 	default:
727 		/* This should never happen */
728 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
729 		    STMF_ABORTED, NULL);
730 	}
731 }
732 
733 void
734 sbd_handle_read_capacity(struct scsi_task *task,
735     struct stmf_data_buf *initial_dbuf)
736 {
737 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
738 	uint32_t cdb_len;
739 	uint8_t p[32];
740 	uint64_t s;
741 	uint16_t blksize;
742 
743 	s = sl->sl_lu_size >> sl->sl_data_blocksize_shift;
744 	s--;
745 	blksize = ((uint16_t)1) << sl->sl_data_blocksize_shift;
746 
747 	switch (task->task_cdb[0]) {
748 	case SCMD_READ_CAPACITY:
749 		if (s & 0xffffffff00000000ull) {
750 			p[0] = p[1] = p[2] = p[3] = 0xFF;
751 		} else {
752 			p[0] = (s >> 24) & 0xff;
753 			p[1] = (s >> 16) & 0xff;
754 			p[2] = (s >> 8) & 0xff;
755 			p[3] = s & 0xff;
756 		}
757 		p[4] = 0; p[5] = 0;
758 		p[6] = (blksize >> 8) & 0xff;
759 		p[7] = blksize & 0xff;
760 		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
761 		break;
762 
763 	case SCMD_SVC_ACTION_IN_G4:
764 		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
765 		bzero(p, 32);
766 		p[0] = (s >> 56) & 0xff;
767 		p[1] = (s >> 48) & 0xff;
768 		p[2] = (s >> 40) & 0xff;
769 		p[3] = (s >> 32) & 0xff;
770 		p[4] = (s >> 24) & 0xff;
771 		p[5] = (s >> 16) & 0xff;
772 		p[6] = (s >> 8) & 0xff;
773 		p[7] = s & 0xff;
774 		p[10] = (blksize >> 8) & 0xff;
775 		p[11] = blksize & 0xff;
776 		sbd_handle_short_read_transfers(task, initial_dbuf, p,
777 		    cdb_len, 32);
778 		break;
779 	}
780 }
781 
/*
 * Derive a synthetic disk geometry for a LU of "s" bytes with the given
 * block size: small LUs (< 4 GiB) get 32 sectors/track and 8 heads,
 * larger ones 254/254; the cylinder count is whatever covers the size.
 */
void
sbd_calc_geometry(uint64_t s, uint16_t blksize, uint8_t *nsectors,
    uint8_t *nheads, uint32_t *ncyl)
{
	const uint64_t four_gb = 4ull * 1024ull * 1024ull * 1024ull;

	if (s < four_gb) {
		*nsectors = 32;
		*nheads = 8;
	} else {
		*nsectors = 254;
		*nheads = 254;
	}
	*ncyl = s / ((uint64_t)blksize * (uint64_t)(*nsectors) *
	    (uint64_t)(*nheads));
}
796 
/*
 * MODE SENSE(6/10) handler.  Builds the mode parameter header followed
 * by the requested page(s) into "buf" (assumed zeroed and large enough)
 * and returns it via a short read transfer.  Supported pages: 0x03
 * (format), 0x04 (rigid disk geometry), 0x08 (caching) and 0x0A
 * (control), plus MODEPAGE_ALLPAGES.  No block descriptors are
 * returned.  "ctrl" selects current (0), changeable (1) or default/
 * saved values per the PC field of the CDB.
 */
void
sbd_handle_mode_sense(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf, uint8_t *buf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint32_t cmd_size, n;
	uint8_t *cdb;
	uint32_t ncyl;
	uint8_t nsectors, nheads;
	uint8_t page, ctrl, header_size, pc_valid;
	uint16_t nbytes;
	uint8_t *p;
	uint64_t s = sl->sl_lu_size;
	uint32_t dev_spec_param_offset;

	p = buf;	/* buf is assumed to be zeroed out and large enough */
	n = 0;
	cdb = &task->task_cdb[0];
	page = cdb[2] & 0x3F;
	ctrl = (cdb[2] >> 6) & 3;
	/* Allocation length: byte 4 (6-byte CDB) or bytes 7-8 (10-byte). */
	cmd_size = (cdb[0] == SCMD_MODE_SENSE) ? cdb[4] :
	    READ_SCSI16(&cdb[7], uint32_t);

	if (cdb[0] == SCMD_MODE_SENSE) {
		header_size = 4;
		dev_spec_param_offset = 2;
	} else {
		header_size = 8;
		dev_spec_param_offset = 3;
	}

	/* Now validate the command */
	if ((cdb[2] == 0) || (page == MODEPAGE_ALLPAGES) || (page == 0x08) ||
	    (page == 0x0A) || (page == 0x03) || (page == 0x04)) {
		pc_valid = 1;
	} else {
		pc_valid = 0;
	}
	if ((cmd_size < header_size) || (pc_valid == 0)) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/* We will update the length in the mode header at the end */

	/* Block dev device specific param in mode param header has wp bit */
	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		p[n + dev_spec_param_offset] = BIT_7;
	}
	n += header_size;
	/* We are not going to return any block descriptor */

	nbytes = ((uint16_t)1) << sl->sl_data_blocksize_shift;
	sbd_calc_geometry(s, nbytes, &nsectors, &nheads, &ncyl);

	/* Page 0x03: format parameters (sectors/track, block size). */
	if ((page == 0x03) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x03;
		p[n+1] = 0x16;
		/* Changeable-values request (ctrl == 1) reports all zero. */
		if (ctrl != 1) {
			p[n + 11] = nsectors;
			p[n + 12] = nbytes >> 8;
			p[n + 13] = nbytes & 0xff;
			p[n + 20] = 0x80;
		}
		n += 24;
	}
	/* Page 0x04: rigid disk geometry (cylinders, heads). */
	if ((page == 0x04) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x04;
		p[n + 1] = 0x16;
		if (ctrl != 1) {
			p[n + 2] = ncyl >> 16;
			p[n + 3] = ncyl >> 8;
			p[n + 4] = ncyl & 0xff;
			p[n + 5] = nheads;
			p[n + 20] = 0x15;
			p[n + 21] = 0x18;
		}
		n += 24;
	}
	/* Page 0x08: caching — WCE reflects the LU's write-cache state. */
	if ((page == MODEPAGE_CACHING) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_caching *mode_caching_page;

		mode_caching_page = (struct mode_caching *)&p[n];

		mode_caching_page->mode_page.code = MODEPAGE_CACHING;
		mode_caching_page->mode_page.ps = 1; /* A saveable page */
		mode_caching_page->mode_page.length = 0x12;

		switch (ctrl) {
		case (0):
			/* Current */
			if ((sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		case (1):
			/* Changeable */
			if ((sl->sl_flags &
			    SL_WRITEBACK_CACHE_SET_UNSUPPORTED) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		default:
			/* Default/saved values. */
			if ((sl->sl_flags &
			    SL_SAVED_WRITE_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;
		}
		n += (sizeof (struct mode_page) +
		    mode_caching_page->mode_page.length);
	}
	/* Page 0x0A: control mode. */
	if ((page == MODEPAGE_CTRL_MODE) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_control_scsi3 *mode_control_page;

		mode_control_page = (struct mode_control_scsi3 *)&p[n];

		mode_control_page->mode_page.code = MODEPAGE_CTRL_MODE;
		mode_control_page->mode_page.length =
		    PAGELENGTH_MODE_CONTROL_SCSI3;
		if (ctrl != 1) {
			/* If not looking for changeable values, report this. */
			mode_control_page->que_mod = CTRL_QMOD_UNRESTRICT;
		}
		n += (sizeof (struct mode_page) +
		    mode_control_page->mode_page.length);
	}

	/* Patch the mode data length into the header built earlier. */
	if (cdb[0] == SCMD_MODE_SENSE) {
		if (n > 255) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/*
		 * Mode parameter header length doesn't include the number
		 * of bytes in the length field, so adjust the count.
		 * Byte count minus header length field size.
		 */
		buf[0] = (n - 1) & 0xff;
	} else {
		/* Byte count minus header length field size. */
		buf[1] = (n - 2) & 0xff;
		buf[0] = ((n - 2) >> 8) & 0xff;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, buf,
	    cmd_size, n);
}
949 
950 void
951 sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf)
952 {
953 	uint32_t cmd_xfer_len;
954 
955 	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
956 		cmd_xfer_len = (uint32_t)task->task_cdb[4];
957 	} else {
958 		cmd_xfer_len = READ_SCSI16(&task->task_cdb[7], uint32_t);
959 	}
960 
961 	if ((task->task_cdb[1] & 0xFE) != 0x10) {
962 		stmf_scsilib_send_status(task, STATUS_CHECK,
963 		    STMF_SAA_INVALID_FIELD_IN_CDB);
964 		return;
965 	}
966 
967 	if (cmd_xfer_len == 0) {
968 		/* zero byte mode selects are allowed */
969 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
970 		return;
971 	}
972 
973 	sbd_handle_short_write_transfers(task, dbuf, cmd_xfer_len);
974 }
975 
/*
 * Data-out completion for MODE SELECT(6)/(10): parse the mode parameter
 * list in buf/buflen and apply the requested write-cache change.  Only
 * the caching mode page (page code 8) with the WCE bit is accepted.
 */
void
sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf, uint32_t buflen)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	int hdr_len, bd_len;
	sbd_status_t sret;
	int i;

	/* Mode parameter header: 4 bytes for the 6-byte CDB, 8 for G1. */
	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
		hdr_len = 4;
	} else {
		hdr_len = 8;
	}

	if (buflen < hdr_len)
		goto mode_sel_param_len_err;

	/* Block descriptor length field from the mode parameter header. */
	bd_len = hdr_len == 4 ? buf[3] : READ_SCSI16(&buf[6], int);

	/* Need header, block descriptors, and at least a 2-byte page header. */
	if (buflen < (hdr_len + bd_len + 2))
		goto mode_sel_param_len_err;

	/* Skip to the first (and only supported) mode page. */
	buf += hdr_len + bd_len;
	buflen -= hdr_len + bd_len;

	/*
	 * Only page code 8 is accepted, and the page length must account
	 * for exactly the bytes remaining in the parameter list.
	 */
	if ((buf[0] != 8) || (buflen != ((uint32_t)buf[1] + 2))) {
		goto mode_sel_param_len_err;
	}

	/* In byte 2, only bit 2 (the write-cache-enable bit) may be set. */
	if (buf[2] & 0xFB) {
		goto mode_sel_param_field_err;
	}

	/* All remaining page bytes must be zero; nothing else is settable. */
	for (i = 3; i < (buf[1] + 2); i++) {
		if (buf[i]) {
			goto mode_sel_param_field_err;
		}
	}

	sret = SBD_SUCCESS;

	/* All good. Lets handle the write cache change, if any */
	if (buf[2] & BIT_2) {
		/* WCE set: disable write-cache-disable (enable caching). */
		sret = sbd_wcd_set(0, sl);
	} else {
		sret = sbd_wcd_set(1, sl);
	}

	if (sret != SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
		return;
	}

	/* set on the device passed, now set the flags */
	mutex_enter(&sl->sl_lock);
	if (buf[2] & BIT_2) {
		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
	} else {
		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
	}

	/* Post MODE PARAMETERS CHANGED unit attention to all other nexuses. */
	for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
		if (it == task->task_lu_itl_handle)
			continue;
		it->sbd_it_ua_conditions |= SBD_UA_MODE_PARAMETERS_CHANGED;
	}

	/* SP bit set: also persist the setting through the saved flags. */
	if (task->task_cdb[1] & 1) {
		if (buf[2] & BIT_2) {
			sl->sl_flags &= ~SL_SAVED_WRITE_CACHE_DISABLE;
		} else {
			sl->sl_flags |= SL_SAVED_WRITE_CACHE_DISABLE;
		}
		mutex_exit(&sl->sl_lock);
		sret = sbd_write_lu_info(sl);
	} else {
		mutex_exit(&sl->sl_lock);
	}
	if (sret == SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
	}
	return;

mode_sel_param_len_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_PARAM_LIST_LENGTH_ERROR);
	return;
mode_sel_param_field_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_INVALID_FIELD_IN_PARAM_LIST);
}
1072 
1073 void
1074 sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf,
1075 			uint8_t *p, int bsize)
1076 {
1077 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
1078 	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
1079 	uint32_t cmd_size;
1080 	uint8_t page_length;
1081 	uint8_t byte0;
1082 
1083 	byte0 = DTYPE_DIRECT;
1084 	/*
1085 	 * Basic protocol checks.
1086 	 */
1087 
1088 	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
1089 		stmf_scsilib_send_status(task, STATUS_CHECK,
1090 		    STMF_SAA_INVALID_FIELD_IN_CDB);
1091 		return;
1092 	}
1093 
1094 	/*
1095 	 * Zero byte allocation length is not an error.  Just
1096 	 * return success.
1097 	 */
1098 
1099 	cmd_size = (((uint32_t)cdbp[3]) << 8) | cdbp[4];
1100 
1101 	if (cmd_size == 0) {
1102 		task->task_cmd_xfer_length = 0;
1103 		if (task->task_additional_flags &
1104 		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1105 			task->task_expected_xfer_length = 0;
1106 		}
1107 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1108 		return;
1109 	}
1110 
1111 	/*
1112 	 * Standard inquiry
1113 	 */
1114 
1115 	if ((cdbp[1] & 1) == 0) {
1116 		struct scsi_inquiry *inq = (struct scsi_inquiry *)p;
1117 
1118 		page_length = 31;
1119 		bzero(inq, page_length + 5);
1120 
1121 		inq->inq_dtype = DTYPE_DIRECT;
1122 		inq->inq_ansi = 5;	/* SPC-3 */
1123 		inq->inq_hisup = 1;
1124 		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
1125 		inq->inq_len = page_length;
1126 
1127 		inq->inq_tpgs = 1;
1128 		inq->inq_cmdque = 1;
1129 
1130 		if (sl->sl_flags & SL_VID_VALID) {
1131 			bcopy(sl->sl_vendor_id, inq->inq_vid, 8);
1132 		} else {
1133 			bcopy(sbd_vendor_id, inq->inq_vid, 8);
1134 		}
1135 
1136 		if (sl->sl_flags & SL_PID_VALID) {
1137 			bcopy(sl->sl_product_id, inq->inq_pid, 16);
1138 		} else {
1139 			bcopy(sbd_product_id, inq->inq_pid, 16);
1140 		}
1141 
1142 		if (sl->sl_flags & SL_REV_VALID) {
1143 			bcopy(sl->sl_revision, inq->inq_revision, 4);
1144 		} else {
1145 			bcopy(sbd_revision, inq->inq_revision, 4);
1146 		}
1147 
1148 		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
1149 		    min(cmd_size, page_length + 5));
1150 
1151 		return;
1152 	}
1153 
1154 	/*
1155 	 * EVPD handling
1156 	 */
1157 
1158 	switch (cdbp[2]) {
1159 	case 0x00:
1160 		page_length = 4;
1161 
1162 		bzero(p, page_length + 4);
1163 
1164 		p[0] = byte0;
1165 		p[3] = page_length;
1166 		p[5] = 0x80;
1167 		p[6] = 0x83;
1168 		p[7] = 0x86;
1169 
1170 		break;
1171 
1172 	case 0x80:
1173 		if (sl->sl_serial_no_size) {
1174 			page_length = sl->sl_serial_no_size;
1175 			bcopy(sl->sl_serial_no, p + 4, sl->sl_serial_no_size);
1176 		} else {
1177 			bcopy("    ", p + 4, 4);
1178 		}
1179 		p[0] = byte0;
1180 		p[1] = 0x80;
1181 		p[3] = page_length;
1182 		break;
1183 
1184 	case 0x83:
1185 
1186 		page_length = stmf_scsilib_prepare_vpd_page83(task, p,
1187 		    bsize, byte0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
1188 		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID) - 4;
1189 		break;
1190 
1191 	case 0x86:
1192 		page_length = 0x3c;
1193 
1194 		bzero(p, page_length + 4);
1195 
1196 		p[0] = byte0;
1197 		p[1] = 0x86;		/* Page 86 response */
1198 		p[3] = page_length;
1199 
1200 		/*
1201 		 * Bits 0, 1, and 2 will need to be updated
1202 		 * to reflect the queue tag handling if/when
1203 		 * that is implemented.  For now, we're going
1204 		 * to claim support only for Simple TA.
1205 		 */
1206 		p[5] = 1;
1207 
1208 		break;
1209 
1210 	default:
1211 		stmf_scsilib_send_status(task, STATUS_CHECK,
1212 		    STMF_SAA_INVALID_FIELD_IN_CDB);
1213 		return;
1214 	}
1215 
1216 	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
1217 	    min(cmd_size, page_length + 4));
1218 }
1219 
1220 stmf_status_t
1221 sbd_task_alloc(struct scsi_task *task)
1222 {
1223 	if ((task->task_lu_private =
1224 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
1225 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1226 		scmd->flags = 0;
1227 		return (STMF_SUCCESS);
1228 	}
1229 	return (STMF_ALLOC_FAILURE);
1230 }
1231 
1232 void
1233 sbd_remove_it_handle(sbd_lu_t *sl, sbd_it_data_t *it)
1234 {
1235 	sbd_it_data_t **ppit;
1236 
1237 	sbd_pgr_remove_it_handle(sl, it);
1238 	mutex_enter(&sl->sl_lock);
1239 	for (ppit = &sl->sl_it_list; *ppit != NULL;
1240 	    ppit = &((*ppit)->sbd_it_next)) {
1241 		if ((*ppit) == it) {
1242 			*ppit = it->sbd_it_next;
1243 			break;
1244 		}
1245 	}
1246 	mutex_exit(&sl->sl_lock);
1247 
1248 	DTRACE_PROBE2(itl__nexus__end, stmf_lu_t *, sl->sl_lu,
1249 	    sbd_it_data_t *, it);
1250 
1251 	kmem_free(it, sizeof (*it));
1252 }
1253 
/*
 * Clear a SCSI-2 reservation on the LU.  If "it" is NULL the holding
 * I_T nexus is looked up; otherwise the reservation is cleared only
 * when "it" actually holds it.
 */
void
sbd_check_and_clear_scsi2_reservation(sbd_lu_t *sl, sbd_it_data_t *it)
{
	mutex_enter(&sl->sl_lock);
	if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) == 0) {
		/* If we dont have any reservations, just get out. */
		mutex_exit(&sl->sl_lock);
		return;
	}

	if (it == NULL) {
		/* Find the I_T nexus which is holding the reservation. */
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
				ASSERT(it->sbd_it_session_id ==
				    sl->sl_rs_owner_session_id);
				break;
			}
		}
		/* The LU-level flag implies some nexus holds it. */
		ASSERT(it != NULL);
	} else {
		/*
		 * We were passed an I_T nexus. If this nexus does not hold
		 * the reservation, do nothing. This is why this function is
		 * called "check_and_clear".
		 */
		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
			mutex_exit(&sl->sl_lock);
			return;
		}
	}
	/* Drop the reservation on both the nexus and the LU. */
	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
	sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
	mutex_exit(&sl->sl_lock);
}
1289 
1290 
1291 
/*
 * Main LU entry point for a new SCSI task.  Sets up per-I_T-nexus state
 * on first contact, performs unit-attention and reservation checks in
 * the order SAM-3 requires, then dispatches the CDB to the appropriate
 * handler.  Unknown opcodes get CHECK CONDITION / INVALID OPCODE.
 */
void
sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	uint8_t cdb0, cdb1;

	/* First command on this nexus: allocate and register its state. */
	if ((it = task->task_lu_itl_handle) == NULL) {
		mutex_enter(&sl->sl_lock);
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_session_id ==
			    task->task_session->ss_session_id) {
				/* Session already on the list; retry later. */
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
				return;
			}
		}
		/* KM_NOSLEEP: allocating while holding sl_lock. */
		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
		if (it == NULL) {
			mutex_exit(&sl->sl_lock);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		it->sbd_it_session_id = task->task_session->ss_session_id;
		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
		it->sbd_it_next = sl->sl_it_list;
		sl->sl_it_list = it;
		mutex_exit(&sl->sl_lock);

		DTRACE_PROBE1(itl__nexus__start, scsi_task *, task);

		sbd_pgr_initialize_it(task);
		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
		    task->task_session, it->sbd_it_session_id, it)
		    != STMF_SUCCESS) {
			/* Registration failed; roll back the nexus. */
			sbd_remove_it_handle(sl, it);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		task->task_lu_itl_handle = it;
		/* A new nexus starts with a power-on/reset unit attention. */
		it->sbd_it_ua_conditions = SBD_UA_POR;
	} else if (it->sbd_it_flags & SBD_IT_PGR_CHECK_FLAG) {
		/* PGR state changed elsewhere; refresh it for this nexus. */
		sbd_pgr_initialize_it(task);
		mutex_enter(&sl->sl_lock);
		it->sbd_it_flags &= ~SBD_IT_PGR_CHECK_FLAG;
		mutex_exit(&sl->sl_lock);
	}

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	/* Checking ua conditions as per SAM3R14 5.3.2 specified order */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
			saa = STMF_SAA_POR;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	/* Reservation conflict checks */
	if (SBD_PGR_RSVD(sl->sl_pgr)) {
		if (sbd_pgr_reservation_conflict(task)) {
			stmf_scsilib_send_status(task,
			    STATUS_RESERVATION_CONFLICT, 0);
			return;
		}
	} else if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) &&
	    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
		/* SCSI-2 reservation held by another nexus. */
		if (!(SCSI2_CONFLICT_FREE_CMDS(task->task_cdb))) {
			stmf_scsilib_send_status(task,
			    STATUS_RESERVATION_CONFLICT, 0);
			return;
		}
	}

	/* Rest of the ua conndition checks */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
			/* READ CAPACITY itself need not see this UA. */
			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
			    (task->task_cdb[1] ==
			    SSVC_ACTION_READ_CAPACITY_G4))) {
				saa = 0;
			} else {
				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
			}
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_MODE_PARAMETERS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_MODE_PARAMETERS_CHANGED;
			saa = STMF_SAA_MODE_PARAMETERS_CHANGED;
		} else {
			it->sbd_it_ua_conditions = 0;
			saa = 0;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	/* Fast path: group 0/1 READ and WRITE share the low 5 opcode bits. */
	cdb0 = task->task_cdb[0] & 0x1F;

	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
		if (cdb0 == SCMD_READ) {
			sbd_handle_read(task, initial_dbuf);
			return;
		}
		sbd_handle_write(task, initial_dbuf);
		return;
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
		uint8_t *p;

		p = (uint8_t *)kmem_zalloc(512, KM_SLEEP);
		sbd_handle_inquiry(task, initial_dbuf, p, 512);
		kmem_free(p, 512);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_OUT) {
		sbd_handle_pgr_out_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_IN) {
		sbd_handle_pgr_in_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_RELEASE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner don't release it, just return good */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
				return;
			}
		}
		sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_RESERVE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner, return conflict status */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
		sl->sl_flags |= SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
		sl->sl_rs_owner_session_id = it->sbd_it_session_id;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_REQUEST_SENSE) {
		/*
		 * LU provider needs to store unretrieved sense data
		 * (e.g. after power-on/reset).  For now, we'll just
		 * return good status with no sense.
		 */

		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
		    task->task_cdb[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}

		return;
	}

	/* Report Target Port Groups */
	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
	    ((cdb1 & 0x1F) == 0x0A)) {
		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
		task->task_cmd_xfer_length = 0;
		if (task->task_cdb[4] & 0xFC) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/* LOEJ (bit 1) is not supported; START/STOP is a no-op. */
		if (task->task_cdb[4] & 2) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}
		return;

	}

	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
		uint8_t *p;
		p = kmem_zalloc(512, KM_SLEEP);
		sbd_handle_mode_sense(task, initial_dbuf, p);
		kmem_free(p, 512);
		return;
	}

	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
		sbd_handle_mode_select(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
		sbd_handle_read_capacity(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
			sbd_handle_read_capacity(task, initial_dbuf);
			return;
		/*
		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
		 * 	sbd_handle_read(task, initial_dbuf);
		 * 	return;
		 */
		}
	}

	/*
	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
	 *		 sbd_handle_write(task, initial_dbuf);
	 * 		return;
	 *	}
	 * }
	 */

	if (cdb0 == SCMD_VERIFY) {
		/*
		 * Something more likely needs to be done here.
		 */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
		sbd_handle_sync_cache(task, initial_dbuf);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
1599 
1600 void
1601 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1602 {
1603 	sbd_cmd_t *scmd = NULL;
1604 
1605 	scmd = (sbd_cmd_t *)task->task_lu_private;
1606 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1607 		return;
1608 
1609 	switch (scmd->cmd_type) {
1610 	case (SBD_CMD_SCSI_READ):
1611 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1612 		break;
1613 
1614 	case (SBD_CMD_SCSI_WRITE):
1615 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1616 		break;
1617 
1618 	case (SBD_CMD_SMALL_READ):
1619 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1620 		break;
1621 
1622 	case (SBD_CMD_SMALL_WRITE):
1623 		sbd_handle_short_write_xfer_completion(task, dbuf);
1624 		break;
1625 
1626 	default:
1627 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1628 		break;
1629 	}
1630 }
1631 
1632 /* ARGSUSED */
1633 void
1634 sbd_send_status_done(struct scsi_task *task)
1635 {
1636 	cmn_err(CE_PANIC,
1637 	    "sbd_send_status_done: this should not have been called");
1638 }
1639 
1640 void
1641 sbd_task_free(struct scsi_task *task)
1642 {
1643 	if (task->task_lu_private) {
1644 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1645 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1646 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1647 			    (void *)task);
1648 		}
1649 		kmem_free(scmd, sizeof (sbd_cmd_t));
1650 	}
1651 }
1652 
1653 /*
1654  * Aborts are synchronus w.r.t. I/O AND
1655  * All the I/O which SBD does is synchronous AND
1656  * Everything within a task is single threaded.
1657  *   IT MEANS
1658  * If this function is called, we are doing nothing with this task
1659  * inside of sbd module.
1660  */
1661 /* ARGSUSED */
1662 stmf_status_t
1663 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1664 {
1665 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1666 	scsi_task_t *task;
1667 
1668 	if (abort_cmd == STMF_LU_RESET_STATE) {
1669 		return (sbd_lu_reset_state(lu));
1670 	}
1671 
1672 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1673 		sbd_check_and_clear_scsi2_reservation(sl, (sbd_it_data_t *)arg);
1674 		sbd_remove_it_handle(sl, (sbd_it_data_t *)arg);
1675 		return (STMF_SUCCESS);
1676 	}
1677 
1678 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1679 	task = (scsi_task_t *)arg;
1680 	if (task->task_lu_private) {
1681 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1682 
1683 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1684 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1685 			return (STMF_ABORT_SUCCESS);
1686 		}
1687 	}
1688 
1689 	return (STMF_NOT_FOUND);
1690 }
1691 
1692 /* ARGSUSED */
1693 void
1694 sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
1695 {
1696 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1697 	stmf_change_status_t st;
1698 
1699 	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
1700 	    (cmd == STMF_CMD_LU_OFFLINE) ||
1701 	    (cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
1702 	    (cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
1703 
1704 	st.st_completion_status = STMF_SUCCESS;
1705 	st.st_additional_info = NULL;
1706 
1707 	switch (cmd) {
1708 	case STMF_CMD_LU_ONLINE:
1709 		if (sl->sl_state == STMF_STATE_ONLINE)
1710 			st.st_completion_status = STMF_ALREADY;
1711 		else if (sl->sl_state != STMF_STATE_OFFLINE)
1712 			st.st_completion_status = STMF_FAILURE;
1713 		if (st.st_completion_status == STMF_SUCCESS) {
1714 			sl->sl_state = STMF_STATE_ONLINE;
1715 			sl->sl_state_not_acked = 1;
1716 		}
1717 		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
1718 		break;
1719 
1720 	case STMF_CMD_LU_OFFLINE:
1721 		if (sl->sl_state == STMF_STATE_OFFLINE)
1722 			st.st_completion_status = STMF_ALREADY;
1723 		else if (sl->sl_state != STMF_STATE_ONLINE)
1724 			st.st_completion_status = STMF_FAILURE;
1725 		if (st.st_completion_status == STMF_SUCCESS) {
1726 			sl->sl_flags &= ~(SL_MEDIUM_REMOVAL_PREVENTED |
1727 			    SL_LU_HAS_SCSI2_RESERVATION);
1728 			sl->sl_state = STMF_STATE_OFFLINE;
1729 			sl->sl_state_not_acked = 1;
1730 		}
1731 		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
1732 		break;
1733 
1734 	case STMF_ACK_LU_ONLINE_COMPLETE:
1735 		/* Fallthrough */
1736 	case STMF_ACK_LU_OFFLINE_COMPLETE:
1737 		sl->sl_state_not_acked = 0;
1738 		break;
1739 
1740 	}
1741 }
1742 
1743 /* ARGSUSED */
1744 stmf_status_t
1745 sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
1746     uint32_t *bufsizep)
1747 {
1748 	return (STMF_NOT_SUPPORTED);
1749 }
1750 
1751 stmf_status_t
1752 sbd_lu_reset_state(stmf_lu_t *lu)
1753 {
1754 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1755 
1756 	mutex_enter(&sl->sl_lock);
1757 	if (sl->sl_flags & SL_SAVED_WRITE_CACHE_DISABLE) {
1758 		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
1759 		mutex_exit(&sl->sl_lock);
1760 		(void) sbd_wcd_set(1, sl);
1761 	} else {
1762 		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
1763 		mutex_exit(&sl->sl_lock);
1764 		(void) sbd_wcd_set(0, sl);
1765 	}
1766 	sbd_check_and_clear_scsi2_reservation(sl, NULL);
1767 	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
1768 		return (STMF_FAILURE);
1769 	}
1770 	return (STMF_SUCCESS);
1771 }
1772 
1773 sbd_status_t
1774 sbd_flush_data_cache(sbd_lu_t *sl, int fsync_done)
1775 {
1776 	int r = 0;
1777 	int ret;
1778 
1779 	if (fsync_done)
1780 		goto over_fsync;
1781 	if ((sl->sl_data_vtype == VREG) || (sl->sl_data_vtype == VBLK)) {
1782 		if (VOP_FSYNC(sl->sl_data_vp, FSYNC, kcred, NULL))
1783 			return (SBD_FAILURE);
1784 	}
1785 over_fsync:
1786 	if (((sl->sl_data_vtype == VCHR) || (sl->sl_data_vtype == VBLK)) &&
1787 	    ((sl->sl_flags & SL_NO_DATA_DKIOFLUSH) == 0)) {
1788 		ret = VOP_IOCTL(sl->sl_data_vp, DKIOCFLUSHWRITECACHE, NULL,
1789 		    FKIOCTL, kcred, &r, NULL);
1790 		if ((ret == ENOTTY) || (ret == ENOTSUP)) {
1791 			mutex_enter(&sl->sl_lock);
1792 			sl->sl_flags |= SL_NO_DATA_DKIOFLUSH;
1793 			mutex_exit(&sl->sl_lock);
1794 		} else if (ret != 0) {
1795 			return (SBD_FAILURE);
1796 		}
1797 	}
1798 
1799 	return (SBD_SUCCESS);
1800 }
1801 
1802 /* ARGSUSED */
1803 static void
1804 sbd_handle_sync_cache(struct scsi_task *task,
1805     struct stmf_data_buf *initial_dbuf)
1806 {
1807 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
1808 	uint64_t	lba, laddr;
1809 	sbd_status_t	sret;
1810 	uint32_t	len;
1811 	int		is_g4 = 0;
1812 	int		immed;
1813 
1814 	task->task_cmd_xfer_length = 0;
1815 	/*
1816 	 * Determine if this is a 10 or 16 byte CDB
1817 	 */
1818 
1819 	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
1820 		is_g4 = 1;
1821 
1822 	/*
1823 	 * Determine other requested parameters
1824 	 *
1825 	 * We don't have a non-volatile cache, so don't care about SYNC_NV.
1826 	 * Do not support the IMMED bit.
1827 	 */
1828 
1829 	immed = (task->task_cdb[1] & 0x02);
1830 
1831 	if (immed) {
1832 		stmf_scsilib_send_status(task, STATUS_CHECK,
1833 		    STMF_SAA_INVALID_FIELD_IN_CDB);
1834 		return;
1835 	}
1836 
1837 	/*
1838 	 * Check to be sure we're not being asked to sync an LBA
1839 	 * that is out of range.  While checking, verify reserved fields.
1840 	 */
1841 
1842 	if (is_g4) {
1843 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
1844 		    task->task_cdb[15]) {
1845 			stmf_scsilib_send_status(task, STATUS_CHECK,
1846 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1847 			return;
1848 		}
1849 
1850 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
1851 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
1852 	} else {
1853 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
1854 		    task->task_cdb[9]) {
1855 			stmf_scsilib_send_status(task, STATUS_CHECK,
1856 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1857 			return;
1858 		}
1859 
1860 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
1861 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
1862 	}
1863 
1864 	laddr = lba << sl->sl_data_blocksize_shift;
1865 	len <<= sl->sl_data_blocksize_shift;
1866 
1867 	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
1868 		stmf_scsilib_send_status(task, STATUS_CHECK,
1869 		    STMF_SAA_LBA_OUT_OF_RANGE);
1870 		return;
1871 	}
1872 
1873 	sret = sbd_flush_data_cache(sl, 0);
1874 	if (sret != SBD_SUCCESS) {
1875 		stmf_scsilib_send_status(task, STATUS_CHECK,
1876 		    STMF_SAA_WRITE_ERROR);
1877 		return;
1878 	}
1879 
1880 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1881 }
1882