xref: /freebsd/sys/cam/ctl/ctl_error.c (revision 75577946)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2003-2009 Silicon Graphics International Corp.
5  * Copyright (c) 2011 Spectra Logic Corporation
6  * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    substantially similar to the "NO WARRANTY" disclaimer below
17  *    ("Disclaimer") and any redistribution must be conditioned upon
18  *    including a substantially similar Disclaimer requirement for further
19  *    binary redistribution.
20  *
21  * NO WARRANTY
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
31  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGES.
33  *
34  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_error.c#2 $
35  */
36 /*
37  * CAM Target Layer error reporting routines.
38  *
39  * Author: Ken Merry <ken@FreeBSD.org>
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/types.h>
46 #include <sys/malloc.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/condvar.h>
50 #include <sys/stddef.h>
51 #include <sys/ctype.h>
52 #include <sys/sysctl.h>
53 #include <machine/stdarg.h>
54 
55 #include <cam/scsi/scsi_all.h>
56 #include <cam/scsi/scsi_da.h>
57 #include <cam/ctl/ctl_io.h>
58 #include <cam/ctl/ctl.h>
59 #include <cam/ctl/ctl_frontend.h>
60 #include <cam/ctl/ctl_backend.h>
61 #include <cam/ctl/ctl_ioctl.h>
62 #include <cam/ctl/ctl_error.h>
63 #include <cam/ctl/ctl_ha.h>
64 #include <cam/ctl/ctl_private.h>
65 
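/*
 * Note on the helpers below: the variadic tail of ctl_set_sense_data_va(),
 * ctl_set_sense_data() and ctl_set_sense() is a list of optional sense
 * descriptors handed to scsi_set_sense_data_va() as (element type, length,
 * data pointer) triples, terminated by SSD_ELEM_NONE; SSD_ELEM_SKIP omits
 * an element while keeping the argument list aligned (see the callers in
 * this file for examples).
 */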
66 void
67 ctl_set_sense_data_va(struct scsi_sense_data *sense_data, u_int *sense_len,
68     void *lunptr, scsi_sense_data_type sense_format, int current_error,
69     int sense_key, int asc, int ascq, va_list ap)
70 {
71 	struct ctl_lun *lun;
72 
73 	lun = (struct ctl_lun *)lunptr;
74 
75 	/*
76 	 * Determine whether to return fixed or descriptor format sense
77 	 * data.
78 	 */
79 	if (sense_format == SSD_TYPE_NONE) {
80 		/*
81 		 * SPC-3 and up require some UAs to be returned as fixed.
82 		 */
83 		if (asc == 0x29 || (asc == 0x2A && ascq == 0x01))
84 			sense_format = SSD_TYPE_FIXED;
85 		else
86 		/*
87 		 * If the format isn't specified, we only return descriptor
88 		 * sense if the LUN exists and descriptor sense is turned
89 		 * on for that LUN.
90 		 */
91 		if ((lun != NULL) && (lun->MODE_CTRL.rlec & SCP_DSENSE))
92 			sense_format = SSD_TYPE_DESC;
93 		else
94 			sense_format = SSD_TYPE_FIXED;
95 	}
96 
97 	/*
98 	 * Determine maximum sense data length to return.
99 	 */
100 	if (*sense_len == 0) {
101 		if ((lun != NULL) && (lun->MODE_CTRLE.max_sense != 0))
102 			*sense_len = lun->MODE_CTRLE.max_sense;
103 		else
104 			*sense_len = SSD_FULL_SIZE;
105 	}
106 
107 	scsi_set_sense_data_va(sense_data, sense_len, sense_format,
108 	    current_error, sense_key, asc, ascq, ap);
109 }
110 
111 void
112 ctl_set_sense_data(struct scsi_sense_data *sense_data, u_int *sense_len,
113     void *lunptr, scsi_sense_data_type sense_format, int current_error,
114     int sense_key, int asc, int ascq, ...)
115 {
116 	va_list ap;
117 
118 	va_start(ap, ascq);
119 	ctl_set_sense_data_va(sense_data, sense_len, lunptr, sense_format,
120 	    current_error, sense_key, asc, ascq, ap);
121 	va_end(ap);
122 }
123 
124 void
125 ctl_set_sense(struct ctl_scsiio *ctsio, int current_error, int sense_key,
126 	      int asc, int ascq, ...)
127 {
128 	va_list ap;
129 	struct ctl_lun *lun;
130 	u_int sense_len;
131 
132 	/*
133 	 * The LUN can't go away until all of the commands have been
134 	 * completed.  Therefore we can safely access the LUN structure and
135 	 * flags without the lock.
136 	 */
137 	lun = CTL_LUN(ctsio);
138 
139 	va_start(ap, ascq);
140 	sense_len = 0;
141 	ctl_set_sense_data_va(&ctsio->sense_data, &sense_len,
142 			      lun,
143 			      SSD_TYPE_NONE,
144 			      current_error,
145 			      sense_key,
146 			      asc,
147 			      ascq,
148 			      ap);
149 	va_end(ap);
150 
151 	ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
152 	ctsio->sense_len = sense_len;
153 	ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
154 }
155 
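/*
 * Illustrative example (hypothetical caller, not part of this file): to
 * report ILLEGAL REQUEST, "Invalid field in CDB" at CDB byte 2, bit 5,
 * with a sense-key-specific descriptor, a frontend could do:
 *
 *	uint8_t sks[3];
 *
 *	sks[0] = SSD_SCS_VALID | SSD_FIELDPTR_CMD | SSD_BITPTR_VALID | 5;
 *	scsi_ulto2b(2, &sks[1]);
 *	ctl_set_sense(ctsio, 1, SSD_KEY_ILLEGAL_REQUEST, 0x24, 0x00,
 *	    SSD_ELEM_SKS, sizeof(sks), sks, SSD_ELEM_NONE);
 *
 * which is essentially what ctl_set_invalid_field() below does.
 */
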
156 /*
157  * Transform fixed sense data into descriptor sense data.
158  *
159  * For simplicity's sake, we assume that both sense structures are
160  * SSD_FULL_SIZE.  Otherwise, the logic gets more complicated.
161  */
162 void
163 ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src,
164 		  struct scsi_sense_data_desc *sense_dest)
165 {
166 	struct scsi_sense_stream stream_sense;
167 	int current_error;
168 	u_int sense_len;
169 	uint8_t stream_bits;
170 
171 	bzero(sense_dest, sizeof(*sense_dest));
172 
173 	if ((sense_src->error_code & SSD_ERRCODE) == SSD_DEFERRED_ERROR)
174 		current_error = 0;
175 	else
176 		current_error = 1;
177 
178 	bzero(&stream_sense, sizeof(stream_sense));
179 
180 	/*
181 	 * Check to see whether any of the tape-specific bits are set.  If
182 	 * so, we'll need a stream sense descriptor.
183 	 */
184 	if (sense_src->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK))
185 		stream_bits = sense_src->flags & ~SSD_KEY;
186 	else
187 		stream_bits = 0;
188 
189 	/*
190 	 * Utilize our sense setting routine to do the transform.  If a
191 	 * value is set in the fixed sense data, set it in the descriptor
192 	 * data.  Otherwise, skip it.
193 	 */
194 	sense_len = SSD_FULL_SIZE;
195 	ctl_set_sense_data((struct scsi_sense_data *)sense_dest, &sense_len,
196 			   /*lun*/ NULL,
197 			   /*sense_format*/ SSD_TYPE_DESC,
198 			   current_error,
199 			   /*sense_key*/ sense_src->flags & SSD_KEY,
200 			   /*asc*/ sense_src->add_sense_code,
201 			   /*ascq*/ sense_src->add_sense_code_qual,
202 
203 			   /* Information Bytes */
204 			   (sense_src->error_code & SSD_ERRCODE_VALID) ?
205 			   SSD_ELEM_INFO : SSD_ELEM_SKIP,
206 			   sizeof(sense_src->info),
207 			   sense_src->info,
208 
209 			   /* Command specific bytes */
210 			   (scsi_4btoul(sense_src->cmd_spec_info) != 0) ?
211 			   SSD_ELEM_COMMAND : SSD_ELEM_SKIP,
212 			   sizeof(sense_src->cmd_spec_info),
213 			   sense_src->cmd_spec_info,
214 
215 			   /* FRU */
216 			   (sense_src->fru != 0) ?
217 			   SSD_ELEM_FRU : SSD_ELEM_SKIP,
218 			   sizeof(sense_src->fru),
219 			   &sense_src->fru,
220 
221 			   /* Sense Key Specific */
222 			   (sense_src->sense_key_spec[0] & SSD_SCS_VALID) ?
223 			   SSD_ELEM_SKS : SSD_ELEM_SKIP,
224 			   sizeof(sense_src->sense_key_spec),
225 			   sense_src->sense_key_spec,
226 
227 			   /* Tape bits */
228 			   (stream_bits != 0) ?
229 			   SSD_ELEM_STREAM : SSD_ELEM_SKIP,
230 			   sizeof(stream_bits),
231 			   &stream_bits,
232 
233 			   SSD_ELEM_NONE);
234 }
235 
236 /*
237  * Transform descriptor format sense data into fixed sense data.
238  *
239  * Some data may be lost in translation, because there are descriptors
240  * that can't be represented as fixed sense data.
241  *
242  * For simplicity's sake, we assume that both sense structures are
243  * SSD_FULL_SIZE.  Otherwise, the logic gets more complicated.
244  */
245 void
246 ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src,
247 		   struct scsi_sense_data_fixed *sense_dest)
248 {
249 	int current_error;
250 	uint8_t *info_ptr = NULL, *cmd_ptr = NULL, *fru_ptr = NULL;
251 	uint8_t *sks_ptr = NULL, *stream_ptr = NULL;
252 	int info_size = 0, cmd_size = 0, fru_size = 0;
253 	int sks_size = 0, stream_size = 0;
254 	int pos;
255 	u_int sense_len;
256 
257 	if ((sense_src->error_code & SSD_ERRCODE) == SSD_DESC_CURRENT_ERROR)
258 		current_error = 1;
259 	else
260 		current_error = 0;
261 
262 	for (pos = 0; pos < (int)(sense_src->extra_len - 1);) {
263 		struct scsi_sense_desc_header *header;
264 
265 		header = (struct scsi_sense_desc_header *)
266 		    &sense_src->sense_desc[pos];
267 
268 		/*
269 		 * See if this record goes past the end of the sense data.
270 		 * It shouldn't, but check just in case.
271 		 */
272 		if ((pos + header->length + sizeof(*header)) >
273 		     sense_src->extra_len)
274 			break;
275 
276 		switch (sense_src->sense_desc[pos]) {
277 		case SSD_DESC_INFO: {
278 			struct scsi_sense_info *info;
279 
280 			info = (struct scsi_sense_info *)header;
281 
282 			info_ptr = info->info;
283 			info_size = sizeof(info->info);
284 
285 			pos += info->length +
286 			    sizeof(struct scsi_sense_desc_header);
287 			break;
288 		}
289 		case SSD_DESC_COMMAND: {
290 			struct scsi_sense_command *cmd;
291 
292 			cmd = (struct scsi_sense_command *)header;
293 			cmd_ptr = cmd->command_info;
294 			cmd_size = sizeof(cmd->command_info);
295 
296 			pos += cmd->length +
297 			    sizeof(struct scsi_sense_desc_header);
298 			break;
299 		}
300 		case SSD_DESC_FRU: {
301 			struct scsi_sense_fru *fru;
302 
303 			fru = (struct scsi_sense_fru *)header;
304 			fru_ptr = &fru->fru;
305 			fru_size = sizeof(fru->fru);
306 			pos += fru->length +
307 			    sizeof(struct scsi_sense_desc_header);
308 			break;
309 		}
310 		case SSD_DESC_SKS: {
311 			struct scsi_sense_sks *sks;
312 
313 			sks = (struct scsi_sense_sks *)header;
314 			sks_ptr = sks->sense_key_spec;
315 			sks_size = sizeof(sks->sense_key_spec);
316 
317 			pos += sks->length +
318 			    sizeof(struct scsi_sense_desc_header);
319 			break;
320 		}
321 		case SSD_DESC_STREAM: {
322 			struct scsi_sense_stream *stream_sense;
323 
324 			stream_sense = (struct scsi_sense_stream *)header;
325 			stream_ptr = &stream_sense->byte3;
326 			stream_size = sizeof(stream_sense->byte3);
327 			pos += stream_sense->length +
328 			    sizeof(struct scsi_sense_desc_header);
329 			break;
330 		}
331 		default:
332 			/*
333 			 * We don't recognize this particular sense
334 			 * descriptor type, so just skip it.
335 			 */
336 			pos += sizeof(*header) + header->length;
337 			break;
338 		}
339 	}
340 
341 	sense_len = SSD_FULL_SIZE;
342 	ctl_set_sense_data((struct scsi_sense_data *)sense_dest, &sense_len,
343 			   /*lun*/ NULL,
344 			   /*sense_format*/ SSD_TYPE_FIXED,
345 			   current_error,
346 			   /*sense_key*/ sense_src->sense_key & SSD_KEY,
347 			   /*asc*/ sense_src->add_sense_code,
348 			   /*ascq*/ sense_src->add_sense_code_qual,
349 
350 			   /* Information Bytes */
351 			   (info_ptr != NULL) ? SSD_ELEM_INFO : SSD_ELEM_SKIP,
352 			   info_size,
353 			   info_ptr,
354 
355 			   /* Command specific bytes */
356 			   (cmd_ptr != NULL) ? SSD_ELEM_COMMAND : SSD_ELEM_SKIP,
357 			   cmd_size,
358 			   cmd_ptr,
359 
360 			   /* FRU */
361 			   (fru_ptr != NULL) ? SSD_ELEM_FRU : SSD_ELEM_SKIP,
362 			   fru_size,
363 			   fru_ptr,
364 
365 			   /* Sense Key Specific */
366 			   (sks_ptr != NULL) ? SSD_ELEM_SKS : SSD_ELEM_SKIP,
367 			   sks_size,
368 			   sks_ptr,
369 
370 			   /* Tape bits */
371 			   (stream_ptr != NULL) ? SSD_ELEM_STREAM : SSD_ELEM_SKIP,
372 			   stream_size,
373 			   stream_ptr,
374 
375 			   SSD_ELEM_NONE);
376 }
377 
378 void
379 ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq)
380 {
381 	ctl_set_sense(ctsio,
382 		      /*current_error*/ 1,
383 		      /*sense_key*/ SSD_KEY_UNIT_ATTENTION,
384 		      asc,
385 		      ascq,
386 		      SSD_ELEM_NONE);
387 }
388 
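/*
 * Translate a single pending unit attention bit into its ASC/ASCQ pair.
 * For power on, bus reset, target reset and I_T nexus loss the routine
 * widens *ua_to_clear so that every pending UA is dropped once the reset
 * has been reported; the thin provisioning threshold UA additionally
 * points *info at the LUN's saved sense information bytes, and the
 * informational exception UA takes its ASC/ASCQ from the LUN.
 */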
389 static void
390 ctl_ua_to_ascq(struct ctl_lun *lun, ctl_ua_type ua_to_build, int *asc,
391     int *ascq, ctl_ua_type *ua_to_clear, uint8_t **info)
392 {
393 
394 	switch (ua_to_build) {
395 	case CTL_UA_POWERON:
396 		/* 29h/01h  POWER ON OCCURRED */
397 		*asc = 0x29;
398 		*ascq = 0x01;
399 		*ua_to_clear = ~0;
400 		break;
401 	case CTL_UA_BUS_RESET:
402 		/* 29h/02h  SCSI BUS RESET OCCURRED */
403 		*asc = 0x29;
404 		*ascq = 0x02;
405 		*ua_to_clear = ~0;
406 		break;
407 	case CTL_UA_TARG_RESET:
408 		/* 29h/03h  BUS DEVICE RESET FUNCTION OCCURRED*/
409 		*asc = 0x29;
410 		*ascq = 0x03;
411 		*ua_to_clear = ~0;
412 		break;
413 	case CTL_UA_I_T_NEXUS_LOSS:
414 		/* 29h/07h  I_T NEXUS LOSS OCCURRED */
415 		*asc = 0x29;
416 		*ascq = 0x07;
417 		*ua_to_clear = ~0;
418 		break;
419 	case CTL_UA_LUN_RESET:
420 		/* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
421 		/*
422 		 * Since we don't have a specific ASC/ASCQ pair for a LUN
423 		 * reset, just return the generic reset code.
424 		 */
425 		*asc = 0x29;
426 		*ascq = 0x00;
427 		break;
428 	case CTL_UA_LUN_CHANGE:
429 		/* 3Fh/0Eh  REPORTED LUNS DATA HAS CHANGED */
430 		*asc = 0x3F;
431 		*ascq = 0x0E;
432 		break;
433 	case CTL_UA_MODE_CHANGE:
434 		/* 2Ah/01h  MODE PARAMETERS CHANGED */
435 		*asc = 0x2A;
436 		*ascq = 0x01;
437 		break;
438 	case CTL_UA_LOG_CHANGE:
439 		/* 2Ah/02h  LOG PARAMETERS CHANGED */
440 		*asc = 0x2A;
441 		*ascq = 0x02;
442 		break;
443 	case CTL_UA_INQ_CHANGE:
444 		/* 3Fh/03h  INQUIRY DATA HAS CHANGED */
445 		*asc = 0x3F;
446 		*ascq = 0x03;
447 		break;
448 	case CTL_UA_RES_PREEMPT:
449 		/* 2Ah/03h  RESERVATIONS PREEMPTED */
450 		*asc = 0x2A;
451 		*ascq = 0x03;
452 		break;
453 	case CTL_UA_RES_RELEASE:
454 		/* 2Ah/04h  RESERVATIONS RELEASED */
455 		*asc = 0x2A;
456 		*ascq = 0x04;
457 		break;
458 	case CTL_UA_REG_PREEMPT:
459 		/* 2Ah/05h  REGISTRATIONS PREEMPTED */
460 		*asc = 0x2A;
461 		*ascq = 0x05;
462 		break;
463 	case CTL_UA_ASYM_ACC_CHANGE:
464 		/* 2Ah/06h  ASYMMETRIC ACCESS STATE CHANGED */
465 		*asc = 0x2A;
466 		*ascq = 0x06;
467 		break;
468 	case CTL_UA_CAPACITY_CHANGE:
469 		/* 2Ah/09h  CAPACITY DATA HAS CHANGED */
470 		*asc = 0x2A;
471 		*ascq = 0x09;
472 		break;
473 	case CTL_UA_THIN_PROV_THRES:
474 		/* 38h/07h  THIN PROVISIONING SOFT THRESHOLD REACHED */
475 		*asc = 0x38;
476 		*ascq = 0x07;
477 		*info = lun->ua_tpt_info;
478 		break;
479 	case CTL_UA_MEDIUM_CHANGE:
480 		/* 28h/00h  NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
481 		*asc = 0x28;
482 		*ascq = 0x00;
483 		break;
484 	case CTL_UA_IE:
485 		/* Informational exception */
486 		*asc = lun->ie_asc;
487 		*ascq = lun->ie_ascq;
488 		break;
489 	default:
490 		panic("%s: Unknown UA %x", __func__, ua_to_build);
491 	}
492 }
493 
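/*
 * Peek at the first pending unit attention for this initiator, without
 * clearing it, and encode it into the three-byte resp buffer: sense key
 * (with 0x10 set when it is the only pending UA, 0x20 when more remain),
 * then ASC and ASCQ.  If no UA array exists for the port yet, POWER ON
 * is assumed to be pending.
 */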
494 ctl_ua_type
495 ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp)
496 {
497 	ctl_ua_type ua;
498 	ctl_ua_type ua_to_build, ua_to_clear;
499 	uint8_t *info;
500 	int asc, ascq;
501 	uint32_t p, i;
502 
503 	mtx_assert(&lun->lun_lock, MA_OWNED);
504 	p = initidx / CTL_MAX_INIT_PER_PORT;
505 	i = initidx % CTL_MAX_INIT_PER_PORT;
506 	if (lun->pending_ua[p] == NULL)
507 		ua = CTL_UA_POWERON;
508 	else
509 		ua = lun->pending_ua[p][i];
510 	if (ua == CTL_UA_NONE)
511 		return (CTL_UA_NONE);
512 
513 	ua_to_build = (1 << (ffs(ua) - 1));
514 	ua_to_clear = ua_to_build;
515 	info = NULL;
516 	ctl_ua_to_ascq(lun, ua_to_build, &asc, &ascq, &ua_to_clear, &info);
517 
518 	resp[0] = SSD_KEY_UNIT_ATTENTION;
519 	if (ua_to_build == ua)
520 		resp[0] |= 0x10;
521 	else
522 		resp[0] |= 0x20;
523 	resp[1] = asc;
524 	resp[2] = ascq;
525 	return (ua_to_build);
526 }
527 
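/*
 * Report and clear the first pending unit attention for this initiator,
 * filling in the supplied sense buffer.  The per-port UA array is
 * allocated lazily here; the LUN lock is dropped around the M_WAITOK
 * allocation, and a freshly created array starts with CTL_UA_POWERON
 * pending for every initiator.  A reported LUN inventory change is also
 * cleared for this initiator on all LUNs, and the thin provisioning
 * threshold UA is cleared for all initiators on this LUN when
 * SLBPP_SITUA is set in the Logical Block Provisioning mode page.
 */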
528 ctl_ua_type
529 ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
530     struct scsi_sense_data *sense, u_int *sense_len,
531     scsi_sense_data_type sense_format)
532 {
533 	ctl_ua_type *ua;
534 	ctl_ua_type ua_to_build, ua_to_clear;
535 	uint8_t *info;
536 	int asc, ascq;
537 	uint32_t p, i;
538 
539 	mtx_assert(&lun->lun_lock, MA_OWNED);
540 	mtx_assert(&lun->ctl_softc->ctl_lock, MA_NOTOWNED);
541 	p = initidx / CTL_MAX_INIT_PER_PORT;
542 	if ((ua = lun->pending_ua[p]) == NULL) {
543 		mtx_unlock(&lun->lun_lock);
544 		ua = malloc(sizeof(ctl_ua_type) * CTL_MAX_INIT_PER_PORT,
545 		    M_CTL, M_WAITOK);
546 		mtx_lock(&lun->lun_lock);
547 		if (lun->pending_ua[p] == NULL) {
548 			lun->pending_ua[p] = ua;
549 			for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++)
550 				ua[i] = CTL_UA_POWERON;
551 		} else {
552 			free(ua, M_CTL);
553 			ua = lun->pending_ua[p];
554 		}
555 	}
556 	i = initidx % CTL_MAX_INIT_PER_PORT;
557 	if (ua[i] == CTL_UA_NONE)
558 		return (CTL_UA_NONE);
559 
560 	ua_to_build = (1 << (ffs(ua[i]) - 1));
561 	ua_to_clear = ua_to_build;
562 	info = NULL;
563 	ctl_ua_to_ascq(lun, ua_to_build, &asc, &ascq, &ua_to_clear, &info);
564 
565 	ctl_set_sense_data(sense, sense_len, lun, sense_format, 1,
566 	    /*sense_key*/ SSD_KEY_UNIT_ATTENTION, asc, ascq,
567 	    ((info != NULL) ? SSD_ELEM_INFO : SSD_ELEM_SKIP), 8, info,
568 	    SSD_ELEM_NONE);
569 
570 	/* We're reporting this UA, so clear it */
571 	ua[i] &= ~ua_to_clear;
572 
573 	if (ua_to_build == CTL_UA_LUN_CHANGE) {
574 		mtx_unlock(&lun->lun_lock);
575 		mtx_lock(&lun->ctl_softc->ctl_lock);
576 		ctl_clr_ua_allluns(lun->ctl_softc, initidx, ua_to_build);
577 		mtx_unlock(&lun->ctl_softc->ctl_lock);
578 		mtx_lock(&lun->lun_lock);
579 	} else if (ua_to_build == CTL_UA_THIN_PROV_THRES &&
580 	    (lun->MODE_LBP.main.flags & SLBPP_SITUA) != 0) {
581 		ctl_clr_ua_all(lun, -1, ua_to_build);
582 	}
583 
584 	return (ua_to_build);
585 }
586 
587 void
588 ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio)
589 {
590 	/* OVERLAPPED COMMANDS ATTEMPTED */
591 	ctl_set_sense(ctsio,
592 		      /*current_error*/ 1,
593 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
594 		      /*asc*/ 0x4E,
595 		      /*ascq*/ 0x00,
596 		      SSD_ELEM_NONE);
597 }
598 
599 void
600 ctl_set_overlapped_tag(struct ctl_scsiio *ctsio, uint8_t tag)
601 {
602 	/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
603 	ctl_set_sense(ctsio,
604 		      /*current_error*/ 1,
605 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
606 		      /*asc*/ 0x4D,
607 		      /*ascq*/ tag,
608 		      SSD_ELEM_NONE);
609 }
610 
611 /*
612  * Tell the user that there was a problem with the command or data he sent.
613  */
614 void
615 ctl_set_invalid_field(struct ctl_scsiio *ctsio, int sks_valid, int command,
616 		      int field, int bit_valid, int bit)
617 {
618 	uint8_t sks[3];
619 	int asc;
620 
621 	if (command != 0) {
622 		/* "Invalid field in CDB" */
623 		asc = 0x24;
624 	} else {
625 		/* "Invalid field in parameter list" */
626 		asc = 0x26;
627 	}
628 
629 	if (sks_valid) {
630 		sks[0] = SSD_SCS_VALID;
631 		if (command)
632 			sks[0] |= SSD_FIELDPTR_CMD;
633 		scsi_ulto2b(field, &sks[1]);
634 
635 		if (bit_valid)
636 			sks[0] |= SSD_BITPTR_VALID | bit;
637 	}
638 
639 	ctl_set_sense(ctsio,
640 		      /*current_error*/ 1,
641 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
642 		      asc,
643 		      /*ascq*/ 0x00,
644 		      /*type*/ (sks_valid != 0) ? SSD_ELEM_SKS : SSD_ELEM_SKIP,
645 		      /*size*/ sizeof(sks),
646 		      /*data*/ sks,
647 		      SSD_ELEM_NONE);
648 }
649 void
650 ctl_set_invalid_field_ciu(struct ctl_scsiio *ctsio)
651 {
652 
653 	/* "Invalid field in command information unit" */
654 	ctl_set_sense(ctsio,
655 		      /*current_error*/ 1,
656 		      /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
657 		      /*asc*/ 0x0E,
658 		      /*ascq*/ 0x03,
659 		      SSD_ELEM_NONE);
660 }
661 
662 void
663 ctl_set_invalid_opcode(struct ctl_scsiio *ctsio)
664 {
665 	uint8_t sks[3];
666 
667 	sks[0] = SSD_SCS_VALID | SSD_FIELDPTR_CMD;
668 	scsi_ulto2b(0, &sks[1]);
669 
670 	/* "Invalid command operation code" */
671 	ctl_set_sense(ctsio,
672 		      /*current_error*/ 1,
673 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
674 		      /*asc*/ 0x20,
675 		      /*ascq*/ 0x00,
676 		      /*type*/ SSD_ELEM_SKS,
677 		      /*size*/ sizeof(sks),
678 		      /*data*/ sks,
679 		      SSD_ELEM_NONE);
680 }
681 
682 void
683 ctl_set_param_len_error(struct ctl_scsiio *ctsio)
684 {
685 	/* "Parameter list length error" */
686 	ctl_set_sense(ctsio,
687 		      /*current_error*/ 1,
688 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
689 		      /*asc*/ 0x1a,
690 		      /*ascq*/ 0x00,
691 		      SSD_ELEM_NONE);
692 }
693 
694 void
695 ctl_set_already_locked(struct ctl_scsiio *ctsio)
696 {
697 	/* Vendor unique "Somebody already is locked" */
698 	ctl_set_sense(ctsio,
699 		      /*current_error*/ 1,
700 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
701 		      /*asc*/ 0x81,
702 		      /*ascq*/ 0x00,
703 		      SSD_ELEM_NONE);
704 }
705 
706 void
707 ctl_set_unsupported_lun(struct ctl_scsiio *ctsio)
708 {
709 	/* "Logical unit not supported" */
710 	ctl_set_sense(ctsio,
711 		      /*current_error*/ 1,
712 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
713 		      /*asc*/ 0x25,
714 		      /*ascq*/ 0x00,
715 		      SSD_ELEM_NONE);
716 }
717 
718 void
719 ctl_set_internal_failure(struct ctl_scsiio *ctsio, int sks_valid,
720 			 uint16_t retry_count)
721 {
722 	uint8_t sks[3];
723 
724 	if (sks_valid) {
725 		sks[0] = SSD_SCS_VALID;
726 		sks[1] = (retry_count >> 8) & 0xff;
727 		sks[2] = retry_count & 0xff;
728 	}
729 
730 	/* "Internal target failure" */
731 	ctl_set_sense(ctsio,
732 		      /*current_error*/ 1,
733 		      /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
734 		      /*asc*/ 0x44,
735 		      /*ascq*/ 0x00,
736 		      /*type*/ (sks_valid != 0) ? SSD_ELEM_SKS : SSD_ELEM_SKIP,
737 		      /*size*/ sizeof(sks),
738 		      /*data*/ sks,
739 		      SSD_ELEM_NONE);
740 }
741 
742 void
743 ctl_set_medium_error(struct ctl_scsiio *ctsio, int read)
744 {
745 	if (read) {
746 		/* "Unrecovered read error" */
747 		ctl_set_sense(ctsio,
748 			      /*current_error*/ 1,
749 			      /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
750 			      /*asc*/ 0x11,
751 			      /*ascq*/ 0x00,
752 			      SSD_ELEM_NONE);
753 	} else {
754 		/* "Write error - auto reallocation failed" */
755 		ctl_set_sense(ctsio,
756 			      /*current_error*/ 1,
757 			      /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
758 			      /*asc*/ 0x0C,
759 			      /*ascq*/ 0x02,
760 			      SSD_ELEM_NONE);
761 	}
762 }
763 
764 void
765 ctl_set_aborted(struct ctl_scsiio *ctsio)
766 {
767 	ctl_set_sense(ctsio,
768 		      /*current_error*/ 1,
769 		      /*sense_key*/ SSD_KEY_ABORTED_COMMAND,
770 		      /*asc*/ 0x45,
771 		      /*ascq*/ 0x00,
772 		      SSD_ELEM_NONE);
773 }
774 
775 void
776 ctl_set_lba_out_of_range(struct ctl_scsiio *ctsio, uint64_t lba)
777 {
778 	uint8_t	info[8];
779 
780 	scsi_u64to8b(lba, info);
781 
782 	/* "Logical block address out of range" */
783 	ctl_set_sense(ctsio,
784 		      /*current_error*/ 1,
785 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
786 		      /*asc*/ 0x21,
787 		      /*ascq*/ 0x00,
788 		      /*type*/ (lba != 0) ? SSD_ELEM_INFO : SSD_ELEM_SKIP,
789 		      /*size*/ sizeof(info), /*data*/ &info,
790 		      SSD_ELEM_NONE);
791 }
792 
793 void
794 ctl_set_lun_stopped(struct ctl_scsiio *ctsio)
795 {
796 	/* "Logical unit not ready, initializing cmd. required" */
797 	ctl_set_sense(ctsio,
798 		      /*current_error*/ 1,
799 		      /*sense_key*/ SSD_KEY_NOT_READY,
800 		      /*asc*/ 0x04,
801 		      /*ascq*/ 0x02,
802 		      SSD_ELEM_NONE);
803 }
804 
805 void
806 ctl_set_lun_int_reqd(struct ctl_scsiio *ctsio)
807 {
808 	/* "Logical unit not ready, manual intervention required" */
809 	ctl_set_sense(ctsio,
810 		      /*current_error*/ 1,
811 		      /*sense_key*/ SSD_KEY_NOT_READY,
812 		      /*asc*/ 0x04,
813 		      /*ascq*/ 0x03,
814 		      SSD_ELEM_NONE);
815 }
816 
817 void
818 ctl_set_lun_ejected(struct ctl_scsiio *ctsio)
819 {
820 	/* "Medium not present - tray open" */
821 	ctl_set_sense(ctsio,
822 		      /*current_error*/ 1,
823 		      /*sense_key*/ SSD_KEY_NOT_READY,
824 		      /*asc*/ 0x3A,
825 		      /*ascq*/ 0x02,
826 		      SSD_ELEM_NONE);
827 }
828 
829 void
830 ctl_set_lun_no_media(struct ctl_scsiio *ctsio)
831 {
832 	/* "Medium not present - tray closed" */
833 	ctl_set_sense(ctsio,
834 		      /*current_error*/ 1,
835 		      /*sense_key*/ SSD_KEY_NOT_READY,
836 		      /*asc*/ 0x3A,
837 		      /*ascq*/ 0x01,
838 		      SSD_ELEM_NONE);
839 }
840 
841 void
842 ctl_set_illegal_pr_release(struct ctl_scsiio *ctsio)
843 {
844 	/* "Invalid release of persistent reservation" */
845 	ctl_set_sense(ctsio,
846 		      /*current_error*/ 1,
847 		      /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
848 		      /*asc*/ 0x26,
849 		      /*ascq*/ 0x04,
850 		      SSD_ELEM_NONE);
851 }
852 
853 void
854 ctl_set_lun_transit(struct ctl_scsiio *ctsio)
855 {
856 	/* "Logical unit not ready, asymmetric access state transition" */
857 	ctl_set_sense(ctsio,
858 		      /*current_error*/ 1,
859 		      /*sense_key*/ SSD_KEY_NOT_READY,
860 		      /*asc*/ 0x04,
861 		      /*ascq*/ 0x0a,
862 		      SSD_ELEM_NONE);
863 }
864 
865 void
866 ctl_set_lun_standby(struct ctl_scsiio *ctsio)
867 {
868 	/* "Logical unit not ready, target port in standby state" */
869 	ctl_set_sense(ctsio,
870 		      /*current_error*/ 1,
871 		      /*sense_key*/ SSD_KEY_NOT_READY,
872 		      /*asc*/ 0x04,
873 		      /*ascq*/ 0x0b,
874 		      SSD_ELEM_NONE);
875 }
876 
877 void
878 ctl_set_lun_unavail(struct ctl_scsiio *ctsio)
879 {
880 	/* "Logical unit not ready, target port in unavailable state" */
881 	ctl_set_sense(ctsio,
882 		      /*current_error*/ 1,
883 		      /*sense_key*/ SSD_KEY_NOT_READY,
884 		      /*asc*/ 0x04,
885 		      /*ascq*/ 0x0c,
886 		      SSD_ELEM_NONE);
887 }
888 
889 void
890 ctl_set_medium_format_corrupted(struct ctl_scsiio *ctsio)
891 {
892 	/* "Medium format corrupted" */
893 	ctl_set_sense(ctsio,
894 		      /*current_error*/ 1,
895 		      /*sense_key*/ SSD_KEY_MEDIUM_ERROR,
896 		      /*asc*/ 0x31,
897 		      /*ascq*/ 0x00,
898 		      SSD_ELEM_NONE);
899 }
900 
901 void
902 ctl_set_medium_magazine_inaccessible(struct ctl_scsiio *ctsio)
903 {
904 	/* "Medium magazine not accessible" */
905 	ctl_set_sense(ctsio,
906 		      /*current_error*/ 1,
907 		      /*sense_key*/ SSD_KEY_NOT_READY,
908 		      /*asc*/ 0x3b,
909 		      /*ascq*/ 0x11,
910 		      SSD_ELEM_NONE);
911 }
912 
913 void
914 ctl_set_data_phase_error(struct ctl_scsiio *ctsio)
915 {
916 	/* "Data phase error" */
917 	ctl_set_sense(ctsio,
918 		      /*current_error*/ 1,
919 		      /*sense_key*/ SSD_KEY_NOT_READY,
920 		      /*asc*/ 0x4b,
921 		      /*ascq*/ 0x00,
922 		      SSD_ELEM_NONE);
923 }
924 
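/*
 * The next three helpers report bare SCSI status codes (RESERVATION
 * CONFLICT, QUEUE FULL and BUSY) with no autosense data attached.
 */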
925 void
926 ctl_set_reservation_conflict(struct ctl_scsiio *ctsio)
927 {
928 
929 	ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
930 	ctsio->sense_len = 0;
931 	ctsio->io_hdr.status = CTL_SCSI_ERROR;
932 }
933 
934 void
935 ctl_set_queue_full(struct ctl_scsiio *ctsio)
936 {
937 
938 	ctsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
939 	ctsio->sense_len = 0;
940 	ctsio->io_hdr.status = CTL_SCSI_ERROR;
941 }
942 
943 void
944 ctl_set_busy(struct ctl_scsiio *ctsio)
945 {
946 
947 	ctsio->scsi_status = SCSI_STATUS_BUSY;
948 	ctsio->sense_len = 0;
949 	ctsio->io_hdr.status = CTL_SCSI_ERROR;
950 }
951 
952 void
953 ctl_set_task_aborted(struct ctl_scsiio *ctsio)
954 {
955 
956 	ctsio->scsi_status = SCSI_STATUS_TASK_ABORTED;
957 	ctsio->sense_len = 0;
958 	ctsio->io_hdr.status = CTL_CMD_ABORTED;
959 }
960 
961 void
962 ctl_set_hw_write_protected(struct ctl_scsiio *ctsio)
963 {
964 	/* "Hardware write protected" */
965 	ctl_set_sense(ctsio,
966 		      /*current_error*/ 1,
967 		      /*sense_key*/ SSD_KEY_DATA_PROTECT,
968 		      /*asc*/ 0x27,
969 		      /*ascq*/ 0x01,
970 		      SSD_ELEM_NONE);
971 }
972 
973 void
974 ctl_set_space_alloc_fail(struct ctl_scsiio *ctsio)
975 {
976 	/* "Space allocation failed write protect" */
977 	ctl_set_sense(ctsio,
978 		      /*current_error*/ 1,
979 		      /*sense_key*/ SSD_KEY_DATA_PROTECT,
980 		      /*asc*/ 0x27,
981 		      /*ascq*/ 0x07,
982 		      SSD_ELEM_NONE);
983 }
984 
985 void
986 ctl_set_success(struct ctl_scsiio *ctsio)
987 {
988 
989 	ctsio->scsi_status = SCSI_STATUS_OK;
990 	ctsio->sense_len = 0;
991 	ctsio->io_hdr.status = CTL_SUCCESS;
992 }
993 
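/*
 * NVMe analogues of the SCSI sense helpers above.  ctl_nvme_set_error()
 * assembles the 16-bit completion status from the status code type and
 * status code and marks the I/O as CTL_NVME_ERROR; the wrappers that
 * follow cover the generic and media error codes CTL reports.
 */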
994 void
995 ctl_nvme_set_error(struct ctl_nvmeio *ctnio, uint8_t sc_type,
996     uint8_t sc_status)
997 {
998 	uint16_t status;
999 
1000 	memset(&ctnio->cpl, 0, sizeof(ctnio->cpl));
1001 	status = NVMEF(NVME_STATUS_SCT, sc_type) |
1002 	    NVMEF(NVME_STATUS_SC, sc_status);
1003 	ctnio->cpl.status = htole16(status);
1004 	ctnio->io_hdr.status = CTL_NVME_ERROR;
1005 }
1006 
1007 void
1008 ctl_nvme_set_generic_error(struct ctl_nvmeio *ctnio, uint8_t sc_status)
1009 {
1010 	ctl_nvme_set_error(ctnio, NVME_SCT_GENERIC, sc_status);
1011 }
1012 
1013 void
1014 ctl_nvme_set_invalid_opcode(struct ctl_nvmeio *ctnio)
1015 {
1016 	ctl_nvme_set_generic_error(ctnio, NVME_SC_INVALID_OPCODE);
1017 }
1018 
1019 void
1020 ctl_nvme_set_invalid_field(struct ctl_nvmeio *ctnio)
1021 {
1022 	ctl_nvme_set_generic_error(ctnio, NVME_SC_INVALID_FIELD);
1023 }
1024 
1025 void
1026 ctl_nvme_set_data_transfer_error(struct ctl_nvmeio *ctnio)
1027 {
1028 	ctl_nvme_set_generic_error(ctnio, NVME_SC_DATA_TRANSFER_ERROR);
1029 }
1030 
1031 void
1032 ctl_nvme_set_internal_error(struct ctl_nvmeio *ctnio)
1033 {
1034 	ctl_nvme_set_generic_error(ctnio, NVME_SC_INTERNAL_DEVICE_ERROR);
1035 }
1036 
1037 void
1038 ctl_nvme_set_invalid_namespace(struct ctl_nvmeio *ctnio)
1039 {
1040 	ctl_nvme_set_generic_error(ctnio, NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1041 }
1042 
1043 void
1044 ctl_nvme_set_command_aborted(struct ctl_nvmeio *ctnio)
1045 {
1046 	ctl_nvme_set_generic_error(ctnio, NVME_SC_COMMAND_ABORTED_BY_HOST);
1047 }
1048 
1049 void
1050 ctl_nvme_set_failed_fused_command(struct ctl_nvmeio *ctnio)
1051 {
1052 	ctl_nvme_set_generic_error(ctnio, NVME_SC_ABORTED_FAILED_FUSED);
1053 }
1054 
1055 void
1056 ctl_nvme_set_missing_fused_command(struct ctl_nvmeio *ctnio)
1057 {
1058 	ctl_nvme_set_generic_error(ctnio, NVME_SC_ABORTED_MISSING_FUSED);
1059 }
1060 
1061 void
1062 ctl_nvme_set_namespace_is_write_protected(struct ctl_nvmeio *ctnio)
1063 {
1064 	ctl_nvme_set_generic_error(ctnio, NVME_SC_NAMESPACE_IS_WRITE_PROTECTED);
1065 }
1066 
1067 void
1068 ctl_nvme_set_lba_out_of_range(struct ctl_nvmeio *ctnio)
1069 {
1070 	ctl_nvme_set_generic_error(ctnio, NVME_SC_LBA_OUT_OF_RANGE);
1071 }
1072 
1073 void
1074 ctl_nvme_set_namespace_not_ready(struct ctl_nvmeio *ctnio)
1075 {
1076 	ctl_nvme_set_generic_error(ctnio, NVME_SC_NAMESPACE_NOT_READY);
1077 }
1078 
1079 void
1080 ctl_nvme_set_write_fault(struct ctl_nvmeio *ctnio)
1081 {
1082 	ctl_nvme_set_error(ctnio, NVME_SCT_MEDIA_ERROR,
1083 	    NVME_SC_WRITE_FAULTS);
1084 }
1085 
1086 void
1087 ctl_nvme_set_unrecoverable_read_error(struct ctl_nvmeio *ctnio)
1088 {
1089 	ctl_nvme_set_error(ctnio, NVME_SCT_MEDIA_ERROR,
1090 	    NVME_SC_UNRECOVERED_READ_ERROR);
1091 }
1092 
1093 void
1094 ctl_nvme_set_compare_failure(struct ctl_nvmeio *ctnio)
1095 {
1096 	ctl_nvme_set_error(ctnio, NVME_SCT_MEDIA_ERROR,
1097 	    NVME_SC_COMPARE_FAILURE);
1098 }
1099 
1100 void
1101 ctl_nvme_set_space_alloc_fail(struct ctl_nvmeio *ctnio)
1102 {
1103 	ctl_nvme_set_error(ctnio, NVME_SCT_MEDIA_ERROR,
1104 	    NVME_SC_DEALLOCATED_OR_UNWRITTEN);
1105 }
1106 
1107 void
1108 ctl_nvme_set_success(struct ctl_nvmeio *ctnio)
1109 {
1110 	memset(&ctnio->cpl, 0, sizeof(ctnio->cpl));
1111 	ctnio->io_hdr.status = CTL_SUCCESS;
1112 }
1113 
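/*
 * Transport-agnostic wrappers: callers holding a union ctl_io can report
 * common conditions through these helpers, which dispatch on the I/O type
 * to the matching SCSI or NVMe routine above.
 */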
1114 void
1115 ctl_io_set_invalid_opcode(union ctl_io *io)
1116 {
1117 	switch (io->io_hdr.io_type) {
1118 	case CTL_IO_SCSI:
1119 		ctl_set_invalid_opcode(&io->scsiio);
1120 		break;
1121 	case CTL_IO_NVME:
1122 		ctl_nvme_set_invalid_opcode(&io->nvmeio);
1123 		break;
1124 	default:
1125 		__assert_unreachable();
1126 	}
1127 }
1128 
1129 void
1130 ctl_io_set_hw_write_protected(union ctl_io *io)
1131 {
1132 	switch (io->io_hdr.io_type) {
1133 	case CTL_IO_SCSI:
1134 		ctl_set_hw_write_protected(&io->scsiio);
1135 		break;
1136 	case CTL_IO_NVME:
1137 		ctl_nvme_set_namespace_is_write_protected(&io->nvmeio);
1138 		break;
1139 	default:
1140 		__assert_unreachable();
1141 	}
1142 }
1143 
1144 void
1145 ctl_io_set_busy(union ctl_io *io)
1146 {
1147 	switch (io->io_hdr.io_type) {
1148 	case CTL_IO_SCSI:
1149 		ctl_set_busy(&io->scsiio);
1150 		break;
1151 	case CTL_IO_NVME:
1152 		ctl_nvme_set_namespace_not_ready(&io->nvmeio);
1153 		break;
1154 	default:
1155 		__assert_unreachable();
1156 	}
1157 }
1158 
1159 void
1160 ctl_io_set_compare_failure(union ctl_io *io, uint64_t offset)
1161 {
1162 	uint8_t info[8];
1163 
1164 	switch (io->io_hdr.io_type) {
1165 	case CTL_IO_SCSI:
1166 		scsi_u64to8b(offset, info);
1167 		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
1168 		    /*sense_key*/ SSD_KEY_MISCOMPARE,
1169 		    /*asc*/ 0x1D, /*ascq*/ 0x00,
1170 		    /*type*/ SSD_ELEM_INFO,
1171 		    /*size*/ sizeof(info), /*data*/ &info,
1172 		    /*type*/ SSD_ELEM_NONE);
1173 		break;
1174 	case CTL_IO_NVME:
1175 		ctl_nvme_set_compare_failure(&io->nvmeio);
1176 		break;
1177 	default:
1178 		__assert_unreachable();
1179 	}
1180 }
1181 
1182 void
1183 ctl_io_set_space_alloc_fail(union ctl_io *io)
1184 {
1185 	switch (io->io_hdr.io_type) {
1186 	case CTL_IO_SCSI:
1187 		ctl_set_space_alloc_fail(&io->scsiio);
1188 		break;
1189 	case CTL_IO_NVME:
1190 		ctl_nvme_set_space_alloc_fail(&io->nvmeio);
1191 		break;
1192 	default:
1193 		__assert_unreachable();
1194 	}
1195 }
1196 
1197 void
1198 ctl_io_set_success(union ctl_io *io)
1199 {
1200 	switch (io->io_hdr.io_type) {
1201 	case CTL_IO_SCSI:
1202 		ctl_set_success(&io->scsiio);
1203 		break;
1204 	case CTL_IO_NVME:
1205 	case CTL_IO_NVME_ADMIN:
1206 		ctl_nvme_set_success(&io->nvmeio);
1207 		break;
1208 	default:
1209 		__assert_unreachable();
1210 	}
1211 }
1212