xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision 206b73d0)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * Copyright 2000-2020 Broadcom Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
30  *
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /* Communications core for Avago Technologies (LSI) MPT3 */
37 
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sbuf.h>
56 
57 #include <machine/bus.h>
58 #include <machine/resource.h>
59 #include <sys/rman.h>
60 
61 #include <machine/stdarg.h>
62 
63 #include <cam/cam.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
74 #endif
75 
76 #include <dev/nvme/nvme.h>
77 
78 #include <dev/mpr/mpi/mpi2_type.h>
79 #include <dev/mpr/mpi/mpi2.h>
80 #include <dev/mpr/mpi/mpi2_ioc.h>
81 #include <dev/mpr/mpi/mpi2_sas.h>
82 #include <dev/mpr/mpi/mpi2_pci.h>
83 #include <dev/mpr/mpi/mpi2_cnfg.h>
84 #include <dev/mpr/mpi/mpi2_init.h>
85 #include <dev/mpr/mpi/mpi2_tool.h>
86 #include <dev/mpr/mpr_ioctl.h>
87 #include <dev/mpr/mprvar.h>
88 #include <dev/mpr/mpr_table.h>
89 #include <dev/mpr/mpr_sas.h>
90 
91 #define MPRSAS_DISCOVERY_TIMEOUT	20
92 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
93 
94 /*
95  * static array to check SCSI OpCode for EEDP protection bits
96  */
97 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
100 static uint8_t op_code_prot[256] = {
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
117 };
118 
119 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
120 
121 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
123 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mprsas_poll(struct cam_sim *sim);
125 static void mprsas_scsiio_timeout(void *data);
126 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
127 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
128 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
129 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
130 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
131 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
132     struct mpr_command *cm);
133 static void mprsas_async(void *callback_arg, uint32_t code,
134     struct cam_path *path, void *arg);
135 #if (__FreeBSD_version < 901503) || \
136     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
137 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
138     struct ccb_getdev *cgd);
139 static void mprsas_read_cap_done(struct cam_periph *periph,
140     union ccb *done_ccb);
141 #endif
142 static int mprsas_send_portenable(struct mpr_softc *sc);
143 static void mprsas_portenable_complete(struct mpr_softc *sc,
144     struct mpr_command *cm);
145 
146 #if __FreeBSD_version >= 900026
147 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
148 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
149     uint64_t sasaddr);
150 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif //FreeBSD_version >= 900026
152 
/*
 * Find the target whose firmware device handle matches 'handle',
 * scanning the target array linearly from index 'start'.  Returns the
 * matching target, or NULL if no entry in [start, maxtargets) matches.
 */
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
155     uint16_t handle)
156 {
157 	struct mprsas_target *target;
158 	int i;
159 
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
163 			return (target);
164 	}
165 
	/* No target in range owns this handle. */
166 	return (NULL);
167 }
168 
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170  * commands before device handles have been found by discovery.  Since
171  * discovery involves reading config pages and possibly sending commands,
172  * discovery actions may continue even after we receive the end of discovery
173  * event, so refcount discovery actions instead of assuming we can unfreeze
174  * the simq when we get the event.
175  */
/*
 * Take one discovery reference (see the refcounting rationale in the
 * comment above).  The first reference taken while MPRSAS_IN_STARTUP is
 * set freezes the SIM queue -- and, on CAM versions that support it,
 * holds boot -- so no I/O is issued before discovery has populated the
 * device handles.  No-op once startup has completed.
 */
176 void
177 mprsas_startup_increment(struct mprsas_softc *sassc)
178 {
179 	MPR_FUNCTRACE(sassc->sc);
180 
181 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 		if (sassc->startup_refcount++ == 0) {
183 			/* just starting, freeze the simq */
184 			mpr_dprint(sassc->sc, MPR_INIT,
185 			    "%s freezing simq\n", __func__);
186 #if (__FreeBSD_version >= 1000039) || \
187     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
188 			xpt_hold_boot();
189 #endif
190 			xpt_freeze_simq(sassc->sim, 1);
191 		}
192 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193 		    sassc->startup_refcount);
194 	}
195 }
196 
/*
 * Release a SIM queue freeze that was flagged with MPRSAS_QUEUE_FROZEN
 * (e.g. after a controller reinit).  No-op if the queue is not frozen
 * under that flag.
 */
197 void
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 {
200 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 		xpt_release_simq(sassc->sim, 1);
203 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
204 	}
205 }
206 
/*
 * Drop one discovery reference taken by mprsas_startup_increment().
 * When the last reference is released, startup is complete: clear
 * MPRSAS_IN_STARTUP, release the SIM queue, and either release the boot
 * hold or (on CAM versions without xpt_release_boot) rescan the whole
 * bus for the final topology.
 */
207 void
208 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 {
210 	MPR_FUNCTRACE(sassc->sc);
211 
212 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213 		if (--sassc->startup_refcount == 0) {
214 			/* finished all discovery-related actions, release
215 			 * the simq and rescan for the latest topology.
216 			 */
217 			mpr_dprint(sassc->sc, MPR_INIT,
218 			    "%s releasing simq\n", __func__);
219 			sassc->flags &= ~MPRSAS_IN_STARTUP;
220 			xpt_release_simq(sassc->sim, 1);
221 #if (__FreeBSD_version >= 1000039) || \
222     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
223 			xpt_release_boot();
224 #else
225 			mprsas_rescan_target(sassc->sc, NULL);
226 #endif
227 		}
228 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229 		    sassc->startup_refcount);
230 	}
231 }
232 
233 /*
234  * The firmware requires us to stop sending commands when we're doing task
235  * management, so high-priority commands are reserved for that
236  * use.
237  * XXX The logic for serializing the device has been made lazy and moved to
238  * mprsas_prepare_for_tm().
239  */
/*
 * Allocate a high-priority command for a task-management request and
 * pre-set its MPI function code.  Returns NULL when the high-priority
 * pool is exhausted.
 *
 * NOTE(review): the request frame is not zeroed here; a caller that
 * memset()s the frame afterwards wipes the Function field set below and
 * must restore it -- verify each caller.
 */
240 struct mpr_command *
241 mprsas_alloc_tm(struct mpr_softc *sc)
242 {
243 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
244 	struct mpr_command *tm;
245 
246 	MPR_FUNCTRACE(sc);
247 	tm = mpr_alloc_high_priority_command(sc);
248 	if (tm == NULL)
249 		return (NULL);
250 
251 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
252 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
253 	return tm;
254 }
255 
/*
 * Release a task-management command obtained from mprsas_alloc_tm().
 * Undoes the per-device queue freeze and the path/CCB allocations made
 * when the TM was prepared, clears the target's INRESET flag so normal
 * SCSI I/O can resume, then returns the command to the high-priority
 * pool.  Safe to call with tm == NULL.
 */
256 void
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
258 {
	/* target_id is for the log message only; -1 means "no target". */
259 	int target_id = 0xFFFFFFFF;
260 
261 	MPR_FUNCTRACE(sc);
262 	if (tm == NULL)
263 		return;
264 
265 	/*
266 	 * For TM's the devq is frozen for the device.  Unfreeze it here and
267 	 * free the resources used for freezing the devq.  Must clear the
268 	 * INRESET flag as well or scsi I/O will not work.
269 	 */
270 	if (tm->cm_targ != NULL) {
271 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
272 		target_id = tm->cm_targ->tid;
273 	}
274 	if (tm->cm_ccb) {
275 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
276 		    target_id);
277 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
278 		xpt_free_path(tm->cm_ccb->ccb_h.path);
279 		xpt_free_ccb(tm->cm_ccb);
280 	}
281 
282 	mpr_free_high_priority_command(sc, tm);
283 }
284 
/*
 * Schedule an asynchronous CAM rescan of a single target, or of the
 * entire bus when targ is NULL.  Allocation or path-creation failures
 * are logged and the rescan is simply dropped.
 */
285 void
286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
287 {
288 	struct mprsas_softc *sassc = sc->sassc;
289 	path_id_t pathid;
290 	target_id_t targetid;
291 	union ccb *ccb;
292 
293 	MPR_FUNCTRACE(sc);
294 	pathid = cam_sim_path(sassc->sim);
295 	if (targ == NULL)
296 		targetid = CAM_TARGET_WILDCARD;
297 	else
		/* Target id is the index of targ within the targets array. */
298 		targetid = targ - sassc->targets;
299 
300 	/*
301 	 * Allocate a CCB and schedule a rescan.
302 	 */
303 	ccb = xpt_alloc_ccb_nowait();
304 	if (ccb == NULL) {
305 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
306 		return;
307 	}
308 
309 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
310 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
311 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
312 		xpt_free_ccb(ccb);
313 		return;
314 	}
315 
	/* Wildcard target means a full bus scan; otherwise scan one target. */
316 	if (targetid == CAM_TARGET_WILDCARD)
317 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
318 	else
319 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
320 
321 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB. */
322 	xpt_rescan(ccb);
323 }
324 
/*
 * printf-style debug logging helper.  Prefixes the message with the CAM
 * path string (or sim/bus/target/lun when no CCB is attached), the SCSI
 * CDB and transfer length for XPT_SCSI_IO commands, and the request
 * SMID.  Returns immediately unless 'level' is enabled in the adapter's
 * debug mask.
 */
325 static void
326 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
327 {
328 	struct sbuf sb;
329 	va_list ap;
330 	char str[192];
331 	char path_str[64];
332 
333 	if (cm == NULL)
334 		return;
335 
336 	/* No need to be in here if debugging isn't enabled */
337 	if ((cm->cm_sc->mpr_debug & level) == 0)
338 		return;
339 
	/* Fixed-length sbuf over the on-stack buffer; output is truncated
	 * rather than allocated if it grows past sizeof(str). */
340 	sbuf_new(&sb, str, sizeof(str), 0);
341 
342 	va_start(ap, fmt);
343 
344 	if (cm->cm_ccb != NULL) {
345 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
346 		    sizeof(path_str));
347 		sbuf_cat(&sb, path_str);
348 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
349 			scsi_command_string(&cm->cm_ccb->csio, &sb);
350 			sbuf_printf(&sb, "length %d ",
351 			    cm->cm_ccb->csio.dxfer_len);
352 		}
353 	} else {
354 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
355 		    cam_sim_name(cm->cm_sc->sassc->sim),
356 		    cam_sim_unit(cm->cm_sc->sassc->sim),
357 		    cam_sim_bus(cm->cm_sc->sassc->sim),
358 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
359 		    cm->cm_lun);
360 	}
361 
362 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
363 	sbuf_vprintf(&sb, fmt, ap);
364 	sbuf_finish(&sb);
365 	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
366 
367 	va_end(ap);
368 }
369 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Volumes need no follow-up
 * SAS_OP_REMOVE_DEVICE operation, so on success the target's slot is
 * cleared here directly.  devname/sasaddr are deliberately preserved
 * (see comment below) so the same target id can be reassigned if the
 * volume comes back.
 */
370 static void
371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
372 {
373 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374 	struct mprsas_target *targ;
375 	uint16_t handle;
376 
377 	MPR_FUNCTRACE(sc);
378 
379 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
380 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
381 	targ = tm->cm_targ;
382 
	/* A NULL reply most likely means a diag reset interrupted the TM. */
383 	if (reply == NULL) {
384 		/* XXX retry the remove after the diag reset completes? */
385 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386 		    "0x%04x\n", __func__, handle);
387 		mprsas_free_tm(sc, tm);
388 		return;
389 	}
390 
391 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392 	    MPI2_IOCSTATUS_SUCCESS) {
393 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
395 	}
396 
397 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398 	    le32toh(reply->TerminationCount));
399 	mpr_free_reply(sc, tm->cm_reply_data);
400 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
401 
402 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
403 	    targ->tid, handle);
404 
405 	/*
406 	 * Don't clear target if remove fails because things will get confusing.
407 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
408 	 * this target id if possible, and so we can assign the same target id
409 	 * to this device if it comes back in the future.
410 	 */
411 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412 	    MPI2_IOCSTATUS_SUCCESS) {
413 		targ = tm->cm_targ;
414 		targ->handle = 0x0;
415 		targ->encl_handle = 0x0;
416 		targ->encl_level_valid = 0x0;
417 		targ->encl_level = 0x0;
418 		targ->connector_name[0] = ' ';
419 		targ->connector_name[1] = ' ';
420 		targ->connector_name[2] = ' ';
421 		targ->connector_name[3] = ' ';
422 		targ->encl_slot = 0x0;
423 		targ->exp_dev_handle = 0x0;
424 		targ->phy_num = 0x0;
425 		targ->linkrate = 0x0;
426 		targ->devinfo = 0x0;
427 		targ->flags = 0x0;
428 		targ->scsi_req_desc_type = 0;
429 	}
430 
431 	mprsas_free_tm(sc, tm);
432 }
433 
434 
435 /*
436  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437  * Otherwise Volume Delete is same as Bare Drive Removal.
438  */
439 void
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
441 {
442 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 	struct mpr_softc *sc;
444 	struct mpr_command *cm;
445 	struct mprsas_target *targ = NULL;
446 
447 	MPR_FUNCTRACE(sassc->sc);
448 	sc = sassc->sc;
449 
450 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
451 	if (targ == NULL) {
452 		/* FIXME: what is the action? */
453 		/* We don't know about this device? */
454 		mpr_dprint(sc, MPR_ERROR,
455 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
456 		return;
457 	}
458 
459 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
460 
461 	cm = mprsas_alloc_tm(sc);
462 	if (cm == NULL) {
463 		mpr_dprint(sc, MPR_ERROR,
464 		    "%s: command alloc failure\n", __func__);
465 		return;
466 	}
467 
468 	mprsas_rescan_target(sc, targ);
469 
470 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 	req->DevHandle = targ->handle;
472 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
473 
474 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475 		/* SAS Hard Link Reset / SATA Link Reset */
476 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
477 	} else {
478 		/* PCIe Protocol Level Reset*/
479 		req->MsgFlags =
480 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
481 	}
482 
483 	cm->cm_targ = targ;
484 	cm->cm_data = NULL;
485 	cm->cm_complete = mprsas_remove_volume;
486 	cm->cm_complete_data = (void *)(uintptr_t)handle;
487 
488 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489 	    __func__, targ->tid);
490 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
491 
492 	mpr_map_command(sc, cm);
493 }
494 
495 /*
496  * The firmware performs debounce on the link to avoid transient link errors
497  * and false removals.  When it does decide that link has been lost and a
498  * device needs to go away, it expects that the host will perform a target reset
499  * and then an op remove.  The reset has the side-effect of aborting any
500  * outstanding requests for the device, which is required for the op-remove to
501  * succeed.  It's not clear if the host should check for the device coming back
502  * alive after the reset.
503  */
504 void
505 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
506 {
507 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
508 	struct mpr_softc *sc;
509 	struct mpr_command *tm;
510 	struct mprsas_target *targ = NULL;
511 
512 	MPR_FUNCTRACE(sassc->sc);
513 
514 	sc = sassc->sc;
515 
516 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
517 	if (targ == NULL) {
518 		/* FIXME: what is the action? */
519 		/* We don't know about this device? */
520 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
521 		    __func__, handle);
522 		return;
523 	}
524 
525 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
526 
527 	tm = mprsas_alloc_tm(sc);
528 	if (tm == NULL) {
529 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
530 		    __func__);
531 		return;
532 	}
533 
534 	mprsas_rescan_target(sc, targ);
535 
536 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
537 	memset(req, 0, sizeof(*req));
538 	req->DevHandle = htole16(targ->handle);
539 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
540 
541 	/* SAS Hard Link Reset / SATA Link Reset */
542 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
543 
544 	tm->cm_targ = targ;
545 	tm->cm_data = NULL;
546 	tm->cm_complete = mprsas_remove_device;
547 	tm->cm_complete_data = (void *)(uintptr_t)handle;
548 
549 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
550 	    __func__, targ->tid);
551 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
552 
553 	mpr_map_command(sc, tm);
554 }
555 
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_remove().  The reset aborted any outstanding firmware
 * requests for the device; this handler re-uses the same command to
 * send the SAS_OP_REMOVE_DEVICE IO-unit control request, then completes
 * any commands still queued on the target with CAM_DEV_NOT_THERE.
 */
556 static void
557 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
558 {
559 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
560 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
561 	struct mprsas_target *targ;
562 	struct mpr_command *next_cm;
563 	uint16_t handle;
564 
565 	MPR_FUNCTRACE(sc);
566 
567 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
568 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
569 	targ = tm->cm_targ;
570 
571 	/*
572 	 * Currently there should be no way we can hit this case.  It only
573 	 * happens when we have a failure to allocate chain frames, and
574 	 * task management commands don't have S/G lists.
575 	 */
576 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
577 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
578 		    "handle %#04x! This should not happen!\n", __func__,
579 		    tm->cm_flags, handle);
580 	}
581 
	/* A NULL reply most likely means a diag reset interrupted the TM. */
582 	if (reply == NULL) {
583 		/* XXX retry the remove after the diag reset completes? */
584 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
585 		    "0x%04x\n", __func__, handle);
586 		mprsas_free_tm(sc, tm);
587 		return;
588 	}
589 
590 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
591 	    MPI2_IOCSTATUS_SUCCESS) {
592 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
593 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
594 	}
595 
596 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
597 	    le32toh(reply->TerminationCount));
598 	mpr_free_reply(sc, tm->cm_reply_data);
599 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
600 
601 	/* Reuse the existing command */
602 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
603 	memset(req, 0, sizeof(*req));
604 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
605 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
606 	req->DevHandle = htole16(handle);
607 	tm->cm_data = NULL;
608 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
609 	tm->cm_complete = mprsas_remove_complete;
610 	tm->cm_complete_data = (void *)(uintptr_t)handle;
611 
612 	mpr_map_command(sc, tm);
613 
614 	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
615 	    targ->tid, handle);
616 	if (targ->encl_level_valid) {
617 		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
618 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
619 		    targ->connector_name);
620 	}
	/* Fail back anything still queued on the target; the device is gone.
	 * Note 'tm' is reused as the loop cursor from here on. */
621 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
622 		union ccb *ccb;
623 
624 		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
625 		ccb = tm->cm_complete_data;
626 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
627 		mprsas_scsiio_complete(sc, tm);
628 	}
629 }
630 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mprsas_remove_device().  On success the target slot is cleared (but
 * devname/sasaddr are preserved -- see comment below) and any per-LUN
 * state is freed.  On failure or a missing reply the target is left
 * untouched.
 */
631 static void
632 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
633 {
634 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
635 	uint16_t handle;
636 	struct mprsas_target *targ;
637 	struct mprsas_lun *lun;
638 
639 	MPR_FUNCTRACE(sc);
640 
641 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
642 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
643 
644 	/*
645 	 * Currently there should be no way we can hit this case.  It only
646 	 * happens when we have a failure to allocate chain frames, and
647 	 * task management commands don't have S/G lists.
648 	 */
649 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
650 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
651 		    "handle %#04x! This should not happen!\n", __func__,
652 		    tm->cm_flags, handle);
653 		mprsas_free_tm(sc, tm);
654 		return;
655 	}
656 
657 	if (reply == NULL) {
658 		/* most likely a chip reset */
659 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
660 		    "0x%04x\n", __func__, handle);
661 		mprsas_free_tm(sc, tm);
662 		return;
663 	}
664 
665 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
666 	    __func__, handle, le16toh(reply->IOCStatus));
667 
668 	/*
669 	 * Don't clear target if remove fails because things will get confusing.
670 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
671 	 * this target id if possible, and so we can assign the same target id
672 	 * to this device if it comes back in the future.
673 	 */
674 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
675 	    MPI2_IOCSTATUS_SUCCESS) {
676 		targ = tm->cm_targ;
677 		targ->handle = 0x0;
678 		targ->encl_handle = 0x0;
679 		targ->encl_level_valid = 0x0;
680 		targ->encl_level = 0x0;
681 		targ->connector_name[0] = ' ';
682 		targ->connector_name[1] = ' ';
683 		targ->connector_name[2] = ' ';
684 		targ->connector_name[3] = ' ';
685 		targ->encl_slot = 0x0;
686 		targ->exp_dev_handle = 0x0;
687 		targ->phy_num = 0x0;
688 		targ->linkrate = 0x0;
689 		targ->devinfo = 0x0;
690 		targ->flags = 0x0;
691 		targ->scsi_req_desc_type = 0;
692 
		/* Free all per-LUN bookkeeping for the departed device. */
693 		while (!SLIST_EMPTY(&targ->luns)) {
694 			lun = SLIST_FIRST(&targ->luns);
695 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
696 			free(lun, M_MPR);
697 		}
698 	}
699 
700 	mprsas_free_tm(sc, tm);
701 }
702 
/*
 * Subscribe to the firmware event notifications this driver handles.
 * PCIe-related events are only requested on MPI 2.6+ IOCs, and only
 * when the Gen3.5 flag is set.  Always returns 0.
 */
703 static int
704 mprsas_register_events(struct mpr_softc *sc)
705 {
	/* Bitmask of event codes, one bit per MPI2_EVENT_* value. */
706 	uint8_t events[16];
707 
708 	bzero(events, 16);
709 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
710 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
711 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
712 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
713 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
714 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
715 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
716 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
717 	setbit(events, MPI2_EVENT_IR_VOLUME);
718 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
719 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
720 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
721 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
722 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
723 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
724 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
725 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
726 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
727 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
728 		}
729 	}
730 
731 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
732 	    &sc->sassc->mprsas_eh);
733 
734 	return (0);
735 }
736 
/*
 * Attach the SAS/CAM subsystem: allocate the per-adapter SAS softc and
 * target array, create the SIM, its queue, and the firmware-event
 * taskqueue, register the CAM bus, freeze it until discovery completes,
 * and hook up async notifications used to detect per-device EEDP
 * capabilities.  On any failure, mpr_detach_sas() unwinds whatever was
 * set up.  Returns 0 on success or an errno.
 */
737 int
738 mpr_attach_sas(struct mpr_softc *sc)
739 {
740 	struct mprsas_softc *sassc;
741 	cam_status status;
742 	int unit, error = 0, reqs;
743 
744 	MPR_FUNCTRACE(sc);
745 	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
746 
747 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
	/* NOTE(review): malloc(M_WAITOK) cannot return NULL in FreeBSD, so
	 * this check is dead code kept for safety. */
748 	if (!sassc) {
749 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
750 		    "Cannot allocate SAS subsystem memory\n");
751 		return (ENOMEM);
752 	}
753 
754 	/*
755 	 * XXX MaxTargets could change during a reinit.  Since we don't
756 	 * resize the targets[] array during such an event, cache the value
757 	 * of MaxTargets here so that we don't get into trouble later.  This
758 	 * should move into the reinit logic.
759 	 */
760 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
761 	sassc->targets = malloc(sizeof(struct mprsas_target) *
762 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
763 	if (!sassc->targets) {
764 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
765 		    "Cannot allocate SAS target memory\n");
766 		free(sassc, M_MPR);
767 		return (ENOMEM);
768 	}
769 	sc->sassc = sassc;
770 	sassc->sc = sc;
771 
	/* Leave room for the high-priority (TM) pool plus one spare. */
772 	reqs = sc->num_reqs - sc->num_prireqs - 1;
773 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
774 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
775 		error = ENOMEM;
776 		goto out;
777 	}
778 
779 	unit = device_get_unit(sc->mpr_dev);
780 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
781 	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
782 	if (sassc->sim == NULL) {
783 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
784 		error = EINVAL;
785 		goto out;
786 	}
787 
788 	TAILQ_INIT(&sassc->ev_queue);
789 
790 	/* Initialize taskqueue for Event Handling */
791 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
792 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
793 	    taskqueue_thread_enqueue, &sassc->ev_tq);
794 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
795 	    device_get_nameunit(sc->mpr_dev));
796 
797 	mpr_lock(sc);
798 
799 	/*
800 	 * XXX There should be a bus for every port on the adapter, but since
801 	 * we're just going to fake the topology for now, we'll pretend that
802 	 * everything is just a target on a single bus.
803 	 */
804 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
805 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
806 		    "Error %d registering SCSI bus\n", error);
807 		mpr_unlock(sc);
808 		goto out;
809 	}
810 
811 	/*
812 	 * Assume that discovery events will start right away.
813 	 *
814 	 * Hold off boot until discovery is complete.
815 	 */
816 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
817 	sc->sassc->startup_refcount = 0;
818 	mprsas_startup_increment(sassc);
819 
820 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
821 
822 	/*
823 	 * Register for async events so we can determine the EEDP
824 	 * capabilities of devices.
825 	 */
826 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
827 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
828 	    CAM_LUN_WILDCARD);
829 	if (status != CAM_REQ_CMP) {
830 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
831 		    "Error %#x creating sim path\n", status);
832 		sassc->path = NULL;
833 	} else {
834 		int event;
835 
836 #if (__FreeBSD_version >= 1000006) || \
837     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
838 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
839 #else
840 		event = AC_FOUND_DEVICE;
841 #endif
842 
843 		/*
844 		 * Prior to the CAM locking improvements, we can't call
845 		 * xpt_register_async() with a particular path specified.
846 		 *
847 		 * If a path isn't specified, xpt_register_async() will
848 		 * generate a wildcard path and acquire the XPT lock while
849 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
850 		 * It will then drop the XPT lock once that is done.
851 		 *
852 		 * If a path is specified for xpt_register_async(), it will
853 		 * not acquire and drop the XPT lock around the call to
854 		 * xpt_action().  xpt_action() asserts that the caller
855 		 * holds the SIM lock, so the SIM lock has to be held when
856 		 * calling xpt_register_async() when the path is specified.
857 		 *
858 		 * But xpt_register_async calls xpt_for_all_devices(),
859 		 * which calls xptbustraverse(), which will acquire each
860 		 * SIM lock.  When it traverses our particular bus, it will
861 		 * necessarily acquire the SIM lock, which will lead to a
862 		 * recursive lock acquisition.
863 		 *
864 		 * The CAM locking changes fix this problem by acquiring
865 		 * the XPT topology lock around bus traversal in
866 		 * xptbustraverse(), so the caller can hold the SIM lock
867 		 * and it does not cause a recursive lock acquisition.
868 		 *
869 		 * These __FreeBSD_version values are approximate, especially
870 		 * for stable/10, which is two months later than the actual
871 		 * change.
872 		 */
873 
874 #if (__FreeBSD_version < 1000703) || \
875     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
876 		mpr_unlock(sc);
877 		status = xpt_register_async(event, mprsas_async, sc,
878 					    NULL);
879 		mpr_lock(sc);
880 #else
881 		status = xpt_register_async(event, mprsas_async, sc,
882 					    sassc->path);
883 #endif
884 
885 		if (status != CAM_REQ_CMP) {
886 			mpr_dprint(sc, MPR_ERROR,
887 			    "Error %#x registering async handler for "
888 			    "AC_ADVINFO_CHANGED events\n", status);
889 			xpt_free_path(sassc->path);
890 			sassc->path = NULL;
891 		}
892 	}
893 	if (status != CAM_REQ_CMP) {
894 		/*
895 		 * EEDP use is the exception, not the rule.
896 		 * Warn the user, but do not fail to attach.
897 		 */
898 		mpr_printf(sc, "EEDP capabilities disabled.\n");
899 	}
900 
901 	mpr_unlock(sc);
902 
903 	mprsas_register_events(sc);
904 out:
905 	if (error)
906 		mpr_detach_sas(sc);
907 
908 	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
909 	return (error);
910 }
911 
/*
 * Tear down everything mpr_attach_sas() created.  Must tolerate a
 * partially-completed attach, so each resource is checked before it is
 * released.  The event taskqueue is freed before taking the driver
 * lock so in-flight event tasks can drain without deadlocking.
 * Returns 0.
 */
912 int
913 mpr_detach_sas(struct mpr_softc *sc)
914 {
915 	struct mprsas_softc *sassc;
916 	struct mprsas_lun *lun, *lun_tmp;
917 	struct mprsas_target *targ;
918 	int i;
919 
920 	MPR_FUNCTRACE(sc);
921 
	/* Nothing to do if attach never got far enough to set sc->sassc. */
922 	if (sc->sassc == NULL)
923 		return (0);
924 
925 	sassc = sc->sassc;
926 	mpr_deregister_events(sc, sassc->mprsas_eh);
927 
928 	/*
929 	 * Drain and free the event handling taskqueue with the lock
930 	 * unheld so that any parallel processing tasks drain properly
931 	 * without deadlocking.
932 	 */
933 	if (sassc->ev_tq != NULL)
934 		taskqueue_free(sassc->ev_tq);
935 
936 	/* Make sure CAM doesn't wedge if we had to bail out early. */
937 	mpr_lock(sc);
938 
	/* Drop any discovery references still held from startup. */
939 	while (sassc->startup_refcount != 0)
940 		mprsas_startup_decrement(sassc);
941 
942 	/* Deregister our async handler */
943 	if (sassc->path != NULL) {
944 		xpt_register_async(0, mprsas_async, sc, sassc->path);
945 		xpt_free_path(sassc->path);
946 		sassc->path = NULL;
947 	}
948 
949 	if (sassc->flags & MPRSAS_IN_STARTUP)
950 		xpt_release_simq(sassc->sim, 1);
951 
952 	if (sassc->sim != NULL) {
953 		xpt_bus_deregister(cam_sim_path(sassc->sim));
954 		cam_sim_free(sassc->sim, FALSE);
955 	}
956 
957 	mpr_unlock(sc);
958 
959 	if (sassc->devq != NULL)
960 		cam_simq_free(sassc->devq);
961 
	/* Free per-LUN state, then the target array and softc themselves. */
962 	for (i = 0; i < sassc->maxtargets; i++) {
963 		targ = &sassc->targets[i];
964 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
965 			free(lun, M_MPR);
966 		}
967 	}
968 	free(sassc->targets, M_MPR);
969 	free(sassc, M_MPR);
970 	sc->sassc = NULL;
971 
972 	return (0);
973 }
974 
/*
 * Called when device discovery completes.  Cancels the pending discovery
 * timeout callout (if armed) and, when mapping events are being tracked,
 * schedules the missing-device check in the mapping code.
 */
void
mprsas_discovery_end(struct mprsas_softc *sassc)
{
	struct mpr_softc *sc = sassc->sc;

	MPR_FUNCTRACE(sc);

	/* Discovery finished before the timeout fired; cancel it. */
	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts. Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily. Note that just because discovery has
	 * completed doesn't mean that events have been processed yet. The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing. If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		    "table.\n");
		callout_reset(&sc->device_check_callout,
		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
		    sc);
	}
}
1005 
/*
 * CAM action routine for the mpr(4) SIM: dispatches an incoming CCB by
 * its function code.  Must be called with the controller mutex held.
 * CCBs handed off to a helper (reset/scsiio/smpio) are completed by that
 * helper; everything else is completed here via xpt_done().
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report HBA capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		cpi->maxio = sc->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device currently at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the negotiated link rate code to a bit rate (kb/s). */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by the hardware; report success to CAM. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1140 
1141 static void
1142 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1143     target_id_t target_id, lun_id_t lun_id)
1144 {
1145 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1146 	struct cam_path *path;
1147 
1148 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1149 	    ac_code, target_id, (uintmax_t)lun_id);
1150 
1151 	if (xpt_create_path(&path, NULL,
1152 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1153 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1154 		    "notification\n");
1155 		return;
1156 	}
1157 
1158 	xpt_async(ac_code, path, NULL);
1159 	xpt_free_path(path);
1160 }
1161 
/*
 * Force completion of every in-use command with a NULL reply, as part of
 * a diag reset.  Each active command is either run through its completion
 * handler or woken up; polled commands are flagged complete.  Called with
 * the controller mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		if (cm->cm_state == MPR_CM_STATE_FREE)
			continue;

		cm->cm_state = MPR_CM_STATE_BUSY;
		cm->cm_reply = NULL;
		completed = 0;

		/* Free the buffer allocated for a timed-out SATA ID cmd. */
		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
			MPASS(cm->cm_data);
			free(cm->cm_data, M_MPR);
			cm->cm_data = NULL;
		}

		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* The state was set to BUSY above; it can only be FREE here
		 * if one of the callbacks above released the command. */
		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}

	sc->io_cmds_active = 0;
}
1216 
/*
 * Restore SAS state after a controller diag reset/reinit: re-enter
 * startup mode (freezing the simq), announce a bus reset to CAM,
 * complete all outstanding commands, and invalidate every target's
 * device handles pending rediscovery.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1259 static void
1260 mprsas_tm_timeout(void *data)
1261 {
1262 	struct mpr_command *tm = data;
1263 	struct mpr_softc *sc = tm->cm_sc;
1264 
1265 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1266 
1267 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1268 	    "out\n", tm);
1269 
1270 	KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1271 	    ("command not inqueue\n"));
1272 
1273 	tm->cm_state = MPR_CM_STATE_BUSY;
1274 	mpr_reinit(sc);
1275 }
1276 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management request.
 * If no commands remain outstanding on the LUN, recovery for that LUN is
 * finished and the next timed-out command (if any) on the target is
 * aborted; otherwise the reset is escalated to a target reset, or to a
 * controller reinit for NVMe devices without custom TM handling.
 */
static void
mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	unsigned int cm_count = 0;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	/* The TM completed; cancel its mprsas_tm_timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "%s: cm_flags = %#x for LUN reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
		    tm);
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL LUN reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "LUN reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	/*
	 * See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished recovery after LUN reset for target %u\n",
		    targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
		    tm->cm_lun);

		/*
		 * We've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
			   "More commands to abort for target %u\n", targ->tid);
			/* Reuse this TM command to abort the next one. */
			mprsas_send_abort(sc, tm, cm);
		} else {
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
	} else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "logical unit reset complete for target %u, but still "
		    "have %u command(s), sending target reset\n", targ->tid,
		    cm_count);
		if (!targ->is_nvme || sc->custom_nvme_tm_handling)
			mprsas_send_reset(sc, tm,
			    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
		else
			mpr_reinit(sc);
	}
}
1377 
/*
 * Completion handler for a TARGET_RESET task management request.  If the
 * target has no outstanding commands, recovery is done and an AC_SENT_BDR
 * async event is posted; otherwise the reset effectively failed and we
 * escalate to a full controller reinit.
 */
static void
mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its mprsas_tm_timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
		    "reset! This should not happen!\n", __func__, tm->cm_flags);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL target reset reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL target reset reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "target reset attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	if (targ->outstanding == 0) {
		/*
		 * We've finished recovery for this target and all
		 * of its logical units.
		 */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Finished reset recovery for target %u\n", targ->tid);

		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else {
		/*
		 * After a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Target reset complete for target %u, but still have %u "
		    "command(s), resetting controller\n", targ->tid,
		    targ->outstanding);
		mpr_reinit(sc);
	}
}
1454 
/* Default task-management reset timeout, in seconds. */
#define MPR_RESET_TIMEOUT 30

/*
 * Issue a task management reset for tm's target.  'type' selects either
 * LOGICAL_UNIT_RESET or TARGET_RESET; the matching completion handler
 * and an mprsas_tm_timeout callout are armed before the command is
 * mapped to hardware.  Returns -1 for a missing device handle or an
 * unknown reset type, otherwise the result of mpr_map_command().
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err, timeout;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->TaskType = type;

	if (!target->is_nvme || sc->custom_nvme_tm_handling) {
		timeout = MPR_RESET_TIMEOUT;
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	} else {
		/* NVMe targets may define their own reset timeout. */
		timeout = (target->controller_reset_timeout) ? (
		    target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
		/* PCIe Protocol Level Reset*/
		req->MsgFlags =
		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	}

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending logical unit reset to target %u lun %d\n",
		    target->tid, tm->cm_lun);
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		tm->cm_targ->target_resets++;
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending target reset to target %u\n", target->tid);
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	/* Log enclosure location when the target provides it. */
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    target->encl_level, target->encl_slot,
		    target->connector_name);
	}

	tm->cm_data = NULL;
	tm->cm_complete_data = (void *)tm;

	callout_reset(&tm->cm_callout, timeout * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1531 
1532 
/*
 * Completion handler for an ABORT_TASK task management request.  Works
 * through the target's timedout_commands queue: if the aborted command
 * is gone from the queue, continue with the next timed-out command (or
 * finish recovery if none remain); if the same command is still at the
 * head, the abort failed and we escalate to a logical unit reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; cancel its mprsas_tm_timeout callout. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpr_dprint(sc, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
			    "reset, ignoring NULL abort reply\n");
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		} else {
			/* we should have gotten a reply. */
			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
			    "abort attempt, resetting controller\n");
			mpr_reinit(sc);
		}
		return;
	}

	mpr_dprint(sc, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/*
		 * if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Finished abort recovery for target %u\n", targ->tid);
		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Continuing abort recovery for target %u\n", targ->tid);
		mprsas_send_abort(sc, tm, cm);
	} else {
		/*
		 * we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "Abort failed for target %u, sending logical unit reset\n",
		    targ->tid);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1613 
1614 #define MPR_ABORT_TIMEOUT 5
1615 
1616 static int
1617 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1618     struct mpr_command *cm)
1619 {
1620 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1621 	struct mprsas_target *targ;
1622 	int err, timeout;
1623 
1624 	targ = cm->cm_targ;
1625 	if (targ->handle == 0) {
1626 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1627 		   "%s null devhandle for target_id %d\n",
1628 		    __func__, cm->cm_ccb->ccb_h.target_id);
1629 		return -1;
1630 	}
1631 
1632 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1633 	    "Aborting command %p\n", cm);
1634 
1635 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1636 	req->DevHandle = htole16(targ->handle);
1637 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1638 
1639 	/* XXX Need to handle invalid LUNs */
1640 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1641 
1642 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1643 
1644 	tm->cm_data = NULL;
1645 	tm->cm_complete = mprsas_abort_complete;
1646 	tm->cm_complete_data = (void *)tm;
1647 	tm->cm_targ = cm->cm_targ;
1648 	tm->cm_lun = cm->cm_lun;
1649 
1650 	if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1651 		timeout	= MPR_ABORT_TIMEOUT;
1652 	else
1653 		timeout = sc->nvme_abort_timeout;
1654 
1655 	callout_reset(&tm->cm_callout, timeout * hz,
1656 	    mprsas_tm_timeout, tm);
1657 
1658 	targ->aborts++;
1659 
1660 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1661 
1662 	err = mpr_map_command(sc, tm);
1663 	if (err)
1664 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1665 		    "error %d sending abort for cm %p SMID %u\n",
1666 		    err, cm, req->TaskMID);
1667 	return err;
1668 }
1669 
/*
 * Callout handler for a SCSI I/O command timeout.  First re-runs the
 * interrupt handler in case the completion is merely pending; otherwise
 * marks the command timed out, queues it on the target's timed-out list,
 * and kicks off recovery by sending an abort (allocating a TM command if
 * the target is not already in recovery).  Runs with the controller
 * mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	sbintime_t elapsed, now;
	union ccb *ccb;
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;
	ccb = cm->cm_ccb;
	now = sbinuptime();

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	/* qos.sim_data was stamped with sbinuptime() at submission. */
	elapsed = now - ccb->ccb_h.qos.sim_data;
	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
	    targ->tid, targ->handle, ccb->ccb_h.timeout,
	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
		    "At enclosure level %d, slot %d, connector name (%4s)\n",
		    targ->encl_level, targ->encl_slot, targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {

		/* start recovery by aborting the first timedout command */
		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
		    "Sending abort to target %u for SMID %d\n", targ->tid,
		    cm->cm_desc.Default.SMID);
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
		    "timedout cm %p failed to allocate a tm\n", cm);
	}
}
1758 
1759 /**
1760  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1761  *			     to SCSI Unmap.
1762  * Return 0 - for success,
1763  *	  1 - to immediately return back the command with success status to CAM
1764  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1765  *			   to FW without any translation.
1766  */
1767 static int
1768 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1769     union ccb *ccb, struct mprsas_target *targ)
1770 {
1771 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1772 	struct ccb_scsiio *csio;
1773 	struct unmap_parm_list *plist;
1774 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1775 	struct nvme_command *c;
1776 	int i, res;
1777 	uint16_t ndesc, list_len, data_length;
1778 	struct mpr_prp_page *prp_page_info;
1779 	uint64_t nvme_dsm_ranges_dma_handle;
1780 
1781 	csio = &ccb->csio;
1782 #if __FreeBSD_version >= 1100103
1783 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1784 #else
1785 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1786 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1787 		    ccb->csio.cdb_io.cdb_ptr[8]);
1788 	} else {
1789 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1790 		    ccb->csio.cdb_io.cdb_bytes[8]);
1791 	}
1792 #endif
1793 	if (!list_len) {
1794 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1795 		return -EINVAL;
1796 	}
1797 
1798 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1799 	if (!plist) {
1800 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1801 		    "save UNMAP data\n");
1802 		return -ENOMEM;
1803 	}
1804 
1805 	/* Copy SCSI unmap data to a local buffer */
1806 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1807 
1808 	/* return back the unmap command to CAM with success status,
1809 	 * if number of descripts is zero.
1810 	 */
1811 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1812 	if (!ndesc) {
1813 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1814 		    "UNMAP cmd is Zero\n");
1815 		res = 1;
1816 		goto out;
1817 	}
1818 
1819 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1820 	if (data_length > targ->MDTS) {
1821 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1822 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1823 		res = -EINVAL;
1824 		goto out;
1825 	}
1826 
1827 	prp_page_info = mpr_alloc_prp_page(sc);
1828 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1829 	    "UNMAP command.\n", __func__));
1830 
1831 	/*
1832 	 * Insert the allocated PRP page into the command's PRP page list. This
1833 	 * will be freed when the command is freed.
1834 	 */
1835 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1836 
1837 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1838 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1839 
1840 	bzero(nvme_dsm_ranges, data_length);
1841 
1842 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1843 	 * for each descriptors contained in SCSI UNMAP data.
1844 	 */
1845 	for (i = 0; i < ndesc; i++) {
1846 		nvme_dsm_ranges[i].length =
1847 		    htole32(be32toh(plist->desc[i].nlb));
1848 		nvme_dsm_ranges[i].starting_lba =
1849 		    htole64(be64toh(plist->desc[i].slba));
1850 		nvme_dsm_ranges[i].attributes = 0;
1851 	}
1852 
1853 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1854 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1855 	bzero(req, sizeof(*req));
1856 	req->DevHandle = htole16(targ->handle);
1857 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1858 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1859 	req->ErrorResponseBaseAddress.High =
1860 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1861 	req->ErrorResponseBaseAddress.Low =
1862 	    htole32(cm->cm_sense_busaddr);
1863 	req->ErrorResponseAllocationLength =
1864 	    htole16(sizeof(struct nvme_completion));
1865 	req->EncapsulatedCommandLength =
1866 	    htole16(sizeof(struct nvme_command));
1867 	req->DataLength = htole32(data_length);
1868 
1869 	/* Build NVMe DSM command */
1870 	c = (struct nvme_command *) req->NVMe_Command;
1871 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1872 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1873 	c->cdw10 = htole32(ndesc - 1);
1874 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1875 
1876 	cm->cm_length = data_length;
1877 	cm->cm_data = NULL;
1878 
1879 	cm->cm_complete = mprsas_scsiio_complete;
1880 	cm->cm_complete_data = ccb;
1881 	cm->cm_targ = targ;
1882 	cm->cm_lun = csio->ccb_h.target_lun;
1883 	cm->cm_ccb = ccb;
1884 
1885 	cm->cm_desc.Default.RequestFlags =
1886 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1887 
1888 	csio->ccb_h.qos.sim_data = sbinuptime();
1889 #if __FreeBSD_version >= 1000029
1890 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1891 	    mprsas_scsiio_timeout, cm, 0);
1892 #else //__FreeBSD_version < 1000029
1893 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1894 	    mprsas_scsiio_timeout, cm);
1895 #endif //__FreeBSD_version >= 1000029
1896 
1897 	targ->issued++;
1898 	targ->outstanding++;
1899 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1900 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1901 
1902 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1903 	    __func__, cm, ccb, targ->outstanding);
1904 
1905 	mpr_build_nvme_prp(sc, cm, req,
1906 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1907 	mpr_map_command(sc, cm);
1908 
1909 out:
1910 	free(plist, M_MPR);
1911 	return 0;
1912 }
1913 
/*
 * mprsas_action_scsiio - handle an XPT_SCSI_IO CCB from CAM.
 *
 * Validates the target, allocates a request frame, builds an MPI2
 * SCSI IO request (or hands NVMe UNMAPs off to the encapsulated-NVMe
 * builder), sets up EEDP protection info when applicable, arms the
 * per-command timeout, and queues the command to the controller.
 * Called with the softc mutex held.  Completion is delivered through
 * mprsas_scsiio_complete().
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means the target is gone (or never existed). */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members take IO only through the volume. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Refuse new IO while the driver is shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * Allocate a request frame.  If none are available (or a diag reset
	 * is in flight), freeze the SIM queue and ask CAM to requeue; the
	 * queue is released when a command completes with frames available.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
	}

	/* Build the MPI2 SCSI IO request in the command's request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 additional dwords beyond the base 16 bytes. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in the per-target Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM put it (pointer or inline bytes). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* Find the LUN this IO is addressed to (NULL if no match). */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 of a 16-byte CDB, byte 2 otherwise. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* Set RDPROTECT/WRPROTECT = 001b in CDB byte 1. */
				req->CDB.CDB32[1] =
				    (req->CDB.CDB32[1] & 0x1F) | 0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Data buffer is described by the CCB; DMA mapping happens later. */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Record submit time for CAM IO statistics, then arm the timeout. */
	csio->ccb_h.qos.sim_data = sbinuptime();
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2227 
2228 /**
2229  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2230  */
2231 static void
2232 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2233     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2234 {
2235 	u32 response_info;
2236 	u8 *response_bytes;
2237 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2238 	    MPI2_IOCSTATUS_MASK;
2239 	u8 scsi_state = mpi_reply->SCSIState;
2240 	u8 scsi_status = mpi_reply->SCSIStatus;
2241 	char *desc_ioc_state = NULL;
2242 	char *desc_scsi_status = NULL;
2243 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2244 
2245 	if (log_info == 0x31170000)
2246 		return;
2247 
2248 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2249 	     ioc_status);
2250 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2251 	    scsi_status);
2252 
2253 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2254 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2255 	if (targ->encl_level_valid) {
2256 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2257 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2258 		    targ->connector_name);
2259 	}
2260 
2261 	/*
2262 	 * We can add more detail about underflow data here
2263 	 * TO-DO
2264 	 */
2265 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2266 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2267 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2268 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2269 
2270 	if (sc->mpr_debug & MPR_XINFO &&
2271 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2272 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2273 		scsi_sense_print(csio);
2274 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2275 	}
2276 
2277 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2278 		response_info = le32toh(mpi_reply->ResponseInfo);
2279 		response_bytes = (u8 *)&response_info;
2280 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2281 		    response_bytes[0],
2282 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2283 		    response_bytes[0]));
2284 	}
2285 }
2286 
2287 /** mprsas_nvme_trans_status_code
2288  *
2289  * Convert Native NVMe command error status to
2290  * equivalent SCSI error status.
2291  *
2292  * Returns appropriate scsi_status
2293  */
2294 static u8
2295 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2296     struct mpr_command *cm)
2297 {
2298 	u8 status = MPI2_SCSI_STATUS_GOOD;
2299 	int skey, asc, ascq;
2300 	union ccb *ccb = cm->cm_complete_data;
2301 	int returned_sense_len;
2302 	uint8_t sct, sc;
2303 
2304 	sct = NVME_STATUS_GET_SCT(nvme_status);
2305 	sc = NVME_STATUS_GET_SC(nvme_status);
2306 
2307 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2308 	skey = SSD_KEY_ILLEGAL_REQUEST;
2309 	asc = SCSI_ASC_NO_SENSE;
2310 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2311 
2312 	switch (sct) {
2313 	case NVME_SCT_GENERIC:
2314 		switch (sc) {
2315 		case NVME_SC_SUCCESS:
2316 			status = MPI2_SCSI_STATUS_GOOD;
2317 			skey = SSD_KEY_NO_SENSE;
2318 			asc = SCSI_ASC_NO_SENSE;
2319 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2320 			break;
2321 		case NVME_SC_INVALID_OPCODE:
2322 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2323 			skey = SSD_KEY_ILLEGAL_REQUEST;
2324 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2325 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2326 			break;
2327 		case NVME_SC_INVALID_FIELD:
2328 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2329 			skey = SSD_KEY_ILLEGAL_REQUEST;
2330 			asc = SCSI_ASC_INVALID_CDB;
2331 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2332 			break;
2333 		case NVME_SC_DATA_TRANSFER_ERROR:
2334 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2335 			skey = SSD_KEY_MEDIUM_ERROR;
2336 			asc = SCSI_ASC_NO_SENSE;
2337 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2338 			break;
2339 		case NVME_SC_ABORTED_POWER_LOSS:
2340 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2341 			skey = SSD_KEY_ABORTED_COMMAND;
2342 			asc = SCSI_ASC_WARNING;
2343 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2344 			break;
2345 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2346 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2347 			skey = SSD_KEY_HARDWARE_ERROR;
2348 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2349 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2350 			break;
2351 		case NVME_SC_ABORTED_BY_REQUEST:
2352 		case NVME_SC_ABORTED_SQ_DELETION:
2353 		case NVME_SC_ABORTED_FAILED_FUSED:
2354 		case NVME_SC_ABORTED_MISSING_FUSED:
2355 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2356 			skey = SSD_KEY_ABORTED_COMMAND;
2357 			asc = SCSI_ASC_NO_SENSE;
2358 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2359 			break;
2360 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2361 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2362 			skey = SSD_KEY_ILLEGAL_REQUEST;
2363 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2364 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2365 			break;
2366 		case NVME_SC_LBA_OUT_OF_RANGE:
2367 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2368 			skey = SSD_KEY_ILLEGAL_REQUEST;
2369 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2370 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2371 			break;
2372 		case NVME_SC_CAPACITY_EXCEEDED:
2373 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2374 			skey = SSD_KEY_MEDIUM_ERROR;
2375 			asc = SCSI_ASC_NO_SENSE;
2376 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2377 			break;
2378 		case NVME_SC_NAMESPACE_NOT_READY:
2379 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2380 			skey = SSD_KEY_NOT_READY;
2381 			asc = SCSI_ASC_LUN_NOT_READY;
2382 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2383 			break;
2384 		}
2385 		break;
2386 	case NVME_SCT_COMMAND_SPECIFIC:
2387 		switch (sc) {
2388 		case NVME_SC_INVALID_FORMAT:
2389 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2390 			skey = SSD_KEY_ILLEGAL_REQUEST;
2391 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2392 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2393 			break;
2394 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2395 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2396 			skey = SSD_KEY_ILLEGAL_REQUEST;
2397 			asc = SCSI_ASC_INVALID_CDB;
2398 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2399 			break;
2400 		}
2401 		break;
2402 	case NVME_SCT_MEDIA_ERROR:
2403 		switch (sc) {
2404 		case NVME_SC_WRITE_FAULTS:
2405 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2406 			skey = SSD_KEY_MEDIUM_ERROR;
2407 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2408 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2409 			break;
2410 		case NVME_SC_UNRECOVERED_READ_ERROR:
2411 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2412 			skey = SSD_KEY_MEDIUM_ERROR;
2413 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2414 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2415 			break;
2416 		case NVME_SC_GUARD_CHECK_ERROR:
2417 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2418 			skey = SSD_KEY_MEDIUM_ERROR;
2419 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2420 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2421 			break;
2422 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2423 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2424 			skey = SSD_KEY_MEDIUM_ERROR;
2425 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2426 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2427 			break;
2428 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2429 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2430 			skey = SSD_KEY_MEDIUM_ERROR;
2431 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2432 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2433 			break;
2434 		case NVME_SC_COMPARE_FAILURE:
2435 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2436 			skey = SSD_KEY_MISCOMPARE;
2437 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2438 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2439 			break;
2440 		case NVME_SC_ACCESS_DENIED:
2441 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2442 			skey = SSD_KEY_ILLEGAL_REQUEST;
2443 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2444 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2445 			break;
2446 		}
2447 		break;
2448 	}
2449 
2450 	returned_sense_len = sizeof(struct scsi_sense_data);
2451 	if (returned_sense_len < ccb->csio.sense_len)
2452 		ccb->csio.sense_resid = ccb->csio.sense_len -
2453 		    returned_sense_len;
2454 	else
2455 		ccb->csio.sense_resid = 0;
2456 
2457 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2458 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2459 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2460 
2461 	return status;
2462 }
2463 
2464 /** mprsas_complete_nvme_unmap
2465  *
2466  * Complete native NVMe command issued using NVMe Encapsulated
2467  * Request Message.
2468  */
2469 static u8
2470 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2471 {
2472 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2473 	struct nvme_completion *nvme_completion = NULL;
2474 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2475 
2476 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2477 	if (le16toh(mpi_reply->ErrorResponseCount)){
2478 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2479 		scsi_status = mprsas_nvme_trans_status_code(
2480 		    nvme_completion->status, cm);
2481 	}
2482 	return scsi_status;
2483 }
2484 
2485 static void
2486 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2487 {
2488 	MPI2_SCSI_IO_REPLY *rep;
2489 	union ccb *ccb;
2490 	struct ccb_scsiio *csio;
2491 	struct mprsas_softc *sassc;
2492 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2493 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2494 	int dir = 0, i;
2495 	u16 alloc_len;
2496 	struct mprsas_target *target;
2497 	target_id_t target_id;
2498 
2499 	MPR_FUNCTRACE(sc);
2500 	mpr_dprint(sc, MPR_TRACE,
2501 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2502 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2503 	    cm->cm_targ->outstanding);
2504 
2505 	callout_stop(&cm->cm_callout);
2506 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2507 
2508 	sassc = sc->sassc;
2509 	ccb = cm->cm_complete_data;
2510 	csio = &ccb->csio;
2511 	target_id = csio->ccb_h.target_id;
2512 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2513 	/*
2514 	 * XXX KDM if the chain allocation fails, does it matter if we do
2515 	 * the sync and unload here?  It is simpler to do it in every case,
2516 	 * assuming it doesn't cause problems.
2517 	 */
2518 	if (cm->cm_data != NULL) {
2519 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2520 			dir = BUS_DMASYNC_POSTREAD;
2521 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2522 			dir = BUS_DMASYNC_POSTWRITE;
2523 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2524 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2525 	}
2526 
2527 	cm->cm_targ->completed++;
2528 	cm->cm_targ->outstanding--;
2529 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2530 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2531 
2532 	if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2533 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2534 		KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2535 		    ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2536 		cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2537 		if (cm->cm_reply != NULL)
2538 			mprsas_log_command(cm, MPR_RECOVERY,
2539 			    "completed timedout cm %p ccb %p during recovery "
2540 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2541 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2542 			    rep->SCSIState, le32toh(rep->TransferCount));
2543 		else
2544 			mprsas_log_command(cm, MPR_RECOVERY,
2545 			    "completed timedout cm %p ccb %p during recovery\n",
2546 			    cm, cm->cm_ccb);
2547 	} else if (cm->cm_targ->tm != NULL) {
2548 		if (cm->cm_reply != NULL)
2549 			mprsas_log_command(cm, MPR_RECOVERY,
2550 			    "completed cm %p ccb %p during recovery "
2551 			    "ioc %x scsi %x state %x xfer %u\n",
2552 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2553 			    rep->SCSIStatus, rep->SCSIState,
2554 			    le32toh(rep->TransferCount));
2555 		else
2556 			mprsas_log_command(cm, MPR_RECOVERY,
2557 			    "completed cm %p ccb %p during recovery\n",
2558 			    cm, cm->cm_ccb);
2559 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2560 		mprsas_log_command(cm, MPR_RECOVERY,
2561 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2562 	}
2563 
2564 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2565 		/*
2566 		 * We ran into an error after we tried to map the command,
2567 		 * so we're getting a callback without queueing the command
2568 		 * to the hardware.  So we set the status here, and it will
2569 		 * be retained below.  We'll go through the "fast path",
2570 		 * because there can be no reply when we haven't actually
2571 		 * gone out to the hardware.
2572 		 */
2573 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2574 
2575 		/*
2576 		 * Currently the only error included in the mask is
2577 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2578 		 * chain frames.  We need to freeze the queue until we get
2579 		 * a command that completed without this error, which will
2580 		 * hopefully have some chain frames attached that we can
2581 		 * use.  If we wanted to get smarter about it, we would
2582 		 * only unfreeze the queue in this condition when we're
2583 		 * sure that we're getting some chain frames back.  That's
2584 		 * probably unnecessary.
2585 		 */
2586 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2587 			xpt_freeze_simq(sassc->sim, 1);
2588 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2589 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2590 			    "freezing SIM queue\n");
2591 		}
2592 	}
2593 
2594 	/*
2595 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2596 	 * flag, and use it in a few places in the rest of this function for
2597 	 * convenience. Use the macro if available.
2598 	 */
2599 #if __FreeBSD_version >= 1100103
2600 	scsi_cdb = scsiio_cdb_ptr(csio);
2601 #else
2602 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2603 		scsi_cdb = csio->cdb_io.cdb_ptr;
2604 	else
2605 		scsi_cdb = csio->cdb_io.cdb_bytes;
2606 #endif
2607 
2608 	/*
2609 	 * If this is a Start Stop Unit command and it was issued by the driver
2610 	 * during shutdown, decrement the refcount to account for all of the
2611 	 * commands that were sent.  All SSU commands should be completed before
2612 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2613 	 * is TRUE.
2614 	 */
2615 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2616 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2617 		sc->SSU_refcount--;
2618 	}
2619 
2620 	/* Take the fast path to completion */
2621 	if (cm->cm_reply == NULL) {
2622 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2623 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2624 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2625 			else {
2626 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2627 				csio->scsi_status = SCSI_STATUS_OK;
2628 			}
2629 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2630 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2631 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2632 				mpr_dprint(sc, MPR_XINFO,
2633 				    "Unfreezing SIM queue\n");
2634 			}
2635 		}
2636 
2637 		/*
2638 		 * There are two scenarios where the status won't be
2639 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2640 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2641 		 */
2642 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2643 			/*
2644 			 * Freeze the dev queue so that commands are
2645 			 * executed in the correct order after error
2646 			 * recovery.
2647 			 */
2648 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2649 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2650 		}
2651 		mpr_free_command(sc, cm);
2652 		xpt_done(ccb);
2653 		return;
2654 	}
2655 
2656 	target = &sassc->targets[target_id];
2657 	if (scsi_cdb[0] == UNMAP &&
2658 	    target->is_nvme &&
2659 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2660 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2661 		csio->scsi_status = rep->SCSIStatus;
2662 	}
2663 
2664 	mprsas_log_command(cm, MPR_XINFO,
2665 	    "ioc %x scsi %x state %x xfer %u\n",
2666 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2667 	    le32toh(rep->TransferCount));
2668 
2669 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2670 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2671 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2672 		/* FALLTHROUGH */
2673 	case MPI2_IOCSTATUS_SUCCESS:
2674 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2675 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2676 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2677 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2678 
2679 		/* Completion failed at the transport level. */
2680 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2681 		    MPI2_SCSI_STATE_TERMINATED)) {
2682 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2683 			break;
2684 		}
2685 
2686 		/* In a modern packetized environment, an autosense failure
2687 		 * implies that there's not much else that can be done to
2688 		 * recover the command.
2689 		 */
2690 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2691 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2692 			break;
2693 		}
2694 
2695 		/*
2696 		 * CAM doesn't care about SAS Response Info data, but if this is
2697 		 * the state check if TLR should be done.  If not, clear the
2698 		 * TLR_bits for the target.
2699 		 */
2700 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2701 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2702 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2703 			sc->mapping_table[target_id].TLR_bits =
2704 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2705 		}
2706 
2707 		/*
2708 		 * Intentionally override the normal SCSI status reporting
2709 		 * for these two cases.  These are likely to happen in a
2710 		 * multi-initiator environment, and we want to make sure that
2711 		 * CAM retries these commands rather than fail them.
2712 		 */
2713 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2714 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2715 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2716 			break;
2717 		}
2718 
2719 		/* Handle normal status and sense */
2720 		csio->scsi_status = rep->SCSIStatus;
2721 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2722 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2723 		else
2724 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2725 
2726 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2727 			int sense_len, returned_sense_len;
2728 
2729 			returned_sense_len = min(le32toh(rep->SenseCount),
2730 			    sizeof(struct scsi_sense_data));
2731 			if (returned_sense_len < csio->sense_len)
2732 				csio->sense_resid = csio->sense_len -
2733 				    returned_sense_len;
2734 			else
2735 				csio->sense_resid = 0;
2736 
2737 			sense_len = min(returned_sense_len,
2738 			    csio->sense_len - csio->sense_resid);
2739 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2740 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2741 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2742 		}
2743 
2744 		/*
2745 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2746 		 * and it's page code 0 (Supported Page List), and there is
2747 		 * inquiry data, and this is for a sequential access device, and
2748 		 * the device is an SSP target, and TLR is supported by the
2749 		 * controller, turn the TLR_bits value ON if page 0x90 is
2750 		 * supported.
2751 		 */
2752 		if ((scsi_cdb[0] == INQUIRY) &&
2753 		    (scsi_cdb[1] & SI_EVPD) &&
2754 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2755 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2756 		    (csio->data_ptr != NULL) &&
2757 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2758 		    (sc->control_TLR) &&
2759 		    (sc->mapping_table[target_id].device_info &
2760 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2761 			vpd_list = (struct scsi_vpd_supported_page_list *)
2762 			    csio->data_ptr;
2763 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2764 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2765 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2766 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2767 			alloc_len -= csio->resid;
2768 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2769 				if (vpd_list->list[i] == 0x90) {
2770 					*TLR_bits = TLR_on;
2771 					break;
2772 				}
2773 			}
2774 		}
2775 
2776 		/*
2777 		 * If this is a SATA direct-access end device, mark it so that
2778 		 * a SCSI StartStopUnit command will be sent to it when the
2779 		 * driver is being shutdown.
2780 		 */
2781 		if ((scsi_cdb[0] == INQUIRY) &&
2782 		    (csio->data_ptr != NULL) &&
2783 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2784 		    (sc->mapping_table[target_id].device_info &
2785 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2786 		    ((sc->mapping_table[target_id].device_info &
2787 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2788 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2789 			target = &sassc->targets[target_id];
2790 			target->supports_SSU = TRUE;
2791 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2792 			    target_id);
2793 		}
2794 		break;
2795 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2796 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2797 		/*
2798 		 * If devinfo is 0 this will be a volume.  In that case don't
2799 		 * tell CAM that the volume is not there.  We want volumes to
2800 		 * be enumerated until they are deleted/removed, not just
2801 		 * failed.
2802 		 */
2803 		if (cm->cm_targ->devinfo == 0)
2804 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2805 		else
2806 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2807 		break;
2808 	case MPI2_IOCSTATUS_INVALID_SGL:
2809 		mpr_print_scsiio_cmd(sc, cm);
2810 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2811 		break;
2812 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2813 		/*
2814 		 * This is one of the responses that comes back when an I/O
2815 		 * has been aborted.  If it is because of a timeout that we
2816 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2817 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2818 		 * command is the same (it gets retried, subject to the
2819 		 * retry counter), the only difference is what gets printed
2820 		 * on the console.
2821 		 */
2822 		if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2823 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2824 		else
2825 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2826 		break;
2827 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2828 		/* resid is ignored for this condition */
2829 		csio->resid = 0;
2830 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2831 		break;
2832 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2833 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2834 		/*
2835 		 * These can sometimes be transient transport-related
2836 		 * errors, and sometimes persistent drive-related errors.
2837 		 * We used to retry these without decrementing the retry
2838 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2839 		 * we hit a persistent drive problem that returns one of
2840 		 * these error codes, we would retry indefinitely.  So,
2841 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2842 		 * count and avoid infinite retries.  We're taking the
2843 		 * potential risk of flagging false failures in the event
2844 		 * of a topology-related error (e.g. a SAS expander problem
2845 		 * causes a command addressed to a drive to fail), but
2846 		 * avoiding getting into an infinite retry loop.
2847 		 */
2848 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2849 		mpr_dprint(sc, MPR_INFO,
2850 		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2851 		    mpr_describe_table(mpr_iocstatus_string,
2852 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2853 		    target_id, cm->cm_desc.Default.SMID,
2854 		    le32toh(rep->IOCLogInfo));
2855 		mpr_dprint(sc, MPR_XINFO,
2856 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2857 		    rep->SCSIStatus, rep->SCSIState,
2858 		    le32toh(rep->TransferCount));
2859 		break;
2860 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2861 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2862 	case MPI2_IOCSTATUS_INVALID_VPID:
2863 	case MPI2_IOCSTATUS_INVALID_FIELD:
2864 	case MPI2_IOCSTATUS_INVALID_STATE:
2865 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2866 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2867 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2868 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2869 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2870 	default:
2871 		mprsas_log_command(cm, MPR_XINFO,
2872 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2873 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2874 		    rep->SCSIStatus, rep->SCSIState,
2875 		    le32toh(rep->TransferCount));
2876 		csio->resid = cm->cm_length;
2877 
2878 		if (scsi_cdb[0] == UNMAP &&
2879 		    target->is_nvme &&
2880 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2881 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2882 		else
2883 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2884 
2885 		break;
2886 	}
2887 
2888 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2889 
2890 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2891 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2892 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2893 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2894 		    "queue\n");
2895 	}
2896 
2897 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2898 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2899 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2900 	}
2901 
2902 	mpr_free_command(sc, cm);
2903 	xpt_done(ccb);
2904 }
2905 
2906 #if __FreeBSD_version >= 900026
2907 static void
2908 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2909 {
2910 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2911 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2912 	uint64_t sasaddr;
2913 	union ccb *ccb;
2914 
2915 	ccb = cm->cm_complete_data;
2916 
2917 	/*
2918 	 * Currently there should be no way we can hit this case.  It only
2919 	 * happens when we have a failure to allocate chain frames, and SMP
2920 	 * commands require two S/G elements only.  That should be handled
2921 	 * in the standard request size.
2922 	 */
2923 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2924 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2925 		    "request!\n", __func__, cm->cm_flags);
2926 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2927 		goto bailout;
2928         }
2929 
2930 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2931 	if (rpl == NULL) {
2932 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2933 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2934 		goto bailout;
2935 	}
2936 
2937 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2938 	sasaddr = le32toh(req->SASAddress.Low);
2939 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2940 
2941 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2942 	    MPI2_IOCSTATUS_SUCCESS ||
2943 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2944 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2945 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2946 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2947 		goto bailout;
2948 	}
2949 
2950 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2951 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
2952 
2953 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2954 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2955 	else
2956 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2957 
2958 bailout:
2959 	/*
2960 	 * We sync in both directions because we had DMAs in the S/G list
2961 	 * in both directions.
2962 	 */
2963 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2964 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2965 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2966 	mpr_free_command(sc, cm);
2967 	xpt_done(ccb);
2968 }
2969 
2970 static void
2971 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2972 {
2973 	struct mpr_command *cm;
2974 	uint8_t *request, *response;
2975 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2976 	struct mpr_softc *sc;
2977 	struct sglist *sg;
2978 	int error;
2979 
2980 	sc = sassc->sc;
2981 	sg = NULL;
2982 	error = 0;
2983 
2984 #if (__FreeBSD_version >= 1000028) || \
2985     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2986 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2987 	case CAM_DATA_PADDR:
2988 	case CAM_DATA_SG_PADDR:
2989 		/*
2990 		 * XXX We don't yet support physical addresses here.
2991 		 */
2992 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2993 		    "supported\n", __func__);
2994 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2995 		xpt_done(ccb);
2996 		return;
2997 	case CAM_DATA_SG:
2998 		/*
2999 		 * The chip does not support more than one buffer for the
3000 		 * request or response.
3001 		 */
3002 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
3003 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3004 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3005 			    "response buffer segments not supported for SMP\n",
3006 			    __func__);
3007 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3008 			xpt_done(ccb);
3009 			return;
3010 		}
3011 
3012 		/*
3013 		 * The CAM_SCATTER_VALID flag was originally implemented
3014 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3015 		 * We have two.  So, just take that flag to mean that we
3016 		 * might have S/G lists, and look at the S/G segment count
3017 		 * to figure out whether that is the case for each individual
3018 		 * buffer.
3019 		 */
3020 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3021 			bus_dma_segment_t *req_sg;
3022 
3023 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3024 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3025 		} else
3026 			request = ccb->smpio.smp_request;
3027 
3028 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3029 			bus_dma_segment_t *rsp_sg;
3030 
3031 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3032 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3033 		} else
3034 			response = ccb->smpio.smp_response;
3035 		break;
3036 	case CAM_DATA_VADDR:
3037 		request = ccb->smpio.smp_request;
3038 		response = ccb->smpio.smp_response;
3039 		break;
3040 	default:
3041 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3042 		xpt_done(ccb);
3043 		return;
3044 	}
3045 #else /* __FreeBSD_version < 1000028 */
3046 	/*
3047 	 * XXX We don't yet support physical addresses here.
3048 	 */
3049 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3050 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3051 		    "supported\n", __func__);
3052 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3053 		xpt_done(ccb);
3054 		return;
3055 	}
3056 
3057 	/*
3058 	 * If the user wants to send an S/G list, check to make sure they
3059 	 * have single buffers.
3060 	 */
3061 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3062 		/*
3063 		 * The chip does not support more than one buffer for the
3064 		 * request or response.
3065 		 */
3066 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
3067 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3068 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3069 			    "response buffer segments not supported for SMP\n",
3070 			    __func__);
3071 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3072 			xpt_done(ccb);
3073 			return;
3074 		}
3075 
3076 		/*
3077 		 * The CAM_SCATTER_VALID flag was originally implemented
3078 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3079 		 * We have two.  So, just take that flag to mean that we
3080 		 * might have S/G lists, and look at the S/G segment count
3081 		 * to figure out whether that is the case for each individual
3082 		 * buffer.
3083 		 */
3084 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3085 			bus_dma_segment_t *req_sg;
3086 
3087 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3088 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3089 		} else
3090 			request = ccb->smpio.smp_request;
3091 
3092 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3093 			bus_dma_segment_t *rsp_sg;
3094 
3095 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3096 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3097 		} else
3098 			response = ccb->smpio.smp_response;
3099 	} else {
3100 		request = ccb->smpio.smp_request;
3101 		response = ccb->smpio.smp_response;
3102 	}
3103 #endif /* __FreeBSD_version < 1000028 */
3104 
3105 	cm = mpr_alloc_command(sc);
3106 	if (cm == NULL) {
3107 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3108 		    __func__);
3109 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3110 		xpt_done(ccb);
3111 		return;
3112 	}
3113 
3114 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3115 	bzero(req, sizeof(*req));
3116 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3117 
3118 	/* Allow the chip to use any route to this SAS address. */
3119 	req->PhysicalPort = 0xff;
3120 
3121 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3122 	req->SGLFlags =
3123 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3124 
3125 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3126 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
3127 
3128 	mpr_init_sge(cm, req, &req->SGL);
3129 
3130 	/*
3131 	 * Set up a uio to pass into mpr_map_command().  This allows us to
3132 	 * do one map command, and one busdma call in there.
3133 	 */
3134 	cm->cm_uio.uio_iov = cm->cm_iovec;
3135 	cm->cm_uio.uio_iovcnt = 2;
3136 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3137 
3138 	/*
3139 	 * The read/write flag isn't used by busdma, but set it just in
3140 	 * case.  This isn't exactly accurate, either, since we're going in
3141 	 * both directions.
3142 	 */
3143 	cm->cm_uio.uio_rw = UIO_WRITE;
3144 
3145 	cm->cm_iovec[0].iov_base = request;
3146 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3147 	cm->cm_iovec[1].iov_base = response;
3148 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3149 
3150 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3151 			       cm->cm_iovec[1].iov_len;
3152 
3153 	/*
3154 	 * Trigger a warning message in mpr_data_cb() for the user if we
3155 	 * wind up exceeding two S/G segments.  The chip expects one
3156 	 * segment for the request and another for the response.
3157 	 */
3158 	cm->cm_max_segs = 2;
3159 
3160 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3161 	cm->cm_complete = mprsas_smpio_complete;
3162 	cm->cm_complete_data = ccb;
3163 
3164 	/*
3165 	 * Tell the mapping code that we're using a uio, and that this is
3166 	 * an SMP passthrough request.  There is a little special-case
3167 	 * logic there (in mpr_data_cb()) to handle the bidirectional
3168 	 * transfer.
3169 	 */
3170 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3171 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3172 
3173 	/* The chip data format is little endian. */
3174 	req->SASAddress.High = htole32(sasaddr >> 32);
3175 	req->SASAddress.Low = htole32(sasaddr);
3176 
3177 	/*
3178 	 * XXX Note that we don't have a timeout/abort mechanism here.
3179 	 * From the manual, it looks like task management requests only
3180 	 * work for SCSI IO and SATA passthrough requests.  We may need to
3181 	 * have a mechanism to retry requests in the event of a chip reset
3182 	 * at least.  Hopefully the chip will insure that any errors short
3183 	 * of that are relayed back to the driver.
3184 	 */
3185 	error = mpr_map_command(sc, cm);
3186 	if ((error != 0) && (error != EINPROGRESS)) {
3187 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3188 		    "mpr_map_command()\n", __func__, error);
3189 		goto bailout_error;
3190 	}
3191 
3192 	return;
3193 
3194 bailout_error:
3195 	mpr_free_command(sc, cm);
3196 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3197 	xpt_done(ccb);
3198 	return;
3199 }
3200 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the device itself or its parent expander) and hand the request
 * off to mprsas_send_smpcmd().
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.  Otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/*
		 * The parent's device info and SAS address were cached at
		 * discovery time, so no extra lookup is needed here.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3328 #endif //__FreeBSD_version >= 900026
3329 
3330 static void
3331 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3332 {
3333 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3334 	struct mpr_softc *sc;
3335 	struct mpr_command *tm;
3336 	struct mprsas_target *targ;
3337 
3338 	MPR_FUNCTRACE(sassc->sc);
3339 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3340 
3341 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3342 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3343 	sc = sassc->sc;
3344 	tm = mprsas_alloc_tm(sc);
3345 	if (tm == NULL) {
3346 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3347 		    "mprsas_action_resetdev\n");
3348 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3349 		xpt_done(ccb);
3350 		return;
3351 	}
3352 
3353 	targ = &sassc->targets[ccb->ccb_h.target_id];
3354 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3355 	req->DevHandle = htole16(targ->handle);
3356 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3357 
3358 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3359 		/* SAS Hard Link Reset / SATA Link Reset */
3360 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3361 	} else {
3362 		/* PCIe Protocol Level Reset*/
3363 		req->MsgFlags =
3364 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3365 	}
3366 
3367 	tm->cm_data = NULL;
3368 	tm->cm_complete = mprsas_resetdev_complete;
3369 	tm->cm_complete_data = ccb;
3370 
3371 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3372 	    __func__, targ->tid);
3373 	tm->cm_targ = targ;
3374 
3375 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3376 	mpr_map_command(sc, tm);
3377 }
3378 
3379 static void
3380 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3381 {
3382 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3383 	union ccb *ccb;
3384 
3385 	MPR_FUNCTRACE(sc);
3386 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3387 
3388 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3389 	ccb = tm->cm_complete_data;
3390 
3391 	/*
3392 	 * Currently there should be no way we can hit this case.  It only
3393 	 * happens when we have a failure to allocate chain frames, and
3394 	 * task management commands don't have S/G lists.
3395 	 */
3396 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3397 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3398 
3399 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3400 
3401 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3402 		    "handle %#04x! This should not happen!\n", __func__,
3403 		    tm->cm_flags, req->DevHandle);
3404 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3405 		goto bailout;
3406 	}
3407 
3408 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3409 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3410 
3411 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3412 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3413 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3414 		    CAM_LUN_WILDCARD);
3415 	}
3416 	else
3417 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3418 
3419 bailout:
3420 
3421 	mprsas_free_tm(sc, tm);
3422 	xpt_done(ccb);
3423 }
3424 
3425 static void
3426 mprsas_poll(struct cam_sim *sim)
3427 {
3428 	struct mprsas_softc *sassc;
3429 
3430 	sassc = cam_sim_softc(sim);
3431 
3432 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3433 		/* frequent debug messages during a panic just slow
3434 		 * everything down too much.
3435 		 */
3436 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3437 		    __func__);
3438 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3439 	}
3440 
3441 	mpr_intr_locked(sassc->sc);
3442 }
3443 
/*
 * CAM asynchronous event callback.  Two events are handled:
 *
 * AC_ADVINFO_CHANGED (on FreeBSD versions that support it): the long
 * read capacity data for a LUN changed; re-read it via XPT_DEV_ADVINFO
 * and update the per-LUN EEDP (protection information) state.
 *
 * AC_FOUND_DEVICE (on older FreeBSD versions only): a new device was
 * found; probe it for EEDP support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the LUN this event refers to, or allocate a new one. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/* Re-read the cached long read capacity data synchronously. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * Only protection types 1 and 3 count as EEDP-formatted;
		 * type 2 and anything unknown are treated as unformatted.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
			case SRC16_PTYPE_1:
			case SRC16_PTYPE_3:
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				break;
			case SRC16_PTYPE_2:
			default:
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				break;
			}
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3584 
3585 #if (__FreeBSD_version < 901503) || \
3586     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3587 static void
3588 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3589     struct ccb_getdev *cgd)
3590 {
3591 	struct mprsas_softc *sassc = sc->sassc;
3592 	struct ccb_scsiio *csio;
3593 	struct scsi_read_capacity_16 *scsi_cmd;
3594 	struct scsi_read_capacity_eedp *rcap_buf;
3595 	path_id_t pathid;
3596 	target_id_t targetid;
3597 	lun_id_t lunid;
3598 	union ccb *ccb;
3599 	struct cam_path *local_path;
3600 	struct mprsas_target *target;
3601 	struct mprsas_lun *lun;
3602 	uint8_t	found_lun;
3603 	char path_str[64];
3604 
3605 	pathid = cam_sim_path(sassc->sim);
3606 	targetid = xpt_path_target_id(path);
3607 	lunid = xpt_path_lun_id(path);
3608 
3609 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3610 	    "mprsas_check_eedp\n", targetid));
3611 	target = &sassc->targets[targetid];
3612 	if (target->handle == 0x0)
3613 		return;
3614 
3615 	/*
3616 	 * Determine if the device is EEDP capable.
3617 	 *
3618 	 * If this flag is set in the inquiry data, the device supports
3619 	 * protection information, and must support the 16 byte read capacity
3620 	 * command, otherwise continue without sending read cap 16.
3621 	 */
3622 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3623 		return;
3624 
3625 	/*
3626 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3627 	 * the LUN is formatted for EEDP support.
3628 	 */
3629 	ccb = xpt_alloc_ccb_nowait();
3630 	if (ccb == NULL) {
3631 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3632 		    "support.\n");
3633 		return;
3634 	}
3635 
3636 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3637 	    CAM_REQ_CMP) {
3638 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3639 		    "support.\n");
3640 		xpt_free_ccb(ccb);
3641 		return;
3642 	}
3643 
3644 	/*
3645 	 * If LUN is already in list, don't create a new one.
3646 	 */
3647 	found_lun = FALSE;
3648 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3649 		if (lun->lun_id == lunid) {
3650 			found_lun = TRUE;
3651 			break;
3652 		}
3653 	}
3654 	if (!found_lun) {
3655 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3656 		    M_NOWAIT | M_ZERO);
3657 		if (lun == NULL) {
3658 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3659 			    "EEDP support.\n");
3660 			xpt_free_path(local_path);
3661 			xpt_free_ccb(ccb);
3662 			return;
3663 		}
3664 		lun->lun_id = lunid;
3665 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3666 	}
3667 
3668 	xpt_path_string(local_path, path_str, sizeof(path_str));
3669 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3670 	    path_str, target->handle);
3671 
3672 	/*
3673 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3674 	 * mprsas_read_cap_done function will load the read cap info into the
3675 	 * LUN struct.
3676 	 */
3677 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3678 	    M_NOWAIT | M_ZERO);
3679 	if (rcap_buf == NULL) {
3680 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3681 		    "buffer for EEDP support.\n");
3682 		xpt_free_path(ccb->ccb_h.path);
3683 		xpt_free_ccb(ccb);
3684 		return;
3685 	}
3686 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3687 	csio = &ccb->csio;
3688 	csio->ccb_h.func_code = XPT_SCSI_IO;
3689 	csio->ccb_h.flags = CAM_DIR_IN;
3690 	csio->ccb_h.retry_count = 4;
3691 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3692 	csio->ccb_h.timeout = 60000;
3693 	csio->data_ptr = (uint8_t *)rcap_buf;
3694 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3695 	csio->sense_len = MPR_SENSE_LEN;
3696 	csio->cdb_len = sizeof(*scsi_cmd);
3697 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3698 
3699 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3700 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3701 	scsi_cmd->opcode = 0x9E;
3702 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3703 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3704 
3705 	ccb->ccb_h.ppriv_ptr1 = sassc;
3706 	xpt_action(ccb);
3707 }
3708 
/*
 * Completion handler for the internally generated READ CAPACITY 16 command
 * used for EEDP probing.  Loads the EEDP formatting state and block size
 * into the matching LUN in the target's LUN list, then frees the data
 * buffer, path, and CCB that were allocated when the command was issued.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself here because this SCSI
	 * command was generated internally by the driver and does not pass
	 * back through cam_periph.  This is currently the only place the
	 * driver issues a SCSI command internally; if more such commands
	 * are added in the future, they will need to release the devq the
	 * same way, for the same reason.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/*
		 * The low bit of the protect byte indicates the LUN is
		 * formatted with protection information enabled.
		 */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Finished with this CCB and path. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3777 #endif /* (__FreeBSD_version < 901503) || \
3778           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3779 
3780 /*
3781  * Set the INRESET flag for this target so that no I/O will be sent to
3782  * the target until the reset has completed.  If an I/O request does
3783  * happen, the devq will be frozen.  The CCB holds the path which is
3784  * used to release the devq.  The devq is released and the CCB is freed
3785  * when the TM completes.
3786  */
3787 void
3788 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3789     struct mprsas_target *target, lun_id_t lun_id)
3790 {
3791 	union ccb *ccb;
3792 	path_id_t path_id;
3793 
3794 	ccb = xpt_alloc_ccb_nowait();
3795 	if (ccb) {
3796 		path_id = cam_sim_path(sc->sassc->sim);
3797 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3798 		    target->tid, lun_id) != CAM_REQ_CMP) {
3799 			xpt_free_ccb(ccb);
3800 		} else {
3801 			tm->cm_ccb = ccb;
3802 			tm->cm_targ = target;
3803 			target->flags |= MPRSAS_TARGET_INRESET;
3804 		}
3805 	}
3806 }
3807 
3808 int
3809 mprsas_startup(struct mpr_softc *sc)
3810 {
3811 	/*
3812 	 * Send the port enable message and set the wait_for_port_enable flag.
3813 	 * This flag helps to keep the simq frozen until all discovery events
3814 	 * are processed.
3815 	 */
3816 	sc->wait_for_port_enable = 1;
3817 	mprsas_send_portenable(sc);
3818 	return (0);
3819 }
3820 
3821 static int
3822 mprsas_send_portenable(struct mpr_softc *sc)
3823 {
3824 	MPI2_PORT_ENABLE_REQUEST *request;
3825 	struct mpr_command *cm;
3826 
3827 	MPR_FUNCTRACE(sc);
3828 
3829 	if ((cm = mpr_alloc_command(sc)) == NULL)
3830 		return (EBUSY);
3831 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3832 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3833 	request->MsgFlags = 0;
3834 	request->VP_ID = 0;
3835 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3836 	cm->cm_complete = mprsas_portenable_complete;
3837 	cm->cm_data = NULL;
3838 	cm->cm_sge = NULL;
3839 
3840 	mpr_map_command(sc, cm);
3841 	mpr_dprint(sc, MPR_XINFO,
3842 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3843 	    cm, cm->cm_req, cm->cm_complete);
3844 	return (0);
3845 }
3846 
3847 static void
3848 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3849 {
3850 	MPI2_PORT_ENABLE_REPLY *reply;
3851 	struct mprsas_softc *sassc;
3852 
3853 	MPR_FUNCTRACE(sc);
3854 	sassc = sc->sassc;
3855 
3856 	/*
3857 	 * Currently there should be no way we can hit this case.  It only
3858 	 * happens when we have a failure to allocate chain frames, and
3859 	 * port enable commands don't have S/G lists.
3860 	 */
3861 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3862 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3863 		    "This should not happen!\n", __func__, cm->cm_flags);
3864 	}
3865 
3866 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3867 	if (reply == NULL)
3868 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3869 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3870 	    MPI2_IOCSTATUS_SUCCESS)
3871 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3872 
3873 	mpr_free_command(sc, cm);
3874 	/*
3875 	 * Done waiting for port enable to complete.  Decrement the refcount.
3876 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3877 	 * take place.
3878 	 */
3879 	sc->wait_for_port_enable = 0;
3880 	sc->port_enable_complete = 1;
3881 	wakeup(&sc->port_enable_complete);
3882 	mprsas_startup_decrement(sassc);
3883 }
3884 
3885 int
3886 mprsas_check_id(struct mprsas_softc *sassc, int id)
3887 {
3888 	struct mpr_softc *sc = sassc->sc;
3889 	char *ids;
3890 	char *name;
3891 
3892 	ids = &sc->exclude_ids[0];
3893 	while((name = strsep(&ids, ",")) != NULL) {
3894 		if (name[0] == '\0')
3895 			continue;
3896 		if (strtol(name, NULL, 0) == (long)id)
3897 			return (1);
3898 	}
3899 
3900 	return (0);
3901 }
3902 
3903 void
3904 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3905 {
3906 	struct mprsas_softc *sassc;
3907 	struct mprsas_lun *lun, *lun_tmp;
3908 	struct mprsas_target *targ;
3909 	int i;
3910 
3911 	sassc = sc->sassc;
3912 	/*
3913 	 * The number of targets is based on IOC Facts, so free all of
3914 	 * the allocated LUNs for each target and then the target buffer
3915 	 * itself.
3916 	 */
3917 	for (i=0; i< maxtargets; i++) {
3918 		targ = &sassc->targets[i];
3919 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3920 			free(lun, M_MPR);
3921 		}
3922 	}
3923 	free(sassc->targets, M_MPR);
3924 
3925 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3926 	    M_MPR, M_WAITOK|M_ZERO);
3927 	if (!sassc->targets) {
3928 		panic("%s failed to alloc targets with error %d\n",
3929 		    __func__, ENOMEM);
3930 	}
3931 }
3932