xref: /freebsd/sys/dev/mpr/mpr_sas.c (revision f56f82e0)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * Copyright (c) 2011-2015 LSI Corp.
4  * Copyright (c) 2013-2016 Avago Technologies
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
29  *
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 /* Communications core for Avago Technologies (LSI) MPT3 */
36 
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
44 #include <sys/bus.h>
45 #include <sys/conf.h>
46 #include <sys/bio.h>
47 #include <sys/malloc.h>
48 #include <sys/uio.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
54 #include <sys/sbuf.h>
55 
56 #include <machine/bus.h>
57 #include <machine/resource.h>
58 #include <sys/rman.h>
59 
60 #include <machine/stdarg.h>
61 
62 #include <cam/cam.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
73 #endif
74 
75 #include <dev/nvme/nvme.h>
76 
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
89 
90 #define MPRSAS_DISCOVERY_TIMEOUT	20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
92 
93 /*
94  * static array to check SCSI OpCode for EEDP protection bits
95  */
96 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 static uint8_t op_code_prot[256] = {
100 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
111 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
116 };
117 
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
119 
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131     struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133     struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137     struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139     union ccb *done_ccb);
140 #endif
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143     struct mpr_command *cm);
144 
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
148     uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
151 
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
154     uint16_t handle)
155 {
156 	struct mprsas_target *target;
157 	int i;
158 
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
162 			return (target);
163 	}
164 
165 	return (NULL);
166 }
167 
/* we need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
 */
void
mprsas_startup_increment(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	/* Refcounting only matters while the driver is in startup. */
	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (sassc->startup_refcount++ == 0) {
			/* just starting, freeze the simq */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s freezing simq\n", __func__);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			/* Also delay root mount until discovery finishes. */
			xpt_hold_boot();
#endif
			xpt_freeze_simq(sassc->sim, 1);
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
195 
196 void
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
198 {
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
203 	}
204 }
205 
/*
 * Drop one reference taken by mprsas_startup_increment().  When the count
 * reaches zero, startup is over: clear MPRSAS_IN_STARTUP, release the simq
 * and either release the boot hold (newer CAM) or kick off a full-bus
 * rescan (older CAM) so the latest topology is picked up.
 */
void
mprsas_startup_decrement(struct mprsas_softc *sassc)
{
	MPR_FUNCTRACE(sassc->sc);

	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
		if (--sassc->startup_refcount == 0) {
			/* finished all discovery-related actions, release
			 * the simq and rescan for the latest topology.
			 */
			mpr_dprint(sassc->sc, MPR_INIT,
			    "%s releasing simq\n", __func__);
			sassc->flags &= ~MPRSAS_IN_STARTUP;
			xpt_release_simq(sassc->sim, 1);
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
			/* Pairs with xpt_hold_boot() in startup_increment. */
			xpt_release_boot();
#else
			/* No boot hold available; rescan the whole bus. */
			mprsas_rescan_target(sassc->sc, NULL);
#endif
		}
		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
		    sassc->startup_refcount);
	}
}
231 
/* The firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	MPR_FUNCTRACE(sc);
	/* TMs come from the dedicated high-priority command pool. */
	return (mpr_alloc_high_priority_command(sc));
}
245 
/*
 * Release a task-management command obtained from mprsas_alloc_tm().
 * Clears the target's INRESET flag, releases the devq freeze (and the
 * path/CCB that were allocated to perform it), then returns the command
 * to the high-priority pool.  Safe to call with tm == NULL.
 */
void
mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
{
	/* Sentinel target ID printed when the TM had no target attached. */
	int target_id = 0xFFFFFFFF;

	MPR_FUNCTRACE(sc);
	if (tm == NULL)
		return;

	/*
	 * For TM's the devq is frozen for the device.  Unfreeze it here and
	 * free the resources used for freezing the devq.  Must clear the
	 * INRESET flag as well or scsi I/O will not work.
	 */
	if (tm->cm_targ != NULL) {
		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
		target_id = tm->cm_targ->tid;
	}
	if (tm->cm_ccb) {
		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
		    target_id);
		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
		xpt_free_path(tm->cm_ccb->ccb_h.path);
		xpt_free_ccb(tm->cm_ccb);
	}

	mpr_free_high_priority_command(sc, tm);
}
274 
/*
 * Schedule an asynchronous CAM rescan of one target, or of the whole bus
 * when targ is NULL.  Failure to allocate the CCB or path is logged and
 * the rescan is silently dropped.
 */
void
mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
{
	struct mprsas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPR_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		/* Target ID is the target's index in the targets[] array. */
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	/* Wildcard target means a full-bus scan, otherwise scan one target. */
	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
	/* xpt_rescan() takes ownership of the CCB and path. */
	xpt_rescan(ccb);
}
314 
/*
 * printf-style debug logging helper for a command.  When the command has a
 * CCB attached, the message is prefixed with the CAM path string and (for
 * SCSI I/O) the decoded CDB and transfer length; otherwise a synthetic
 * "noperiph" sim/bus/target/lun prefix is used.  Output is suppressed
 * unless 'level' is enabled in the softc's debug mask.
 */
static void
mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mpr_debug & level) == 0)
		return;

	/* Fixed-size sbuf over str[]; overly long messages are truncated. */
	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	} else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));

	va_end(ap);
}
359 
/*
 * Completion handler for the target reset issued by
 * mprsas_prepare_volume_remove().  For volumes no SAS_OP_REMOVE_DEVICE
 * follows; if the reset succeeded, the target's firmware-derived state is
 * cleared here.  devname/sasaddr are deliberately preserved (see comment
 * below).  The TM is freed in all paths.
 */
static void
mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mprsas_target *targ;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* Handle was stashed in cm_complete_data when the TM was built. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		/* Blank (not NUL) padding matches how the name is printed. */
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;
	}

	mprsas_free_tm(sc, tm);
}
423 
424 
425 /*
426  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427  * Otherwise Volume Delete is same as Bare Drive Removal.
428  */
429 void
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
431 {
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
436 
437 	MPR_FUNCTRACE(sassc->sc);
438 	sc = sassc->sc;
439 
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
441 	if (targ == NULL) {
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 		return;
447 	}
448 
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
450 
451 	cm = mprsas_alloc_tm(sc);
452 	if (cm == NULL) {
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
455 		return;
456 	}
457 
458 	mprsas_rescan_target(sc, targ);
459 
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
464 
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 
468 	cm->cm_targ = targ;
469 	cm->cm_data = NULL;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
474 
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
478 
479 	mpr_map_command(sc, cm);
480 }
481 
482 /*
483  * The firmware performs debounce on the link to avoid transient link errors
484  * and false removals.  When it does decide that link has been lost and a
485  * device needs to go away, it expects that the host will perform a target reset
486  * and then an op remove.  The reset has the side-effect of aborting any
487  * outstanding requests for the device, which is required for the op-remove to
488  * succeed.  It's not clear if the host should check for the device coming back
489  * alive after the reset.
490  */
491 void
492 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
493 {
494 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
495 	struct mpr_softc *sc;
496 	struct mpr_command *cm;
497 	struct mprsas_target *targ = NULL;
498 
499 	MPR_FUNCTRACE(sassc->sc);
500 
501 	sc = sassc->sc;
502 
503 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
504 	if (targ == NULL) {
505 		/* FIXME: what is the action? */
506 		/* We don't know about this device? */
507 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
508 		    __func__, handle);
509 		return;
510 	}
511 
512 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
513 
514 	cm = mprsas_alloc_tm(sc);
515 	if (cm == NULL) {
516 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
517 		    __func__);
518 		return;
519 	}
520 
521 	mprsas_rescan_target(sc, targ);
522 
523 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
524 	memset(req, 0, sizeof(*req));
525 	req->DevHandle = htole16(targ->handle);
526 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
527 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
528 
529 	/* SAS Hard Link Reset / SATA Link Reset */
530 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
531 
532 	cm->cm_targ = targ;
533 	cm->cm_data = NULL;
534 	cm->cm_desc.HighPriority.RequestFlags =
535 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
536 	cm->cm_complete = mprsas_remove_device;
537 	cm->cm_complete_data = (void *)(uintptr_t)handle;
538 
539 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
540 	    __func__, targ->tid);
541 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
542 
543 	mpr_map_command(sc, cm);
544 }
545 
/*
 * Completion handler for the target reset sent by mprsas_prepare_remove().
 * On a usable reply, the same command is recycled into a SAS IO Unit
 * Control OP_REMOVE_DEVICE request (completing in mprsas_remove_complete),
 * and any commands still queued on the target are failed back to CAM with
 * CAM_DEV_NOT_THERE.
 */
static void
mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mprsas_target *targ;
	struct mpr_command *next_cm;
	uint16_t handle;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* Handle was stashed in cm_complete_data when the TM was built. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
	}

	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mpr_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mprsas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mpr_map_command(sc, tm);

	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}
	/*
	 * The reset aborted everything in the firmware; complete whatever is
	 * still queued on the target back to CAM as "device not there".
	 * Note: 'tm' is reused as the loop cursor here.
	 */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mprsas_scsiio_complete(sc, tm);
	}
}
620 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mprsas_remove_device().  On success, clears the target's firmware-derived
 * state and frees its LUN list; devname/sasaddr are deliberately preserved
 * (see comment below).  The TM is freed in all paths.
 */
static void
mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;

	MPR_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	/* Handle was stashed in cm_complete_data by mprsas_remove_device. */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
		    "handle %#04x! This should not happen!\n", __func__,
		    tm->cm_flags, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
		    "0x%04x\n", __func__, handle);
		mprsas_free_tm(sc, tm);
		return;
	}

	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
	    __func__, handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_level_valid = 0x0;
		targ->encl_level = 0x0;
		/* Blank (not NUL) padding matches how the name is printed. */
		targ->connector_name[0] = ' ';
		targ->connector_name[1] = ' ';
		targ->connector_name[2] = ' ';
		targ->connector_name[3] = ' ';
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
		targ->scsi_req_desc_type = 0;

		/* Free the per-LUN records accumulated for this target. */
		while (!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPR);
		}
	}

	mprsas_free_tm(sc, tm);
}
692 
693 static int
694 mprsas_register_events(struct mpr_softc *sc)
695 {
696 	uint8_t events[16];
697 
698 	bzero(events, 16);
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
717 		}
718 	}
719 
720 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 	    &sc->sassc->mprsas_eh);
722 
723 	return (0);
724 }
725 
726 int
727 mpr_attach_sas(struct mpr_softc *sc)
728 {
729 	struct mprsas_softc *sassc;
730 	cam_status status;
731 	int unit, error = 0;
732 
733 	MPR_FUNCTRACE(sc);
734 
735 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
736 	if (!sassc) {
737 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
738 		    __func__, __LINE__);
739 		return (ENOMEM);
740 	}
741 
742 	/*
743 	 * XXX MaxTargets could change during a reinit.  Since we don't
744 	 * resize the targets[] array during such an event, cache the value
745 	 * of MaxTargets here so that we don't get into trouble later.  This
746 	 * should move into the reinit logic.
747 	 */
748 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
749 	sassc->targets = malloc(sizeof(struct mprsas_target) *
750 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
751 	if (!sassc->targets) {
752 		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
753 		    __func__, __LINE__);
754 		free(sassc, M_MPR);
755 		return (ENOMEM);
756 	}
757 	sc->sassc = sassc;
758 	sassc->sc = sc;
759 
760 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
761 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
762 		error = ENOMEM;
763 		goto out;
764 	}
765 
766 	unit = device_get_unit(sc->mpr_dev);
767 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
768 	    unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
769 	if (sassc->sim == NULL) {
770 		mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
771 		error = EINVAL;
772 		goto out;
773 	}
774 
775 	TAILQ_INIT(&sassc->ev_queue);
776 
777 	/* Initialize taskqueue for Event Handling */
778 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
779 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
780 	    taskqueue_thread_enqueue, &sassc->ev_tq);
781 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
782 	    device_get_nameunit(sc->mpr_dev));
783 
784 	mpr_lock(sc);
785 
786 	/*
787 	 * XXX There should be a bus for every port on the adapter, but since
788 	 * we're just going to fake the topology for now, we'll pretend that
789 	 * everything is just a target on a single bus.
790 	 */
791 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
792 		mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
793 		    error);
794 		mpr_unlock(sc);
795 		goto out;
796 	}
797 
798 	/*
799 	 * Assume that discovery events will start right away.
800 	 *
801 	 * Hold off boot until discovery is complete.
802 	 */
803 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
804 	sc->sassc->startup_refcount = 0;
805 	mprsas_startup_increment(sassc);
806 
807 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
808 
809 	/*
810 	 * Register for async events so we can determine the EEDP
811 	 * capabilities of devices.
812 	 */
813 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
814 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
815 	    CAM_LUN_WILDCARD);
816 	if (status != CAM_REQ_CMP) {
817 		mpr_printf(sc, "Error %#x creating sim path\n", status);
818 		sassc->path = NULL;
819 	} else {
820 		int event;
821 
822 #if (__FreeBSD_version >= 1000006) || \
823     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
824 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
825 #else
826 		event = AC_FOUND_DEVICE;
827 #endif
828 
829 		/*
830 		 * Prior to the CAM locking improvements, we can't call
831 		 * xpt_register_async() with a particular path specified.
832 		 *
833 		 * If a path isn't specified, xpt_register_async() will
834 		 * generate a wildcard path and acquire the XPT lock while
835 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
836 		 * It will then drop the XPT lock once that is done.
837 		 *
838 		 * If a path is specified for xpt_register_async(), it will
839 		 * not acquire and drop the XPT lock around the call to
840 		 * xpt_action().  xpt_action() asserts that the caller
841 		 * holds the SIM lock, so the SIM lock has to be held when
842 		 * calling xpt_register_async() when the path is specified.
843 		 *
844 		 * But xpt_register_async calls xpt_for_all_devices(),
845 		 * which calls xptbustraverse(), which will acquire each
846 		 * SIM lock.  When it traverses our particular bus, it will
847 		 * necessarily acquire the SIM lock, which will lead to a
848 		 * recursive lock acquisition.
849 		 *
850 		 * The CAM locking changes fix this problem by acquiring
851 		 * the XPT topology lock around bus traversal in
852 		 * xptbustraverse(), so the caller can hold the SIM lock
853 		 * and it does not cause a recursive lock acquisition.
854 		 *
855 		 * These __FreeBSD_version values are approximate, especially
856 		 * for stable/10, which is two months later than the actual
857 		 * change.
858 		 */
859 
860 #if (__FreeBSD_version < 1000703) || \
861     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
862 		mpr_unlock(sc);
863 		status = xpt_register_async(event, mprsas_async, sc,
864 					    NULL);
865 		mpr_lock(sc);
866 #else
867 		status = xpt_register_async(event, mprsas_async, sc,
868 					    sassc->path);
869 #endif
870 
871 		if (status != CAM_REQ_CMP) {
872 			mpr_dprint(sc, MPR_ERROR,
873 			    "Error %#x registering async handler for "
874 			    "AC_ADVINFO_CHANGED events\n", status);
875 			xpt_free_path(sassc->path);
876 			sassc->path = NULL;
877 		}
878 	}
879 	if (status != CAM_REQ_CMP) {
880 		/*
881 		 * EEDP use is the exception, not the rule.
882 		 * Warn the user, but do not fail to attach.
883 		 */
884 		mpr_printf(sc, "EEDP capabilities disabled.\n");
885 	}
886 
887 	mpr_unlock(sc);
888 
889 	mprsas_register_events(sc);
890 out:
891 	if (error)
892 		mpr_detach_sas(sc);
893 	return (error);
894 }
895 
/*
 * Tear down everything mpr_attach_sas() created: deregister events, drain
 * and free the event taskqueue (lock not held, to let queued tasks finish),
 * deregister the async handler and SCSI bus, free the SIM/devq, and release
 * the per-target LUN lists and the softc itself.  Tolerates a partially
 * completed attach.  Always returns 0.
 */
int
mpr_detach_sas(struct mpr_softc *sc)
{
	struct mprsas_softc *sassc;
	struct mprsas_lun *lun, *lun_tmp;
	struct mprsas_target *targ;
	int i;

	MPR_FUNCTRACE(sc);

	/* Nothing to do if attach never got far enough to set sc->sassc. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mpr_deregister_events(sc, sassc->mprsas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mpr_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		/* An event mask of 0 deregisters the handler. */
		xpt_register_async(0, mprsas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	/* Release the startup simq freeze if it is still in place. */
	if (sassc->flags & MPRSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	mpr_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free all LUN records hanging off each target. */
	for (i = 0; i < sassc->maxtargets; i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPR);
		}
	}
	free(sassc->targets, M_MPR);
	free(sassc, M_MPR);
	sc->sassc = NULL;

	return (0);
}
955 
956 void
957 mprsas_discovery_end(struct mprsas_softc *sassc)
958 {
959 	struct mpr_softc *sc = sassc->sc;
960 
961 	MPR_FUNCTRACE(sc);
962 
963 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
964 		callout_stop(&sassc->discovery_callout);
965 
966 	/*
967 	 * After discovery has completed, check the mapping table for any
968 	 * missing devices and update their missing counts. Only do this once
969 	 * whenever the driver is initialized so that missing counts aren't
970 	 * updated unnecessarily. Note that just because discovery has
971 	 * completed doesn't mean that events have been processed yet. The
972 	 * check_devices function is a callout timer that checks if ALL devices
973 	 * are missing. If so, it will wait a little longer for events to
974 	 * complete and keep resetting itself until some device in the mapping
975 	 * table is not missing, meaning that event processing has started.
976 	 */
977 	if (sc->track_mapping_events) {
978 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
979 		    "completed. Check for missing devices in the mapping "
980 		    "table.\n");
981 		callout_reset(&sc->device_check_callout,
982 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
983 		    sc);
984 	}
985 }
986 
/*
 * CAM SIM action entry point.  Dispatches incoming CCBs by function code;
 * handlers that complete asynchronously (scsiio, smpio, resetdev) return
 * directly, everything else falls through to xpt_done() at the bottom.
 * Called with the softc mutex held (asserted below).
 */
static void
mprsas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mprsas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPR_FUNCTRACE(sassc->sc);
	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report SIM capabilities and limits to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mpr_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		cpi->max_lun = 255;

		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * XXXSLM-I think this needs to change based on config page or
		 * something instead of hardcoded to 150000.
		 */
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;

		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0. The user's value is in pages.
		 */
		/*
		 * NOTE(review): sges_per_frame is a uint8_t; if
		 * chain_frame_size / sizeof(SGE) - 1 ever exceeds 255 this
		 * truncates and undersizes maxio — confirm the controller's
		 * chain frame size keeps this in range.
		 */
		sges_per_frame = (sc->chain_frame_size /
		    sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
		    cpi->maxio))
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		sc->maxio = cpi->maxio;
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (SAS link rate). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mprsas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device is present at this target ID. */
		if (targ->handle == 0x0) {
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the MPI link rate code to a bitrate in Kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		case 0x0b:
			sas->bitrate = 1200000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_RESET_DEV:
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
		    "XPT_RESET_DEV\n");
		/* Completes the CCB itself; do not fall through to xpt_done. */
		mprsas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not supported by the hardware; report success to CAM. */
		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
		    "for abort or reset\n");
		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		break;
	case XPT_SCSI_IO:
		/* Asynchronous completion; CCB is finished elsewhere. */
		mprsas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Asynchronous completion; CCB is finished elsewhere. */
		mprsas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
		break;
	}
	xpt_done(ccb);

}
1139 
1140 static void
1141 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1142     target_id_t target_id, lun_id_t lun_id)
1143 {
1144 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1145 	struct cam_path *path;
1146 
1147 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1148 	    ac_code, target_id, (uintmax_t)lun_id);
1149 
1150 	if (xpt_create_path(&path, NULL,
1151 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1152 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1153 		    "notification\n");
1154 		return;
1155 	}
1156 
1157 	xpt_async(ac_code, path, NULL);
1158 	xpt_free_path(path);
1159 }
1160 
/*
 * Force completion of every outstanding command after a diag reset.
 * The hardware will never reply, so each command is completed with a
 * NULL reply: run its completion callback and/or wake its sleeper, and
 * fix up the active-I/O accounting.  Called with the softc mutex held.
 */
static void
mprsas_complete_all_commands(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;
	int completed;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	/* NOTE(review): index 0 is skipped — presumably SMID 0 is reserved;
	 * confirm against the command-array setup in attach. */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands spin on COMPLETE; set it so they exit. */
		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);
			completed = 1;
		}

		/* Sleeping submitters are woken after any callback ran. */
		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
			mprsas_log_command(cm, MPR_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		/* Keep the active-command counter from going negative. */
		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
		} else {
			mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "
			    "0\n");
		}

		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mprsas_log_command(cm, MPR_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
1213 
/*
 * Post-diag-reset recovery: put the driver back into startup/discovery
 * mode, tell CAM the bus was reset, flush all outstanding commands, and
 * invalidate every cached target handle so rediscovery can repopulate
 * them with the controller's new handles.
 */
void
mprsas_handle_reinit(struct mpr_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPRSAS_IN_STARTUP;
	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
	mprsas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mprsas_complete_all_commands(sc);

	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
	    __func__, sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		/* A nonzero count here means commands were lost in the reset. */
		if (sc->sassc->targets[i].outstanding != 0)
			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
	}
}
1256 static void
1257 mprsas_tm_timeout(void *data)
1258 {
1259 	struct mpr_command *tm = data;
1260 	struct mpr_softc *sc = tm->cm_sc;
1261 
1262 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1263 
1264 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1265 	    "out\n", tm);
1266 	mpr_reinit(sc);
1267 }
1268 
1269 static void
1270 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1271 {
1272 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1273 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1274 	unsigned int cm_count = 0;
1275 	struct mpr_command *cm;
1276 	struct mprsas_target *targ;
1277 
1278 	callout_stop(&tm->cm_callout);
1279 
1280 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1281 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1282 	targ = tm->cm_targ;
1283 
1284 	/*
1285 	 * Currently there should be no way we can hit this case.  It only
1286 	 * happens when we have a failure to allocate chain frames, and
1287 	 * task management commands don't have S/G lists.
1288 	 */
1289 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1290 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1291 		    "This should not happen!\n", __func__, tm->cm_flags);
1292 		mprsas_free_tm(sc, tm);
1293 		return;
1294 	}
1295 
1296 	if (reply == NULL) {
1297 		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1298 		    "%p\n", tm);
1299 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1300 			/* this completion was due to a reset, just cleanup */
1301 			targ->tm = NULL;
1302 			mprsas_free_tm(sc, tm);
1303 		}
1304 		else {
1305 			/* we should have gotten a reply. */
1306 			mpr_reinit(sc);
1307 		}
1308 		return;
1309 	}
1310 
1311 	mprsas_log_command(tm, MPR_RECOVERY,
1312 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1313 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1314 	    le32toh(reply->TerminationCount));
1315 
1316 	/* See if there are any outstanding commands for this LUN.
1317 	 * This could be made more efficient by using a per-LU data
1318 	 * structure of some sort.
1319 	 */
1320 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1321 		if (cm->cm_lun == tm->cm_lun)
1322 			cm_count++;
1323 	}
1324 
1325 	if (cm_count == 0) {
1326 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1327 		    "logical unit %u finished recovery after reset\n",
1328 		    tm->cm_lun, tm);
1329 
1330 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1331 		    tm->cm_lun);
1332 
1333 		/* we've finished recovery for this logical unit.  check and
1334 		 * see if some other logical unit has a timedout command
1335 		 * that needs to be processed.
1336 		 */
1337 		cm = TAILQ_FIRST(&targ->timedout_commands);
1338 		if (cm) {
1339 			mprsas_send_abort(sc, tm, cm);
1340 		}
1341 		else {
1342 			targ->tm = NULL;
1343 			mprsas_free_tm(sc, tm);
1344 		}
1345 	}
1346 	else {
1347 		/* if we still have commands for this LUN, the reset
1348 		 * effectively failed, regardless of the status reported.
1349 		 * Escalate to a target reset.
1350 		 */
1351 		mprsas_log_command(tm, MPR_RECOVERY,
1352 		    "logical unit reset complete for tm %p, but still have %u "
1353 		    "command(s)\n", tm, cm_count);
1354 		mprsas_send_reset(sc, tm,
1355 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1356 	}
1357 }
1358 
1359 static void
1360 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1361 {
1362 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1363 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1364 	struct mprsas_target *targ;
1365 
1366 	callout_stop(&tm->cm_callout);
1367 
1368 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1369 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1370 	targ = tm->cm_targ;
1371 
1372 	/*
1373 	 * Currently there should be no way we can hit this case.  It only
1374 	 * happens when we have a failure to allocate chain frames, and
1375 	 * task management commands don't have S/G lists.
1376 	 */
1377 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1378 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1379 		    "reset! This should not happen!\n", __func__, tm->cm_flags);
1380 		mprsas_free_tm(sc, tm);
1381 		return;
1382 	}
1383 
1384 	if (reply == NULL) {
1385 		mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1386 		    "%p\n", tm);
1387 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1388 			/* this completion was due to a reset, just cleanup */
1389 			targ->tm = NULL;
1390 			mprsas_free_tm(sc, tm);
1391 		}
1392 		else {
1393 			/* we should have gotten a reply. */
1394 			mpr_reinit(sc);
1395 		}
1396 		return;
1397 	}
1398 
1399 	mprsas_log_command(tm, MPR_RECOVERY,
1400 	    "target reset status 0x%x code 0x%x count %u\n",
1401 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1402 	    le32toh(reply->TerminationCount));
1403 
1404 	if (targ->outstanding == 0) {
1405 		/* we've finished recovery for this target and all
1406 		 * of its logical units.
1407 		 */
1408 		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1409 		    "recovery finished after target reset\n");
1410 
1411 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1412 		    CAM_LUN_WILDCARD);
1413 
1414 		targ->tm = NULL;
1415 		mprsas_free_tm(sc, tm);
1416 	}
1417 	else {
1418 		/* after a target reset, if this target still has
1419 		 * outstanding commands, the reset effectively failed,
1420 		 * regardless of the status reported.  escalate.
1421 		 */
1422 		mprsas_log_command(tm, MPR_RECOVERY,
1423 		    "target reset complete for tm %p, but still have %u "
1424 		    "command(s)\n", tm, targ->outstanding);
1425 		mpr_reinit(sc);
1426 	}
1427 }
1428 
1429 #define MPR_RESET_TIMEOUT 30
1430 
/*
 * Build and send a SCSI task management reset of the given type (LUN
 * reset or target reset) using the pre-allocated TM command `tm`.
 * Arms a MPR_RESET_TIMEOUT-second callout that escalates to a
 * controller reinit if the TM never completes.  Returns the
 * mpr_map_command() result, or -1 for a missing devhandle or an
 * unsupported reset type.
 */
int
mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *target;
	int err;

	target = tm->cm_targ;
	/* handle == 0 means the target is gone; nothing to reset. */
	if (target->handle == 0) {
		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
		    "%d\n", __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPR_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete = mprsas_logical_unit_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/*
		 * Target reset method =
		 *     SAS Hard Link Reset / SATA Link Reset
		 */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
		    "sending target reset\n");
		tm->cm_complete = mprsas_target_reset_complete;
		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
	}
	else {
		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
	    target->handle);
	if (target->encl_level_valid) {
		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", target->encl_level,
		    target->encl_slot, target->connector_name);
	}

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* Escalate to a diag reset if the TM itself times out. */
	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
	    mprsas_tm_timeout, tm);

	err = mpr_map_command(sc, tm);
	if (err)
		mprsas_log_command(tm, MPR_RECOVERY,
		    "error %d sending reset type %u\n", err, type);

	return err;
}
1499 
1500 
/*
 * Completion handler for an ABORT_TASK task management command.  Drives
 * the recovery state machine for the target: if no timed-out commands
 * remain, recovery is done; if more remain, send the next abort; if the
 * aborted command itself is still at the head of the timed-out list,
 * the abort failed and recovery escalates to a LUN reset.
 */
static void
mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
{
	struct mpr_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mprsas_target *targ;

	/* The TM completed; disarm its escalation timer. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mprsas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mprsas_log_command(tm, MPR_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mprsas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mpr_reinit(sc);
		}
		return;
	}

	mprsas_log_command(tm, MPR_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mprsas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		/* Reuse this TM for the next timed-out command. */
		mprsas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mprsas_log_command(tm, MPR_RECOVERY,
		    "abort failed for TaskMID %u tm %p\n",
		    le16toh(req->TaskMID), tm);

		mprsas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1582 
1583 #define MPR_ABORT_TIMEOUT 5
1584 
1585 static int
1586 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1587     struct mpr_command *cm)
1588 {
1589 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1590 	struct mprsas_target *targ;
1591 	int err;
1592 
1593 	targ = cm->cm_targ;
1594 	if (targ->handle == 0) {
1595 		mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1596 		    __func__, cm->cm_ccb->ccb_h.target_id);
1597 		return -1;
1598 	}
1599 
1600 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1601 	    "Aborting command %p\n", cm);
1602 
1603 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1604 	req->DevHandle = htole16(targ->handle);
1605 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1606 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1607 
1608 	/* XXX Need to handle invalid LUNs */
1609 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1610 
1611 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1612 
1613 	tm->cm_data = NULL;
1614 	tm->cm_desc.HighPriority.RequestFlags =
1615 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1616 	tm->cm_complete = mprsas_abort_complete;
1617 	tm->cm_complete_data = (void *)tm;
1618 	tm->cm_targ = cm->cm_targ;
1619 	tm->cm_lun = cm->cm_lun;
1620 
1621 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1622 	    mprsas_tm_timeout, tm);
1623 
1624 	targ->aborts++;
1625 
1626 	mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1627 	    __func__, targ->tid);
1628 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1629 
1630 	err = mpr_map_command(sc, tm);
1631 	if (err)
1632 		mpr_dprint(sc, MPR_RECOVERY,
1633 		    "error %d sending abort for cm %p SMID %u\n",
1634 		    err, cm, req->TaskMID);
1635 	return err;
1636 }
1637 
/*
 * Callout handler for a SCSI I/O command that has not completed within
 * its CCB timeout.  Marks the command timed out, queues it for recovery,
 * and starts (or joins) per-target recovery by sending an abort via a
 * newly-allocated TM command.  Runs with the softc mutex held.
 */
static void
mprsas_scsiio_timeout(void *data)
{
	struct mpr_softc *sc;
	struct mpr_command *cm;
	struct mprsas_target *targ;

	cm = (struct mpr_command *)data;
	sc = cm->cm_sc;

	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mpr_intr_locked(sc);
	if (cm->cm_state == MPR_CM_STATE_FREE) {
		/* The interrupt pass above completed it; nothing to do. */
		mprsas_log_command(cm, MPR_XINFO,
		    "SCSI command %p almost timed out\n", cm);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
		return;
	}

	targ = cm->cm_targ;
	targ->timeouts++;

	mprsas_log_command(cm, MPR_ERROR, "command timeout %d cm %p target "
	    "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
	    targ->handle);
	if (targ->encl_level_valid) {
		mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
		    targ->connector_name);
	}

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */
	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
		    "processing by tm %p\n", cm, targ->tm);
	}
	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mprsas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
		    "allocate a tm\n", cm);
	}
}
1717 
1718 /**
1719  * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1720  *			     to SCSI Unmap.
1721  * Return 0 - for success,
1722  *	  1 - to immediately return back the command with success status to CAM
1723  *	  negative value - to fallback to firmware path i.e. issue scsi unmap
1724  *			   to FW without any translation.
1725  */
1726 static int
1727 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1728     union ccb *ccb, struct mprsas_target *targ)
1729 {
1730 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1731 	struct ccb_scsiio *csio;
1732 	struct unmap_parm_list *plist;
1733 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1734 	struct nvme_command *c;
1735 	int i, res;
1736 	uint16_t ndesc, list_len, data_length;
1737 	struct mpr_prp_page *prp_page_info;
1738 	uint64_t nvme_dsm_ranges_dma_handle;
1739 
1740 	csio = &ccb->csio;
1741 #if __FreeBSD_version >= 1100103
1742 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1743 #else
1744 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1745 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1746 		    ccb->csio.cdb_io.cdb_ptr[8]);
1747 	} else {
1748 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1749 		    ccb->csio.cdb_io.cdb_bytes[8]);
1750 	}
1751 #endif
1752 	if (!list_len) {
1753 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1754 		return -EINVAL;
1755 	}
1756 
1757 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1758 	if (!plist) {
1759 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1760 		    "save UNMAP data\n");
1761 		return -ENOMEM;
1762 	}
1763 
1764 	/* Copy SCSI unmap data to a local buffer */
1765 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1766 
1767 	/* return back the unmap command to CAM with success status,
1768 	 * if number of descripts is zero.
1769 	 */
1770 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1771 	if (!ndesc) {
1772 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1773 		    "UNMAP cmd is Zero\n");
1774 		res = 1;
1775 		goto out;
1776 	}
1777 
1778 	data_length = ndesc * sizeof(struct nvme_dsm_range);
1779 	if (data_length > targ->MDTS) {
1780 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1781 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1782 		res = -EINVAL;
1783 		goto out;
1784 	}
1785 
1786 	prp_page_info = mpr_alloc_prp_page(sc);
1787 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1788 	    "UNMAP command.\n", __func__));
1789 
1790 	/*
1791 	 * Insert the allocated PRP page into the command's PRP page list. This
1792 	 * will be freed when the command is freed.
1793 	 */
1794 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1795 
1796 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1797 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1798 
1799 	bzero(nvme_dsm_ranges, data_length);
1800 
1801 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1802 	 * for each descriptors contained in SCSI UNMAP data.
1803 	 */
1804 	for (i = 0; i < ndesc; i++) {
1805 		nvme_dsm_ranges[i].length =
1806 		    htole32(be32toh(plist->desc[i].nlb));
1807 		nvme_dsm_ranges[i].starting_lba =
1808 		    htole64(be64toh(plist->desc[i].slba));
1809 		nvme_dsm_ranges[i].attributes = 0;
1810 	}
1811 
1812 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1813 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1814 	bzero(req, sizeof(*req));
1815 	req->DevHandle = htole16(targ->handle);
1816 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1817 	req->Flags = MPI26_NVME_FLAGS_WRITE;
1818 	req->ErrorResponseBaseAddress.High =
1819 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1820 	req->ErrorResponseBaseAddress.Low =
1821 	    htole32(cm->cm_sense_busaddr);
1822 	req->ErrorResponseAllocationLength =
1823 	    htole16(sizeof(struct nvme_completion));
1824 	req->EncapsulatedCommandLength =
1825 	    htole16(sizeof(struct nvme_command));
1826 	req->DataLength = htole32(data_length);
1827 
1828 	/* Build NVMe DSM command */
1829 	c = (struct nvme_command *) req->NVMe_Command;
1830 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
1831 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
1832 	c->cdw10 = htole32(ndesc - 1);
1833 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1834 
1835 	cm->cm_length = data_length;
1836 	cm->cm_data = NULL;
1837 
1838 	cm->cm_complete = mprsas_scsiio_complete;
1839 	cm->cm_complete_data = ccb;
1840 	cm->cm_targ = targ;
1841 	cm->cm_lun = csio->ccb_h.target_lun;
1842 	cm->cm_ccb = ccb;
1843 
1844 	cm->cm_desc.Default.RequestFlags =
1845 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
1846 
1847 #if __FreeBSD_version >= 1000029
1848 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1849 	    mprsas_scsiio_timeout, cm, 0);
1850 #else //__FreeBSD_version < 1000029
1851 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1852 	    mprsas_scsiio_timeout, cm);
1853 #endif //__FreeBSD_version >= 1000029
1854 
1855 	targ->issued++;
1856 	targ->outstanding++;
1857 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1858 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1859 
1860 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1861 	    __func__, cm, ccb, targ->outstanding);
1862 
1863 	mpr_build_nvme_prp(sc, cm, req,
1864 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1865 	mpr_map_command(sc, cm);
1866 
1867 out:
1868 	free(plist, M_MPR);
1869 	return 0;
1870 }
1871 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, translate the CCB into
 * an MPI2 SCSI IO request (or hand UNMAP for an NVMe drive off to
 * mprsas_build_nvme_unmap()), arm the per-command timeout, and queue the
 * command to the IOC.  Completion is delivered via mprsas_scsiio_complete().
 * Called with the softc mutex held.
 */
static void
mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	struct mprsas_lun *lun;
	struct mpr_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
	uint16_t eedp_flags;
	uint32_t mpi_control;
	int rc;

	sc = sassc->sc;
	MPR_FUNCTRACE(sc);
	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	csio = &ccb->csio;
	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SCSI_IO\n",
	     csio->ccb_h.target_id));
	targ = &sassc->targets[csio->ccb_h.target_id];
	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* A zero device handle means there is no device at this target ID. */
	if (targ->handle == 0x0) {
		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/* RAID component members do not accept direct SCSI IO. */
	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
		    "supported %u\n", __func__, csio->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}
	/*
	 * Sometimes, it is possible to get a command that is not "In
	 * Progress" and was actually aborted by the upper layer.  Check for
	 * this here and complete the command without error.
	 */
	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
		    "target %u\n", __func__, csio->ccb_h.target_id);
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
		else
			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/* Refuse new I/O once the controller is shutting down. */
	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		xpt_done(ccb);
		return;
	}

	/*
	 * If target has a reset in progress, freeze the devq and return.  The
	 * devq will be released when the TM reset is finished.
	 */
	if (targ->flags & MPRSAS_TARGET_INRESET) {
		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
		    __func__, targ->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		xpt_done(ccb);
		return;
	}

	/*
	 * No free command frames (or a diag reset is in flight): freeze the
	 * SIM queue and have CAM requeue this CCB until resources return.
	 */
	cm = mpr_alloc_command(sc);
	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
		if (cm != NULL) {
			mpr_free_command(sc, cm);
		}
		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPRSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* For NVME device's issue UNMAP command directly to NVME drives by
	 * constructing equivalent native NVMe DataSetManagement command.
	 */
#if __FreeBSD_version >= 1100103
	scsi_opcode = scsiio_cdb_ptr(csio)[0];
#else
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		scsi_opcode = csio->cdb_io.cdb_ptr[0];
	else
		scsi_opcode = csio->cdb_io.cdb_bytes[0];
#endif
	if (scsi_opcode == UNMAP &&
	    targ->is_nvme &&
	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
		if (rc == 1) { /* return command to CAM with success status */
			mpr_free_command(sc, cm);
			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
			xpt_done(ccb);
			return;
		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
			return;
		/* Any other rc: fall through and issue as a normal SCSI IO. */
	}

	/* Build the MPI2 SCSI IO request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
	req->SenseBufferLength = MPR_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = htole32(csio->dxfer_len);
	req->BidirectionalDataLength = 0;
	req->IoFlags = htole16(csio->cdb_len);
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/* 32-byte CDBs carry 4 extra 4-byte words beyond the base 16. */
	if (csio->cdb_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in the per-target Transport Layer Retries setting. */
	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
	req->Control = htole32(mpi_control);

	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mpr_free_command(sc, cm);
		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB into the request, honoring CAM_CDB_POINTER. */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else {
		KASSERT(csio->cdb_len <= IOCDBLEN,
		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
		    "is not set", csio->cdb_len));
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	}
	req->IoFlags = htole16(csio->cdb_len);

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop when no LUN matches. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = htole16(lun->eedp_block_size);
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
				eedp_flags |=
				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
			}
			req->EEDPFlags = htole16(eedp_flags);

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 (16-byte CDB) or 2. */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryReferenceTag =
				    htole32(req->
				    CDB.EEDP32.PrimaryReferenceTag);
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = htole16(eedp_flags);
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_length = csio->dxfer_len;
	if (cm->cm_length != 0) {
		/* Data buffer is described by the CCB; mapped below in
		 * mpr_map_command(). */
		cm->cm_data = ccb;
		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	} else {
		cm->cm_data = NULL;
	}
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_complete = mprsas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;
	/*
	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
	 * and set descriptor type.
	 */
	if (targ->scsi_req_desc_type ==
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		cm->cm_desc.FastPathSCSIIO.RequestFlags =
		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
		if (!sc->atomic_desc_capable) {
			cm->cm_desc.FastPathSCSIIO.DevHandle =
			    htole16(targ->handle);
		}
	} else {
		cm->cm_desc.SCSIIO.RequestFlags =
		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
		if (!sc->atomic_desc_capable)
			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	}

	/* Arm the per-command timeout before the command can complete. */
#if __FreeBSD_version >= 1000029
	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mprsas_scsiio_timeout, cm, 0);
#else //__FreeBSD_version < 1000029
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mprsas_scsiio_timeout, cm);
#endif //__FreeBSD_version >= 1000029

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mpr_map_command(sc, cm);
	return;
}
2184 
2185 static void
2186 mpr_response_code(struct mpr_softc *sc, u8 response_code)
2187 {
2188         char *desc;
2189 
2190         switch (response_code) {
2191         case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2192                 desc = "task management request completed";
2193                 break;
2194         case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2195                 desc = "invalid frame";
2196                 break;
2197         case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2198                 desc = "task management request not supported";
2199                 break;
2200         case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2201                 desc = "task management request failed";
2202                 break;
2203         case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2204                 desc = "task management request succeeded";
2205                 break;
2206         case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2207                 desc = "invalid lun";
2208                 break;
2209         case 0xA:
2210                 desc = "overlapped tag attempted";
2211                 break;
2212         case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2213                 desc = "task queued, however not sent to target";
2214                 break;
2215         default:
2216                 desc = "unknown";
2217                 break;
2218         }
2219 	mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
2220 	    desc);
2221 }
2222 
2223 /**
2224  * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
2225  */
2226 static void
2227 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2228     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2229 {
2230 	u32 response_info;
2231 	u8 *response_bytes;
2232 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2233 	    MPI2_IOCSTATUS_MASK;
2234 	u8 scsi_state = mpi_reply->SCSIState;
2235 	u8 scsi_status = mpi_reply->SCSIStatus;
2236 	char *desc_ioc_state = NULL;
2237 	char *desc_scsi_status = NULL;
2238 	char *desc_scsi_state = sc->tmp_string;
2239 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2240 
2241 	if (log_info == 0x31170000)
2242 		return;
2243 
2244 	switch (ioc_status) {
2245 	case MPI2_IOCSTATUS_SUCCESS:
2246 		desc_ioc_state = "success";
2247 		break;
2248 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2249 		desc_ioc_state = "invalid function";
2250 		break;
2251 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2252 		desc_ioc_state = "scsi recovered error";
2253 		break;
2254 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2255 		desc_ioc_state = "scsi invalid dev handle";
2256 		break;
2257 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2258 		desc_ioc_state = "scsi device not there";
2259 		break;
2260 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2261 		desc_ioc_state = "scsi data overrun";
2262 		break;
2263 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2264 		desc_ioc_state = "scsi data underrun";
2265 		break;
2266 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2267 		desc_ioc_state = "scsi io data error";
2268 		break;
2269 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2270 		desc_ioc_state = "scsi protocol error";
2271 		break;
2272 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2273 		desc_ioc_state = "scsi task terminated";
2274 		break;
2275 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2276 		desc_ioc_state = "scsi residual mismatch";
2277 		break;
2278 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2279 		desc_ioc_state = "scsi task mgmt failed";
2280 		break;
2281 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2282 		desc_ioc_state = "scsi ioc terminated";
2283 		break;
2284 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2285 		desc_ioc_state = "scsi ext terminated";
2286 		break;
2287 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2288 		desc_ioc_state = "eedp guard error";
2289 		break;
2290 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2291 		desc_ioc_state = "eedp ref tag error";
2292 		break;
2293 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2294 		desc_ioc_state = "eedp app tag error";
2295 		break;
2296 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
2297 		desc_ioc_state = "insufficient power";
2298 		break;
2299 	default:
2300 		desc_ioc_state = "unknown";
2301 		break;
2302 	}
2303 
2304 	switch (scsi_status) {
2305 	case MPI2_SCSI_STATUS_GOOD:
2306 		desc_scsi_status = "good";
2307 		break;
2308 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
2309 		desc_scsi_status = "check condition";
2310 		break;
2311 	case MPI2_SCSI_STATUS_CONDITION_MET:
2312 		desc_scsi_status = "condition met";
2313 		break;
2314 	case MPI2_SCSI_STATUS_BUSY:
2315 		desc_scsi_status = "busy";
2316 		break;
2317 	case MPI2_SCSI_STATUS_INTERMEDIATE:
2318 		desc_scsi_status = "intermediate";
2319 		break;
2320 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2321 		desc_scsi_status = "intermediate condmet";
2322 		break;
2323 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2324 		desc_scsi_status = "reservation conflict";
2325 		break;
2326 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2327 		desc_scsi_status = "command terminated";
2328 		break;
2329 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2330 		desc_scsi_status = "task set full";
2331 		break;
2332 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2333 		desc_scsi_status = "aca active";
2334 		break;
2335 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2336 		desc_scsi_status = "task aborted";
2337 		break;
2338 	default:
2339 		desc_scsi_status = "unknown";
2340 		break;
2341 	}
2342 
2343 	desc_scsi_state[0] = '\0';
2344 	if (!scsi_state)
2345 		desc_scsi_state = " ";
2346 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2347 		strcat(desc_scsi_state, "response info ");
2348 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2349 		strcat(desc_scsi_state, "state terminated ");
2350 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2351 		strcat(desc_scsi_state, "no status ");
2352 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2353 		strcat(desc_scsi_state, "autosense failed ");
2354 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2355 		strcat(desc_scsi_state, "autosense valid ");
2356 
2357 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2358 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2359 	if (targ->encl_level_valid) {
2360 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2361 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2362 		    targ->connector_name);
2363 	}
2364 	/* We can add more detail about underflow data here
2365 	 * TO-DO
2366 	 * */
2367 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2368 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2369 	    desc_scsi_state, scsi_state);
2370 
2371 	if (sc->mpr_debug & MPR_XINFO &&
2372 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2373 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2374 		scsi_sense_print(csio);
2375 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2376 	}
2377 
2378 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2379 		response_info = le32toh(mpi_reply->ResponseInfo);
2380 		response_bytes = (u8 *)&response_info;
2381 		mpr_response_code(sc,response_bytes[0]);
2382 	}
2383 }
2384 
2385 /** mprsas_nvme_trans_status_code
2386  *
2387  * Convert Native NVMe command error status to
2388  * equivalent SCSI error status.
2389  *
2390  * Returns appropriate scsi_status
2391  */
2392 static u8
2393 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2394     struct mpr_command *cm)
2395 {
2396 	u8 status = MPI2_SCSI_STATUS_GOOD;
2397 	int skey, asc, ascq;
2398 	union ccb *ccb = cm->cm_complete_data;
2399 	int returned_sense_len;
2400 
2401 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2402 	skey = SSD_KEY_ILLEGAL_REQUEST;
2403 	asc = SCSI_ASC_NO_SENSE;
2404 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2405 
2406 	switch (nvme_status.sct) {
2407 	case NVME_SCT_GENERIC:
2408 		switch (nvme_status.sc) {
2409 		case NVME_SC_SUCCESS:
2410 			status = MPI2_SCSI_STATUS_GOOD;
2411 			skey = SSD_KEY_NO_SENSE;
2412 			asc = SCSI_ASC_NO_SENSE;
2413 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2414 			break;
2415 		case NVME_SC_INVALID_OPCODE:
2416 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2417 			skey = SSD_KEY_ILLEGAL_REQUEST;
2418 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2419 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2420 			break;
2421 		case NVME_SC_INVALID_FIELD:
2422 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2423 			skey = SSD_KEY_ILLEGAL_REQUEST;
2424 			asc = SCSI_ASC_INVALID_CDB;
2425 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2426 			break;
2427 		case NVME_SC_DATA_TRANSFER_ERROR:
2428 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2429 			skey = SSD_KEY_MEDIUM_ERROR;
2430 			asc = SCSI_ASC_NO_SENSE;
2431 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2432 			break;
2433 		case NVME_SC_ABORTED_POWER_LOSS:
2434 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2435 			skey = SSD_KEY_ABORTED_COMMAND;
2436 			asc = SCSI_ASC_WARNING;
2437 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2438 			break;
2439 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2440 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2441 			skey = SSD_KEY_HARDWARE_ERROR;
2442 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2443 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2444 			break;
2445 		case NVME_SC_ABORTED_BY_REQUEST:
2446 		case NVME_SC_ABORTED_SQ_DELETION:
2447 		case NVME_SC_ABORTED_FAILED_FUSED:
2448 		case NVME_SC_ABORTED_MISSING_FUSED:
2449 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2450 			skey = SSD_KEY_ABORTED_COMMAND;
2451 			asc = SCSI_ASC_NO_SENSE;
2452 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2453 			break;
2454 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2455 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2456 			skey = SSD_KEY_ILLEGAL_REQUEST;
2457 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2458 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2459 			break;
2460 		case NVME_SC_LBA_OUT_OF_RANGE:
2461 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2462 			skey = SSD_KEY_ILLEGAL_REQUEST;
2463 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2464 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2465 			break;
2466 		case NVME_SC_CAPACITY_EXCEEDED:
2467 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2468 			skey = SSD_KEY_MEDIUM_ERROR;
2469 			asc = SCSI_ASC_NO_SENSE;
2470 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2471 			break;
2472 		case NVME_SC_NAMESPACE_NOT_READY:
2473 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2474 			skey = SSD_KEY_NOT_READY;
2475 			asc = SCSI_ASC_LUN_NOT_READY;
2476 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2477 			break;
2478 		}
2479 		break;
2480 	case NVME_SCT_COMMAND_SPECIFIC:
2481 		switch (nvme_status.sc) {
2482 		case NVME_SC_INVALID_FORMAT:
2483 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2484 			skey = SSD_KEY_ILLEGAL_REQUEST;
2485 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2486 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2487 			break;
2488 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2489 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2490 			skey = SSD_KEY_ILLEGAL_REQUEST;
2491 			asc = SCSI_ASC_INVALID_CDB;
2492 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2493 			break;
2494 		}
2495 		break;
2496 	case NVME_SCT_MEDIA_ERROR:
2497 		switch (nvme_status.sc) {
2498 		case NVME_SC_WRITE_FAULTS:
2499 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2500 			skey = SSD_KEY_MEDIUM_ERROR;
2501 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2502 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2503 			break;
2504 		case NVME_SC_UNRECOVERED_READ_ERROR:
2505 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2506 			skey = SSD_KEY_MEDIUM_ERROR;
2507 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2508 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2509 			break;
2510 		case NVME_SC_GUARD_CHECK_ERROR:
2511 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2512 			skey = SSD_KEY_MEDIUM_ERROR;
2513 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2514 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2515 			break;
2516 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2517 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2518 			skey = SSD_KEY_MEDIUM_ERROR;
2519 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2520 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2521 			break;
2522 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2523 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2524 			skey = SSD_KEY_MEDIUM_ERROR;
2525 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2526 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2527 			break;
2528 		case NVME_SC_COMPARE_FAILURE:
2529 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2530 			skey = SSD_KEY_MISCOMPARE;
2531 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2532 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2533 			break;
2534 		case NVME_SC_ACCESS_DENIED:
2535 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2536 			skey = SSD_KEY_ILLEGAL_REQUEST;
2537 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2538 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2539 			break;
2540 		}
2541 		break;
2542 	}
2543 
2544 	returned_sense_len = sizeof(struct scsi_sense_data);
2545 	if (returned_sense_len < ccb->csio.sense_len)
2546 		ccb->csio.sense_resid = ccb->csio.sense_len -
2547 		    returned_sense_len;
2548 	else
2549 		ccb->csio.sense_resid = 0;
2550 
2551 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2552 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2553 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2554 
2555 	return status;
2556 }
2557 
2558 /** mprsas_complete_nvme_unmap
2559  *
2560  * Complete native NVMe command issued using NVMe Encapsulated
2561  * Request Message.
2562  */
2563 static u8
2564 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2565 {
2566 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2567 	struct nvme_completion *nvme_completion = NULL;
2568 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2569 
2570 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2571 	if (le16toh(mpi_reply->ErrorResponseCount)){
2572 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2573 		scsi_status = mprsas_nvme_trans_status_code(
2574 		    nvme_completion->status, cm);
2575 	}
2576 	return scsi_status;
2577 }
2578 
2579 static void
2580 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2581 {
2582 	MPI2_SCSI_IO_REPLY *rep;
2583 	union ccb *ccb;
2584 	struct ccb_scsiio *csio;
2585 	struct mprsas_softc *sassc;
2586 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2587 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2588 	int dir = 0, i;
2589 	u16 alloc_len;
2590 	struct mprsas_target *target;
2591 	target_id_t target_id;
2592 
2593 	MPR_FUNCTRACE(sc);
2594 	mpr_dprint(sc, MPR_TRACE,
2595 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2596 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2597 	    cm->cm_targ->outstanding);
2598 
2599 	callout_stop(&cm->cm_callout);
2600 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2601 
2602 	sassc = sc->sassc;
2603 	ccb = cm->cm_complete_data;
2604 	csio = &ccb->csio;
2605 	target_id = csio->ccb_h.target_id;
2606 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2607 	/*
2608 	 * XXX KDM if the chain allocation fails, does it matter if we do
2609 	 * the sync and unload here?  It is simpler to do it in every case,
2610 	 * assuming it doesn't cause problems.
2611 	 */
2612 	if (cm->cm_data != NULL) {
2613 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2614 			dir = BUS_DMASYNC_POSTREAD;
2615 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2616 			dir = BUS_DMASYNC_POSTWRITE;
2617 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2618 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2619 	}
2620 
2621 	cm->cm_targ->completed++;
2622 	cm->cm_targ->outstanding--;
2623 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2624 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2625 
2626 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2627 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2628 		if (cm->cm_reply != NULL)
2629 			mprsas_log_command(cm, MPR_RECOVERY,
2630 			    "completed timedout cm %p ccb %p during recovery "
2631 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2632 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2633 			    rep->SCSIState, le32toh(rep->TransferCount));
2634 		else
2635 			mprsas_log_command(cm, MPR_RECOVERY,
2636 			    "completed timedout cm %p ccb %p during recovery\n",
2637 			    cm, cm->cm_ccb);
2638 	} else if (cm->cm_targ->tm != NULL) {
2639 		if (cm->cm_reply != NULL)
2640 			mprsas_log_command(cm, MPR_RECOVERY,
2641 			    "completed cm %p ccb %p during recovery "
2642 			    "ioc %x scsi %x state %x xfer %u\n",
2643 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2644 			    rep->SCSIStatus, rep->SCSIState,
2645 			    le32toh(rep->TransferCount));
2646 		else
2647 			mprsas_log_command(cm, MPR_RECOVERY,
2648 			    "completed cm %p ccb %p during recovery\n",
2649 			    cm, cm->cm_ccb);
2650 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2651 		mprsas_log_command(cm, MPR_RECOVERY,
2652 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2653 	}
2654 
2655 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2656 		/*
2657 		 * We ran into an error after we tried to map the command,
2658 		 * so we're getting a callback without queueing the command
2659 		 * to the hardware.  So we set the status here, and it will
2660 		 * be retained below.  We'll go through the "fast path",
2661 		 * because there can be no reply when we haven't actually
2662 		 * gone out to the hardware.
2663 		 */
2664 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2665 
2666 		/*
2667 		 * Currently the only error included in the mask is
2668 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2669 		 * chain frames.  We need to freeze the queue until we get
2670 		 * a command that completed without this error, which will
2671 		 * hopefully have some chain frames attached that we can
2672 		 * use.  If we wanted to get smarter about it, we would
2673 		 * only unfreeze the queue in this condition when we're
2674 		 * sure that we're getting some chain frames back.  That's
2675 		 * probably unnecessary.
2676 		 */
2677 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2678 			xpt_freeze_simq(sassc->sim, 1);
2679 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2680 			mpr_dprint(sc, MPR_INFO, "Error sending command, "
2681 			    "freezing SIM queue\n");
2682 		}
2683 	}
2684 
2685 	/*
2686 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2687 	 * flag, and use it in a few places in the rest of this function for
2688 	 * convenience. Use the macro if available.
2689 	 */
2690 #if __FreeBSD_version >= 1100103
2691 	scsi_cdb = scsiio_cdb_ptr(csio);
2692 #else
2693 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2694 		scsi_cdb = csio->cdb_io.cdb_ptr;
2695 	else
2696 		scsi_cdb = csio->cdb_io.cdb_bytes;
2697 #endif
2698 
2699 	/*
2700 	 * If this is a Start Stop Unit command and it was issued by the driver
2701 	 * during shutdown, decrement the refcount to account for all of the
2702 	 * commands that were sent.  All SSU commands should be completed before
2703 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2704 	 * is TRUE.
2705 	 */
2706 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2707 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2708 		sc->SSU_refcount--;
2709 	}
2710 
2711 	/* Take the fast path to completion */
2712 	if (cm->cm_reply == NULL) {
2713 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2714 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2715 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2716 			else {
2717 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2718 				csio->scsi_status = SCSI_STATUS_OK;
2719 			}
2720 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2721 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2722 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2723 				mpr_dprint(sc, MPR_XINFO,
2724 				    "Unfreezing SIM queue\n");
2725 			}
2726 		}
2727 
2728 		/*
2729 		 * There are two scenarios where the status won't be
2730 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2731 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2732 		 */
2733 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2734 			/*
2735 			 * Freeze the dev queue so that commands are
2736 			 * executed in the correct order after error
2737 			 * recovery.
2738 			 */
2739 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2740 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2741 		}
2742 		mpr_free_command(sc, cm);
2743 		xpt_done(ccb);
2744 		return;
2745 	}
2746 
2747 	target = &sassc->targets[target_id];
2748 	if (scsi_cdb[0] == UNMAP &&
2749 	    target->is_nvme &&
2750 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2751 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2752 		csio->scsi_status = rep->SCSIStatus;
2753 	}
2754 
2755 	mprsas_log_command(cm, MPR_XINFO,
2756 	    "ioc %x scsi %x state %x xfer %u\n",
2757 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2758 	    le32toh(rep->TransferCount));
2759 
2760 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2761 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2762 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2763 		/* FALLTHROUGH */
2764 	case MPI2_IOCSTATUS_SUCCESS:
2765 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2766 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2767 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2768 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2769 
2770 		/* Completion failed at the transport level. */
2771 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2772 		    MPI2_SCSI_STATE_TERMINATED)) {
2773 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2774 			break;
2775 		}
2776 
2777 		/* In a modern packetized environment, an autosense failure
2778 		 * implies that there's not much else that can be done to
2779 		 * recover the command.
2780 		 */
2781 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2782 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2783 			break;
2784 		}
2785 
2786 		/*
2787 		 * CAM doesn't care about SAS Response Info data, but if this is
2788 		 * the state check if TLR should be done.  If not, clear the
2789 		 * TLR_bits for the target.
2790 		 */
2791 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2792 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2793 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2794 			sc->mapping_table[target_id].TLR_bits =
2795 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2796 		}
2797 
2798 		/*
2799 		 * Intentionally override the normal SCSI status reporting
2800 		 * for these two cases.  These are likely to happen in a
2801 		 * multi-initiator environment, and we want to make sure that
2802 		 * CAM retries these commands rather than fail them.
2803 		 */
2804 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2805 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2806 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2807 			break;
2808 		}
2809 
2810 		/* Handle normal status and sense */
2811 		csio->scsi_status = rep->SCSIStatus;
2812 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2813 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2814 		else
2815 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2816 
2817 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2818 			int sense_len, returned_sense_len;
2819 
2820 			returned_sense_len = min(le32toh(rep->SenseCount),
2821 			    sizeof(struct scsi_sense_data));
2822 			if (returned_sense_len < csio->sense_len)
2823 				csio->sense_resid = csio->sense_len -
2824 				    returned_sense_len;
2825 			else
2826 				csio->sense_resid = 0;
2827 
2828 			sense_len = min(returned_sense_len,
2829 			    csio->sense_len - csio->sense_resid);
2830 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2831 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2832 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2833 		}
2834 
2835 		/*
2836 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2837 		 * and it's page code 0 (Supported Page List), and there is
2838 		 * inquiry data, and this is for a sequential access device, and
2839 		 * the device is an SSP target, and TLR is supported by the
2840 		 * controller, turn the TLR_bits value ON if page 0x90 is
2841 		 * supported.
2842 		 */
2843 		if ((scsi_cdb[0] == INQUIRY) &&
2844 		    (scsi_cdb[1] & SI_EVPD) &&
2845 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2846 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2847 		    (csio->data_ptr != NULL) &&
2848 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2849 		    (sc->control_TLR) &&
2850 		    (sc->mapping_table[target_id].device_info &
2851 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2852 			vpd_list = (struct scsi_vpd_supported_page_list *)
2853 			    csio->data_ptr;
2854 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2855 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2856 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2857 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2858 			alloc_len -= csio->resid;
2859 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2860 				if (vpd_list->list[i] == 0x90) {
2861 					*TLR_bits = TLR_on;
2862 					break;
2863 				}
2864 			}
2865 		}
2866 
2867 		/*
2868 		 * If this is a SATA direct-access end device, mark it so that
2869 		 * a SCSI StartStopUnit command will be sent to it when the
2870 		 * driver is being shutdown.
2871 		 */
2872 		if ((scsi_cdb[0] == INQUIRY) &&
2873 		    (csio->data_ptr != NULL) &&
2874 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2875 		    (sc->mapping_table[target_id].device_info &
2876 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2877 		    ((sc->mapping_table[target_id].device_info &
2878 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2879 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2880 			target = &sassc->targets[target_id];
2881 			target->supports_SSU = TRUE;
2882 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2883 			    target_id);
2884 		}
2885 		break;
2886 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2887 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2888 		/*
2889 		 * If devinfo is 0 this will be a volume.  In that case don't
2890 		 * tell CAM that the volume is not there.  We want volumes to
2891 		 * be enumerated until they are deleted/removed, not just
2892 		 * failed.
2893 		 */
2894 		if (cm->cm_targ->devinfo == 0)
2895 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2896 		else
2897 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2898 		break;
2899 	case MPI2_IOCSTATUS_INVALID_SGL:
2900 		mpr_print_scsiio_cmd(sc, cm);
2901 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2902 		break;
2903 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2904 		/*
2905 		 * This is one of the responses that comes back when an I/O
2906 		 * has been aborted.  If it is because of a timeout that we
2907 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2908 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2909 		 * command is the same (it gets retried, subject to the
2910 		 * retry counter), the only difference is what gets printed
2911 		 * on the console.
2912 		 */
2913 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2914 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2915 		else
2916 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2917 		break;
2918 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2919 		/* resid is ignored for this condition */
2920 		csio->resid = 0;
2921 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2922 		break;
2923 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2924 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2925 		/*
2926 		 * These can sometimes be transient transport-related
2927 		 * errors, and sometimes persistent drive-related errors.
2928 		 * We used to retry these without decrementing the retry
2929 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2930 		 * we hit a persistent drive problem that returns one of
2931 		 * these error codes, we would retry indefinitely.  So,
2932 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2933 		 * count and avoid infinite retries.  We're taking the
2934 		 * potential risk of flagging false failures in the event
2935 		 * of a topology-related error (e.g. a SAS expander problem
2936 		 * causes a command addressed to a drive to fail), but
2937 		 * avoiding getting into an infinite retry loop.
2938 		 */
2939 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2940 		mprsas_log_command(cm, MPR_INFO,
2941 		    "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2942 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2943 		    rep->SCSIStatus, rep->SCSIState,
2944 		    le32toh(rep->TransferCount));
2945 		break;
2946 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2947 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2948 	case MPI2_IOCSTATUS_INVALID_VPID:
2949 	case MPI2_IOCSTATUS_INVALID_FIELD:
2950 	case MPI2_IOCSTATUS_INVALID_STATE:
2951 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2952 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2953 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2954 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2955 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2956 	default:
2957 		mprsas_log_command(cm, MPR_XINFO,
2958 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2959 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2960 		    rep->SCSIStatus, rep->SCSIState,
2961 		    le32toh(rep->TransferCount));
2962 		csio->resid = cm->cm_length;
2963 
2964 		if (scsi_cdb[0] == UNMAP &&
2965 		    target->is_nvme &&
2966 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2967 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2968 		else
2969 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2970 
2971 		break;
2972 	}
2973 
2974 	mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2975 
2976 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2977 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2978 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2979 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2980 		    "queue\n");
2981 	}
2982 
2983 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2984 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2985 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2986 	}
2987 
2988 	mpr_free_command(sc, cm);
2989 	xpt_done(ccb);
2990 }
2991 
2992 #if __FreeBSD_version >= 900026
2993 static void
2994 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2995 {
2996 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2997 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2998 	uint64_t sasaddr;
2999 	union ccb *ccb;
3000 
3001 	ccb = cm->cm_complete_data;
3002 
3003 	/*
3004 	 * Currently there should be no way we can hit this case.  It only
3005 	 * happens when we have a failure to allocate chain frames, and SMP
3006 	 * commands require two S/G elements only.  That should be handled
3007 	 * in the standard request size.
3008 	 */
3009 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3010 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
3011 		    "request!\n", __func__, cm->cm_flags);
3012 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3013 		goto bailout;
3014         }
3015 
3016 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
3017 	if (rpl == NULL) {
3018 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
3019 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3020 		goto bailout;
3021 	}
3022 
3023 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3024 	sasaddr = le32toh(req->SASAddress.Low);
3025 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
3026 
3027 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3028 	    MPI2_IOCSTATUS_SUCCESS ||
3029 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
3030 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
3031 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
3032 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3033 		goto bailout;
3034 	}
3035 
3036 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
3037 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
3038 
3039 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
3040 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3041 	else
3042 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
3043 
3044 bailout:
3045 	/*
3046 	 * We sync in both directions because we had DMAs in the S/G list
3047 	 * in both directions.
3048 	 */
3049 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3050 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3051 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3052 	mpr_free_command(sc, cm);
3053 	xpt_done(ccb);
3054 }
3055 
3056 static void
3057 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
3058 {
3059 	struct mpr_command *cm;
3060 	uint8_t *request, *response;
3061 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
3062 	struct mpr_softc *sc;
3063 	struct sglist *sg;
3064 	int error;
3065 
3066 	sc = sassc->sc;
3067 	sg = NULL;
3068 	error = 0;
3069 
3070 #if (__FreeBSD_version >= 1000028) || \
3071     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
3072 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
3073 	case CAM_DATA_PADDR:
3074 	case CAM_DATA_SG_PADDR:
3075 		/*
3076 		 * XXX We don't yet support physical addresses here.
3077 		 */
3078 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3079 		    "supported\n", __func__);
3080 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3081 		xpt_done(ccb);
3082 		return;
3083 	case CAM_DATA_SG:
3084 		/*
3085 		 * The chip does not support more than one buffer for the
3086 		 * request or response.
3087 		 */
3088 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
3089 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3090 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3091 			    "response buffer segments not supported for SMP\n",
3092 			    __func__);
3093 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3094 			xpt_done(ccb);
3095 			return;
3096 		}
3097 
3098 		/*
3099 		 * The CAM_SCATTER_VALID flag was originally implemented
3100 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3101 		 * We have two.  So, just take that flag to mean that we
3102 		 * might have S/G lists, and look at the S/G segment count
3103 		 * to figure out whether that is the case for each individual
3104 		 * buffer.
3105 		 */
3106 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3107 			bus_dma_segment_t *req_sg;
3108 
3109 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3110 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3111 		} else
3112 			request = ccb->smpio.smp_request;
3113 
3114 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3115 			bus_dma_segment_t *rsp_sg;
3116 
3117 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3118 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3119 		} else
3120 			response = ccb->smpio.smp_response;
3121 		break;
3122 	case CAM_DATA_VADDR:
3123 		request = ccb->smpio.smp_request;
3124 		response = ccb->smpio.smp_response;
3125 		break;
3126 	default:
3127 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3128 		xpt_done(ccb);
3129 		return;
3130 	}
3131 #else /* __FreeBSD_version < 1000028 */
3132 	/*
3133 	 * XXX We don't yet support physical addresses here.
3134 	 */
3135 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3136 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3137 		    "supported\n", __func__);
3138 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3139 		xpt_done(ccb);
3140 		return;
3141 	}
3142 
3143 	/*
3144 	 * If the user wants to send an S/G list, check to make sure they
3145 	 * have single buffers.
3146 	 */
3147 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3148 		/*
3149 		 * The chip does not support more than one buffer for the
3150 		 * request or response.
3151 		 */
3152 	 	if ((ccb->smpio.smp_request_sglist_cnt > 1)
3153 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3154 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3155 			    "response buffer segments not supported for SMP\n",
3156 			    __func__);
3157 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3158 			xpt_done(ccb);
3159 			return;
3160 		}
3161 
3162 		/*
3163 		 * The CAM_SCATTER_VALID flag was originally implemented
3164 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3165 		 * We have two.  So, just take that flag to mean that we
3166 		 * might have S/G lists, and look at the S/G segment count
3167 		 * to figure out whether that is the case for each individual
3168 		 * buffer.
3169 		 */
3170 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3171 			bus_dma_segment_t *req_sg;
3172 
3173 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3174 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3175 		} else
3176 			request = ccb->smpio.smp_request;
3177 
3178 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3179 			bus_dma_segment_t *rsp_sg;
3180 
3181 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3182 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3183 		} else
3184 			response = ccb->smpio.smp_response;
3185 	} else {
3186 		request = ccb->smpio.smp_request;
3187 		response = ccb->smpio.smp_response;
3188 	}
3189 #endif /* __FreeBSD_version < 1000028 */
3190 
3191 	cm = mpr_alloc_command(sc);
3192 	if (cm == NULL) {
3193 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3194 		    __func__);
3195 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3196 		xpt_done(ccb);
3197 		return;
3198 	}
3199 
3200 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3201 	bzero(req, sizeof(*req));
3202 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3203 
3204 	/* Allow the chip to use any route to this SAS address. */
3205 	req->PhysicalPort = 0xff;
3206 
3207 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3208 	req->SGLFlags =
3209 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3210 
3211 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3212 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
3213 
3214 	mpr_init_sge(cm, req, &req->SGL);
3215 
3216 	/*
3217 	 * Set up a uio to pass into mpr_map_command().  This allows us to
3218 	 * do one map command, and one busdma call in there.
3219 	 */
3220 	cm->cm_uio.uio_iov = cm->cm_iovec;
3221 	cm->cm_uio.uio_iovcnt = 2;
3222 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3223 
3224 	/*
3225 	 * The read/write flag isn't used by busdma, but set it just in
3226 	 * case.  This isn't exactly accurate, either, since we're going in
3227 	 * both directions.
3228 	 */
3229 	cm->cm_uio.uio_rw = UIO_WRITE;
3230 
3231 	cm->cm_iovec[0].iov_base = request;
3232 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3233 	cm->cm_iovec[1].iov_base = response;
3234 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3235 
3236 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3237 			       cm->cm_iovec[1].iov_len;
3238 
3239 	/*
3240 	 * Trigger a warning message in mpr_data_cb() for the user if we
3241 	 * wind up exceeding two S/G segments.  The chip expects one
3242 	 * segment for the request and another for the response.
3243 	 */
3244 	cm->cm_max_segs = 2;
3245 
3246 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3247 	cm->cm_complete = mprsas_smpio_complete;
3248 	cm->cm_complete_data = ccb;
3249 
3250 	/*
3251 	 * Tell the mapping code that we're using a uio, and that this is
3252 	 * an SMP passthrough request.  There is a little special-case
3253 	 * logic there (in mpr_data_cb()) to handle the bidirectional
3254 	 * transfer.
3255 	 */
3256 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3257 			MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3258 
3259 	/* The chip data format is little endian. */
3260 	req->SASAddress.High = htole32(sasaddr >> 32);
3261 	req->SASAddress.Low = htole32(sasaddr);
3262 
3263 	/*
3264 	 * XXX Note that we don't have a timeout/abort mechanism here.
3265 	 * From the manual, it looks like task management requests only
3266 	 * work for SCSI IO and SATA passthrough requests.  We may need to
3267 	 * have a mechanism to retry requests in the event of a chip reset
3268 	 * at least.  Hopefully the chip will insure that any errors short
3269 	 * of that are relayed back to the driver.
3270 	 */
3271 	error = mpr_map_command(sc, cm);
3272 	if ((error != 0) && (error != EINPROGRESS)) {
3273 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3274 		    "mpr_map_command()\n", __func__, error);
3275 		goto bailout_error;
3276 	}
3277 
3278 	return;
3279 
3280 bailout_error:
3281 	mpr_free_command(sc, cm);
3282 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3283 	xpt_done(ccb);
3284 	return;
3285 }
3286 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to, then hand the CCB to mprsas_send_smpcmd().  If the
 * target device itself contains an SMP target, its own address is used;
 * otherwise the parent device's address (normally the attached expander)
 * is used.  Any resolution failure completes the CCB with an error status.
 */
static void
mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
{
	struct mpr_softc *sc;
	struct mprsas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* handle 0 means no firmware device handle: not present. */
		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
		    __func__, ccb->ccb_h.target_id);
		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we need to figure out what the expander's
	 * address is below.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPR_PROBE
		struct mprsas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent handle!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}
#ifdef OLD_MPR_PROBE
		/* Old probe code: look the parent up by device handle. */
		parent_target = mprsas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
			    "a valid parent target!\n", __func__, targ->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPR_PROBE */
		/* Current probe code caches the parent's info on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
			    "does not have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
			    "%d does not have a valid SAS address!\n", __func__,
			    targ->handle, targ->parent_handle);
			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPR_PROBE */

	}

	if (sasaddr == 0) {
		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
		    "handle %d\n", __func__, targ->handle);
		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		goto bailout;
	}
	mprsas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
3414 #endif //__FreeBSD_version >= 900026
3415 
3416 static void
3417 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3418 {
3419 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3420 	struct mpr_softc *sc;
3421 	struct mpr_command *tm;
3422 	struct mprsas_target *targ;
3423 
3424 	MPR_FUNCTRACE(sassc->sc);
3425 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3426 
3427 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3428 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3429 	sc = sassc->sc;
3430 	tm = mpr_alloc_command(sc);
3431 	if (tm == NULL) {
3432 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3433 		    "mprsas_action_resetdev\n");
3434 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3435 		xpt_done(ccb);
3436 		return;
3437 	}
3438 
3439 	targ = &sassc->targets[ccb->ccb_h.target_id];
3440 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3441 	req->DevHandle = htole16(targ->handle);
3442 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3443 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3444 
3445 	/* SAS Hard Link Reset / SATA Link Reset */
3446 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3447 
3448 	tm->cm_data = NULL;
3449 	tm->cm_desc.HighPriority.RequestFlags =
3450 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3451 	tm->cm_complete = mprsas_resetdev_complete;
3452 	tm->cm_complete_data = ccb;
3453 
3454 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3455 	    __func__, targ->tid);
3456 	tm->cm_targ = targ;
3457 	targ->flags |= MPRSAS_TARGET_INRESET;
3458 
3459 	mpr_map_command(sc, tm);
3460 }
3461 
3462 static void
3463 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3464 {
3465 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3466 	union ccb *ccb;
3467 
3468 	MPR_FUNCTRACE(sc);
3469 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
3470 
3471 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3472 	ccb = tm->cm_complete_data;
3473 
3474 	/*
3475 	 * Currently there should be no way we can hit this case.  It only
3476 	 * happens when we have a failure to allocate chain frames, and
3477 	 * task management commands don't have S/G lists.
3478 	 */
3479 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3480 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3481 
3482 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3483 
3484 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3485 		    "handle %#04x! This should not happen!\n", __func__,
3486 		    tm->cm_flags, req->DevHandle);
3487 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3488 		goto bailout;
3489 	}
3490 
3491 	mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3492 	    __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3493 
3494 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3495 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3496 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3497 		    CAM_LUN_WILDCARD);
3498 	}
3499 	else
3500 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3501 
3502 bailout:
3503 
3504 	mprsas_free_tm(sc, tm);
3505 	xpt_done(ccb);
3506 }
3507 
3508 static void
3509 mprsas_poll(struct cam_sim *sim)
3510 {
3511 	struct mprsas_softc *sassc;
3512 
3513 	sassc = cam_sim_softc(sim);
3514 
3515 	if (sassc->sc->mpr_debug & MPR_TRACE) {
3516 		/* frequent debug messages during a panic just slow
3517 		 * everything down too much.
3518 		 */
3519 		mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3520 		    __func__);
3521 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3522 	}
3523 
3524 	mpr_intr_locked(sassc->sc);
3525 }
3526 
/*
 * CAM asynchronous event callback, registered in mpr_attach_sas().  On
 * some FreeBSD versions the registration covers all paths, so the
 * path_id checks below filter out events belonging to other SIMs.
 *
 * AC_ADVINFO_CHANGED: refresh the cached READ CAPACITY(16) data for the
 * LUN and record whether it is formatted with protection information
 * (EEDP) and its block size.
 * AC_FOUND_DEVICE: on older versions, probe the new device for EEDP
 * support via mprsas_check_eedp().
 */
static void
mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mprsas_target *target;
		struct mprsas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mprsas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;
#endif

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mprsas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Find the cached per-LUN record, creating one if missing. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
			    M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the transport layer's cached READ CAPACITY(16)
		 * data with an on-stack XPT_DEV_ADVINFO CCB; xpt_action()
		 * completes it immediately.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
#if (__FreeBSD_version >= 1100061) || \
    ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
		cdai.flags = CDAI_FLAG_NONE;
#else
		cdai.flags = 0;
#endif
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);

		/*
		 * PROT_EN set in the read capacity data means the LUN is
		 * formatted with protection information.
		 */
		if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		    && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
#endif
	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		/*
		 * See the comment in mpr_attach_sas() for a detailed
		 * explanation.  In these versions of FreeBSD we register
		 * for all events and filter out the events that don't
		 * apply to us.
		 */
#if (__FreeBSD_version < 1000703) || \
    ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
			break;
#endif

		cgd = arg;
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
		mprsas_check_eedp(sc, path, cgd);
#endif
		break;
	}
	default:
		break;
	}
}
3656 
3657 #if (__FreeBSD_version < 901503) || \
3658     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3659 static void
3660 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3661     struct ccb_getdev *cgd)
3662 {
3663 	struct mprsas_softc *sassc = sc->sassc;
3664 	struct ccb_scsiio *csio;
3665 	struct scsi_read_capacity_16 *scsi_cmd;
3666 	struct scsi_read_capacity_eedp *rcap_buf;
3667 	path_id_t pathid;
3668 	target_id_t targetid;
3669 	lun_id_t lunid;
3670 	union ccb *ccb;
3671 	struct cam_path *local_path;
3672 	struct mprsas_target *target;
3673 	struct mprsas_lun *lun;
3674 	uint8_t	found_lun;
3675 	char path_str[64];
3676 
3677 	pathid = cam_sim_path(sassc->sim);
3678 	targetid = xpt_path_target_id(path);
3679 	lunid = xpt_path_lun_id(path);
3680 
3681 	KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3682 	    "mprsas_check_eedp\n", targetid));
3683 	target = &sassc->targets[targetid];
3684 	if (target->handle == 0x0)
3685 		return;
3686 
3687 	/*
3688 	 * Determine if the device is EEDP capable.
3689 	 *
3690 	 * If this flag is set in the inquiry data, the device supports
3691 	 * protection information, and must support the 16 byte read capacity
3692 	 * command, otherwise continue without sending read cap 16.
3693 	 */
3694 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3695 		return;
3696 
3697 	/*
3698 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3699 	 * the LUN is formatted for EEDP support.
3700 	 */
3701 	ccb = xpt_alloc_ccb_nowait();
3702 	if (ccb == NULL) {
3703 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3704 		    "support.\n");
3705 		return;
3706 	}
3707 
3708 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3709 	    CAM_REQ_CMP) {
3710 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3711 		    "support.\n");
3712 		xpt_free_ccb(ccb);
3713 		return;
3714 	}
3715 
3716 	/*
3717 	 * If LUN is already in list, don't create a new one.
3718 	 */
3719 	found_lun = FALSE;
3720 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3721 		if (lun->lun_id == lunid) {
3722 			found_lun = TRUE;
3723 			break;
3724 		}
3725 	}
3726 	if (!found_lun) {
3727 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3728 		    M_NOWAIT | M_ZERO);
3729 		if (lun == NULL) {
3730 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3731 			    "EEDP support.\n");
3732 			xpt_free_path(local_path);
3733 			xpt_free_ccb(ccb);
3734 			return;
3735 		}
3736 		lun->lun_id = lunid;
3737 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3738 	}
3739 
3740 	xpt_path_string(local_path, path_str, sizeof(path_str));
3741 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3742 	    path_str, target->handle);
3743 
3744 	/*
3745 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3746 	 * mprsas_read_cap_done function will load the read cap info into the
3747 	 * LUN struct.
3748 	 */
3749 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3750 	    M_NOWAIT | M_ZERO);
3751 	if (rcap_buf == NULL) {
3752 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3753 		    "buffer for EEDP support.\n");
3754 		xpt_free_path(ccb->ccb_h.path);
3755 		xpt_free_ccb(ccb);
3756 		return;
3757 	}
3758 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3759 	csio = &ccb->csio;
3760 	csio->ccb_h.func_code = XPT_SCSI_IO;
3761 	csio->ccb_h.flags = CAM_DIR_IN;
3762 	csio->ccb_h.retry_count = 4;
3763 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3764 	csio->ccb_h.timeout = 60000;
3765 	csio->data_ptr = (uint8_t *)rcap_buf;
3766 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3767 	csio->sense_len = MPR_SENSE_LEN;
3768 	csio->cdb_len = sizeof(*scsi_cmd);
3769 	csio->tag_action = MSG_SIMPLE_Q_TAG;
3770 
3771 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3772 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3773 	scsi_cmd->opcode = 0x9E;
3774 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3775 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3776 
3777 	ccb->ccb_h.ppriv_ptr1 = sassc;
3778 	xpt_action(ccb);
3779 }
3780 
/*
 * Completion handler for the READ CAPACITY 16 issued by mprsas_check_eedp().
 * Records whether the LUN is formatted with protection information (EEDP)
 * and, if so, its logical block size, in the matching entry of the target's
 * LUN list.  Frees the data buffer, the path, and the CCB that the submitter
 * allocated.
 */
static void
mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mprsas_softc *sassc;
	struct mprsas_target *target;
	struct mprsas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)
		return;

	/*
	 * The driver must release the devq itself when a SCSI command is
	 * generated internally, because internally-issued commands never pass
	 * back through cam_periph.  This is currently the only place where
	 * the driver issues a SCSI command internally; any future internal
	 * commands will need to release the devq the same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
        	done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
			       	/*count*/ 1, /*run_queue*/TRUE);
	}

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.  sassc was stashed in ppriv_ptr1 by mprsas_check_eedp().
	 */
	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
	    "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)
			continue;

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
		 * info. If the READ CAP 16 command had some SCSI error (common
		 * if command is not supported), mark the lun as not supporting
		 * EEDP and set the block size to 0.
		 */
		if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
		    (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
			break;
		}

		/* Bit 0 of the protect byte is the PROT_EN flag. */
		if (rcap_buf->protect & 0x01) {
			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
			    "%d is formatted for EEDP support.\n",
			    done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
		}
		break;
	}

	/* Done with the data buffer, the path, and the CCB. */
	free(rcap_buf, M_MPR);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
}
3849 #endif /* (__FreeBSD_version < 901503) || \
3850           ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3851 
3852 void
3853 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3854     struct mprsas_target *target, lun_id_t lun_id)
3855 {
3856 	union ccb *ccb;
3857 	path_id_t path_id;
3858 
3859 	/*
3860 	 * Set the INRESET flag for this target so that no I/O will be sent to
3861 	 * the target until the reset has completed.  If an I/O request does
3862 	 * happen, the devq will be frozen.  The CCB holds the path which is
3863 	 * used to release the devq.  The devq is released and the CCB is freed
3864 	 * when the TM completes.
3865 	 */
3866 	ccb = xpt_alloc_ccb_nowait();
3867 	if (ccb) {
3868 		path_id = cam_sim_path(sc->sassc->sim);
3869 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3870 		    target->tid, lun_id) != CAM_REQ_CMP) {
3871 			xpt_free_ccb(ccb);
3872 		} else {
3873 			tm->cm_ccb = ccb;
3874 			tm->cm_targ = target;
3875 			target->flags |= MPRSAS_TARGET_INRESET;
3876 		}
3877 	}
3878 }
3879 
3880 int
3881 mprsas_startup(struct mpr_softc *sc)
3882 {
3883 	/*
3884 	 * Send the port enable message and set the wait_for_port_enable flag.
3885 	 * This flag helps to keep the simq frozen until all discovery events
3886 	 * are processed.
3887 	 */
3888 	sc->wait_for_port_enable = 1;
3889 	mprsas_send_portenable(sc);
3890 	return (0);
3891 }
3892 
3893 static int
3894 mprsas_send_portenable(struct mpr_softc *sc)
3895 {
3896 	MPI2_PORT_ENABLE_REQUEST *request;
3897 	struct mpr_command *cm;
3898 
3899 	MPR_FUNCTRACE(sc);
3900 
3901 	if ((cm = mpr_alloc_command(sc)) == NULL)
3902 		return (EBUSY);
3903 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3904 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3905 	request->MsgFlags = 0;
3906 	request->VP_ID = 0;
3907 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3908 	cm->cm_complete = mprsas_portenable_complete;
3909 	cm->cm_data = NULL;
3910 	cm->cm_sge = NULL;
3911 
3912 	mpr_map_command(sc, cm);
3913 	mpr_dprint(sc, MPR_XINFO,
3914 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3915 	    cm, cm->cm_req, cm->cm_complete);
3916 	return (0);
3917 }
3918 
3919 static void
3920 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3921 {
3922 	MPI2_PORT_ENABLE_REPLY *reply;
3923 	struct mprsas_softc *sassc;
3924 
3925 	MPR_FUNCTRACE(sc);
3926 	sassc = sc->sassc;
3927 
3928 	/*
3929 	 * Currently there should be no way we can hit this case.  It only
3930 	 * happens when we have a failure to allocate chain frames, and
3931 	 * port enable commands don't have S/G lists.
3932 	 */
3933 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3934 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3935 		    "This should not happen!\n", __func__, cm->cm_flags);
3936 	}
3937 
3938 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3939 	if (reply == NULL)
3940 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3941 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3942 	    MPI2_IOCSTATUS_SUCCESS)
3943 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3944 
3945 	mpr_free_command(sc, cm);
3946 	if (sc->mpr_ich.ich_arg != NULL) {
3947 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3948 		config_intrhook_disestablish(&sc->mpr_ich);
3949 		sc->mpr_ich.ich_arg = NULL;
3950 	}
3951 
3952 	/*
3953 	 * Done waiting for port enable to complete.  Decrement the refcount.
3954 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3955 	 * take place.
3956 	 */
3957 	sc->wait_for_port_enable = 0;
3958 	sc->port_enable_complete = 1;
3959 	wakeup(&sc->port_enable_complete);
3960 	mprsas_startup_decrement(sassc);
3961 }
3962 
3963 int
3964 mprsas_check_id(struct mprsas_softc *sassc, int id)
3965 {
3966 	struct mpr_softc *sc = sassc->sc;
3967 	char *ids;
3968 	char *name;
3969 
3970 	ids = &sc->exclude_ids[0];
3971 	while((name = strsep(&ids, ",")) != NULL) {
3972 		if (name[0] == '\0')
3973 			continue;
3974 		if (strtol(name, NULL, 0) == (long)id)
3975 			return (1);
3976 	}
3977 
3978 	return (0);
3979 }
3980 
3981 void
3982 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3983 {
3984 	struct mprsas_softc *sassc;
3985 	struct mprsas_lun *lun, *lun_tmp;
3986 	struct mprsas_target *targ;
3987 	int i;
3988 
3989 	sassc = sc->sassc;
3990 	/*
3991 	 * The number of targets is based on IOC Facts, so free all of
3992 	 * the allocated LUNs for each target and then the target buffer
3993 	 * itself.
3994 	 */
3995 	for (i=0; i< maxtargets; i++) {
3996 		targ = &sassc->targets[i];
3997 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3998 			free(lun, M_MPR);
3999 		}
4000 	}
4001 	free(sassc->targets, M_MPR);
4002 
4003 	sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
4004 	    M_MPR, M_WAITOK|M_ZERO);
4005 	if (!sassc->targets) {
4006 		panic("%s failed to alloc targets with error %d\n",
4007 		    __func__, ENOMEM);
4008 	}
4009 }
4010