/* xref: /dragonfly/sys/dev/raid/mps/mps_sas.c (revision 20c2db9a) */
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2011 LSI Corp.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms, with or without
31  * modification, are permitted provided that the following conditions
32  * are met:
33  * 1. Redistributions of source code must retain the above copyright
34  *    notice, this list of conditions and the following disclaimer.
35  * 2. Redistributions in binary form must reproduce the above copyright
36  *    notice, this list of conditions and the following disclaimer in the
37  *    documentation and/or other materials provided with the distribution.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49  * SUCH DAMAGE.
50  *
51  * LSI MPT-Fusion Host Adapter FreeBSD
52  *
53  * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
54  */
55 
56 /* Communications core for LSI MPT2 */
57 
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
64 #include <sys/bus.h>
65 #include <sys/conf.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
68 #include <sys/bio.h>
69 #include <sys/malloc.h>
70 #include <sys/uio.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
76 #include <sys/sbuf.h>
77 
78 #include <sys/rman.h>
79 
80 #include <machine/stdarg.h>
81 
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
94 #endif
95 
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
107 
108 #define MPSSAS_DISCOVERY_TIMEOUT	20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
110 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the CDB opcode byte; a non-zero entry gives the EEDP flag
 * bits to set in the SCSI IO request for that opcode.  The populated
 * slots appear to correspond to the standard SCSI READ/WRITE/VERIFY
 * opcodes (e.g. 0x28/0x2A READ(10)/WRITE(10), 0x88/0x8A
 * READ(16)/WRITE(16), 0xA8/0xAA READ(12)/WRITE(12)) -- confirm against
 * SPC/SBC before extending this table.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	/* 0x00 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x10 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 */ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x30 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 */ 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x50 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 */ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x90 */ 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 */ 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0xB0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xC0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
135 
MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");

/* Forward declarations for the file-local (static) helpers below. */
static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
static void mpssas_log_command(struct mps_command *, const char *, ...)
		__printflike(2, 3);
#if 0 /* XXX unused */
static void mpssas_discovery_timeout(void *data);
#endif
static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
static void mpssas_poll(struct cam_sim *sim);
static void mpssas_scsiio_timeout(void *data);
static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
    struct mps_command *cm, union ccb *ccb);
static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
#if __FreeBSD_version >= 900026
static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
			       uint64_t sasaddr);
static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
static void mpssas_scanner_thread(void *arg);
#if __FreeBSD_version >= 1000006
static void mpssas_async(void *callback_arg, uint32_t code,
			 struct cam_path *path, void *arg);
#else
static void mpssas_check_eedp(struct mpssas_softc *sassc);
static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
#endif
static int mpssas_send_portenable(struct mps_softc *sc);
static void mpssas_portenable_complete(struct mps_softc *sc,
    struct mps_command *cm);
177 
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
180 {
181 	struct mpssas_target *target;
182 	int i;
183 
184 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 		target = &sassc->targets[i];
186 		if (target->handle == handle)
187 			return (target);
188 	}
189 
190 	return (NULL);
191 }
192 
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194  * commands before device handles have been found by discovery.  Since
195  * discovery involves reading config pages and possibly sending commands,
196  * discovery actions may continue even after we receive the end of discovery
197  * event, so refcount discovery actions instead of assuming we can unfreeze
198  * the simq when we get the event.
199  */
200 void
201 mpssas_startup_increment(struct mpssas_softc *sassc)
202 {
203 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
204 		if (sassc->startup_refcount++ == 0) {
205 			/* just starting, freeze the simq */
206 			mps_dprint(sassc->sc, MPS_INFO,
207 			    "%s freezing simq\n", __func__);
208 			xpt_freeze_simq(sassc->sim, 1);
209 		}
210 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
211 		    sassc->startup_refcount);
212 	}
213 }
214 
215 void
216 mpssas_startup_decrement(struct mpssas_softc *sassc)
217 {
218 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
219 		if (--sassc->startup_refcount == 0) {
220 			/* finished all discovery-related actions, release
221 			 * the simq and rescan for the latest topology.
222 			 */
223 			mps_dprint(sassc->sc, MPS_INFO,
224 			    "%s releasing simq\n", __func__);
225 			sassc->flags &= ~MPSSAS_IN_STARTUP;
226 			xpt_release_simq(sassc->sim, 1);
227 			mpssas_rescan_target(sassc->sc, NULL);
228 		}
229 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
230 		    sassc->startup_refcount);
231 	}
232 }
233 
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235  * management, so refcount the TMs and keep the simq frozen when any are in
236  * use.
237  */
238 struct mps_command *
239 mpssas_alloc_tm(struct mps_softc *sc)
240 {
241 	struct mps_command *tm;
242 
243 	tm = mps_alloc_high_priority_command(sc);
244 	if (tm != NULL) {
245 		if (sc->sassc->tm_count++ == 0) {
246 			mps_printf(sc, "%s freezing simq\n", __func__);
247 			xpt_freeze_simq(sc->sassc->sim, 1);
248 		}
249 		mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
250 		    sc->sassc->tm_count);
251 	}
252 	return tm;
253 }
254 
255 void
256 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
257 {
258 	if (tm == NULL)
259 		return;
260 
261 	/* if there are no TMs in use, we can release the simq.  We use our
262 	 * own refcount so that it's easier for a diag reset to cleanup and
263 	 * release the simq.
264 	 */
265 	if (--sc->sassc->tm_count == 0) {
266 		mps_printf(sc, "%s releasing simq\n", __func__);
267 		xpt_release_simq(sc->sassc->sim, 1);
268 	}
269 	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
270 	    sc->sassc->tm_count);
271 
272 	mps_free_high_priority_command(sc, tm);
273 }
274 
275 
276 void
277 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
278 {
279 	struct mpssas_softc *sassc = sc->sassc;
280 	path_id_t pathid;
281 	target_id_t targetid;
282 	union ccb *ccb;
283 
284 	pathid = cam_sim_path(sassc->sim);
285 	if (targ == NULL)
286 		targetid = CAM_TARGET_WILDCARD;
287 	else
288 		targetid = targ - sassc->targets;
289 
290 	/*
291 	 * Allocate a CCB and schedule a rescan.
292 	 */
293 	ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
294 
295 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
296 		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
298 		xpt_free_ccb(ccb);
299 		return;
300 	}
301 
302 	/* XXX Hardwired to scan the bus for now */
303 	ccb->ccb_h.func_code = XPT_SCAN_BUS;
304 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
305 	mpssas_rescan(sassc, ccb);
306 }
307 
/*
 * Log a printf-style message prefixed with information identifying the
 * command: the CAM path and SCSI CDB when a CCB is attached, or a
 * "noperiph" sim/bus/target/lun tuple otherwise, followed by the
 * request SMID.  A NULL command is a no-op.
 */
static void
mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
	struct sbuf sb;
	__va_list ap;
	char str[192];		/* assembled message; fixed size, may truncate */
	char path_str[64];

	if (cm == NULL)
		return;

	/* sbuf over the stack buffer 'str'; presumably no sbuf_delete
	 * is needed since no heap allocation occurs -- confirm against
	 * sbuf(9) semantics for caller-supplied buffers. */
	sbuf_new(&sb, str, sizeof(str), 0);

	__va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		/* Prefix with the CAM path, plus the CDB for SCSI I/O. */
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	kprintf("%s", sbuf_data(&sb));

	__va_end(ap);
}
349 
350 static void
351 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
352 {
353 	struct mpssas_softc *sassc = sc->sassc;
354 	path_id_t pathid = cam_sim_path(sassc->sim);
355 	struct cam_path *path;
356 
357 	mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
358 	if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
359 		mps_printf(sc, "unable to create path for lost target %d\n",
360 		    targ->tid);
361 		return;
362 	}
363 
364 	xpt_async(AC_LOST_DEVICE, path, NULL);
365 	xpt_free_path(path);
366 }
367 
368 /*
369  * The MPT2 firmware performs debounce on the link to avoid transient link
370  * errors and false removals.  When it does decide that link has been lost
371  * and a device need to go away, it expects that the host will perform a
372  * target reset and then an op remove.  The reset has the side-effect of
373  * aborting any outstanding requests for the device, which is required for
374  * the op-remove to succeed.  It's not clear if the host should check for
375  * the device coming back alive after the reset.
376  */
/*
 * Begin removal of the device with the given firmware handle: mark the
 * target, tell CAM the device is gone, and issue a target-reset TM.
 * The reset aborts outstanding requests (required before an op-remove
 * can succeed); the actual OP_REMOVE_DEVICE is sent from the
 * completion handler, mpssas_remove_device().
 */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);

	/*
	 * If this is a WD controller, determine if the disk should be exposed
	 * to the OS or not.  If disk should be exposed, return from this
	 * function without doing anything.
	 */
	sc = sassc->sc;
	if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
	    MPS_WD_EXPOSE_ALWAYS)) {
		return;
	}

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		kprintf("%s: invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/*
	 * Allocate a high-priority TM command; this also freezes the
	 * simq while any TMs are outstanding (see mpssas_alloc_tm()).
	 */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: command alloc failure\n", __func__);
		return;
	}

	/* Tell CAM the device is gone before resetting it. */
	mpssas_lost_target(sc, targ);

	/* Build the target-reset task management request. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	/* Pass the device handle to the completion via cm_complete_data. */
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
432 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On success, the same command is reused to
 * send a SAS_IO_UNIT_CONTROL OP_REMOVE_DEVICE for the handle, and any
 * commands still queued on the target are completed back to CAM with
 * CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
		   reply->IOCStatus, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
	    reply->TerminationCount);
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command to issue the OP_REMOVE_DEVICE request */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = handle;
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	/*
	 * Flush commands still queued on the target back to CAM as
	 * CAM_DEV_NOT_THERE.  Note that 'tm' is reused as the loop
	 * iterator here -- the TM command itself was already handed
	 * back to the hardware by mps_map_command() above.
	 */
	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
505 
/*
 * Completion handler for the OP_REMOVE_DEVICE request sent from
 * mpssas_remove_device().  On success, clears the target's handle and
 * topology bookkeeping so the slot can be reused; devname and sasaddr
 * are deliberately left intact (see comment below).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, reply->IOCStatus);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
	}

	mpssas_free_tm(sc, tm);
}
561 
562 static int
563 mpssas_register_events(struct mps_softc *sc)
564 {
565 	uint8_t events[16];
566 
567 	bzero(events, 16);
568 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
569 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
570 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
571 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
572 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
573 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
574 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
575 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
576 	setbit(events, MPI2_EVENT_IR_VOLUME);
577 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
578 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
579 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
580 
581 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
582 	    &sc->sassc->mpssas_eh);
583 
584 	return (0);
585 }
586 
/*
 * Allocate and initialize the SAS/CAM glue for the controller: the
 * sassc softc and target array, the CAM devq/sim, the firmware-event
 * taskqueue, the rescan thread, and the CAM bus registration.  The
 * simq starts frozen until discovery completes (see
 * mpssas_startup_decrement()).  Returns 0 or an errno; on failure
 * mps_detach_sas() unwinds whatever was set up.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
#if __FreeBSD_version >= 1000006
	cam_status status;
#endif
	int unit, error = 0;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	sassc->targets = kmalloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
	/* NOTE(review): devq reference dropped immediately after
	 * cam_sim_alloc(); presumably the sim holds its own reference
	 * (DragonFly convention) -- confirm. */
	cam_simq_release(sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_INTWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	/* Kernel thread that services queued rescan CCBs. */
	TAILQ_INIT(&sassc->ccb_scanq);
	error = mps_kproc_create(mpssas_scanner_thread, sassc,
	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
	if (error) {
		mps_printf(sc, "Error %d starting rescan thread\n", error);
		goto out;
	}

	mps_lock(sc);
	sassc->flags |= MPSSAS_SCANTHREAD;

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.  Freezing
	 * the simq will prevent the CAM boottime scanner from running
	 * before discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sassc->sim, 1);
	sc->sassc->startup_refcount = 0;

	callout_init_mp(&sassc->discovery_callout);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

#if __FreeBSD_version >= 1000006
	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x registering async handler for "
			   "AC_ADVINFO_CHANGED events\n", status);
	}
#endif

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
684 
/*
 * Tear down everything mps_attach_sas() set up.  Safe to call on a
 * partially initialized softc -- it is also the error-unwind path for
 * attach.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
#if __FreeBSD_version >= 1000006
	xpt_register_async(0, mpssas_async, sc, NULL);
#endif

	/* If attach left the simq frozen for discovery, release it now. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim);
	}

	/*
	 * Ask the scanner thread to shut down and wait (bounded at 30s)
	 * for it; presumably the thread clears MPSSAS_SCANTHREAD and
	 * wakes &sassc->flags on exit -- see mpssas_scanner_thread().
	 */
	if (sassc->flags & MPSSAS_SCANTHREAD) {
		sassc->flags |= MPSSAS_SHUTDOWN;
		wakeup(&sassc->ccb_scanq);

		/* re-check: the thread may have already exited */
		if (sassc->flags & MPSSAS_SCANTHREAD) {
			lksleep(&sassc->flags, &sc->mps_lock, 0,
			       "mps_shutdown", 30 * hz);
		}
	}
	mps_unlock(sc);

	kfree(sassc->targets, M_MPT2);
	kfree(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
739 
740 void
741 mpssas_discovery_end(struct mpssas_softc *sassc)
742 {
743 	struct mps_softc *sc = sassc->sc;
744 
745 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
746 
747 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
748 		callout_stop(&sassc->discovery_callout);
749 
750 }
751 
#if 0 /* XXX unused */
/*
 * Callout handler armed while waiting for discovery to complete.  If
 * discovery is still in progress, poll the hardware (in case
 * interrupts are broken) and re-arm the timeout, up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts, before giving up and ending
 * discovery anyway.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	mps_printf(sc,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_printf(sassc->sc,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the timeout. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			/* Too many timeouts: force discovery to end. */
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
#endif
793 
/*
 * CAM action entry point for the mps sim.  Dispatches on the CCB
 * function code; most cases complete the CCB inline via xpt_done() at
 * the bottom, while SCSI I/O, SMP I/O, and device reset hand off to
 * helpers that complete asynchronously (note the early returns).
 * Called with the mps lock held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Report static controller capabilities to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 0;
		cpi->initiator_id = 255;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* handle 0: no device at this target id */
			cts->ccb_h.status = CAM_TID_INVALID;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the firmware link rate code to a bitrate
		 * (150000 == 1.5Gb/s, etc.). */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* unknown rate code: don't claim a speed */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
		/* Completes asynchronously; skip xpt_done() below. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_printf(sassc->sc, "mpssas_action faking success for "
			   "abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		/* Completes asynchronously via mpssas_scsiio_complete(). */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		/* Completes asynchronously via mpssas_smpio_complete(). */
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
910 
911 static void
912 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
913     target_id_t target_id, lun_id_t lun_id)
914 {
915 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
916 	struct cam_path *path;
917 
918 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
919 	    ac_code, target_id, lun_id);
920 
921 	if (xpt_create_path(&path, NULL,
922 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
923 		mps_printf(sc, "unable to create path for reset "
924 			   "notification\n");
925 		return;
926 	}
927 
928 	xpt_async(ac_code, path, NULL);
929 	xpt_free_path(path);
930 }
931 
/*
 * After a diag reset, complete or wake every outstanding command with
 * a NULL reply so no requester is left waiting on a command the
 * hardware will never finish.  Called with the mps lock held
 * (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;		/* did anything consume this command? */

	mps_printf(sc, "%s\n", __func__);
	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* presumably polled requesters spin on
		 * MPS_CM_FLAGS_COMPLETE -- set it so they stop */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
977 
/*
 * Reset-recovery entry point, invoked after a controller diag reset.
 * Puts the SAS layer back into startup/discovery mode, freezes the simq,
 * announces a bus reset to CAM, force-completes all outstanding commands,
 * and zeroes the per-target device handles so that every target must be
 * rediscovered with its new handle.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_printf(sc, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sc->sassc->sim, 1);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_printf(sc, "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/*
	 * The simq was explicitly frozen above, so set the refcount to 0.
	 * The simq will be explicitly released after port enable completes.
	 */
	sc->sassc->startup_refcount = 0;

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->facts->MaxTargets; i++) {
		/* Commands still charged to a target indicate a leak; log it. */
		if (sc->sassc->targets[i].outstanding != 0)
			mps_printf(sc, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1026 static void
1027 mpssas_tm_timeout(void *data)
1028 {
1029 	struct mps_command *tm = data;
1030 	struct mps_softc *sc = tm->cm_sc;
1031 
1032 	mps_lock(sc);
1033 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1034 	mps_reinit(sc);
1035 	mps_unlock(sc);
1036 }
1037 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management command.
 * If no commands remain queued for the LUN, the reset succeeded:
 * announce AC_SENT_BDR to CAM and either start aborting the next
 * timed-out command for this target or finish recovery.  If commands
 * remain on the LUN, the reset effectively failed and we escalate to a
 * target reset.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed, so disarm its watchdog (mpssas_tm_timeout). */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	/* See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpssas_log_command(tm,
		    "logical unit %u finished recovery after reset\n",
		    tm->cm_lun);

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    tm->cm_lun);

		/* we've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Reuse this TM to abort the next timed-out command. */
			mpssas_send_abort(sc, tm, cm);
		}
		else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	}
	else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpssas_log_command(tm,
		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
		    tm, cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1125 
/*
 * Completion handler for a TARGET_RESET task management command.  If the
 * target has no outstanding commands, recovery is complete: announce
 * AC_SENT_BDR for all LUNs and free the TM.  If commands are still
 * outstanding, the reset effectively failed and we escalate to a full
 * controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mpssas_target *targ;

	/* The TM completed, so disarm its watchdog (mpssas_tm_timeout). */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "target reset status 0x%x code 0x%x count %u\n",
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1195 
1196 #define MPS_RESET_TIMEOUT 30
1197 
1198 static int
1199 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1200 {
1201 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1202 	struct mpssas_target *target;
1203 	int err;
1204 
1205 	target = tm->cm_targ;
1206 	if (target->handle == 0) {
1207 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1208 		    __func__, target->tid);
1209 		return -1;
1210 	}
1211 
1212 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1213 	req->DevHandle = target->handle;
1214 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1215 	req->TaskType = type;
1216 
1217 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1218 		/* XXX Need to handle invalid LUNs */
1219 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1220 		tm->cm_targ->logical_unit_resets++;
1221 		mpssas_log_command(tm, "sending logical unit reset\n");
1222 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1223 	}
1224 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1225 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1226 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1227 		tm->cm_targ->target_resets++;
1228 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1229 		mpssas_log_command(tm, "sending target reset\n");
1230 		tm->cm_complete = mpssas_target_reset_complete;
1231 	}
1232 	else {
1233 		mps_printf(sc, "unexpected reset type 0x%x\n", type);
1234 		return -1;
1235 	}
1236 
1237 	tm->cm_data = NULL;
1238 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1239 	tm->cm_complete_data = (void *)tm;
1240 
1241 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1242 	    mpssas_tm_timeout, tm);
1243 
1244 	err = mps_map_command(sc, tm);
1245 	if (err)
1246 		mpssas_log_command(tm,
1247 		    "error %d sending reset type %u\n",
1248 		    err, type);
1249 
1250 	return err;
1251 }
1252 
1253 
/*
 * Completion handler for an ABORT_TASK task management command.  Looks
 * at the head of the target's timedout_commands list: if the list is
 * empty, recovery for this target is finished; if a different command is
 * now at the head, the abort worked and we move on to abort that one; if
 * the same command is still at the head, the abort failed and we
 * escalate to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so disarm its watchdog (mpssas_tm_timeout). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, req->TaskMID);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, req->TaskMID);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    req->TaskMID,
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm,
		    "finished recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (req->TaskMID != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm,
		    "continuing recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm,
		    "abort failed for TaskMID %u tm %p\n",
		    req->TaskMID, tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1335 
1336 #define MPS_ABORT_TIMEOUT 5
1337 
1338 static int
1339 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1340 {
1341 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1342 	struct mpssas_target *targ;
1343 	int err;
1344 
1345 	targ = cm->cm_targ;
1346 	if (targ->handle == 0) {
1347 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1348 		    __func__, cm->cm_ccb->ccb_h.target_id);
1349 		return -1;
1350 	}
1351 
1352 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1353 	req->DevHandle = targ->handle;
1354 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1355 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1356 
1357 	/* XXX Need to handle invalid LUNs */
1358 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1359 
1360 	req->TaskMID = cm->cm_desc.Default.SMID;
1361 
1362 	tm->cm_data = NULL;
1363 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1364 	tm->cm_complete = mpssas_abort_complete;
1365 	tm->cm_complete_data = (void *)tm;
1366 	tm->cm_targ = cm->cm_targ;
1367 	tm->cm_lun = cm->cm_lun;
1368 
1369 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1370 	    mpssas_tm_timeout, tm);
1371 
1372 	targ->aborts++;
1373 
1374 	err = mps_map_command(sc, tm);
1375 	if (err)
1376 		mpssas_log_command(tm,
1377 		    "error %d sending abort for cm %p SMID %u\n",
1378 		    err, cm, req->TaskMID);
1379 	return err;
1380 }
1381 
1382 
/*
 * Callout handler for a SCSI I/O command that exceeded its CAM timeout.
 * Runs the interrupt handler first to catch a completion that is already
 * pending, then marks the command TIMEDOUT, queues it on the target's
 * recovery list, and starts (or piggybacks on) task management recovery
 * for the target.  Called with the mps lock held (asserted below).
 */
static void
mpssas_scsiio_timeout(void *data)
{
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;

	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_state == MPS_CM_STATE_FREE) {
		/* The interrupt handler completed it; nothing to recover. */
		mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_printf(sc, "command timeout with NULL ccb\n");
		return;
	}

	mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
	    cm, cm->cm_ccb);

	targ = cm->cm_targ;
	targ->timeouts++;

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	}
	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_printf(sc, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
		    cm);
	}

}
1456 
1457 static void
1458 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1459 {
1460 	MPI2_SCSI_IO_REQUEST *req;
1461 	struct ccb_scsiio *csio;
1462 	struct mps_softc *sc;
1463 	struct mpssas_target *targ;
1464 	struct mpssas_lun *lun;
1465 	struct mps_command *cm;
1466 	uint8_t i, lba_byte, *ref_tag_addr;
1467 	uint16_t eedp_flags;
1468 
1469 	sc = sassc->sc;
1470 	mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1471 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1472 
1473 	csio = &ccb->csio;
1474 	targ = &sassc->targets[csio->ccb_h.target_id];
1475 	if (targ->handle == 0x0) {
1476 		mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1477 		    __func__, csio->ccb_h.target_id);
1478 		csio->ccb_h.status = CAM_TID_INVALID;
1479 		xpt_done(ccb);
1480 		return;
1481 	}
1482 	/*
1483 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1484 	 * that the volume has timed out.  We want volumes to be enumerated
1485 	 * until they are deleted/removed, not just failed.
1486 	 */
1487 	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1488 		if (targ->devinfo == 0)
1489 			csio->ccb_h.status = CAM_REQ_CMP;
1490 		else
1491 			csio->ccb_h.status = CAM_SEL_TIMEOUT;
1492 		xpt_done(ccb);
1493 		return;
1494 	}
1495 
1496 	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1497 		mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1498 		csio->ccb_h.status = CAM_TID_INVALID;
1499 		xpt_done(ccb);
1500 		return;
1501 	}
1502 
1503 	cm = mps_alloc_command(sc);
1504 	if (cm == NULL) {
1505 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1506 			xpt_freeze_simq(sassc->sim, 1);
1507 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1508 		}
1509 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1510 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1511 		xpt_done(ccb);
1512 		return;
1513 	}
1514 
1515 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1516 	bzero(req, sizeof(*req));
1517 	req->DevHandle = targ->handle;
1518 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1519 	req->MsgFlags = 0;
1520 	req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1521 	req->SenseBufferLength = MPS_SENSE_LEN;
1522 	req->SGLFlags = 0;
1523 	req->ChainOffset = 0;
1524 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1525 	req->SGLOffset1= 0;
1526 	req->SGLOffset2= 0;
1527 	req->SGLOffset3= 0;
1528 	req->SkipCount = 0;
1529 	req->DataLength = csio->dxfer_len;
1530 	req->BidirectionalDataLength = 0;
1531 	req->IoFlags = csio->cdb_len;
1532 	req->EEDPFlags = 0;
1533 
1534 	/* Note: BiDirectional transfers are not supported */
1535 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1536 	case CAM_DIR_IN:
1537 		req->Control = MPI2_SCSIIO_CONTROL_READ;
1538 		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1539 		break;
1540 	case CAM_DIR_OUT:
1541 		req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1542 		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1543 		break;
1544 	case CAM_DIR_NONE:
1545 	default:
1546 		req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1547 		break;
1548 	}
1549 
1550 	/*
1551 	 * It looks like the hardware doesn't require an explicit tag
1552 	 * number for each transaction.  SAM Task Management not supported
1553 	 * at the moment.
1554 	 */
1555 	switch (csio->tag_action) {
1556 	case MSG_HEAD_OF_Q_TAG:
1557 		req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1558 		break;
1559 	case MSG_ORDERED_Q_TAG:
1560 		req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1561 		break;
1562 	case MSG_ACA_TASK:
1563 		req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1564 		break;
1565 	case CAM_TAG_ACTION_NONE:
1566 	case MSG_SIMPLE_Q_TAG:
1567 	default:
1568 		req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1569 		break;
1570 	}
1571 	req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1572 
1573 	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1574 		mps_free_command(sc, cm);
1575 		ccb->ccb_h.status = CAM_LUN_INVALID;
1576 		xpt_done(ccb);
1577 		return;
1578 	}
1579 
1580 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1581 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1582 	else
1583 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1584 	req->IoFlags = csio->cdb_len;
1585 
1586 	/*
1587 	 * Check if EEDP is supported and enabled.  If it is then check if the
1588 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1589 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1590 	 * for EEDP transfer.
1591 	 */
1592 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1593 	if (sc->eedp_enabled && eedp_flags) {
1594 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1595 			if (lun->lun_id == csio->ccb_h.target_lun) {
1596 				break;
1597 			}
1598 		}
1599 
1600 		if ((lun != NULL) && (lun->eedp_formatted)) {
1601 			req->EEDPBlockSize = lun->eedp_block_size;
1602 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1603 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1604 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1605 			req->EEDPFlags = eedp_flags;
1606 
1607 			/*
1608 			 * If CDB less than 32, fill in Primary Ref Tag with
1609 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1610 			 * already there.  Also, set protection bit.  FreeBSD
1611 			 * currently does not support CDBs bigger than 16, but
1612 			 * the code doesn't hurt, and will be here for the
1613 			 * future.
1614 			 */
1615 			if (csio->cdb_len != 32) {
1616 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1617 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1618 				    PrimaryReferenceTag;
1619 				for (i = 0; i < 4; i++) {
1620 					*ref_tag_addr =
1621 					    req->CDB.CDB32[lba_byte + i];
1622 					ref_tag_addr++;
1623 				}
1624 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1625 				    0xFFFF;
1626 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1627 				    0x20;
1628 			} else {
1629 				eedp_flags |=
1630 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1631 				req->EEDPFlags = eedp_flags;
1632 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1633 				    0x1F) | 0x20;
1634 			}
1635 		}
1636 	}
1637 
1638 	cm->cm_data = csio->data_ptr;
1639 	cm->cm_length = csio->dxfer_len;
1640 	cm->cm_sge = &req->SGL;
1641 	cm->cm_sglsize = (32 - 24) * 4;
1642 	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1643 	cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1644 	cm->cm_complete = mpssas_scsiio_complete;
1645 	cm->cm_complete_data = ccb;
1646 	cm->cm_targ = targ;
1647 	cm->cm_lun = csio->ccb_h.target_lun;
1648 	cm->cm_ccb = ccb;
1649 
1650 	/*
1651 	 * If HBA is a WD and the command is not for a retry, try to build a
1652 	 * direct I/O message. If failed, or the command is for a retry, send
1653 	 * the I/O to the IR volume itself.
1654 	 */
1655 	if (sc->WD_valid_config) {
1656 		if (ccb->ccb_h.status != MPS_WD_RETRY) {
1657 			mpssas_direct_drive_io(sassc, cm, ccb);
1658 		} else {
1659 			ccb->ccb_h.status = CAM_REQ_INPROG;
1660 		}
1661 	}
1662 
1663 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1664 	   mpssas_scsiio_timeout, cm);
1665 
1666 	targ->issued++;
1667 	targ->outstanding++;
1668 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1669 
1670 	if ((sc->mps_debug & MPS_TRACE) != 0)
1671 		mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1672 		    __func__, cm, ccb, targ->outstanding);
1673 
1674 	mps_map_command(sc, cm);
1675 	return;
1676 }
1677 
1678 static void
1679 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1680 {
1681 	MPI2_SCSI_IO_REPLY *rep;
1682 	union ccb *ccb;
1683 	struct ccb_scsiio *csio;
1684 	struct mpssas_softc *sassc;
1685 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1686 	u8 *TLR_bits, TLR_on;
1687 	int dir = 0, i;
1688 	u16 alloc_len;
1689 
1690 	mps_dprint(sc, MPS_TRACE,
1691 	    "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1692 	    __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1693 	    cm->cm_targ->outstanding);
1694 
1695 	callout_stop(&cm->cm_callout);
1696 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1697 
1698 	sassc = sc->sassc;
1699 	ccb = cm->cm_complete_data;
1700 	csio = &ccb->csio;
1701 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1702 	/*
1703 	 * XXX KDM if the chain allocation fails, does it matter if we do
1704 	 * the sync and unload here?  It is simpler to do it in every case,
1705 	 * assuming it doesn't cause problems.
1706 	 */
1707 	if (cm->cm_data != NULL) {
1708 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1709 			dir = BUS_DMASYNC_POSTREAD;
1710 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1711 			dir = BUS_DMASYNC_POSTWRITE;
1712 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1713 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1714 	}
1715 
1716 	cm->cm_targ->completed++;
1717 	cm->cm_targ->outstanding--;
1718 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1719 
1720 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1721 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1722 		if (cm->cm_reply != NULL)
1723 			mpssas_log_command(cm,
1724 			    "completed timedout cm %p ccb %p during recovery "
1725 			    "ioc %x scsi %x state %x xfer %u\n",
1726 			    cm, cm->cm_ccb,
1727 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1728 			    rep->TransferCount);
1729 		else
1730 			mpssas_log_command(cm,
1731 			    "completed timedout cm %p ccb %p during recovery\n",
1732 			    cm, cm->cm_ccb);
1733 	} else if (cm->cm_targ->tm != NULL) {
1734 		if (cm->cm_reply != NULL)
1735 			mpssas_log_command(cm,
1736 			    "completed cm %p ccb %p during recovery "
1737 			    "ioc %x scsi %x state %x xfer %u\n",
1738 			    cm, cm->cm_ccb,
1739 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1740 			    rep->TransferCount);
1741 		else
1742 			mpssas_log_command(cm,
1743 			    "completed cm %p ccb %p during recovery\n",
1744 			    cm, cm->cm_ccb);
1745 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1746 		mpssas_log_command(cm,
1747 		    "reset completed cm %p ccb %p\n",
1748 		    cm, cm->cm_ccb);
1749 	}
1750 
1751 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1752 		/*
1753 		 * We ran into an error after we tried to map the command,
1754 		 * so we're getting a callback without queueing the command
1755 		 * to the hardware.  So we set the status here, and it will
1756 		 * be retained below.  We'll go through the "fast path",
1757 		 * because there can be no reply when we haven't actually
1758 		 * gone out to the hardware.
1759 		 */
1760 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1761 
1762 		/*
1763 		 * Currently the only error included in the mask is
1764 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1765 		 * chain frames.  We need to freeze the queue until we get
1766 		 * a command that completed without this error, which will
1767 		 * hopefully have some chain frames attached that we can
1768 		 * use.  If we wanted to get smarter about it, we would
1769 		 * only unfreeze the queue in this condition when we're
1770 		 * sure that we're getting some chain frames back.  That's
1771 		 * probably unnecessary.
1772 		 */
1773 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1774 			xpt_freeze_simq(sassc->sim, 1);
1775 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1776 			mps_dprint(sc, MPS_INFO, "Error sending command, "
1777 				   "freezing SIM queue\n");
1778 		}
1779 	}
1780 
1781 	/* Take the fast path to completion */
1782 	if (cm->cm_reply == NULL) {
1783 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1784 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1785 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1786 			else {
1787 				ccb->ccb_h.status = CAM_REQ_CMP;
1788 				ccb->csio.scsi_status = SCSI_STATUS_OK;
1789 			}
1790 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1791 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1792 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1793 				mps_dprint(sc, MPS_INFO,
1794 					   "Unfreezing SIM queue\n");
1795 			}
1796 		}
1797 
1798 		/*
1799 		 * There are two scenarios where the status won't be
1800 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
1801 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1802 		 */
1803 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1804 			/*
1805 			 * Freeze the dev queue so that commands are
1806 			 * executed in the correct order with after error
1807 			 * recovery.
1808 			 */
1809 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
1810 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1811 		}
1812 		mps_free_command(sc, cm);
1813 		xpt_done(ccb);
1814 		return;
1815 	}
1816 
1817 	if (sc->mps_debug & MPS_TRACE)
1818 		mpssas_log_command(cm,
1819 		    "ioc %x scsi %x state %x xfer %u\n",
1820 		    rep->IOCStatus, rep->SCSIStatus,
1821 		    rep->SCSIState, rep->TransferCount);
1822 
1823 	/*
1824 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1825 	 * Volume if an error occurred (normal I/O retry).  Use the original
1826 	 * CCB, but set a flag that this will be a retry so that it's sent to
1827 	 * the original volume.  Free the command but reuse the CCB.
1828 	 */
1829 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1830 		mps_free_command(sc, cm);
1831 		ccb->ccb_h.status = MPS_WD_RETRY;
1832 		mpssas_action_scsiio(sassc, ccb);
1833 		return;
1834 	}
1835 
1836 	switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1837 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1838 		csio->resid = cm->cm_length - rep->TransferCount;
1839 		/* FALLTHROUGH */
1840 	case MPI2_IOCSTATUS_SUCCESS:
1841 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1842 
1843 		if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1844 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1845 			mpssas_log_command(cm, "recovered error\n");
1846 
1847 		/* Completion failed at the transport level. */
1848 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1849 		    MPI2_SCSI_STATE_TERMINATED)) {
1850 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1851 			break;
1852 		}
1853 
1854 		/* In a modern packetized environment, an autosense failure
1855 		 * implies that there's not much else that can be done to
1856 		 * recover the command.
1857 		 */
1858 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1859 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1860 			break;
1861 		}
1862 
1863 		/*
1864 		 * CAM doesn't care about SAS Response Info data, but if this is
1865 		 * the state check if TLR should be done.  If not, clear the
1866 		 * TLR_bits for the target.
1867 		 */
1868 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1869 		    ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1870 		    MPS_SCSI_RI_INVALID_FRAME)) {
1871 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1872 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1873 		}
1874 
1875 		/*
1876 		 * Intentionally override the normal SCSI status reporting
1877 		 * for these two cases.  These are likely to happen in a
1878 		 * multi-initiator environment, and we want to make sure that
1879 		 * CAM retries these commands rather than fail them.
1880 		 */
1881 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1882 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1883 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1884 			break;
1885 		}
1886 
1887 		/* Handle normal status and sense */
1888 		csio->scsi_status = rep->SCSIStatus;
1889 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1890 			ccb->ccb_h.status = CAM_REQ_CMP;
1891 		else
1892 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1893 
1894 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1895 			int sense_len, returned_sense_len;
1896 
1897 			returned_sense_len = min(rep->SenseCount,
1898 			    sizeof(struct scsi_sense_data));
1899 			if (returned_sense_len < ccb->csio.sense_len)
1900 				ccb->csio.sense_resid = ccb->csio.sense_len -
1901 					returned_sense_len;
1902 			else
1903 				ccb->csio.sense_resid = 0;
1904 
1905 			sense_len = min(returned_sense_len,
1906 			    ccb->csio.sense_len - ccb->csio.sense_resid);
1907 			bzero(&ccb->csio.sense_data,
1908 			      sizeof(ccb->csio.sense_data));
1909 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1910 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1911 		}
1912 
1913 		/*
1914 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
1915 		 * and it's page code 0 (Supported Page List), and there is
1916 		 * inquiry data, and this is for a sequential access device, and
1917 		 * the device is an SSP target, and TLR is supported by the
1918 		 * controller, turn the TLR_bits value ON if page 0x90 is
1919 		 * supported.
1920 		 */
1921 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1922 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1923 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1924 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1925 		    T_SEQUENTIAL) && (sc->control_TLR) &&
1926 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
1927 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1928 			vpd_list = (struct scsi_vpd_supported_page_list *)
1929 			    csio->data_ptr;
1930 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1931 			    TLR_bits;
1932 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1933 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1934 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1935 			    csio->cdb_io.cdb_bytes[4];
1936 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1937 				if (vpd_list->list[i] == 0x90) {
1938 					*TLR_bits = TLR_on;
1939 					break;
1940 				}
1941 			}
1942 		}
1943 		break;
1944 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1945 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1946 		/*
1947 		 * If devinfo is 0 this will be a volume.  In that case don't
1948 		 * tell CAM that the volume is not there.  We want volumes to
1949 		 * be enumerated until they are deleted/removed, not just
1950 		 * failed.
1951 		 */
1952 		if (cm->cm_targ->devinfo == 0)
1953 			ccb->ccb_h.status = CAM_REQ_CMP;
1954 		else
1955 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1956 		break;
1957 	case MPI2_IOCSTATUS_INVALID_SGL:
1958 		mps_print_scsiio_cmd(sc, cm);
1959 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1960 		break;
1961 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1962 		/*
1963 		 * This is one of the responses that comes back when an I/O
1964 		 * has been aborted.  If it is because of a timeout that we
1965 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
1966 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
1967 		 * command is the same (it gets retried, subject to the
1968 		 * retry counter), the only difference is what gets printed
1969 		 * on the console.
1970 		 */
1971 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1972 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1973 		else
1974 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1975 		break;
1976 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1977 		/* resid is ignored for this condition */
1978 		csio->resid = 0;
1979 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1980 		break;
1981 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1982 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1983 		/*
1984 		 * Since these are generally external (i.e. hopefully
1985 		 * transient transport-related) errors, retry these without
1986 		 * decrementing the retry count.
1987 		 */
1988 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1989 		mpssas_log_command(cm,
1990 		    "terminated ioc %x scsi %x state %x xfer %u\n",
1991 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1992 		    rep->TransferCount);
1993 		break;
1994 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1995 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
1996 	case MPI2_IOCSTATUS_INVALID_VPID:
1997 	case MPI2_IOCSTATUS_INVALID_FIELD:
1998 	case MPI2_IOCSTATUS_INVALID_STATE:
1999 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2000 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2001 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2002 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2003 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2004 	default:
2005 		mpssas_log_command(cm,
2006 		    "completed ioc %x scsi %x state %x xfer %u\n",
2007 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2008 		    rep->TransferCount);
2009 		csio->resid = cm->cm_length;
2010 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2011 		break;
2012 	}
2013 
2014 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2015 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2016 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2017 		mps_dprint(sc, MPS_INFO, "Command completed, "
2018 			   "unfreezing SIM queue\n");
2019 	}
2020 
2021 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2022 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2023 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2024 	}
2025 
2026 	mps_free_command(sc, cm);
2027 	xpt_done(ccb);
2028 }
2029 
2030 static void
2031 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2032     union ccb *ccb) {
2033 	pMpi2SCSIIORequest_t	pIO_req;
2034 	struct mps_softc	*sc = sassc->sc;
2035 	uint64_t		virtLBA;
2036 	uint32_t		physLBA, stripe_offset, stripe_unit;
2037 	uint32_t		io_size, column;
2038 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2039 
2040 	/*
2041 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2042 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2043 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2044 	 * bit different than the 10/16 CDBs, handle them separately.
2045 	 */
2046 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2047 	CDB = pIO_req->CDB.CDB32;
2048 
2049 	/*
2050 	 * Handle 6 byte CDBs.
2051 	 */
2052 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2053 	    (CDB[0] == WRITE_6))) {
2054 		/*
2055 		 * Get the transfer size in blocks.
2056 		 */
2057 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2058 
2059 		/*
2060 		 * Get virtual LBA given in the CDB.
2061 		 */
2062 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2063 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2064 
2065 		/*
2066 		 * Check that LBA range for I/O does not exceed volume's
2067 		 * MaxLBA.
2068 		 */
2069 		if ((virtLBA + (uint64_t)io_size - 1) <=
2070 		    sc->DD_max_lba) {
2071 			/*
2072 			 * Check if the I/O crosses a stripe boundary.  If not,
2073 			 * translate the virtual LBA to a physical LBA and set
2074 			 * the DevHandle for the PhysDisk to be used.  If it
2075 			 * does cross a boundry, do normal I/O.  To get the
2076 			 * right DevHandle to use, get the map number for the
2077 			 * column, then use that map number to look up the
2078 			 * DevHandle of the PhysDisk.
2079 			 */
2080 			stripe_offset = (uint32_t)virtLBA &
2081 			    (sc->DD_stripe_size - 1);
2082 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2083 				physLBA = (uint32_t)virtLBA >>
2084 				    sc->DD_stripe_exponent;
2085 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2086 				column = physLBA % sc->DD_num_phys_disks;
2087 				pIO_req->DevHandle =
2088 				    sc->DD_column_map[column].dev_handle;
2089 				cm->cm_desc.SCSIIO.DevHandle =
2090 				    pIO_req->DevHandle;
2091 
2092 				physLBA = (stripe_unit <<
2093 				    sc->DD_stripe_exponent) + stripe_offset;
2094 				ptrLBA = &pIO_req->CDB.CDB32[1];
2095 				physLBA_byte = (uint8_t)(physLBA >> 16);
2096 				*ptrLBA = physLBA_byte;
2097 				ptrLBA = &pIO_req->CDB.CDB32[2];
2098 				physLBA_byte = (uint8_t)(physLBA >> 8);
2099 				*ptrLBA = physLBA_byte;
2100 				ptrLBA = &pIO_req->CDB.CDB32[3];
2101 				physLBA_byte = (uint8_t)physLBA;
2102 				*ptrLBA = physLBA_byte;
2103 
2104 				/*
2105 				 * Set flag that Direct Drive I/O is
2106 				 * being done.
2107 				 */
2108 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2109 			}
2110 		}
2111 		return;
2112 	}
2113 
2114 	/*
2115 	 * Handle 10 or 16 byte CDBs.
2116 	 */
2117 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2118 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2119 	    (CDB[0] == WRITE_16))) {
2120 		/*
2121 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2122 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2123 		 * the else section.  10-byte CDB's are OK.
2124 		 */
2125 		if ((CDB[0] < READ_16) ||
2126 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2127 			/*
2128 			 * Get the transfer size in blocks.
2129 			 */
2130 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2131 
2132 			/*
2133 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2134 			 * LBA in the CDB depending on command.
2135 			 */
2136 			lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2137 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2138 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2139 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2140 			    (uint64_t)CDB[lba_idx + 3];
2141 
2142 			/*
2143 			 * Check that LBA range for I/O does not exceed volume's
2144 			 * MaxLBA.
2145 			 */
2146 			if ((virtLBA + (uint64_t)io_size - 1) <=
2147 			    sc->DD_max_lba) {
2148 				/*
2149 				 * Check if the I/O crosses a stripe boundary.
2150 				 * If not, translate the virtual LBA to a
2151 				 * physical LBA and set the DevHandle for the
2152 				 * PhysDisk to be used.  If it does cross a
2153 				 * boundry, do normal I/O.  To get the right
2154 				 * DevHandle to use, get the map number for the
2155 				 * column, then use that map number to look up
2156 				 * the DevHandle of the PhysDisk.
2157 				 */
2158 				stripe_offset = (uint32_t)virtLBA &
2159 				    (sc->DD_stripe_size - 1);
2160 				if ((stripe_offset + io_size) <=
2161 				    sc->DD_stripe_size) {
2162 					physLBA = (uint32_t)virtLBA >>
2163 					    sc->DD_stripe_exponent;
2164 					stripe_unit = physLBA /
2165 					    sc->DD_num_phys_disks;
2166 					column = physLBA %
2167 					    sc->DD_num_phys_disks;
2168 					pIO_req->DevHandle =
2169 					    sc->DD_column_map[column].
2170 					    dev_handle;
2171 					cm->cm_desc.SCSIIO.DevHandle =
2172 					    pIO_req->DevHandle;
2173 
2174 					physLBA = (stripe_unit <<
2175 					    sc->DD_stripe_exponent) +
2176 					    stripe_offset;
2177 					ptrLBA =
2178 					    &pIO_req->CDB.CDB32[lba_idx];
2179 					physLBA_byte = (uint8_t)(physLBA >> 24);
2180 					*ptrLBA = physLBA_byte;
2181 					ptrLBA =
2182 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2183 					physLBA_byte = (uint8_t)(physLBA >> 16);
2184 					*ptrLBA = physLBA_byte;
2185 					ptrLBA =
2186 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2187 					physLBA_byte = (uint8_t)(physLBA >> 8);
2188 					*ptrLBA = physLBA_byte;
2189 					ptrLBA =
2190 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2191 					physLBA_byte = (uint8_t)physLBA;
2192 					*ptrLBA = physLBA_byte;
2193 
2194 					/*
2195 					 * Set flag that Direct Drive I/O is
2196 					 * being done.
2197 					 */
2198 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2199 				}
2200 			}
2201 		} else {
2202 			/*
2203 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2204 			 * 0.  Get the transfer size in blocks.
2205 			 */
2206 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2207 
2208 			/*
2209 			 * Get virtual LBA.
2210 			 */
2211 			virtLBA = ((uint64_t)CDB[2] << 54) |
2212 			    ((uint64_t)CDB[3] << 48) |
2213 			    ((uint64_t)CDB[4] << 40) |
2214 			    ((uint64_t)CDB[5] << 32) |
2215 			    ((uint64_t)CDB[6] << 24) |
2216 			    ((uint64_t)CDB[7] << 16) |
2217 			    ((uint64_t)CDB[8] << 8) |
2218 			    (uint64_t)CDB[9];
2219 
2220 			/*
2221 			 * Check that LBA range for I/O does not exceed volume's
2222 			 * MaxLBA.
2223 			 */
2224 			if ((virtLBA + (uint64_t)io_size - 1) <=
2225 			    sc->DD_max_lba) {
2226 				/*
2227 				 * Check if the I/O crosses a stripe boundary.
2228 				 * If not, translate the virtual LBA to a
2229 				 * physical LBA and set the DevHandle for the
2230 				 * PhysDisk to be used.  If it does cross a
2231 				 * boundry, do normal I/O.  To get the right
2232 				 * DevHandle to use, get the map number for the
2233 				 * column, then use that map number to look up
2234 				 * the DevHandle of the PhysDisk.
2235 				 */
2236 				stripe_offset = (uint32_t)virtLBA &
2237 				    (sc->DD_stripe_size - 1);
2238 				if ((stripe_offset + io_size) <=
2239 				    sc->DD_stripe_size) {
2240 					physLBA = (uint32_t)(virtLBA >>
2241 					    sc->DD_stripe_exponent);
2242 					stripe_unit = physLBA /
2243 					    sc->DD_num_phys_disks;
2244 					column = physLBA %
2245 					    sc->DD_num_phys_disks;
2246 					pIO_req->DevHandle =
2247 					    sc->DD_column_map[column].
2248 					    dev_handle;
2249 					cm->cm_desc.SCSIIO.DevHandle =
2250 					    pIO_req->DevHandle;
2251 
2252 					physLBA = (stripe_unit <<
2253 					    sc->DD_stripe_exponent) +
2254 					    stripe_offset;
2255 
2256 					/*
2257 					 * Set upper 4 bytes of LBA to 0.  We
2258 					 * assume that the phys disks are less
2259 					 * than 2 TB's in size.  Then, set the
2260 					 * lower 4 bytes.
2261 					 */
2262 					pIO_req->CDB.CDB32[2] = 0;
2263 					pIO_req->CDB.CDB32[3] = 0;
2264 					pIO_req->CDB.CDB32[4] = 0;
2265 					pIO_req->CDB.CDB32[5] = 0;
2266 					ptrLBA = &pIO_req->CDB.CDB32[6];
2267 					physLBA_byte = (uint8_t)(physLBA >> 24);
2268 					*ptrLBA = physLBA_byte;
2269 					ptrLBA = &pIO_req->CDB.CDB32[7];
2270 					physLBA_byte = (uint8_t)(physLBA >> 16);
2271 					*ptrLBA = physLBA_byte;
2272 					ptrLBA = &pIO_req->CDB.CDB32[8];
2273 					physLBA_byte = (uint8_t)(physLBA >> 8);
2274 					*ptrLBA = physLBA_byte;
2275 					ptrLBA = &pIO_req->CDB.CDB32[9];
2276 					physLBA_byte = (uint8_t)physLBA;
2277 					*ptrLBA = physLBA_byte;
2278 
2279 					/*
2280 					 * Set flag that Direct Drive I/O is
2281 					 * being done.
2282 					 */
2283 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2284 				}
2285 			}
2286 		}
2287 	}
2288 }
2289 
2290 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands issued by
 * mpssas_send_smpcmd().  Translates the MPI2 reply into a CAM status on the
 * CCB, then syncs/unloads the data map, frees the command, and completes the
 * CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
        }

	/* No reply frame at all means the command failed outright. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian request. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Both the IOC status and the SAS-level status must be SUCCESS. */
	if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, rpl->IOCStatus, rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result code. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2353 
/*
 * Build and submit an SMP passthrough request to the device at 'sasaddr'.
 * The request and response buffers come from the XPT_SMP_IO CCB; they are
 * mapped as a two-element uio (one iovec for the request, one for the
 * response) through mps_map_command().  Completion is handled by
 * mpssas_smpio_complete().  On any setup failure the CCB is completed
 * immediately with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mps_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_printf(sc, "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		/* Plain single buffers: use the CCB pointers directly. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = ccb->smpio.smp_request_len;
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 receives the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = req->RequestDataLength;
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2519 
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to, then hand off to mpssas_send_smpcmd().  The address
 * is taken from the target itself if it is an SMP target, otherwise from
 * its parent (normally the attached expander).  Any failure completes the
 * CCB with an error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_printf(sc, "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		/* Without a parent handle there is nothing to route to. */
		if (targ->parent_handle == 0x0) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/*
		 * Use the parent device info cached on the target itself
		 * rather than looking the parent target up by handle.
		 */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_printf(sc, "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	/* Still nothing: give up rather than send to address 0. */
	if (sasaddr == 0) {
		mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2646 #endif //__FreeBSD_version >= 900026
2647 
/*
 * Handle an XPT_RESET_DEV CCB: issue a target-reset task management
 * request (with a hard link reset) for the CCB's target.  Completion is
 * handled asynchronously by mpssas_resetdev_complete(); the CCB is only
 * completed here if a command cannot be allocated.  Must be called with
 * the softc lock held.
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	mps_dprint(sassc->sc, MPS_TRACE, __func__);
	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	/* Build the target-reset TM request for this target's handle. */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* TM requests carry no data and go on the high-priority queue. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	mps_map_command(sc, tm);
}
2683 
2684 static void
2685 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2686 {
2687 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2688 	union ccb *ccb;
2689 
2690 	mps_dprint(sc, MPS_TRACE, __func__);
2691 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2692 
2693 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2694 	ccb = tm->cm_complete_data;
2695 
2696 	/*
2697 	 * Currently there should be no way we can hit this case.  It only
2698 	 * happens when we have a failure to allocate chain frames, and
2699 	 * task management commands don't have S/G lists.
2700 	 */
2701 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2702 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2703 
2704 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2705 
2706 		mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2707 			   "This should not happen!\n", __func__, tm->cm_flags,
2708 			   req->DevHandle);
2709 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2710 		goto bailout;
2711 	}
2712 
2713 	kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2714 	    resp->IOCStatus, resp->ResponseCode);
2715 
2716 	if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2717 		ccb->ccb_h.status = CAM_REQ_CMP;
2718 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2719 		    CAM_LUN_WILDCARD);
2720 	}
2721 	else
2722 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2723 
2724 bailout:
2725 
2726 	mpssas_free_tm(sc, tm);
2727 	xpt_done(ccb);
2728 }
2729 
2730 static void
2731 mpssas_poll(struct cam_sim *sim)
2732 {
2733 	struct mpssas_softc *sassc;
2734 
2735 	sassc = cam_sim_softc(sim);
2736 
2737 	if (sassc->sc->mps_debug & MPS_TRACE) {
2738 		/* frequent debug messages during a panic just slow
2739 		 * everything down too much.
2740 		 */
2741 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2742 		sassc->sc->mps_debug &= ~MPS_TRACE;
2743 	}
2744 
2745 	mps_intr_locked(sassc->sc);
2746 }
2747 
2748 static void
2749 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2750 {
2751 	struct mpssas_softc *sassc;
2752 	char path_str[64];
2753 
2754 	if (done_ccb == NULL)
2755 		return;
2756 
2757 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2758 
2759 	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2760 
2761 	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2762 	mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2763 
2764 	xpt_free_path(done_ccb->ccb_h.path);
2765 	xpt_free_ccb(done_ccb);
2766 
2767 #if __FreeBSD_version < 1000006
2768 	/*
2769 	 * Before completing scan, get EEDP stuff for all of the existing
2770 	 * targets.
2771 	 */
2772 	mpssas_check_eedp(sassc);
2773 #endif
2774 
2775 }
2776 
2777 /* thread to handle bus rescans */
/*
 * Kernel thread that performs bus rescans.  It sleeps on ccb_scanq until
 * mpssas_rescan() queues a CCB and wakes it, then dequeues and dispatches
 * CCBs one at a time.  The thread exits when MPSSAS_SHUTDOWN is set,
 * clearing MPSSAS_SCANTHREAD and waking anyone waiting on the flags.
 */
static void
mpssas_scanner_thread(void *arg)
{
	struct mpssas_softc *sassc;
	struct mps_softc *sc;
	union ccb	*ccb;

	sassc = (struct mpssas_softc *)arg;
	sc = sassc->sc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	for (;;) {
		/* Sleep releases mps_lock and reacquires it on wakeup. */
		lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 0);
		if (sassc->flags & MPSSAS_SHUTDOWN) {
			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
			break;
		}
		ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
		if (ccb == NULL)
			continue;
		TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
		xpt_action(ccb);
	}

	/* Signal shutdown completion to whoever is waiting on the flags. */
	sassc->flags &= ~MPSSAS_SCANTHREAD;
	wakeup(&sassc->flags);
	mps_unlock(sc);
	mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
	mps_kproc_exit(0);
}
2810 
2811 static void
2812 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2813 {
2814 	char path_str[64];
2815 
2816 	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2817 
2818 	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2819 
2820 	if (ccb == NULL)
2821 		return;
2822 
2823 	xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2824 	mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2825 
2826 	/* Prepare request */
2827 	ccb->ccb_h.ppriv_ptr1 = sassc;
2828 	ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2829 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2830 	TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2831 	wakeup(&sassc->ccb_scanq);
2832 }
2833 
#if __FreeBSD_version >= 1000006
/*
 * CAM asynchronous event callback.  Registered so the driver hears
 * AC_ADVINFO_CHANGED notifications; when the changed advanced-info type
 * is the long read-capacity data, refresh the per-LUN EEDP (protection
 * information) state for the affected device.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* For AC_ADVINFO_CHANGED, 'arg' encodes the advinfo type. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We're only interested in devices that are attached to
		 * this controller.
		 */
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* See whether we already track this LUN on the target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			/*
			 * First time we see this LUN: add a tracking entry.
			 * NOTE(review): M_INTWAIT allocations are expected
			 * not to fail, so the NULL check below looks purely
			 * defensive — confirm against kmalloc(9).
			 */
			lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_INTWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Issue an XPT_DEV_ADVINFO request to pull the cached
		 * READ CAPACITY(16) data for this path into rcap_buf.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		/* Undo any device-queue freeze left by the request. */
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * PROT_EN set in the read-capacity data means the LUN is
		 * formatted with protection information (EEDP); otherwise
		 * clear the cached EEDP state.
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
2927 #else /* __FreeBSD_version >= 1000006 */
2928 
/*
 * Probe every known LUN of every discovered target with a READ
 * CAPACITY(16) command so its EEDP (protection information) state can
 * be recorded.  Used on older CAM stacks that lack AC_ADVINFO_CHANGED;
 * the results are consumed asynchronously by mpssas_read_cap_done().
 */
static void
mpssas_check_eedp(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	union ccb *ccb;
	path_id_t pathid = cam_sim_path(sassc->sim);
	target_id_t targetid;
	lun_id_t lunid;
	struct cam_periph *found_periph;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	uint8_t	found_lun;

	/*
	 * Issue a READ CAPACITY 16 command to each LUN of each target.  This
	 * info is used to determine if the LUN is formatted for EEDP support.
	 */
	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
		target = &sassc->targets[targetid];
		/* handle == 0 means no device at this target ID. */
		if (target->handle == 0x0) {
			continue;
		}

		/* Walk LUN IDs upward until cam_periph_find() misses. */
		lunid = 0;
		do {
			rcap_buf =
			    kmalloc(sizeof(struct scsi_read_capacity_eedp),
			    M_MPT2, M_INTWAIT | M_ZERO);
			if (rcap_buf == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
				    "capacity buffer for EEDP support.\n");
				return;
			}

			/*
			 * NOTE(review): this CCB is kmalloc'd with M_TEMP but
			 * released via xpt_free_ccb() here and in
			 * mpssas_read_cap_done() — confirm xpt_free_ccb()'s
			 * malloc-type pairing on this platform.
			 */
			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    pathid, targetid, lunid) != CAM_REQ_CMP) {
				mps_dprint(sc, MPS_FAULT, "Unable to create "
				    "path for EEDP support\n");
				kfree(rcap_buf, M_MPT2);
				xpt_free_ccb(ccb);
				return;
			}

			/*
			 * If a periph is returned, the LUN exists.  Create an
			 * entry in the target's LUN list.
			 */
			if ((found_periph = cam_periph_find(ccb->ccb_h.path,
			    NULL)) != NULL) {
				/*
				 * If LUN is already in list, don't create a new
				 * one.
				 */
				found_lun = FALSE;
				SLIST_FOREACH(lun, &target->luns, lun_link) {
					if (lun->lun_id == lunid) {
						found_lun = TRUE;
						break;
					}
				}
				if (!found_lun) {
					lun = kmalloc(sizeof(struct mpssas_lun),
					    M_MPT2, M_WAITOK | M_ZERO);
					lun->lun_id = lunid;
					SLIST_INSERT_HEAD(&target->luns, lun,
					    lun_link);
				}
				lunid++;

				/*
				 * Issue a READ CAPACITY 16 command for the LUN.
				 * The mpssas_read_cap_done function will load
				 * the read cap info into the LUN struct.
				 * Ownership of both 'ccb' and 'rcap_buf'
				 * passes to that completion callback, which
				 * frees them.
				 */
				csio = &ccb->csio;
				csio->ccb_h.func_code = XPT_SCSI_IO;
				csio->ccb_h.flags = CAM_DIR_IN;
				csio->ccb_h.retry_count = 4;
				csio->ccb_h.cbfcnp = mpssas_read_cap_done;
				csio->ccb_h.timeout = 60000;
				csio->data_ptr = (uint8_t *)rcap_buf;
				csio->dxfer_len = sizeof(struct
				    scsi_read_capacity_eedp);
				csio->sense_len = MPS_SENSE_LEN;
				csio->cdb_len = sizeof(*scsi_cmd);
				csio->tag_action = MSG_SIMPLE_Q_TAG;

				/* Build the READ CAPACITY(16) CDB by hand. */
				scsi_cmd = (struct scsi_read_capacity_16 *)
				    &csio->cdb_io.cdb_bytes;
				bzero(scsi_cmd, sizeof(*scsi_cmd));
				scsi_cmd->opcode = 0x9E;
				scsi_cmd->service_action = SRC16_SERVICE_ACTION;
				/* Allocation length (byte 13 of the CDB). */
				((uint8_t *)scsi_cmd)[13] = sizeof(struct
				    scsi_read_capacity_eedp);

				/*
				 * Set the path, target and lun IDs for the READ
				 * CAPACITY request.
				 */
				ccb->ccb_h.path_id =
				    xpt_path_path_id(ccb->ccb_h.path);
				ccb->ccb_h.target_id =
				    xpt_path_target_id(ccb->ccb_h.path);
				ccb->ccb_h.target_lun =
				    xpt_path_lun_id(ccb->ccb_h.path);

				ccb->ccb_h.ppriv_ptr1 = sassc;
				xpt_action(ccb);
			} else {
				/* No periph: this LUN does not exist; clean up. */
				kfree(rcap_buf, M_MPT2);
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
			}
		} while (found_periph);
	}
}
3051 
3052 
3053 static void
3054 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3055 {
3056 	struct mpssas_softc *sassc;
3057 	struct mpssas_target *target;
3058 	struct mpssas_lun *lun;
3059 	struct scsi_read_capacity_eedp *rcap_buf;
3060 
3061 	if (done_ccb == NULL)
3062 		return;
3063 
3064 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3065 
3066 	/*
3067 	 * Get the LUN ID for the path and look it up in the LUN list for the
3068 	 * target.
3069 	 */
3070 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3071 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3072 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3073 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3074 			continue;
3075 
3076 		/*
3077 		 * Got the LUN in the target's LUN list.  Fill it in
3078 		 * with EEDP info.  If the READ CAP 16 command had some
3079 		 * SCSI error (common if command is not supported), mark
3080 		 * the lun as not supporting EEDP and set the block size
3081 		 * to 0.
3082 		 */
3083 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3084 		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3085 			lun->eedp_formatted = FALSE;
3086 			lun->eedp_block_size = 0;
3087 			break;
3088 		}
3089 
3090 		if (rcap_buf->protect & 0x01) {
3091 			lun->eedp_formatted = TRUE;
3092 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3093 		}
3094 		break;
3095 	}
3096 
3097 	// Finished with this CCB and path.
3098 	kfree(rcap_buf, M_MPT2);
3099 	xpt_free_path(done_ccb->ccb_h.path);
3100 	xpt_free_ccb(done_ccb);
3101 }
3102 #endif /* __FreeBSD_version >= 1000006 */
3103 
3104 int
3105 mpssas_startup(struct mps_softc *sc)
3106 {
3107 	struct mpssas_softc *sassc;
3108 
3109 	/*
3110 	 * Send the port enable message and set the wait_for_port_enable flag.
3111 	 * This flag helps to keep the simq frozen until all discovery events
3112 	 * are processed.
3113 	 */
3114 	sassc = sc->sassc;
3115 	mpssas_startup_increment(sassc);
3116 	sc->wait_for_port_enable = 1;
3117 	mpssas_send_portenable(sc);
3118 	return (0);
3119 }
3120 
3121 static int
3122 mpssas_send_portenable(struct mps_softc *sc)
3123 {
3124 	MPI2_PORT_ENABLE_REQUEST *request;
3125 	struct mps_command *cm;
3126 
3127 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3128 
3129 	if ((cm = mps_alloc_command(sc)) == NULL)
3130 		return (EBUSY);
3131 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3132 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3133 	request->MsgFlags = 0;
3134 	request->VP_ID = 0;
3135 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3136 	cm->cm_complete = mpssas_portenable_complete;
3137 	cm->cm_data = NULL;
3138 	cm->cm_sge = NULL;
3139 
3140 	mps_map_command(sc, cm);
3141 	mps_dprint(sc, MPS_TRACE,
3142 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3143 	    cm, cm->cm_req, cm->cm_complete);
3144 	return (0);
3145 }
3146 
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mpssas_send_portenable().  Logs any failure, tears down the boot-time
 * config intrhook, optionally hides WarpDrive member disks, and then
 * releases the startup refcount and the simq freeze taken before port
 * enable was issued.
 */
static void
mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	int i;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for port enable! "
			   "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* Failures are logged but not otherwise acted upon. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
	else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");

	mps_free_command(sc, cm);
	/* ich_arg != NULL means the boot-time intrhook is still registered. */
	if (sc->mps_ich.ich_arg != NULL) {
		mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
		config_intrhook_disestablish(&sc->mps_ich);
		sc->mps_ich.ich_arg = NULL;
	}

	/*
	 * Get WarpDrive info after discovery is complete but before the scan
	 * starts.  At this point, all devices are ready to be exposed to the
	 * OS.  If devices should be hidden instead, take them out of the
	 * 'targets' array before the scan.  The devinfo for a disk will have
	 * some info and a volume's will be 0.  Use that to remove disks.
	 */
	mps_wd_config_pages(sc);
	if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
	  && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
	 || (sc->WD_valid_config && (sc->WD_hide_expose ==
	    MPS_WD_HIDE_IF_VOLUME))) {
		for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
			target = &sassc->targets[i];
			/* Non-zero devinfo identifies a member disk; hide it. */
			if (target->devinfo) {
				target->devinfo = 0x0;
				target->encl_handle = 0x0;
				target->encl_slot = 0x0;
				target->handle = 0x0;
				target->tid = 0x0;
				target->linkrate = 0x0;
				target->flags = 0x0;
			}
		}
	}

	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.  Since the simq was explicitly frozen before port
	 * enable, it must be explicitly released here to keep the
	 * freeze/release count in sync.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	mpssas_startup_decrement(sassc);
	xpt_release_simq(sassc->sim, 1);
}
3220