xref: /dragonfly/sys/dev/raid/mps/mps_sas.c (revision 783d47c4)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2011 LSI Corp.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms, with or without
31  * modification, are permitted provided that the following conditions
32  * are met:
33  * 1. Redistributions of source code must retain the above copyright
34  *    notice, this list of conditions and the following disclaimer.
35  * 2. Redistributions in binary form must reproduce the above copyright
36  *    notice, this list of conditions and the following disclaimer in the
37  *    documentation and/or other materials provided with the distribution.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49  * SUCH DAMAGE.
50  *
51  * LSI MPT-Fusion Host Adapter FreeBSD
52  *
53  * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
54  */
55 
56 /* Communications core for LSI MPT2 */
57 
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
64 #include <sys/bus.h>
65 #include <sys/conf.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
68 #include <sys/bio.h>
69 #include <sys/malloc.h>
70 #include <sys/uio.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
76 #include <sys/sbuf.h>
77 
78 #include <sys/rman.h>
79 
80 #include <machine/stdarg.h>
81 
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
94 #endif
95 
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
107 
108 #define MPSSAS_DISCOVERY_TIMEOUT	20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
110 
/*
 * static array to check SCSI OpCode for EEDP protection bits
 *
 * Indexed by the first CDB byte (the SCSI opcode).  A non-zero entry
 * gives the EEDP flag set to apply: PRO_R for reads (check and remove
 * protection information), PRO_W/PRO_V for writes and verifies (insert
 * protection information).  Covers the READ/WRITE/VERIFY 10/12/16
 * families plus WRITE SAME; every other opcode maps to 0 (no EEDP).
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE&VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE&VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE&VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
135 
136 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
137 
138 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
139 static void mpssas_log_command(struct mps_command *, const char *, ...)
140 		__printflike(2, 3);
141 #if 0 /* XXX unused */
142 static void mpssas_discovery_timeout(void *data);
143 #endif
144 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
145 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
146 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
147 static void mpssas_poll(struct cam_sim *sim);
148 static void mpssas_scsiio_timeout(void *data);
149 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
150 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
151     struct mps_command *cm, union ccb *ccb);
152 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
153 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
154 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
155 #if __FreeBSD_version >= 900026
156 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
157 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
158 			       uint64_t sasaddr);
159 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
160 #endif //FreeBSD_version >= 900026
161 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
162 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
163 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
164 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
165 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
166 static void mpssas_scanner_thread(void *arg);
167 #if __FreeBSD_version >= 1000006
168 static void mpssas_async(void *callback_arg, uint32_t code,
169 			 struct cam_path *path, void *arg);
170 #else
171 static void mpssas_check_eedp(struct mpssas_softc *sassc);
172 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
173 #endif
174 static int mpssas_send_portenable(struct mps_softc *sc);
175 static void mpssas_portenable_complete(struct mps_softc *sc,
176     struct mps_command *cm);
177 
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
180 {
181 	struct mpssas_target *target;
182 	int i;
183 
184 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 		target = &sassc->targets[i];
186 		if (target->handle == handle)
187 			return (target);
188 	}
189 
190 	return (NULL);
191 }
192 
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194  * commands before device handles have been found by discovery.  Since
195  * discovery involves reading config pages and possibly sending commands,
196  * discovery actions may continue even after we receive the end of discovery
197  * event, so refcount discovery actions instead of assuming we can unfreeze
198  * the simq when we get the event.
199  */
200 void
201 mpssas_startup_increment(struct mpssas_softc *sassc)
202 {
203 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
204 		if (sassc->startup_refcount++ == 0) {
205 			/* just starting, freeze the simq */
206 			mps_dprint(sassc->sc, MPS_INFO,
207 			    "%s freezing simq\n", __func__);
208 			xpt_freeze_simq(sassc->sim, 1);
209 		}
210 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
211 		    sassc->startup_refcount);
212 	}
213 }
214 
215 void
216 mpssas_startup_decrement(struct mpssas_softc *sassc)
217 {
218 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
219 		if (--sassc->startup_refcount == 0) {
220 			/* finished all discovery-related actions, release
221 			 * the simq and rescan for the latest topology.
222 			 */
223 			mps_dprint(sassc->sc, MPS_INFO,
224 			    "%s releasing simq\n", __func__);
225 			sassc->flags &= ~MPSSAS_IN_STARTUP;
226 			xpt_release_simq(sassc->sim, 1);
227 			mpssas_rescan_target(sassc->sc, NULL);
228 		}
229 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
230 		    sassc->startup_refcount);
231 	}
232 }
233 
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235  * management, so refcount the TMs and keep the simq frozen when any are in
236  * use.
237  */
238 struct mps_command *
239 mpssas_alloc_tm(struct mps_softc *sc)
240 {
241 	struct mps_command *tm;
242 
243 	tm = mps_alloc_high_priority_command(sc);
244 	if (tm != NULL) {
245 		if (sc->sassc->tm_count++ == 0) {
246 			mps_printf(sc, "%s freezing simq\n", __func__);
247 			xpt_freeze_simq(sc->sassc->sim, 1);
248 		}
249 		mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
250 		    sc->sassc->tm_count);
251 	}
252 	return tm;
253 }
254 
255 void
256 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
257 {
258 	if (tm == NULL)
259 		return;
260 
261 	/* if there are no TMs in use, we can release the simq.  We use our
262 	 * own refcount so that it's easier for a diag reset to cleanup and
263 	 * release the simq.
264 	 */
265 	if (--sc->sassc->tm_count == 0) {
266 		mps_printf(sc, "%s releasing simq\n", __func__);
267 		xpt_release_simq(sc->sassc->sim, 1);
268 	}
269 	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
270 	    sc->sassc->tm_count);
271 
272 	mps_free_high_priority_command(sc, tm);
273 }
274 
275 
/*
 * Schedule a CAM rescan covering the given target, or the whole bus
 * when targ is NULL.  The target id is the entry's index into the
 * sassc->targets array.
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);

	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
		/*
		 * NOTE(review): the CCB was allocated with kmalloc(M_TEMP)
		 * but is released via xpt_free_ccb() -- confirm the two
		 * are compatible on this platform.
		 */
		xpt_free_ccb(ccb);
		return;
	}

	/* XXX Hardwired to scan the bus for now */
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	/* Hand the CCB to the scanner thread via mpssas_rescan(). */
	mpssas_rescan(sassc, ccb);
}
307 
/*
 * Format and print one log line for a command.  When the command has a
 * CCB attached, the CAM path string (and, for SCSI I/O, the CDB text
 * plus transfer length) prefixes the line; otherwise a "noperiph"
 * prefix naming the sim/bus/target/lun is synthesized.  The caller's
 * printf-style message is appended after the SMID.
 */
static void
mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
	struct sbuf sb;
	__va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* Fixed-size sbuf backed by str[]; long lines truncate, not grow. */
	sbuf_new(&sb, str, sizeof(str), 0);

	__va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: build a path-like prefix from the sim by hand. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	kprintf("%s", sbuf_data(&sb));

	__va_end(ap);
}
349 
350 static void
351 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
352 {
353 	struct mpssas_softc *sassc = sc->sassc;
354 	path_id_t pathid = cam_sim_path(sassc->sim);
355 	struct cam_path *path;
356 
357 	mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
358 	if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
359 		mps_printf(sc, "unable to create path for lost target %d\n",
360 		    targ->tid);
361 		return;
362 	}
363 
364 	xpt_async(AC_LOST_DEVICE, path, NULL);
365 	xpt_free_path(path);
366 }
367 
368 /*
369  * The MPT2 firmware performs debounce on the link to avoid transient link
370  * errors and false removals.  When it does decide that link has been lost
371  * and a device need to go away, it expects that the host will perform a
372  * target reset and then an op remove.  The reset has the side-effect of
373  * aborting any outstanding requests for the device, which is required for
374  * the op-remove to succeed.  It's not clear if the host should check for
375  * the device coming back alive after the reset.
376  */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);

	/*
	 * If this is a WD controller, determine if the disk should be exposed
	 * to the OS or not.  If disk should be exposed, return from this
	 * function without doing anything.
	 */
	sc = sassc->sc;
	if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
	    MPS_WD_EXPOSE_ALWAYS)) {
		return;
	}

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		kprintf("%s: invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* Allocating a TM also freezes the simq until the TM is freed. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: command alloc failure\n", __func__);
		return;
	}

	/* Tell CAM the device is gone before firing off the reset. */
	mpssas_lost_target(sc, targ);

	/* Step one of the removal: a target reset to abort outstanding I/O. */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* Step two (the op-remove) happens in the completion handler. */
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
432 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On a successful reset the same command is
 * reused to send the SAS_OP_REMOVE_DEVICE IO-unit control request, and
 * any commands still queued on the target are completed back to CAM
 * with CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
		   reply->IOCStatus, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
	    reply->TerminationCount);
	/* Done with the reset reply; release it before reusing 'tm'. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = handle;
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * NOTE: 'tm' is deliberately reused as the loop iterator here;
	 * the remove request built above has already been handed off
	 * via mps_map_command().
	 */
	TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
505 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request sent by
 * mpssas_remove_device().  On success the target entry's handles are
 * cleared; on failure (or NULL reply from a chip reset) the entry is
 * left alone so the devname/sasaddr survive -- see comment below.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, reply->IOCStatus);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
	}

	mpssas_free_tm(sc, tm);
}
561 
562 static int
563 mpssas_register_events(struct mps_softc *sc)
564 {
565 	uint8_t events[16];
566 
567 	bzero(events, 16);
568 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
569 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
570 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
571 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
572 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
573 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
574 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
575 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
576 	setbit(events, MPI2_EVENT_IR_VOLUME);
577 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
578 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
579 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
580 
581 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
582 	    &sc->sassc->mpssas_eh);
583 
584 	return (0);
585 }
586 
/*
 * Allocate and wire up the SAS/CAM side of the driver: target table,
 * simq, sim, firmware-event taskqueue, rescan thread, and the CAM bus.
 * The simq starts frozen and is released once discovery completes (via
 * mpssas_startup_decrement()).  On any failure the partially built
 * state is torn down through mps_detach_sas().
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
#if __FreeBSD_version >= 1000006
	cam_status status;
#endif
	int unit, error = 0;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	/* One target slot per possible firmware target id. */
	sassc->targets = kmalloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	TAILQ_INIT(&sassc->ccb_scanq);
	error = mps_kproc_create(mpssas_scanner_thread, sassc,
	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
	if (error) {
		mps_printf(sc, "Error %d starting rescan thread\n", error);
		goto out;
	}

	mps_lock(sc);
	sassc->flags |= MPSSAS_SCANTHREAD;

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.  Freezing
	 * the simq will prevent the CAM boottime scanner from running
	 * before discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sassc->sim, 1);
	sc->sassc->startup_refcount = 0;

	callout_init_mp(&sassc->discovery_callout);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

#if __FreeBSD_version >= 1000006
	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x registering async handler for "
			   "AC_ADVINFO_CHANGED events\n", status);
	}
#endif

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
683 
/*
 * Tear down everything mps_attach_sas() set up.  Also used as the
 * attach error path, so each resource is checked before release.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
#if __FreeBSD_version >= 1000006
	xpt_register_async(0, mpssas_async, sc, NULL);
#endif

	/* Drop the startup freeze so the simq isn't left frozen. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim);
	}

	if (sassc->flags & MPSSAS_SCANTHREAD) {
		sassc->flags |= MPSSAS_SHUTDOWN;
		wakeup(&sassc->ccb_scanq);

		/*
		 * Wait (bounded at 30s) for the scanner thread to exit;
		 * presumably mpssas_scanner_thread() clears
		 * MPSSAS_SCANTHREAD and wakes &sassc->flags on its way
		 * out -- that code is outside this view, so confirm.
		 */
		if (sassc->flags & MPSSAS_SCANTHREAD) {
			lksleep(&sassc->flags, &sc->mps_lock, 0,
			       "mps_shutdown", 30 * hz);
		}
	}
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_release(sassc->devq);

	kfree(sassc->targets, M_MPT2);
	kfree(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
741 
742 void
743 mpssas_discovery_end(struct mpssas_softc *sassc)
744 {
745 	struct mps_softc *sc = sassc->sc;
746 
747 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
748 
749 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
750 		callout_stop(&sassc->discovery_callout);
751 
752 }
753 
#if 0 /* XXX unused */
/*
 * Discovery watchdog: discovery did not finish in time, so poll the
 * chip in case interrupts are broken, then either wrap up discovery or
 * re-arm the timeout, giving up after MPSSAS_MAX_DISCOVERY_TIMEOUTS
 * attempts.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	mps_printf(sc,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_printf(sassc->sc,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still discovering: re-arm the watchdog. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			/* Too many retries: give up and proceed anyway. */
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
#endif
795 
/*
 * CAM action entry point for the sim.  Path-inquiry, transfer-settings
 * and geometry CCBs are handled inline and finished via xpt_done() at
 * the bottom; SCSI I/O, device reset and (on newer CAM) SMP I/O are
 * dispatched to handlers that complete the CCB themselves, hence the
 * early returns.  Called with the mps lock held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 0;
		/* Initiator id kept outside the addressable target range. */
		cpi->initiator_id = 255;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device at this target id. */
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_TID_INVALID;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Translate the stored link-rate code into a bitrate. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
		/* Handler completes the CCB itself. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_printf(sassc->sc, "mpssas_action faking success for "
			   "abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		/* Handler completes the CCB itself. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
912 
913 static void
914 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
915     target_id_t target_id, lun_id_t lun_id)
916 {
917 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
918 	struct cam_path *path;
919 
920 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
921 	    ac_code, target_id, lun_id);
922 
923 	if (xpt_create_path(&path, NULL,
924 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
925 		mps_printf(sc, "unable to create path for reset "
926 			   "notification\n");
927 		return;
928 	}
929 
930 	xpt_async(ac_code, path, NULL);
931 	xpt_free_path(path);
932 }
933 
/*
 * Complete, or wake the sleeper of, every active command with a NULL
 * reply.  Used during diag-reset cleanup once the hardware no longer
 * owns any requests.  Called with the mps lock held (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	mps_printf(sc, "%s\n", __func__);
	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands watch MPS_CM_FLAGS_COMPLETE; set it. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
979 
980 void
981 mpssas_handle_reinit(struct mps_softc *sc)
982 {
983 	int i;
984 
985 	/* Go back into startup mode and freeze the simq, so that CAM
986 	 * doesn't send any commands until after we've rediscovered all
987 	 * targets and found the proper device handles for them.
988 	 *
989 	 * After the reset, portenable will trigger discovery, and after all
990 	 * discovery-related activities have finished, the simq will be
991 	 * released.
992 	 */
993 	mps_printf(sc, "%s startup\n", __func__);
994 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
995 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
996 	xpt_freeze_simq(sc->sassc->sim, 1);
997 
998 	/* notify CAM of a bus reset */
999 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1000 	    CAM_LUN_WILDCARD);
1001 
1002 	/* complete and cleanup after all outstanding commands */
1003 	mpssas_complete_all_commands(sc);
1004 
1005 	mps_printf(sc, "%s startup %u tm %u after command completion\n",
1006 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1007 
1008 	/*
1009 	 * The simq was explicitly frozen above, so set the refcount to 0.
1010 	 * The simq will be explicitly released after port enable completes.
1011 	 */
1012 	sc->sassc->startup_refcount = 0;
1013 
1014 	/* zero all the target handles, since they may change after the
1015 	 * reset, and we have to rediscover all the targets and use the new
1016 	 * handles.
1017 	 */
1018 	for (i = 0; i < sc->facts->MaxTargets; i++) {
1019 		if (sc->sassc->targets[i].outstanding != 0)
1020 			mps_printf(sc, "target %u outstanding %u\n",
1021 			    i, sc->sassc->targets[i].outstanding);
1022 		sc->sassc->targets[i].handle = 0x0;
1023 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1024 		sc->sassc->targets[i].outstanding = 0;
1025 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1026 	}
1027 }
1028 static void
1029 mpssas_tm_timeout(void *data)
1030 {
1031 	struct mps_command *tm = data;
1032 	struct mps_softc *sc = tm->cm_sc;
1033 
1034 	mps_lock(sc);
1035 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1036 	mps_reinit(sc);
1037 	mps_unlock(sc);
1038 }
1039 
/*
 * Completion callback for a LOGICAL UNIT RESET task management command.
 * Depending on the outcome, recovery either finishes for this LU, moves
 * on to abort the next timed-out command on the target, or escalates to
 * a full target reset.  The TM command (tm) is reused or freed here;
 * targ->tm ownership is released only when recovery is truly over.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* The TM completed, so cancel the escalation watchdog. */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	/*
	 * A NULL reply means the command was completed artificially (no
	 * firmware response).  If that was because of a diag reset, just
	 * clean up; otherwise the firmware state is unknown and we must
	 * reinit the controller.
	 */
	if (reply == NULL) {
		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	/* See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpssas_log_command(tm,
		    "logical unit %u finished recovery after reset\n",
		    tm->cm_lun);

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    tm->cm_lun);

		/* we've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* reuse this TM command to abort the next victim */
			mpssas_send_abort(sc, tm, cm);
		}
		else {
			/* no more recovery work; release the TM command */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	}
	else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpssas_log_command(tm,
		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
		    tm, cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1127 
/*
 * Completion callback for a TARGET RESET task management command.  If
 * the target no longer has outstanding commands, recovery is done and
 * the TM is freed; otherwise the reset is considered to have failed and
 * the driver escalates to a full controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mpssas_target *targ;

	/* The TM completed, so cancel the escalation watchdog. */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	/*
	 * A NULL reply means the command was completed artificially (no
	 * firmware response).  If that was because of a diag reset, just
	 * clean up; otherwise the firmware state is unknown and we must
	 * reinit the controller.
	 */
	if (reply == NULL) {
		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "target reset status 0x%x code 0x%x count %u\n",
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	/* The reset completed (one way or another); clear the flag. */
	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1197 
1198 #define MPS_RESET_TIMEOUT 30
1199 
1200 static int
1201 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1202 {
1203 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1204 	struct mpssas_target *target;
1205 	int err;
1206 
1207 	target = tm->cm_targ;
1208 	if (target->handle == 0) {
1209 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1210 		    __func__, target->tid);
1211 		return -1;
1212 	}
1213 
1214 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1215 	req->DevHandle = target->handle;
1216 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1217 	req->TaskType = type;
1218 
1219 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1220 		/* XXX Need to handle invalid LUNs */
1221 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1222 		tm->cm_targ->logical_unit_resets++;
1223 		mpssas_log_command(tm, "sending logical unit reset\n");
1224 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1225 	}
1226 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1227 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1228 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1229 		tm->cm_targ->target_resets++;
1230 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1231 		mpssas_log_command(tm, "sending target reset\n");
1232 		tm->cm_complete = mpssas_target_reset_complete;
1233 	}
1234 	else {
1235 		mps_printf(sc, "unexpected reset type 0x%x\n", type);
1236 		return -1;
1237 	}
1238 
1239 	tm->cm_data = NULL;
1240 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1241 	tm->cm_complete_data = (void *)tm;
1242 
1243 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1244 	    mpssas_tm_timeout, tm);
1245 
1246 	err = mps_map_command(sc, tm);
1247 	if (err)
1248 		mpssas_log_command(tm,
1249 		    "error %d sending reset type %u\n",
1250 		    err, type);
1251 
1252 	return err;
1253 }
1254 
1255 
/*
 * Completion callback for an ABORT_TASK task management command.
 * Inspects the target's timed-out command list to decide whether
 * recovery is finished, whether the next timed-out command should be
 * aborted, or whether the abort failed and must be escalated to a
 * logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* The TM completed, so cancel the escalation watchdog. */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, req->TaskMID);
		mpssas_free_tm(sc, tm);
		return;
	}

	/*
	 * A NULL reply means the command was completed artificially (no
	 * firmware response).  If that was because of a diag reset, just
	 * clean up; otherwise the firmware state is unknown and we must
	 * reinit the controller.
	 */
	if (reply == NULL) {
		mpssas_log_command(tm,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, req->TaskMID);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    req->TaskMID,
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	/*
	 * The aborted command is removed from the timedout list by its
	 * own completion, so the list head tells us what is left to do.
	 */
	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm,
		    "finished recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (req->TaskMID != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm,
		    "continuing recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm,
		    "abort failed for TaskMID %u tm %p\n",
		    req->TaskMID, tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1337 
1338 #define MPS_ABORT_TIMEOUT 5
1339 
1340 static int
1341 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1342 {
1343 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1344 	struct mpssas_target *targ;
1345 	int err;
1346 
1347 	targ = cm->cm_targ;
1348 	if (targ->handle == 0) {
1349 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1350 		    __func__, cm->cm_ccb->ccb_h.target_id);
1351 		return -1;
1352 	}
1353 
1354 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1355 	req->DevHandle = targ->handle;
1356 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1357 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1358 
1359 	/* XXX Need to handle invalid LUNs */
1360 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1361 
1362 	req->TaskMID = cm->cm_desc.Default.SMID;
1363 
1364 	tm->cm_data = NULL;
1365 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1366 	tm->cm_complete = mpssas_abort_complete;
1367 	tm->cm_complete_data = (void *)tm;
1368 	tm->cm_targ = cm->cm_targ;
1369 	tm->cm_lun = cm->cm_lun;
1370 
1371 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1372 	    mpssas_tm_timeout, tm);
1373 
1374 	targ->aborts++;
1375 
1376 	err = mps_map_command(sc, tm);
1377 	if (err)
1378 		mpssas_log_command(tm,
1379 		    "error %d sending abort for cm %p SMID %u\n",
1380 		    err, cm, req->TaskMID);
1381 	return err;
1382 }
1383 
1384 
/*
 * Callout handler for a SCSI I/O command that has not completed within
 * its timeout.  After confirming the command is still live, it is moved
 * onto the target's timed-out list and recovery is started (or joined,
 * if the target already has a task management command in flight).
 */
static void
mpssas_scsiio_timeout(void *data)
{
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;

	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_state == MPS_CM_STATE_FREE) {
		/* The interrupt flush above completed it; nothing to do. */
		mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_printf(sc, "command timeout with NULL ccb\n");
		return;
	}

	mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
	    cm, cm->cm_ccb);

	targ = cm->cm_targ;
	targ->timeouts++;

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	/* Mark the command timed out and queue it for recovery. */
	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	}
	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_printf(sc, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
		    cm);
	}

}
1458 
1459 static void
1460 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1461 {
1462 	MPI2_SCSI_IO_REQUEST *req;
1463 	struct ccb_scsiio *csio;
1464 	struct mps_softc *sc;
1465 	struct mpssas_target *targ;
1466 	struct mpssas_lun *lun;
1467 	struct mps_command *cm;
1468 	uint8_t i, lba_byte, *ref_tag_addr;
1469 	uint16_t eedp_flags;
1470 
1471 	sc = sassc->sc;
1472 	mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1473 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1474 
1475 	csio = &ccb->csio;
1476 	targ = &sassc->targets[csio->ccb_h.target_id];
1477 	if (targ->handle == 0x0) {
1478 		mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1479 		    __func__, csio->ccb_h.target_id);
1480 		csio->ccb_h.status = CAM_TID_INVALID;
1481 		xpt_done(ccb);
1482 		return;
1483 	}
1484 	/*
1485 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1486 	 * that the volume has timed out.  We want volumes to be enumerated
1487 	 * until they are deleted/removed, not just failed.
1488 	 */
1489 	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1490 		if (targ->devinfo == 0)
1491 			csio->ccb_h.status = CAM_REQ_CMP;
1492 		else
1493 			csio->ccb_h.status = CAM_SEL_TIMEOUT;
1494 		xpt_done(ccb);
1495 		return;
1496 	}
1497 
1498 	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1499 		mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1500 		csio->ccb_h.status = CAM_TID_INVALID;
1501 		xpt_done(ccb);
1502 		return;
1503 	}
1504 
1505 	cm = mps_alloc_command(sc);
1506 	if (cm == NULL) {
1507 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1508 			xpt_freeze_simq(sassc->sim, 1);
1509 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1510 		}
1511 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1512 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1513 		xpt_done(ccb);
1514 		return;
1515 	}
1516 
1517 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1518 	bzero(req, sizeof(*req));
1519 	req->DevHandle = targ->handle;
1520 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1521 	req->MsgFlags = 0;
1522 	req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1523 	req->SenseBufferLength = MPS_SENSE_LEN;
1524 	req->SGLFlags = 0;
1525 	req->ChainOffset = 0;
1526 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1527 	req->SGLOffset1= 0;
1528 	req->SGLOffset2= 0;
1529 	req->SGLOffset3= 0;
1530 	req->SkipCount = 0;
1531 	req->DataLength = csio->dxfer_len;
1532 	req->BidirectionalDataLength = 0;
1533 	req->IoFlags = csio->cdb_len;
1534 	req->EEDPFlags = 0;
1535 
1536 	/* Note: BiDirectional transfers are not supported */
1537 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1538 	case CAM_DIR_IN:
1539 		req->Control = MPI2_SCSIIO_CONTROL_READ;
1540 		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1541 		break;
1542 	case CAM_DIR_OUT:
1543 		req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1544 		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1545 		break;
1546 	case CAM_DIR_NONE:
1547 	default:
1548 		req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1549 		break;
1550 	}
1551 
1552 	/*
1553 	 * It looks like the hardware doesn't require an explicit tag
1554 	 * number for each transaction.  SAM Task Management not supported
1555 	 * at the moment.
1556 	 */
1557 	switch (csio->tag_action) {
1558 	case MSG_HEAD_OF_Q_TAG:
1559 		req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1560 		break;
1561 	case MSG_ORDERED_Q_TAG:
1562 		req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1563 		break;
1564 	case MSG_ACA_TASK:
1565 		req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1566 		break;
1567 	case CAM_TAG_ACTION_NONE:
1568 	case MSG_SIMPLE_Q_TAG:
1569 	default:
1570 		req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1571 		break;
1572 	}
1573 	req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1574 
1575 	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1576 		mps_free_command(sc, cm);
1577 		ccb->ccb_h.status = CAM_LUN_INVALID;
1578 		xpt_done(ccb);
1579 		return;
1580 	}
1581 
1582 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1583 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1584 	else
1585 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1586 	req->IoFlags = csio->cdb_len;
1587 
1588 	/*
1589 	 * Check if EEDP is supported and enabled.  If it is then check if the
1590 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1591 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1592 	 * for EEDP transfer.
1593 	 */
1594 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1595 	if (sc->eedp_enabled && eedp_flags) {
1596 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1597 			if (lun->lun_id == csio->ccb_h.target_lun) {
1598 				break;
1599 			}
1600 		}
1601 
1602 		if ((lun != NULL) && (lun->eedp_formatted)) {
1603 			req->EEDPBlockSize = lun->eedp_block_size;
1604 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1605 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1606 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1607 			req->EEDPFlags = eedp_flags;
1608 
1609 			/*
1610 			 * If CDB less than 32, fill in Primary Ref Tag with
1611 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1612 			 * already there.  Also, set protection bit.  FreeBSD
1613 			 * currently does not support CDBs bigger than 16, but
1614 			 * the code doesn't hurt, and will be here for the
1615 			 * future.
1616 			 */
1617 			if (csio->cdb_len != 32) {
1618 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1619 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1620 				    PrimaryReferenceTag;
1621 				for (i = 0; i < 4; i++) {
1622 					*ref_tag_addr =
1623 					    req->CDB.CDB32[lba_byte + i];
1624 					ref_tag_addr++;
1625 				}
1626 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1627 				    0xFFFF;
1628 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1629 				    0x20;
1630 			} else {
1631 				eedp_flags |=
1632 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1633 				req->EEDPFlags = eedp_flags;
1634 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1635 				    0x1F) | 0x20;
1636 			}
1637 		}
1638 	}
1639 
1640 	cm->cm_data = csio->data_ptr;
1641 	cm->cm_length = csio->dxfer_len;
1642 	cm->cm_sge = &req->SGL;
1643 	cm->cm_sglsize = (32 - 24) * 4;
1644 	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1645 	cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1646 	cm->cm_complete = mpssas_scsiio_complete;
1647 	cm->cm_complete_data = ccb;
1648 	cm->cm_targ = targ;
1649 	cm->cm_lun = csio->ccb_h.target_lun;
1650 	cm->cm_ccb = ccb;
1651 
1652 	/*
1653 	 * If HBA is a WD and the command is not for a retry, try to build a
1654 	 * direct I/O message. If failed, or the command is for a retry, send
1655 	 * the I/O to the IR volume itself.
1656 	 */
1657 	if (sc->WD_valid_config) {
1658 		if (ccb->ccb_h.status != MPS_WD_RETRY) {
1659 			mpssas_direct_drive_io(sassc, cm, ccb);
1660 		} else {
1661 			ccb->ccb_h.status = CAM_REQ_INPROG;
1662 		}
1663 	}
1664 
1665 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1666 	   mpssas_scsiio_timeout, cm);
1667 
1668 	targ->issued++;
1669 	targ->outstanding++;
1670 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1671 
1672 	if ((sc->mps_debug & MPS_TRACE) != 0)
1673 		mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1674 		    __func__, cm, ccb, targ->outstanding);
1675 
1676 	mps_map_command(sc, cm);
1677 	return;
1678 }
1679 
1680 static void
1681 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1682 {
1683 	MPI2_SCSI_IO_REPLY *rep;
1684 	union ccb *ccb;
1685 	struct ccb_scsiio *csio;
1686 	struct mpssas_softc *sassc;
1687 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1688 	u8 *TLR_bits, TLR_on;
1689 	int dir = 0, i;
1690 	u16 alloc_len;
1691 
1692 	mps_dprint(sc, MPS_TRACE,
1693 	    "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1694 	    __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1695 	    cm->cm_targ->outstanding);
1696 
1697 	callout_stop(&cm->cm_callout);
1698 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1699 
1700 	sassc = sc->sassc;
1701 	ccb = cm->cm_complete_data;
1702 	csio = &ccb->csio;
1703 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1704 	/*
1705 	 * XXX KDM if the chain allocation fails, does it matter if we do
1706 	 * the sync and unload here?  It is simpler to do it in every case,
1707 	 * assuming it doesn't cause problems.
1708 	 */
1709 	if (cm->cm_data != NULL) {
1710 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1711 			dir = BUS_DMASYNC_POSTREAD;
1712 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1713 			dir = BUS_DMASYNC_POSTWRITE;;
1714 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1715 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1716 	}
1717 
1718 	cm->cm_targ->completed++;
1719 	cm->cm_targ->outstanding--;
1720 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1721 
1722 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1723 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1724 		if (cm->cm_reply != NULL)
1725 			mpssas_log_command(cm,
1726 			    "completed timedout cm %p ccb %p during recovery "
1727 			    "ioc %x scsi %x state %x xfer %u\n",
1728 			    cm, cm->cm_ccb,
1729 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1730 			    rep->TransferCount);
1731 		else
1732 			mpssas_log_command(cm,
1733 			    "completed timedout cm %p ccb %p during recovery\n",
1734 			    cm, cm->cm_ccb);
1735 	} else if (cm->cm_targ->tm != NULL) {
1736 		if (cm->cm_reply != NULL)
1737 			mpssas_log_command(cm,
1738 			    "completed cm %p ccb %p during recovery "
1739 			    "ioc %x scsi %x state %x xfer %u\n",
1740 			    cm, cm->cm_ccb,
1741 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1742 			    rep->TransferCount);
1743 		else
1744 			mpssas_log_command(cm,
1745 			    "completed cm %p ccb %p during recovery\n",
1746 			    cm, cm->cm_ccb);
1747 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1748 		mpssas_log_command(cm,
1749 		    "reset completed cm %p ccb %p\n",
1750 		    cm, cm->cm_ccb);
1751 	}
1752 
1753 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1754 		/*
1755 		 * We ran into an error after we tried to map the command,
1756 		 * so we're getting a callback without queueing the command
1757 		 * to the hardware.  So we set the status here, and it will
1758 		 * be retained below.  We'll go through the "fast path",
1759 		 * because there can be no reply when we haven't actually
1760 		 * gone out to the hardware.
1761 		 */
1762 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1763 
1764 		/*
1765 		 * Currently the only error included in the mask is
1766 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1767 		 * chain frames.  We need to freeze the queue until we get
1768 		 * a command that completed without this error, which will
1769 		 * hopefully have some chain frames attached that we can
1770 		 * use.  If we wanted to get smarter about it, we would
1771 		 * only unfreeze the queue in this condition when we're
1772 		 * sure that we're getting some chain frames back.  That's
1773 		 * probably unnecessary.
1774 		 */
1775 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1776 			xpt_freeze_simq(sassc->sim, 1);
1777 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1778 			mps_dprint(sc, MPS_INFO, "Error sending command, "
1779 				   "freezing SIM queue\n");
1780 		}
1781 	}
1782 
1783 	/* Take the fast path to completion */
1784 	if (cm->cm_reply == NULL) {
1785 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1786 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1787 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1788 			else {
1789 				ccb->ccb_h.status = CAM_REQ_CMP;
1790 				ccb->csio.scsi_status = SCSI_STATUS_OK;
1791 			}
1792 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1793 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1794 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1795 				mps_dprint(sc, MPS_INFO,
1796 					   "Unfreezing SIM queue\n");
1797 			}
1798 		}
1799 
1800 		/*
1801 		 * There are two scenarios where the status won't be
1802 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
1803 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1804 		 */
1805 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1806 			/*
1807 			 * Freeze the dev queue so that commands are
1808 			 * executed in the correct order with after error
1809 			 * recovery.
1810 			 */
1811 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
1812 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1813 		}
1814 		mps_free_command(sc, cm);
1815 		xpt_done(ccb);
1816 		return;
1817 	}
1818 
1819 	if (sc->mps_debug & MPS_TRACE)
1820 		mpssas_log_command(cm,
1821 		    "ioc %x scsi %x state %x xfer %u\n",
1822 		    rep->IOCStatus, rep->SCSIStatus,
1823 		    rep->SCSIState, rep->TransferCount);
1824 
1825 	/*
1826 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1827 	 * Volume if an error occurred (normal I/O retry).  Use the original
1828 	 * CCB, but set a flag that this will be a retry so that it's sent to
1829 	 * the original volume.  Free the command but reuse the CCB.
1830 	 */
1831 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1832 		mps_free_command(sc, cm);
1833 		ccb->ccb_h.status = MPS_WD_RETRY;
1834 		mpssas_action_scsiio(sassc, ccb);
1835 		return;
1836 	}
1837 
1838 	switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1839 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1840 		csio->resid = cm->cm_length - rep->TransferCount;
1841 		/* FALLTHROUGH */
1842 	case MPI2_IOCSTATUS_SUCCESS:
1843 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1844 
1845 		if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1846 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1847 			mpssas_log_command(cm, "recovered error\n");
1848 
1849 		/* Completion failed at the transport level. */
1850 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1851 		    MPI2_SCSI_STATE_TERMINATED)) {
1852 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1853 			break;
1854 		}
1855 
1856 		/* In a modern packetized environment, an autosense failure
1857 		 * implies that there's not much else that can be done to
1858 		 * recover the command.
1859 		 */
1860 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1861 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1862 			break;
1863 		}
1864 
1865 		/*
1866 		 * CAM doesn't care about SAS Response Info data, but if this is
1867 		 * the state check if TLR should be done.  If not, clear the
1868 		 * TLR_bits for the target.
1869 		 */
1870 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1871 		    ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1872 		    MPS_SCSI_RI_INVALID_FRAME)) {
1873 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1874 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1875 		}
1876 
1877 		/*
1878 		 * Intentionally override the normal SCSI status reporting
1879 		 * for these two cases.  These are likely to happen in a
1880 		 * multi-initiator environment, and we want to make sure that
1881 		 * CAM retries these commands rather than fail them.
1882 		 */
1883 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1884 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1885 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1886 			break;
1887 		}
1888 
1889 		/* Handle normal status and sense */
1890 		csio->scsi_status = rep->SCSIStatus;
1891 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1892 			ccb->ccb_h.status = CAM_REQ_CMP;
1893 		else
1894 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1895 
1896 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1897 			int sense_len, returned_sense_len;
1898 
1899 			returned_sense_len = min(rep->SenseCount,
1900 			    sizeof(struct scsi_sense_data));
1901 			if (returned_sense_len < ccb->csio.sense_len)
1902 				ccb->csio.sense_resid = ccb->csio.sense_len -
1903 					returned_sense_len;
1904 			else
1905 				ccb->csio.sense_resid = 0;
1906 
1907 			sense_len = min(returned_sense_len,
1908 			    ccb->csio.sense_len - ccb->csio.sense_resid);
1909 			bzero(&ccb->csio.sense_data,
1910 			      sizeof(&ccb->csio.sense_data));
1911 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1912 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1913 		}
1914 
1915 		/*
1916 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
1917 		 * and it's page code 0 (Supported Page List), and there is
1918 		 * inquiry data, and this is for a sequential access device, and
1919 		 * the device is an SSP target, and TLR is supported by the
1920 		 * controller, turn the TLR_bits value ON if page 0x90 is
1921 		 * supported.
1922 		 */
1923 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1924 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1925 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1926 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1927 		    T_SEQUENTIAL) && (sc->control_TLR) &&
1928 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
1929 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1930 			vpd_list = (struct scsi_vpd_supported_page_list *)
1931 			    csio->data_ptr;
1932 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1933 			    TLR_bits;
1934 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1935 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1936 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1937 			    csio->cdb_io.cdb_bytes[4];
1938 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1939 				if (vpd_list->list[i] == 0x90) {
1940 					*TLR_bits = TLR_on;
1941 					break;
1942 				}
1943 			}
1944 		}
1945 		break;
1946 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1947 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1948 		/*
1949 		 * If devinfo is 0 this will be a volume.  In that case don't
1950 		 * tell CAM that the volume is not there.  We want volumes to
1951 		 * be enumerated until they are deleted/removed, not just
1952 		 * failed.
1953 		 */
1954 		if (cm->cm_targ->devinfo == 0)
1955 			ccb->ccb_h.status = CAM_REQ_CMP;
1956 		else
1957 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1958 		break;
1959 	case MPI2_IOCSTATUS_INVALID_SGL:
1960 		mps_print_scsiio_cmd(sc, cm);
1961 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1962 		break;
1963 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1964 		/*
1965 		 * This is one of the responses that comes back when an I/O
1966 		 * has been aborted.  If it is because of a timeout that we
1967 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
1968 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
1969 		 * command is the same (it gets retried, subject to the
1970 		 * retry counter), the only difference is what gets printed
1971 		 * on the console.
1972 		 */
1973 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1974 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1975 		else
1976 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1977 		break;
1978 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1979 		/* resid is ignored for this condition */
1980 		csio->resid = 0;
1981 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1982 		break;
1983 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1984 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1985 		/*
1986 		 * Since these are generally external (i.e. hopefully
1987 		 * transient transport-related) errors, retry these without
1988 		 * decrementing the retry count.
1989 		 */
1990 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1991 		mpssas_log_command(cm,
1992 		    "terminated ioc %x scsi %x state %x xfer %u\n",
1993 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1994 		    rep->TransferCount);
1995 		break;
1996 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1997 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
1998 	case MPI2_IOCSTATUS_INVALID_VPID:
1999 	case MPI2_IOCSTATUS_INVALID_FIELD:
2000 	case MPI2_IOCSTATUS_INVALID_STATE:
2001 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2002 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2003 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2004 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2005 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2006 	default:
2007 		mpssas_log_command(cm,
2008 		    "completed ioc %x scsi %x state %x xfer %u\n",
2009 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2010 		    rep->TransferCount);
2011 		csio->resid = cm->cm_length;
2012 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2013 		break;
2014 	}
2015 
2016 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2017 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2018 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2019 		mps_dprint(sc, MPS_INFO, "Command completed, "
2020 			   "unfreezing SIM queue\n");
2021 	}
2022 
2023 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2024 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2025 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2026 	}
2027 
2028 	mps_free_command(sc, cm);
2029 	xpt_done(ccb);
2030 }
2031 
2032 static void
2033 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2034     union ccb *ccb) {
2035 	pMpi2SCSIIORequest_t	pIO_req;
2036 	struct mps_softc	*sc = sassc->sc;
2037 	uint64_t		virtLBA;
2038 	uint32_t		physLBA, stripe_offset, stripe_unit;
2039 	uint32_t		io_size, column;
2040 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2041 
2042 	/*
2043 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2044 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2045 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2046 	 * bit different than the 10/16 CDBs, handle them separately.
2047 	 */
2048 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2049 	CDB = pIO_req->CDB.CDB32;
2050 
2051 	/*
2052 	 * Handle 6 byte CDBs.
2053 	 */
2054 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2055 	    (CDB[0] == WRITE_6))) {
2056 		/*
2057 		 * Get the transfer size in blocks.
2058 		 */
2059 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2060 
2061 		/*
2062 		 * Get virtual LBA given in the CDB.
2063 		 */
2064 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2065 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2066 
2067 		/*
2068 		 * Check that LBA range for I/O does not exceed volume's
2069 		 * MaxLBA.
2070 		 */
2071 		if ((virtLBA + (uint64_t)io_size - 1) <=
2072 		    sc->DD_max_lba) {
2073 			/*
2074 			 * Check if the I/O crosses a stripe boundary.  If not,
2075 			 * translate the virtual LBA to a physical LBA and set
2076 			 * the DevHandle for the PhysDisk to be used.  If it
2077 			 * does cross a boundry, do normal I/O.  To get the
2078 			 * right DevHandle to use, get the map number for the
2079 			 * column, then use that map number to look up the
2080 			 * DevHandle of the PhysDisk.
2081 			 */
2082 			stripe_offset = (uint32_t)virtLBA &
2083 			    (sc->DD_stripe_size - 1);
2084 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2085 				physLBA = (uint32_t)virtLBA >>
2086 				    sc->DD_stripe_exponent;
2087 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2088 				column = physLBA % sc->DD_num_phys_disks;
2089 				pIO_req->DevHandle =
2090 				    sc->DD_column_map[column].dev_handle;
2091 				cm->cm_desc.SCSIIO.DevHandle =
2092 				    pIO_req->DevHandle;
2093 
2094 				physLBA = (stripe_unit <<
2095 				    sc->DD_stripe_exponent) + stripe_offset;
2096 				ptrLBA = &pIO_req->CDB.CDB32[1];
2097 				physLBA_byte = (uint8_t)(physLBA >> 16);
2098 				*ptrLBA = physLBA_byte;
2099 				ptrLBA = &pIO_req->CDB.CDB32[2];
2100 				physLBA_byte = (uint8_t)(physLBA >> 8);
2101 				*ptrLBA = physLBA_byte;
2102 				ptrLBA = &pIO_req->CDB.CDB32[3];
2103 				physLBA_byte = (uint8_t)physLBA;
2104 				*ptrLBA = physLBA_byte;
2105 
2106 				/*
2107 				 * Set flag that Direct Drive I/O is
2108 				 * being done.
2109 				 */
2110 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2111 			}
2112 		}
2113 		return;
2114 	}
2115 
2116 	/*
2117 	 * Handle 10 or 16 byte CDBs.
2118 	 */
2119 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2120 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2121 	    (CDB[0] == WRITE_16))) {
2122 		/*
2123 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2124 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2125 		 * the else section.  10-byte CDB's are OK.
2126 		 */
2127 		if ((CDB[0] < READ_16) ||
2128 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2129 			/*
2130 			 * Get the transfer size in blocks.
2131 			 */
2132 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2133 
2134 			/*
2135 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2136 			 * LBA in the CDB depending on command.
2137 			 */
2138 			lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2139 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2140 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2141 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2142 			    (uint64_t)CDB[lba_idx + 3];
2143 
2144 			/*
2145 			 * Check that LBA range for I/O does not exceed volume's
2146 			 * MaxLBA.
2147 			 */
2148 			if ((virtLBA + (uint64_t)io_size - 1) <=
2149 			    sc->DD_max_lba) {
2150 				/*
2151 				 * Check if the I/O crosses a stripe boundary.
2152 				 * If not, translate the virtual LBA to a
2153 				 * physical LBA and set the DevHandle for the
2154 				 * PhysDisk to be used.  If it does cross a
2155 				 * boundry, do normal I/O.  To get the right
2156 				 * DevHandle to use, get the map number for the
2157 				 * column, then use that map number to look up
2158 				 * the DevHandle of the PhysDisk.
2159 				 */
2160 				stripe_offset = (uint32_t)virtLBA &
2161 				    (sc->DD_stripe_size - 1);
2162 				if ((stripe_offset + io_size) <=
2163 				    sc->DD_stripe_size) {
2164 					physLBA = (uint32_t)virtLBA >>
2165 					    sc->DD_stripe_exponent;
2166 					stripe_unit = physLBA /
2167 					    sc->DD_num_phys_disks;
2168 					column = physLBA %
2169 					    sc->DD_num_phys_disks;
2170 					pIO_req->DevHandle =
2171 					    sc->DD_column_map[column].
2172 					    dev_handle;
2173 					cm->cm_desc.SCSIIO.DevHandle =
2174 					    pIO_req->DevHandle;
2175 
2176 					physLBA = (stripe_unit <<
2177 					    sc->DD_stripe_exponent) +
2178 					    stripe_offset;
2179 					ptrLBA =
2180 					    &pIO_req->CDB.CDB32[lba_idx];
2181 					physLBA_byte = (uint8_t)(physLBA >> 24);
2182 					*ptrLBA = physLBA_byte;
2183 					ptrLBA =
2184 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2185 					physLBA_byte = (uint8_t)(physLBA >> 16);
2186 					*ptrLBA = physLBA_byte;
2187 					ptrLBA =
2188 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2189 					physLBA_byte = (uint8_t)(physLBA >> 8);
2190 					*ptrLBA = physLBA_byte;
2191 					ptrLBA =
2192 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2193 					physLBA_byte = (uint8_t)physLBA;
2194 					*ptrLBA = physLBA_byte;
2195 
2196 					/*
2197 					 * Set flag that Direct Drive I/O is
2198 					 * being done.
2199 					 */
2200 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2201 				}
2202 			}
2203 		} else {
2204 			/*
2205 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2206 			 * 0.  Get the transfer size in blocks.
2207 			 */
2208 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2209 
2210 			/*
2211 			 * Get virtual LBA.
2212 			 */
2213 			virtLBA = ((uint64_t)CDB[2] << 54) |
2214 			    ((uint64_t)CDB[3] << 48) |
2215 			    ((uint64_t)CDB[4] << 40) |
2216 			    ((uint64_t)CDB[5] << 32) |
2217 			    ((uint64_t)CDB[6] << 24) |
2218 			    ((uint64_t)CDB[7] << 16) |
2219 			    ((uint64_t)CDB[8] << 8) |
2220 			    (uint64_t)CDB[9];
2221 
2222 			/*
2223 			 * Check that LBA range for I/O does not exceed volume's
2224 			 * MaxLBA.
2225 			 */
2226 			if ((virtLBA + (uint64_t)io_size - 1) <=
2227 			    sc->DD_max_lba) {
2228 				/*
2229 				 * Check if the I/O crosses a stripe boundary.
2230 				 * If not, translate the virtual LBA to a
2231 				 * physical LBA and set the DevHandle for the
2232 				 * PhysDisk to be used.  If it does cross a
2233 				 * boundry, do normal I/O.  To get the right
2234 				 * DevHandle to use, get the map number for the
2235 				 * column, then use that map number to look up
2236 				 * the DevHandle of the PhysDisk.
2237 				 */
2238 				stripe_offset = (uint32_t)virtLBA &
2239 				    (sc->DD_stripe_size - 1);
2240 				if ((stripe_offset + io_size) <=
2241 				    sc->DD_stripe_size) {
2242 					physLBA = (uint32_t)(virtLBA >>
2243 					    sc->DD_stripe_exponent);
2244 					stripe_unit = physLBA /
2245 					    sc->DD_num_phys_disks;
2246 					column = physLBA %
2247 					    sc->DD_num_phys_disks;
2248 					pIO_req->DevHandle =
2249 					    sc->DD_column_map[column].
2250 					    dev_handle;
2251 					cm->cm_desc.SCSIIO.DevHandle =
2252 					    pIO_req->DevHandle;
2253 
2254 					physLBA = (stripe_unit <<
2255 					    sc->DD_stripe_exponent) +
2256 					    stripe_offset;
2257 
2258 					/*
2259 					 * Set upper 4 bytes of LBA to 0.  We
2260 					 * assume that the phys disks are less
2261 					 * than 2 TB's in size.  Then, set the
2262 					 * lower 4 bytes.
2263 					 */
2264 					pIO_req->CDB.CDB32[2] = 0;
2265 					pIO_req->CDB.CDB32[3] = 0;
2266 					pIO_req->CDB.CDB32[4] = 0;
2267 					pIO_req->CDB.CDB32[5] = 0;
2268 					ptrLBA = &pIO_req->CDB.CDB32[6];
2269 					physLBA_byte = (uint8_t)(physLBA >> 24);
2270 					*ptrLBA = physLBA_byte;
2271 					ptrLBA = &pIO_req->CDB.CDB32[7];
2272 					physLBA_byte = (uint8_t)(physLBA >> 16);
2273 					*ptrLBA = physLBA_byte;
2274 					ptrLBA = &pIO_req->CDB.CDB32[8];
2275 					physLBA_byte = (uint8_t)(physLBA >> 8);
2276 					*ptrLBA = physLBA_byte;
2277 					ptrLBA = &pIO_req->CDB.CDB32[9];
2278 					physLBA_byte = (uint8_t)physLBA;
2279 					*ptrLBA = physLBA_byte;
2280 
2281 					/*
2282 					 * Set flag that Direct Drive I/O is
2283 					 * being done.
2284 					 */
2285 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2286 				}
2287 			}
2288 		}
2289 	}
2290 }
2291 
2292 #if __FreeBSD_version >= 900026
2293 static void
2294 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2295 {
2296 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2297 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2298 	uint64_t sasaddr;
2299 	union ccb *ccb;
2300 
2301 	ccb = cm->cm_complete_data;
2302 
2303 	/*
2304 	 * Currently there should be no way we can hit this case.  It only
2305 	 * happens when we have a failure to allocate chain frames, and SMP
2306 	 * commands require two S/G elements only.  That should be handled
2307 	 * in the standard request size.
2308 	 */
2309 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2310 		mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2311 			   __func__, cm->cm_flags);
2312 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2313 		goto bailout;
2314         }
2315 
2316 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2317 	if (rpl == NULL) {
2318 		mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2319 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2320 		goto bailout;
2321 	}
2322 
2323 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2324 	sasaddr = le32toh(req->SASAddress.Low);
2325 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2326 
2327 	if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2328 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2329 		mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2330 		    __func__, rpl->IOCStatus, rpl->SASStatus);
2331 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2332 		goto bailout;
2333 	}
2334 
2335 	mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2336 		   "%#jx completed successfully\n", __func__,
2337 		   (uintmax_t)sasaddr);
2338 
2339 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2340 		ccb->ccb_h.status = CAM_REQ_CMP;
2341 	else
2342 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2343 
2344 bailout:
2345 	/*
2346 	 * We sync in both directions because we had DMAs in the S/G list
2347 	 * in both directions.
2348 	 */
2349 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2350 			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2351 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2352 	mps_free_command(sc, cm);
2353 	xpt_done(ccb);
2354 }
2355 
/*
 * Build and send an SMP passthrough request to the device at 'sasaddr'.
 * The request and response buffers come from the smpio CCB; both are
 * mapped in a single busdma operation by packing them into a two-element
 * uio (one iovec for the request, one for the response).  Completion is
 * handled by mpssas_smpio_complete(), which also unloads the DMA map.
 * On any failure the CCB is completed here with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;	/* virtual addrs of SMP frame buffers */
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mps_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_printf(sc, "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-element S/G list: use its address directly. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		/* Plain flat buffers. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	/*
	 * NOTE(review): RequestDataLength is stored without byte-swapping,
	 * while SASAddress below is htole32()'d — confirm this is correct
	 * on big-endian hosts.
	 */
	req->RequestDataLength = ccb->smpio.smp_request_len;
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the outgoing request, iovec 1 the response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = req->RequestDataLength;
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	/* Map failed: release the command and fail the CCB ourselves. */
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2521 
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (the target itself if it contains an SMP target,
 * otherwise its parent — typically the attached expander) and hand the
 * request off to mpssas_send_smpcmd().  On any routing failure the CCB is
 * completed here with an error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_printf(sc, "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old method: look up the parent target by its handle. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Current method: use the parent info cached on the target. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_printf(sc, "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2648 #endif //__FreeBSD_version >= 900026
2649 
2650 static void
2651 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2652 {
2653 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2654 	struct mps_softc *sc;
2655 	struct mps_command *tm;
2656 	struct mpssas_target *targ;
2657 
2658 	mps_dprint(sassc->sc, MPS_TRACE, __func__);
2659 	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2660 
2661 	sc = sassc->sc;
2662 	tm = mps_alloc_command(sc);
2663 	if (tm == NULL) {
2664 		mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
2665 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2666 		xpt_done(ccb);
2667 		return;
2668 	}
2669 
2670 	targ = &sassc->targets[ccb->ccb_h.target_id];
2671 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2672 	req->DevHandle = targ->handle;
2673 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2674 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2675 
2676 	/* SAS Hard Link Reset / SATA Link Reset */
2677 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2678 
2679 	tm->cm_data = NULL;
2680 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2681 	tm->cm_complete = mpssas_resetdev_complete;
2682 	tm->cm_complete_data = ccb;
2683 	mps_map_command(sc, tm);
2684 }
2685 
2686 static void
2687 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2688 {
2689 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2690 	union ccb *ccb;
2691 
2692 	mps_dprint(sc, MPS_TRACE, __func__);
2693 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2694 
2695 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2696 	ccb = tm->cm_complete_data;
2697 
2698 	/*
2699 	 * Currently there should be no way we can hit this case.  It only
2700 	 * happens when we have a failure to allocate chain frames, and
2701 	 * task management commands don't have S/G lists.
2702 	 */
2703 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2704 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2705 
2706 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2707 
2708 		mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2709 			   "This should not happen!\n", __func__, tm->cm_flags,
2710 			   req->DevHandle);
2711 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2712 		goto bailout;
2713 	}
2714 
2715 	kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2716 	    resp->IOCStatus, resp->ResponseCode);
2717 
2718 	if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2719 		ccb->ccb_h.status = CAM_REQ_CMP;
2720 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2721 		    CAM_LUN_WILDCARD);
2722 	}
2723 	else
2724 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2725 
2726 bailout:
2727 
2728 	mpssas_free_tm(sc, tm);
2729 	xpt_done(ccb);
2730 }
2731 
2732 static void
2733 mpssas_poll(struct cam_sim *sim)
2734 {
2735 	struct mpssas_softc *sassc;
2736 
2737 	sassc = cam_sim_softc(sim);
2738 
2739 	if (sassc->sc->mps_debug & MPS_TRACE) {
2740 		/* frequent debug messages during a panic just slow
2741 		 * everything down too much.
2742 		 */
2743 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2744 		sassc->sc->mps_debug &= ~MPS_TRACE;
2745 	}
2746 
2747 	mps_intr_locked(sassc->sc);
2748 }
2749 
/*
 * Completion callback for rescan CCBs queued by mpssas_rescan().  Frees
 * the path and CCB that were set up for the rescan.  Runs with the mps
 * lock held, as asserted below.
 */
static void
mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	char path_str[64];

	if (done_ccb == NULL)
		return;

	/* mpssas_rescan() stashed our softc in ppriv_ptr1. */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;

	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
	mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);

	/* The path and CCB were allocated for this rescan; release both. */
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);

#if __FreeBSD_version < 1000006
	/*
	 * Before completing scan, get EEDP stuff for all of the existing
	 * targets.  (Newer CAM delivers this via the AC_ADVINFO_CHANGED
	 * async handler instead.)
	 */
	mpssas_check_eedp(sassc);
#endif

}
2778 
2779 /* thread to handle bus rescans */
/*
 * Kernel thread that services the bus-rescan queue.  Sleeps on
 * sassc->ccb_scanq and, when woken by mpssas_rescan(), dequeues a rescan
 * CCB and dispatches it via xpt_action().  Exits when MPSSAS_SHUTDOWN is
 * set, clearing MPSSAS_SCANTHREAD and waking anyone waiting on
 * sassc->flags.  The mps lock is held for the life of the loop except
 * while sleeping in lksleep().
 */
static void
mpssas_scanner_thread(void *arg)
{
	struct mpssas_softc *sassc;
	struct mps_softc *sc;
	union ccb	*ccb;

	sassc = (struct mpssas_softc *)arg;
	sc = sassc->sc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	for (;;) {
		/* Sleep until mpssas_rescan() queues work or shutdown. */
		lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 0);
		if (sassc->flags & MPSSAS_SHUTDOWN) {
			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
			break;
		}
		/*
		 * NOTE(review): only one CCB is dispatched per wakeup; if
		 * several rescans are queued before this thread runs, the
		 * extras wait for the next wakeup.  Confirm this is the
		 * intended behavior.
		 */
		ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
		if (ccb == NULL)
			continue;
		TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
		xpt_action(ccb);
	}

	/* Signal mpssas_shutdown (or whoever waits on flags) that we're gone. */
	sassc->flags &= ~MPSSAS_SCANTHREAD;
	wakeup(&sassc->flags);
	mps_unlock(sc);
	mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
	mps_kproc_exit(0);
}
2812 
/*
 * Queue a rescan CCB for the scanner thread and wake it.  Caller must
 * hold the mps lock, as asserted below.  The CCB's completion callback is
 * pointed at mpssas_rescan_done(), which frees the path and CCB when the
 * rescan finishes.
 */
static void
mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
{
	char path_str[64];

	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);

	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	if (ccb == NULL)
		return;

	xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
	mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);

	/* Prepare request */
	ccb->ccb_h.ppriv_ptr1 = sassc;	/* lets rescan_done find our softc */
	ccb->ccb_h.cbfcnp = mpssas_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
	TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	wakeup(&sassc->ccb_scanq);	/* kick mpssas_scanner_thread() */
}
2835 
2836 #if __FreeBSD_version >= 1000006
/*
 * CAM asynchronous event callback (newer-CAM build only).  Handles
 * AC_ADVINFO_CHANGED by re-reading a device's long read-capacity data and
 * caching its EEDP (protection information) state in the per-LUN record.
 * All other event codes are ignored.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		/* For AC_ADVINFO_CHANGED, arg encodes the advinfo buffer type. */
		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We're only interested in devices that are attached to
		 * this controller.
		 */
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Look for an existing per-LUN record on this target. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		/*
		 * First sighting of this LUN: create a record for it.
		 * M_NOWAIT because this runs in CAM async-callback context.
		 */
		if (found_lun == 0) {
			lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data via an
		 * XPT_DEV_ADVINFO CCB built on the stack (synchronous;
		 * never leaves this frame).
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		/* Release the device queue if the request froze it. */
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Record EEDP state: formatted only when the request
		 * succeeded AND the protection-enable bit is set; otherwise
		 * clear any previously cached state.
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
2929 #else /* __FreeBSD_version >= 1000006 */
2930 
/*
 * Pre-1000006 CAM fallback: probe every attached LUN for EEDP support by
 * issuing a READ CAPACITY (16) to each one.  Called from
 * mpssas_rescan_done() after a bus rescan completes.  For each LUN that
 * has a periph (i.e. exists), an async XPT_SCSI_IO is dispatched;
 * mpssas_read_cap_done() consumes the result and frees the CCB, its path,
 * and rcap_buf.  On the non-dispatch paths those resources are freed here.
 */
static void
mpssas_check_eedp(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	union ccb *ccb;
	path_id_t pathid = cam_sim_path(sassc->sim);
	target_id_t targetid;
	lun_id_t lunid;
	struct cam_periph *found_periph;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	uint8_t	found_lun;

	/*
	 * Issue a READ CAPACITY 16 command to each LUN of each target.  This
	 * info is used to determine if the LUN is formatted for EEDP support.
	 */
	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
		target = &sassc->targets[targetid];
		/* handle == 0 means no device at this target ID. */
		if (target->handle == 0x0) {
			continue;
		}

		/*
		 * Walk LUN IDs upward from 0 until cam_periph_find() stops
		 * returning a periph (LUNs are assumed densely numbered).
		 */
		lunid = 0;
		do {
			/*
			 * NOTE(review): rcap_buf uses M_NOWAIT while the CCB
			 * below uses M_WAITOK — inconsistent for the same
			 * call context; confirm which is required here.
			 */
			rcap_buf =
			    kmalloc(sizeof(struct scsi_read_capacity_eedp),
			    M_MPT2, M_NOWAIT | M_ZERO);
			if (rcap_buf == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
				    "capacity buffer for EEDP support.\n");
				return;
			}

			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    pathid, targetid, lunid) != CAM_REQ_CMP) {
				mps_dprint(sc, MPS_FAULT, "Unable to create "
				    "path for EEDP support\n");
				kfree(rcap_buf, M_MPT2);
				xpt_free_ccb(ccb);
				return;
			}

			/*
			 * If a periph is returned, the LUN exists.  Create an
			 * entry in the target's LUN list.
			 */
			if ((found_periph = cam_periph_find(ccb->ccb_h.path,
			    NULL)) != NULL) {
				/*
				 * If LUN is already in list, don't create a new
				 * one.
				 */
				found_lun = FALSE;
				SLIST_FOREACH(lun, &target->luns, lun_link) {
					if (lun->lun_id == lunid) {
						found_lun = TRUE;
						break;
					}
				}
				if (!found_lun) {
					lun = kmalloc(sizeof(struct mpssas_lun),
					    M_MPT2, M_WAITOK | M_ZERO);
					lun->lun_id = lunid;
					SLIST_INSERT_HEAD(&target->luns, lun,
					    lun_link);
				}
				lunid++;

				/*
				 * Issue a READ CAPACITY 16 command for the LUN.
				 * The mpssas_read_cap_done function will load
				 * the read cap info into the LUN struct.
				 */
				csio = &ccb->csio;
				csio->ccb_h.func_code = XPT_SCSI_IO;
				csio->ccb_h.flags = CAM_DIR_IN;
				csio->ccb_h.retry_count = 4;
				csio->ccb_h.cbfcnp = mpssas_read_cap_done;
				csio->ccb_h.timeout = 60000;
				csio->data_ptr = (uint8_t *)rcap_buf;
				csio->dxfer_len = sizeof(struct
				    scsi_read_capacity_eedp);
				csio->sense_len = MPS_SENSE_LEN;
				csio->cdb_len = sizeof(*scsi_cmd);
				csio->tag_action = MSG_SIMPLE_Q_TAG;

				/* Build the CDB: 0x9E = SERVICE ACTION IN(16). */
				scsi_cmd = (struct scsi_read_capacity_16 *)
				    &csio->cdb_io.cdb_bytes;
				bzero(scsi_cmd, sizeof(*scsi_cmd));
				scsi_cmd->opcode = 0x9E;
				scsi_cmd->service_action = SRC16_SERVICE_ACTION;
				/* CDB byte 13: allocation length (fits in 1 byte). */
				((uint8_t *)scsi_cmd)[13] = sizeof(struct
				    scsi_read_capacity_eedp);

				/*
				 * Set the path, target and lun IDs for the READ
				 * CAPACITY request.
				 */
				ccb->ccb_h.path_id =
				    xpt_path_path_id(ccb->ccb_h.path);
				ccb->ccb_h.target_id =
				    xpt_path_target_id(ccb->ccb_h.path);
				ccb->ccb_h.target_lun =
				    xpt_path_lun_id(ccb->ccb_h.path);

				/*
				 * Ownership of ccb, path and rcap_buf passes
				 * to mpssas_read_cap_done() here.
				 */
				ccb->ccb_h.ppriv_ptr1 = sassc;
				xpt_action(ccb);
			} else {
				/* No periph: LUN doesn't exist; clean up. */
				kfree(rcap_buf, M_MPT2);
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
			}
		} while (found_periph);
	}
}
3053 
3054 
3055 static void
3056 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3057 {
3058 	struct mpssas_softc *sassc;
3059 	struct mpssas_target *target;
3060 	struct mpssas_lun *lun;
3061 	struct scsi_read_capacity_eedp *rcap_buf;
3062 
3063 	if (done_ccb == NULL)
3064 		return;
3065 
3066 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3067 
3068 	/*
3069 	 * Get the LUN ID for the path and look it up in the LUN list for the
3070 	 * target.
3071 	 */
3072 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3073 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3074 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3075 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3076 			continue;
3077 
3078 		/*
3079 		 * Got the LUN in the target's LUN list.  Fill it in
3080 		 * with EEDP info.  If the READ CAP 16 command had some
3081 		 * SCSI error (common if command is not supported), mark
3082 		 * the lun as not supporting EEDP and set the block size
3083 		 * to 0.
3084 		 */
3085 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3086 		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3087 			lun->eedp_formatted = FALSE;
3088 			lun->eedp_block_size = 0;
3089 			break;
3090 		}
3091 
3092 		if (rcap_buf->protect & 0x01) {
3093 			lun->eedp_formatted = TRUE;
3094 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3095 		}
3096 		break;
3097 	}
3098 
3099 	// Finished with this CCB and path.
3100 	kfree(rcap_buf, M_MPT2);
3101 	xpt_free_path(done_ccb->ccb_h.path);
3102 	xpt_free_ccb(done_ccb);
3103 }
3104 #endif /* __FreeBSD_version >= 1000006 */
3105 
3106 int
3107 mpssas_startup(struct mps_softc *sc)
3108 {
3109 	struct mpssas_softc *sassc;
3110 
3111 	/*
3112 	 * Send the port enable message and set the wait_for_port_enable flag.
3113 	 * This flag helps to keep the simq frozen until all discovery events
3114 	 * are processed.
3115 	 */
3116 	sassc = sc->sassc;
3117 	mpssas_startup_increment(sassc);
3118 	sc->wait_for_port_enable = 1;
3119 	mpssas_send_portenable(sc);
3120 	return (0);
3121 }
3122 
3123 static int
3124 mpssas_send_portenable(struct mps_softc *sc)
3125 {
3126 	MPI2_PORT_ENABLE_REQUEST *request;
3127 	struct mps_command *cm;
3128 
3129 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3130 
3131 	if ((cm = mps_alloc_command(sc)) == NULL)
3132 		return (EBUSY);
3133 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3134 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3135 	request->MsgFlags = 0;
3136 	request->VP_ID = 0;
3137 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3138 	cm->cm_complete = mpssas_portenable_complete;
3139 	cm->cm_data = NULL;
3140 	cm->cm_sge = NULL;
3141 
3142 	mps_map_command(sc, cm);
3143 	mps_dprint(sc, MPS_TRACE,
3144 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3145 	    cm, cm->cm_req, cm->cm_complete);
3146 	return (0);
3147 }
3148 
/*
 * Completion handler for the PORT ENABLE request sent by
 * mpssas_send_portenable().  Tears down the config intrhook, applies
 * WarpDrive disk-hiding policy, and releases the startup refcount /
 * simq freeze taken before port enable.
 */
static void
mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	int i;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for port enable! "
			   "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* Failures are logged but not fatal: teardown below still runs. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
	else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");

	mps_free_command(sc, cm);
	/* Boot can proceed now; drop the config interrupt hook if still set. */
	if (sc->mps_ich.ich_arg != NULL) {
		mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
		config_intrhook_disestablish(&sc->mps_ich);
		sc->mps_ich.ich_arg = NULL;
	}

	/*
	 * Get WarpDrive info after discovery is complete but before the scan
	 * starts.  At this point, all devices are ready to be exposed to the
	 * OS.  If devices should be hidden instead, take them out of the
	 * 'targets' array before the scan.  The devinfo for a disk will have
	 * some info and a volume's will be 0.  Use that to remove disks.
	 */
	mps_wd_config_pages(sc);
	if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
	  && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
	 || (sc->WD_valid_config && (sc->WD_hide_expose ==
	    MPS_WD_HIDE_IF_VOLUME))) {
		for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
			target = &sassc->targets[i];
			/* Non-zero devinfo == physical disk; hide it. */
			if (target->devinfo) {
				target->devinfo = 0x0;
				target->encl_handle = 0x0;
				target->encl_slot = 0x0;
				target->handle = 0x0;
				target->tid = 0x0;
				target->linkrate = 0x0;
				target->flags = 0x0;
			}
		}
	}

	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.  Since the simq was explicitly frozen before port
	 * enable, it must be explicitly released here to keep the
	 * freeze/release count in sync.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	mpssas_startup_decrement(sassc);
	xpt_release_simq(sassc->sim, 1);
}
3222