xref: /dragonfly/sys/dev/raid/mps/mps_sas.c (revision 7eedf208)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2011 LSI Corp.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms, with or without
31  * modification, are permitted provided that the following conditions
32  * are met:
33  * 1. Redistributions of source code must retain the above copyright
34  *    notice, this list of conditions and the following disclaimer.
35  * 2. Redistributions in binary form must reproduce the above copyright
36  *    notice, this list of conditions and the following disclaimer in the
37  *    documentation and/or other materials provided with the distribution.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49  * SUCH DAMAGE.
50  *
51  * LSI MPT-Fusion Host Adapter FreeBSD
52  *
53  * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
54  */
55 
56 /* Communications core for LSI MPT2 */
57 
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
64 #include <sys/bus.h>
65 #include <sys/conf.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
68 #include <sys/bio.h>
69 #include <sys/malloc.h>
70 #include <sys/uio.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
76 #include <sys/sbuf.h>
77 
78 #include <sys/rman.h>
79 
80 #include <machine/stdarg.h>
81 
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
94 #endif
95 
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
107 
108 #define MPSSAS_DISCOVERY_TIMEOUT	20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
110 
111 /*
112  * static array to check SCSI OpCode for EEDP protection bits
113  */
114 #define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
115 #define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
116 #define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Indexed by SCSI opcode; nonzero entries give the EEDP flags value to
 * apply for that command, zero means no protection-information handling.
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2a WRITE(10), 0x2e WRITE AND VERIFY(10), 0x2f VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8a WRITE(16), 0x8e WRITE AND VERIFY(16), 0x8f VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xa8 READ(12), 0xaa WRITE(12), 0xae WRITE AND VERIFY(12), 0xaf VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
135 
136 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
137 
138 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
139 static void mpssas_log_command(struct mps_command *, const char *, ...)
140 		__printflike(2, 3);
141 #if 0 /* XXX unused */
142 static void mpssas_discovery_timeout(void *data);
143 #endif
144 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
145 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
146 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
147 static void mpssas_poll(struct cam_sim *sim);
148 static void mpssas_scsiio_timeout(void *data);
149 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
150 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
151     struct mps_command *cm, union ccb *ccb);
152 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
153 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
154 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
155 #if __FreeBSD_version >= 900026
156 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
157 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
158 			       uint64_t sasaddr);
159 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
160 #endif //FreeBSD_version >= 900026
161 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
162 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
163 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
164 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
165 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
166 static void mpssas_scanner_thread(void *arg);
167 #if __FreeBSD_version >= 1000006
168 static void mpssas_async(void *callback_arg, uint32_t code,
169 			 struct cam_path *path, void *arg);
170 #else
171 static void mpssas_check_eedp(struct mpssas_softc *sassc);
172 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
173 #endif
174 static int mpssas_send_portenable(struct mps_softc *sc);
175 static void mpssas_portenable_complete(struct mps_softc *sc,
176     struct mps_command *cm);
177 
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
180 {
181 	struct mpssas_target *target;
182 	int i;
183 
184 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 		target = &sassc->targets[i];
186 		if (target->handle == handle)
187 			return (target);
188 	}
189 
190 	return (NULL);
191 }
192 
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194  * commands before device handles have been found by discovery.  Since
195  * discovery involves reading config pages and possibly sending commands,
196  * discovery actions may continue even after we receive the end of discovery
197  * event, so refcount discovery actions instead of assuming we can unfreeze
198  * the simq when we get the event.
199  */
200 void
201 mpssas_startup_increment(struct mpssas_softc *sassc)
202 {
203 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
204 		if (sassc->startup_refcount++ == 0) {
205 			/* just starting, freeze the simq */
206 			mps_dprint(sassc->sc, MPS_INFO,
207 			    "%s freezing simq\n", __func__);
208 			xpt_freeze_simq(sassc->sim, 1);
209 		}
210 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
211 		    sassc->startup_refcount);
212 	}
213 }
214 
215 void
216 mpssas_startup_decrement(struct mpssas_softc *sassc)
217 {
218 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
219 		if (--sassc->startup_refcount == 0) {
220 			/* finished all discovery-related actions, release
221 			 * the simq and rescan for the latest topology.
222 			 */
223 			mps_dprint(sassc->sc, MPS_INFO,
224 			    "%s releasing simq\n", __func__);
225 			sassc->flags &= ~MPSSAS_IN_STARTUP;
226 			xpt_release_simq(sassc->sim, 1);
227 			mpssas_rescan_target(sassc->sc, NULL);
228 		}
229 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
230 		    sassc->startup_refcount);
231 	}
232 }
233 
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235  * management, so refcount the TMs and keep the simq frozen when any are in
236  * use.
237  */
238 struct mps_command *
239 mpssas_alloc_tm(struct mps_softc *sc)
240 {
241 	struct mps_command *tm;
242 
243 	tm = mps_alloc_high_priority_command(sc);
244 	if (tm != NULL) {
245 		if (sc->sassc->tm_count++ == 0) {
246 			mps_printf(sc, "%s freezing simq\n", __func__);
247 			xpt_freeze_simq(sc->sassc->sim, 1);
248 		}
249 		mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
250 		    sc->sassc->tm_count);
251 	}
252 	return tm;
253 }
254 
255 void
256 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
257 {
258 	if (tm == NULL)
259 		return;
260 
261 	/* if there are no TMs in use, we can release the simq.  We use our
262 	 * own refcount so that it's easier for a diag reset to cleanup and
263 	 * release the simq.
264 	 */
265 	if (--sc->sassc->tm_count == 0) {
266 		mps_printf(sc, "%s releasing simq\n", __func__);
267 		xpt_release_simq(sc->sassc->sim, 1);
268 	}
269 	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
270 	    sc->sassc->tm_count);
271 
272 	mps_free_high_priority_command(sc, tm);
273 }
274 
275 
276 void
277 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
278 {
279 	struct mpssas_softc *sassc = sc->sassc;
280 	path_id_t pathid;
281 	target_id_t targetid;
282 	union ccb *ccb;
283 
284 	pathid = cam_sim_path(sassc->sim);
285 	if (targ == NULL)
286 		targetid = CAM_TARGET_WILDCARD;
287 	else
288 		targetid = targ - sassc->targets;
289 
290 	/*
291 	 * Allocate a CCB and schedule a rescan.
292 	 */
293 	ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
294 
295 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
296 		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
298 		xpt_free_ccb(ccb);
299 		return;
300 	}
301 
302 	/* XXX Hardwired to scan the bus for now */
303 	ccb->ccb_h.func_code = XPT_SCAN_BUS;
304 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
305 	mpssas_rescan(sassc, ccb);
306 }
307 
/*
 * Format and print one log line for a command: the CAM path string (or a
 * "noperiph" sim:bus:target:lun tuple when no CCB is attached), the CDB
 * and transfer length for SCSI I/O, the SMID, and finally the caller's
 * printf-style message.  Safe to call with cm == NULL (no-op).
 */
static void
mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
	struct sbuf sb;
	__va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* Build the whole line in a fixed on-stack sbuf, then print once. */
	sbuf_new(&sb, str, sizeof(str), 0);

	__va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* No CCB: identify the command by sim/bus/target/lun. */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	kprintf("%s", sbuf_data(&sb));

	__va_end(ap);
}
349 
350 static void
351 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
352 {
353 	struct mpssas_softc *sassc = sc->sassc;
354 	path_id_t pathid = cam_sim_path(sassc->sim);
355 	struct cam_path *path;
356 
357 	mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
358 	if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
359 		mps_printf(sc, "unable to create path for lost target %d\n",
360 		    targ->tid);
361 		return;
362 	}
363 
364 	xpt_async(AC_LOST_DEVICE, path, NULL);
365 	xpt_free_path(path);
366 }
367 
368 /*
369  * The MPT2 firmware performs debounce on the link to avoid transient link
370  * errors and false removals.  When it does decide that link has been lost
371  * and a device need to go away, it expects that the host will perform a
372  * target reset and then an op remove.  The reset has the side-effect of
373  * aborting any outstanding requests for the device, which is required for
374  * the op-remove to succeed.  It's not clear if the host should check for
375  * the device coming back alive after the reset.
376  */
void
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);

	/*
	 * If this is a WD controller, determine if the disk should be exposed
	 * to the OS or not.  If disk should be exposed, return from this
	 * function without doing anything.
	 */
	sc = sassc->sc;
	if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
	    MPS_WD_EXPOSE_ALWAYS)) {
		return;
	}

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
	if (targ == NULL) {
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		kprintf("%s: invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	/* Allocating the TM also freezes the simq until removal finishes. */
	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: command alloc failure\n", __func__);
		return;
	}

	/* Notify CAM that the device is gone before resetting it. */
	mpssas_lost_target(sc, targ);

	/*
	 * First step is a target reset; its completion handler
	 * (mpssas_remove_device) then issues the actual op-remove.  See
	 * the comment above this function for why the reset must come
	 * first.
	 */
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	/* Stash the handle for the completion handler. */
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}
432 
433 static void
434 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
435 {
436 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
437 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
438 	struct mpssas_target *targ;
439 	struct mps_command *next_cm;
440 	uint16_t handle;
441 
442 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
443 
444 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
445 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
446 	targ = tm->cm_targ;
447 
448 	/*
449 	 * Currently there should be no way we can hit this case.  It only
450 	 * happens when we have a failure to allocate chain frames, and
451 	 * task management commands don't have S/G lists.
452 	 */
453 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
454 		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
455 			   "This should not happen!\n", __func__, tm->cm_flags,
456 			   handle);
457 		mpssas_free_tm(sc, tm);
458 		return;
459 	}
460 
461 	if (reply == NULL) {
462 		/* XXX retry the remove after the diag reset completes? */
463 		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
464 		    __func__, handle);
465 		mpssas_free_tm(sc, tm);
466 		return;
467 	}
468 
469 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
470 		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
471 		   reply->IOCStatus, handle);
472 		mpssas_free_tm(sc, tm);
473 		return;
474 	}
475 
476 	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
477 	    reply->TerminationCount);
478 	mps_free_reply(sc, tm->cm_reply_data);
479 	tm->cm_reply = NULL;	/* Ensures the the reply won't get re-freed */
480 
481 	/* Reuse the existing command */
482 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
483 	memset(req, 0, sizeof(*req));
484 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
485 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
486 	req->DevHandle = handle;
487 	tm->cm_data = NULL;
488 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
489 	tm->cm_complete = mpssas_remove_complete;
490 	tm->cm_complete_data = (void *)(uintptr_t)handle;
491 
492 	mps_map_command(sc, tm);
493 
494 	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
495 		   targ->tid, handle);
496 	TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
497 		union ccb *ccb;
498 
499 		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
500 		ccb = tm->cm_complete_data;
501 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
502 		mpssas_scsiio_complete(sc, tm);
503 	}
504 }
505 
/*
 * Completion handler for the SAS IO unit control OP_REMOVE request sent
 * by mpssas_remove_device().  On success the target's firmware-derived
 * state is cleared; on failure the target entry is intentionally left
 * intact (see the comment below).
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, reply->IOCStatus);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
	}

	/* Removal done; this also thaws the simq if it was the last TM. */
	mpssas_free_tm(sc, tm);
}
561 
562 static int
563 mpssas_register_events(struct mps_softc *sc)
564 {
565 	uint8_t events[16];
566 
567 	bzero(events, 16);
568 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
569 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
570 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
571 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
572 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
573 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
574 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
575 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
576 	setbit(events, MPI2_EVENT_IR_VOLUME);
577 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
578 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
579 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
580 
581 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
582 	    &sc->sassc->mpssas_eh);
583 
584 	return (0);
585 }
586 
/*
 * Attach the SAS/CAM layer: allocate the sassc and target table, create
 * the SIM and its queue, set up the firmware-event taskqueue and the
 * rescan thread, register the bus, and freeze the simq until discovery
 * completes.  Any failure unwinds via mps_detach_sas().  Returns 0 on
 * success or an errno value.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
#if __FreeBSD_version >= 1000006
	cam_status status;
#endif
	int unit, error = 0;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	/* M_WAITOK: these allocations cannot fail, no NULL checks needed. */
	sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	sassc->targets = kmalloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	TAILQ_INIT(&sassc->ccb_scanq);
	error = mps_kproc_create(mpssas_scanner_thread, sassc,
	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
	if (error) {
		mps_printf(sc, "Error %d starting rescan thread\n", error);
		goto out;
	}

	/* The remaining setup must happen under the softc lock. */
	mps_lock(sc);
	sassc->flags |= MPSSAS_SCANTHREAD;

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.  Freezing
	 * the simq will prevent the CAM boottime scanner from running
	 * before discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sassc->sim, 1);
	sc->sassc->startup_refcount = 0;

	callout_init_mp(&sassc->discovery_callout);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

#if __FreeBSD_version >= 1000006
	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x registering async handler for "
			   "AC_ADVINFO_CHANGED events\n", status);
	}
#endif

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	/* On any failure, tear down everything set up so far. */
	if (error)
		mps_detach_sas(sc);
	return (error);
}
683 
/*
 * Tear down the SAS/CAM attachment; safe to call on a partially
 * attached softc (used as the error path of mps_attach_sas()).
 * Deregisters events, drains the event taskqueue with the lock NOT
 * held, then under the lock releases the simq if needed, deregisters
 * the bus, shuts down the scanner thread, and frees everything.
 * Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
#if __FreeBSD_version >= 1000006
	xpt_register_async(0, mpssas_async, sc, NULL);
#endif

	/* Startup never finished: drop the freeze taken at attach time. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim);
	}

	/* Ask the scanner thread to exit, then wait (bounded) for it. */
	if (sassc->flags & MPSSAS_SCANTHREAD) {
		sassc->flags |= MPSSAS_SHUTDOWN;
		wakeup(&sassc->ccb_scanq);

		/* Re-check: the thread may already have cleared the flag. */
		if (sassc->flags & MPSSAS_SCANTHREAD) {
			lksleep(&sassc->flags, &sc->mps_lock, 0,
			       "mps_shutdown", 30 * hz);
		}
	}
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_release(sassc->devq);

	kfree(sassc->targets, M_MPT2);
	kfree(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
741 
742 void
743 mpssas_discovery_end(struct mpssas_softc *sassc)
744 {
745 	struct mps_softc *sc = sassc->sc;
746 
747 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
748 
749 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
750 		callout_stop(&sassc->discovery_callout);
751 
752 }
753 
#if 0 /* XXX unused */
/*
 * Callout handler armed while discovery is pending; if discovery has not
 * finished, polls the hardware (in case interrupts are broken) and
 * re-arms itself up to MPSSAS_MAX_DISCOVERY_TIMEOUTS times before
 * giving up and ending discovery anyway.  Currently compiled out.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	mps_printf(sc,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_printf(sassc->sc,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		/* Polling completed discovery; clean up the callout state. */
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* Still waiting: re-arm and count the timeout. */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
#endif
795 
/*
 * CAM action entry point for the mps SIM.  Dispatches on the CCB
 * function code.  Most cases fill in a status and fall through to
 * xpt_done(); SCSI I/O, SMP I/O and device reset hand the CCB off to
 * their own completion paths and return early.  Must be called with
 * the softc lock held (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		/* Describe the (faked single-bus) HBA to CAM. */
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 0;
		cpi->initiator_id = 255;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		/* Report per-target transport settings (link speed etc.). */
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle == 0 means no device at this target id. */
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_TID_INVALID;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the firmware link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* resetdev path completes the CCB itself. */
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_printf(sassc->sc, "mpssas_action faking success for "
			   "abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		/* scsiio path completes the CCB itself. */
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		/* smpio path completes the CCB itself. */
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
912 
913 static void
914 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
915     target_id_t target_id, lun_id_t lun_id)
916 {
917 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
918 	struct cam_path *path;
919 
920 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
921 	    ac_code, target_id, lun_id);
922 
923 	if (xpt_create_path(&path, NULL,
924 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
925 		mps_printf(sc, "unable to create path for reset "
926 			   "notification\n");
927 		return;
928 	}
929 
930 	xpt_async(ac_code, path, NULL);
931 	xpt_free_path(path);
932 }
933 
/*
 * Force-complete every active command after a diag reset, with a NULL
 * reply.  Each command is finished either through its cm_complete
 * callback or by waking a sleeping waiter; anything neither completed
 * nor free afterwards is logged as an anomaly.  Must be called with the
 * softc lock held (asserted below).
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	mps_printf(sc, "%s\n", __func__);
	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* Polled commands are marked complete so pollers stop waiting. */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}
979 
980 void
981 mpssas_handle_reinit(struct mps_softc *sc)
982 {
983 	int i;
984 
985 	/* Go back into startup mode and freeze the simq, so that CAM
986 	 * doesn't send any commands until after we've rediscovered all
987 	 * targets and found the proper device handles for them.
988 	 *
989 	 * After the reset, portenable will trigger discovery, and after all
990 	 * discovery-related activities have finished, the simq will be
991 	 * released.
992 	 */
993 	mps_printf(sc, "%s startup\n", __func__);
994 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
995 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
996 	xpt_freeze_simq(sc->sassc->sim, 1);
997 
998 	/* notify CAM of a bus reset */
999 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1000 	    CAM_LUN_WILDCARD);
1001 
1002 	/* complete and cleanup after all outstanding commands */
1003 	mpssas_complete_all_commands(sc);
1004 
1005 	mps_printf(sc, "%s startup %u tm %u after command completion\n",
1006 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1007 
1008 	/*
1009 	 * The simq was explicitly frozen above, so set the refcount to 0.
1010 	 * The simq will be explicitly released after port enable completes.
1011 	 */
1012 	sc->sassc->startup_refcount = 0;
1013 
1014 	/* zero all the target handles, since they may change after the
1015 	 * reset, and we have to rediscover all the targets and use the new
1016 	 * handles.
1017 	 */
1018 	for (i = 0; i < sc->facts->MaxTargets; i++) {
1019 		if (sc->sassc->targets[i].outstanding != 0)
1020 			mps_printf(sc, "target %u outstanding %u\n",
1021 			    i, sc->sassc->targets[i].outstanding);
1022 		sc->sassc->targets[i].handle = 0x0;
1023 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1024 		sc->sassc->targets[i].outstanding = 0;
1025 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1026 	}
1027 }
1028 static void
1029 mpssas_tm_timeout(void *data)
1030 {
1031 	struct mps_command *tm = data;
1032 	struct mps_softc *sc = tm->cm_sc;
1033 
1034 	mps_lock(sc);
1035 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1036 	mps_reinit(sc);
1037 	mps_unlock(sc);
1038 }
1039 
1040 static void
1041 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1042 {
1043 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1044 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1045 	unsigned int cm_count = 0;
1046 	struct mps_command *cm;
1047 	struct mpssas_target *targ;
1048 
1049 	callout_stop(&tm->cm_callout);
1050 
1051 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1052 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1053 	targ = tm->cm_targ;
1054 
1055 	/*
1056 	 * Currently there should be no way we can hit this case.  It only
1057 	 * happens when we have a failure to allocate chain frames, and
1058 	 * task management commands don't have S/G lists.
1059 	 */
1060 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1061 		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1062 			   "This should not happen!\n", __func__, tm->cm_flags);
1063 		mpssas_free_tm(sc, tm);
1064 		return;
1065 	}
1066 
1067 	if (reply == NULL) {
1068 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1069 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1070 			/* this completion was due to a reset, just cleanup */
1071 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1072 			targ->tm = NULL;
1073 			mpssas_free_tm(sc, tm);
1074 		}
1075 		else {
1076 			/* we should have gotten a reply. */
1077 			mps_reinit(sc);
1078 		}
1079 		return;
1080 	}
1081 
1082 	mpssas_log_command(tm,
1083 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1084 	    reply->IOCStatus, reply->ResponseCode,
1085 	    reply->TerminationCount);
1086 
1087 	/* See if there are any outstanding commands for this LUN.
1088 	 * This could be made more efficient by using a per-LU data
1089 	 * structure of some sort.
1090 	 */
1091 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1092 		if (cm->cm_lun == tm->cm_lun)
1093 			cm_count++;
1094 	}
1095 
1096 	if (cm_count == 0) {
1097 		mpssas_log_command(tm,
1098 		    "logical unit %u finished recovery after reset\n",
1099 		    tm->cm_lun);
1100 
1101 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1102 		    tm->cm_lun);
1103 
1104 		/* we've finished recovery for this logical unit.  check and
1105 		 * see if some other logical unit has a timedout command
1106 		 * that needs to be processed.
1107 		 */
1108 		cm = TAILQ_FIRST(&targ->timedout_commands);
1109 		if (cm) {
1110 			mpssas_send_abort(sc, tm, cm);
1111 		}
1112 		else {
1113 			targ->tm = NULL;
1114 			mpssas_free_tm(sc, tm);
1115 		}
1116 	}
1117 	else {
1118 		/* if we still have commands for this LUN, the reset
1119 		 * effectively failed, regardless of the status reported.
1120 		 * Escalate to a target reset.
1121 		 */
1122 		mpssas_log_command(tm,
1123 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1124 		    tm, cm_count);
1125 		mpssas_send_reset(sc, tm,
1126 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
1127 	}
1128 }
1129 
1130 static void
1131 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1132 {
1133 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1134 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1135 	struct mpssas_target *targ;
1136 
1137 	callout_stop(&tm->cm_callout);
1138 
1139 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1140 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1141 	targ = tm->cm_targ;
1142 
1143 	/*
1144 	 * Currently there should be no way we can hit this case.  It only
1145 	 * happens when we have a failure to allocate chain frames, and
1146 	 * task management commands don't have S/G lists.
1147 	 */
1148 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1149 		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1150 			   "This should not happen!\n", __func__, tm->cm_flags);
1151 		mpssas_free_tm(sc, tm);
1152 		return;
1153 	}
1154 
1155 	if (reply == NULL) {
1156 		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1157 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1158 			/* this completion was due to a reset, just cleanup */
1159 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1160 			targ->tm = NULL;
1161 			mpssas_free_tm(sc, tm);
1162 		}
1163 		else {
1164 			/* we should have gotten a reply. */
1165 			mps_reinit(sc);
1166 		}
1167 		return;
1168 	}
1169 
1170 	mpssas_log_command(tm,
1171 	    "target reset status 0x%x code 0x%x count %u\n",
1172 	    reply->IOCStatus, reply->ResponseCode,
1173 	    reply->TerminationCount);
1174 
1175 	targ->flags &= ~MPSSAS_TARGET_INRESET;
1176 
1177 	if (targ->outstanding == 0) {
1178 		/* we've finished recovery for this target and all
1179 		 * of its logical units.
1180 		 */
1181 		mpssas_log_command(tm,
1182 		    "recovery finished after target reset\n");
1183 
1184 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1185 		    CAM_LUN_WILDCARD);
1186 
1187 		targ->tm = NULL;
1188 		mpssas_free_tm(sc, tm);
1189 	}
1190 	else {
1191 		/* after a target reset, if this target still has
1192 		 * outstanding commands, the reset effectively failed,
1193 		 * regardless of the status reported.  escalate.
1194 		 */
1195 		mpssas_log_command(tm,
1196 		    "target reset complete for tm %p, but still have %u command(s)\n",
1197 		    tm, targ->outstanding);
1198 		mps_reinit(sc);
1199 	}
1200 }
1201 
1202 #define MPS_RESET_TIMEOUT 30
1203 
1204 static int
1205 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1206 {
1207 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1208 	struct mpssas_target *target;
1209 	int err;
1210 
1211 	target = tm->cm_targ;
1212 	if (target->handle == 0) {
1213 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1214 		    __func__, target->tid);
1215 		return -1;
1216 	}
1217 
1218 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1219 	req->DevHandle = target->handle;
1220 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1221 	req->TaskType = type;
1222 
1223 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1224 		/* XXX Need to handle invalid LUNs */
1225 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1226 		tm->cm_targ->logical_unit_resets++;
1227 		mpssas_log_command(tm, "sending logical unit reset\n");
1228 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1229 	}
1230 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1231 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1232 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1233 		tm->cm_targ->target_resets++;
1234 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1235 		mpssas_log_command(tm, "sending target reset\n");
1236 		tm->cm_complete = mpssas_target_reset_complete;
1237 	}
1238 	else {
1239 		mps_printf(sc, "unexpected reset type 0x%x\n", type);
1240 		return -1;
1241 	}
1242 
1243 	tm->cm_data = NULL;
1244 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1245 	tm->cm_complete_data = (void *)tm;
1246 
1247 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1248 	    mpssas_tm_timeout, tm);
1249 
1250 	err = mps_map_command(sc, tm);
1251 	if (err)
1252 		mpssas_log_command(tm,
1253 		    "error %d sending reset type %u\n",
1254 		    err, type);
1255 
1256 	return err;
1257 }
1258 
1259 
1260 static void
1261 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1262 {
1263 	struct mps_command *cm;
1264 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1265 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1266 	struct mpssas_target *targ;
1267 
1268 	callout_stop(&tm->cm_callout);
1269 
1270 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1271 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1272 	targ = tm->cm_targ;
1273 
1274 	/*
1275 	 * Currently there should be no way we can hit this case.  It only
1276 	 * happens when we have a failure to allocate chain frames, and
1277 	 * task management commands don't have S/G lists.
1278 	 */
1279 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1280 		mpssas_log_command(tm,
1281 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1282 		    tm->cm_flags, tm, req->TaskMID);
1283 		mpssas_free_tm(sc, tm);
1284 		return;
1285 	}
1286 
1287 	if (reply == NULL) {
1288 		mpssas_log_command(tm,
1289 		    "NULL abort reply for tm %p TaskMID %u\n",
1290 		    tm, req->TaskMID);
1291 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1292 			/* this completion was due to a reset, just cleanup */
1293 			targ->tm = NULL;
1294 			mpssas_free_tm(sc, tm);
1295 		}
1296 		else {
1297 			/* we should have gotten a reply. */
1298 			mps_reinit(sc);
1299 		}
1300 		return;
1301 	}
1302 
1303 	mpssas_log_command(tm,
1304 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1305 	    req->TaskMID,
1306 	    reply->IOCStatus, reply->ResponseCode,
1307 	    reply->TerminationCount);
1308 
1309 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1310 	if (cm == NULL) {
1311 		/* if there are no more timedout commands, we're done with
1312 		 * error recovery for this target.
1313 		 */
1314 		mpssas_log_command(tm,
1315 		    "finished recovery after aborting TaskMID %u\n",
1316 		    req->TaskMID);
1317 
1318 		targ->tm = NULL;
1319 		mpssas_free_tm(sc, tm);
1320 	}
1321 	else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1322 		/* abort success, but we have more timedout commands to abort */
1323 		mpssas_log_command(tm,
1324 		    "continuing recovery after aborting TaskMID %u\n",
1325 		    req->TaskMID);
1326 
1327 		mpssas_send_abort(sc, tm, cm);
1328 	}
1329 	else {
1330 		/* we didn't get a command completion, so the abort
1331 		 * failed as far as we're concerned.  escalate.
1332 		 */
1333 		mpssas_log_command(tm,
1334 		    "abort failed for TaskMID %u tm %p\n",
1335 		    req->TaskMID, tm);
1336 
1337 		mpssas_send_reset(sc, tm,
1338 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1339 	}
1340 }
1341 
1342 #define MPS_ABORT_TIMEOUT 5
1343 
1344 static int
1345 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1346 {
1347 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1348 	struct mpssas_target *targ;
1349 	int err;
1350 
1351 	targ = cm->cm_targ;
1352 	if (targ->handle == 0) {
1353 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1354 		    __func__, cm->cm_ccb->ccb_h.target_id);
1355 		return -1;
1356 	}
1357 
1358 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1359 	req->DevHandle = targ->handle;
1360 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1361 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1362 
1363 	/* XXX Need to handle invalid LUNs */
1364 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1365 
1366 	req->TaskMID = cm->cm_desc.Default.SMID;
1367 
1368 	tm->cm_data = NULL;
1369 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1370 	tm->cm_complete = mpssas_abort_complete;
1371 	tm->cm_complete_data = (void *)tm;
1372 	tm->cm_targ = cm->cm_targ;
1373 	tm->cm_lun = cm->cm_lun;
1374 
1375 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1376 	    mpssas_tm_timeout, tm);
1377 
1378 	targ->aborts++;
1379 
1380 	err = mps_map_command(sc, tm);
1381 	if (err)
1382 		mpssas_log_command(tm,
1383 		    "error %d sending abort for cm %p SMID %u\n",
1384 		    err, cm, req->TaskMID);
1385 	return err;
1386 }
1387 
1388 
1389 static void
1390 mpssas_scsiio_timeout(void *data)
1391 {
1392 	struct mps_softc *sc;
1393 	struct mps_command *cm;
1394 	struct mpssas_target *targ;
1395 
1396 	cm = (struct mps_command *)data;
1397 	sc = cm->cm_sc;
1398 
1399 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1400 
1401 	mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1402 
1403 	/*
1404 	 * Run the interrupt handler to make sure it's not pending.  This
1405 	 * isn't perfect because the command could have already completed
1406 	 * and been re-used, though this is unlikely.
1407 	 */
1408 	mps_intr_locked(sc);
1409 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1410 		mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1411 		return;
1412 	}
1413 
1414 	if (cm->cm_ccb == NULL) {
1415 		mps_printf(sc, "command timeout with NULL ccb\n");
1416 		return;
1417 	}
1418 
1419 	mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1420 	    cm, cm->cm_ccb);
1421 
1422 	targ = cm->cm_targ;
1423 	targ->timeouts++;
1424 
1425 	/* XXX first, check the firmware state, to see if it's still
1426 	 * operational.  if not, do a diag reset.
1427 	 */
1428 
1429 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1430 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1431 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1432 
1433 	if (targ->tm != NULL) {
1434 		/* target already in recovery, just queue up another
1435 		 * timedout command to be processed later.
1436 		 */
1437 		mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1438 		    cm, targ->tm);
1439 	}
1440 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1441 		mps_printf(sc, "timedout cm %p allocated tm %p\n",
1442 		    cm, targ->tm);
1443 
1444 		/* start recovery by aborting the first timedout command */
1445 		mpssas_send_abort(sc, targ->tm, cm);
1446 	}
1447 	else {
1448 		/* XXX queue this target up for recovery once a TM becomes
1449 		 * available.  The firmware only has a limited number of
1450 		 * HighPriority credits for the high priority requests used
1451 		 * for task management, and we ran out.
1452 		 *
1453 		 * Isilon: don't worry about this for now, since we have
1454 		 * more credits than disks in an enclosure, and limit
1455 		 * ourselves to one TM per target for recovery.
1456 		 */
1457 		mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
1458 		    cm);
1459 	}
1460 
1461 }
1462 
/*
 * Translate a CAM XPT_SCSI_IO CCB into an MPI2 SCSI IO request and
 * hand it to the controller.  Validates the target, allocates a
 * command frame, fills in the direction/tag/LUN/CDB/EEDP fields,
 * arms the per-command timeout, and maps the command for DMA.  On
 * any validation failure the CCB is completed immediately with an
 * appropriate error status.  Called with the mps lock held.
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	/* A zero handle means the target was never (or is no longer)
	 * discovered; fail the request as an invalid target ID.
	 */
	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/* Refuse new I/O once the controller is shutting down. */
	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Out of command frames: freeze the simq (once) and ask CAM to
	 * requeue this CCB later.
	 */
	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Build the MPI2 SCSI IO request in the command frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = cm->cm_sense_busaddr;
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = csio->dxfer_len;
	req->BidirectionalDataLength = 0;
	req->IoFlags = csio->cdb_len;
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		req->Control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		req->Control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in any per-target Transport Layer Retries setting. */
	req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;

	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	/* Copy the CDB from wherever CAM stored it (pointer or inline). */
	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	req->IoFlags = csio->cdb_len;

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL here if the target has no matching LUN. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = lun->eedp_block_size;
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = eedp_flags;

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				/* LBA offset: byte 6 in 16-byte CDBs, byte 2
				 * in 10/12-byte CDBs.
				 */
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				/* RDPROTECT/WRPROTECT = 001b in CDB byte 1. */
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = eedp_flags;
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	/* Fill in the command bookkeeping used by the completion path. */
	cm->cm_data = csio->data_ptr;
	cm->cm_length = csio->dxfer_len;
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;	/* request slots minus SGLOffset0, in bytes */
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = targ->handle;
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* CCB timeout is in milliseconds; callout ticks are hz-based. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);

	if ((sc->mps_debug & MPS_TRACE) != 0)
		mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
		    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1683 
1684 static void
1685 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1686 {
1687 	MPI2_SCSI_IO_REPLY *rep;
1688 	union ccb *ccb;
1689 	struct ccb_scsiio *csio;
1690 	struct mpssas_softc *sassc;
1691 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1692 	u8 *TLR_bits, TLR_on;
1693 	int dir = 0, i;
1694 	u16 alloc_len;
1695 
1696 	mps_dprint(sc, MPS_TRACE,
1697 	    "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1698 	    __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1699 	    cm->cm_targ->outstanding);
1700 
1701 	callout_stop(&cm->cm_callout);
1702 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1703 
1704 	sassc = sc->sassc;
1705 	ccb = cm->cm_complete_data;
1706 	csio = &ccb->csio;
1707 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1708 	/*
1709 	 * XXX KDM if the chain allocation fails, does it matter if we do
1710 	 * the sync and unload here?  It is simpler to do it in every case,
1711 	 * assuming it doesn't cause problems.
1712 	 */
1713 	if (cm->cm_data != NULL) {
1714 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1715 			dir = BUS_DMASYNC_POSTREAD;
1716 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1717 			dir = BUS_DMASYNC_POSTWRITE;;
1718 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1719 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1720 	}
1721 
1722 	cm->cm_targ->completed++;
1723 	cm->cm_targ->outstanding--;
1724 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1725 
1726 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1727 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1728 		if (cm->cm_reply != NULL)
1729 			mpssas_log_command(cm,
1730 			    "completed timedout cm %p ccb %p during recovery "
1731 			    "ioc %x scsi %x state %x xfer %u\n",
1732 			    cm, cm->cm_ccb,
1733 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1734 			    rep->TransferCount);
1735 		else
1736 			mpssas_log_command(cm,
1737 			    "completed timedout cm %p ccb %p during recovery\n",
1738 			    cm, cm->cm_ccb);
1739 	} else if (cm->cm_targ->tm != NULL) {
1740 		if (cm->cm_reply != NULL)
1741 			mpssas_log_command(cm,
1742 			    "completed cm %p ccb %p during recovery "
1743 			    "ioc %x scsi %x state %x xfer %u\n",
1744 			    cm, cm->cm_ccb,
1745 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1746 			    rep->TransferCount);
1747 		else
1748 			mpssas_log_command(cm,
1749 			    "completed cm %p ccb %p during recovery\n",
1750 			    cm, cm->cm_ccb);
1751 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1752 		mpssas_log_command(cm,
1753 		    "reset completed cm %p ccb %p\n",
1754 		    cm, cm->cm_ccb);
1755 	}
1756 
1757 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1758 		/*
1759 		 * We ran into an error after we tried to map the command,
1760 		 * so we're getting a callback without queueing the command
1761 		 * to the hardware.  So we set the status here, and it will
1762 		 * be retained below.  We'll go through the "fast path",
1763 		 * because there can be no reply when we haven't actually
1764 		 * gone out to the hardware.
1765 		 */
1766 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1767 
1768 		/*
1769 		 * Currently the only error included in the mask is
1770 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1771 		 * chain frames.  We need to freeze the queue until we get
1772 		 * a command that completed without this error, which will
1773 		 * hopefully have some chain frames attached that we can
1774 		 * use.  If we wanted to get smarter about it, we would
1775 		 * only unfreeze the queue in this condition when we're
1776 		 * sure that we're getting some chain frames back.  That's
1777 		 * probably unnecessary.
1778 		 */
1779 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1780 			xpt_freeze_simq(sassc->sim, 1);
1781 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1782 			mps_dprint(sc, MPS_INFO, "Error sending command, "
1783 				   "freezing SIM queue\n");
1784 		}
1785 	}
1786 
1787 	/* Take the fast path to completion */
1788 	if (cm->cm_reply == NULL) {
1789 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1790 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1791 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1792 			else {
1793 				ccb->ccb_h.status = CAM_REQ_CMP;
1794 				ccb->csio.scsi_status = SCSI_STATUS_OK;
1795 			}
1796 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1797 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1798 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1799 				mps_dprint(sc, MPS_INFO,
1800 					   "Unfreezing SIM queue\n");
1801 			}
1802 		}
1803 
1804 		/*
1805 		 * There are two scenarios where the status won't be
1806 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
1807 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1808 		 */
1809 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1810 			/*
1811 			 * Freeze the dev queue so that commands are
1812 			 * executed in the correct order with after error
1813 			 * recovery.
1814 			 */
1815 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
1816 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1817 		}
1818 		mps_free_command(sc, cm);
1819 		xpt_done(ccb);
1820 		return;
1821 	}
1822 
1823 	if (sc->mps_debug & MPS_TRACE)
1824 		mpssas_log_command(cm,
1825 		    "ioc %x scsi %x state %x xfer %u\n",
1826 		    rep->IOCStatus, rep->SCSIStatus,
1827 		    rep->SCSIState, rep->TransferCount);
1828 
1829 	/*
1830 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1831 	 * Volume if an error occurred (normal I/O retry).  Use the original
1832 	 * CCB, but set a flag that this will be a retry so that it's sent to
1833 	 * the original volume.  Free the command but reuse the CCB.
1834 	 */
1835 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1836 		mps_free_command(sc, cm);
1837 		ccb->ccb_h.status = MPS_WD_RETRY;
1838 		mpssas_action_scsiio(sassc, ccb);
1839 		return;
1840 	}
1841 
1842 	switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1843 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1844 		csio->resid = cm->cm_length - rep->TransferCount;
1845 		/* FALLTHROUGH */
1846 	case MPI2_IOCSTATUS_SUCCESS:
1847 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1848 
1849 		if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1850 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1851 			mpssas_log_command(cm, "recovered error\n");
1852 
1853 		/* Completion failed at the transport level. */
1854 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1855 		    MPI2_SCSI_STATE_TERMINATED)) {
1856 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1857 			break;
1858 		}
1859 
1860 		/* In a modern packetized environment, an autosense failure
1861 		 * implies that there's not much else that can be done to
1862 		 * recover the command.
1863 		 */
1864 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1865 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1866 			break;
1867 		}
1868 
1869 		/*
1870 		 * CAM doesn't care about SAS Response Info data, but if this is
1871 		 * the state check if TLR should be done.  If not, clear the
1872 		 * TLR_bits for the target.
1873 		 */
1874 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1875 		    ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1876 		    MPS_SCSI_RI_INVALID_FRAME)) {
1877 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1878 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1879 		}
1880 
1881 		/*
1882 		 * Intentionally override the normal SCSI status reporting
1883 		 * for these two cases.  These are likely to happen in a
1884 		 * multi-initiator environment, and we want to make sure that
1885 		 * CAM retries these commands rather than fail them.
1886 		 */
1887 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1888 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1889 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1890 			break;
1891 		}
1892 
1893 		/* Handle normal status and sense */
1894 		csio->scsi_status = rep->SCSIStatus;
1895 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1896 			ccb->ccb_h.status = CAM_REQ_CMP;
1897 		else
1898 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1899 
1900 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1901 			int sense_len, returned_sense_len;
1902 
1903 			returned_sense_len = min(rep->SenseCount,
1904 			    sizeof(struct scsi_sense_data));
1905 			if (returned_sense_len < ccb->csio.sense_len)
1906 				ccb->csio.sense_resid = ccb->csio.sense_len -
1907 					returned_sense_len;
1908 			else
1909 				ccb->csio.sense_resid = 0;
1910 
1911 			sense_len = min(returned_sense_len,
1912 			    ccb->csio.sense_len - ccb->csio.sense_resid);
1913 			bzero(&ccb->csio.sense_data,
1914 			      sizeof(&ccb->csio.sense_data));
1915 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1916 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1917 		}
1918 
1919 		/*
1920 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
1921 		 * and it's page code 0 (Supported Page List), and there is
1922 		 * inquiry data, and this is for a sequential access device, and
1923 		 * the device is an SSP target, and TLR is supported by the
1924 		 * controller, turn the TLR_bits value ON if page 0x90 is
1925 		 * supported.
1926 		 */
1927 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1928 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1929 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1930 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1931 		    T_SEQUENTIAL) && (sc->control_TLR) &&
1932 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
1933 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1934 			vpd_list = (struct scsi_vpd_supported_page_list *)
1935 			    csio->data_ptr;
1936 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1937 			    TLR_bits;
1938 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1939 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1940 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1941 			    csio->cdb_io.cdb_bytes[4];
1942 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1943 				if (vpd_list->list[i] == 0x90) {
1944 					*TLR_bits = TLR_on;
1945 					break;
1946 				}
1947 			}
1948 		}
1949 		break;
1950 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1951 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1952 		/*
1953 		 * If devinfo is 0 this will be a volume.  In that case don't
1954 		 * tell CAM that the volume is not there.  We want volumes to
1955 		 * be enumerated until they are deleted/removed, not just
1956 		 * failed.
1957 		 */
1958 		if (cm->cm_targ->devinfo == 0)
1959 			ccb->ccb_h.status = CAM_REQ_CMP;
1960 		else
1961 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1962 		break;
1963 	case MPI2_IOCSTATUS_INVALID_SGL:
1964 		mps_print_scsiio_cmd(sc, cm);
1965 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1966 		break;
1967 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1968 		/*
1969 		 * This is one of the responses that comes back when an I/O
1970 		 * has been aborted.  If it is because of a timeout that we
1971 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
1972 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
1973 		 * command is the same (it gets retried, subject to the
1974 		 * retry counter), the only difference is what gets printed
1975 		 * on the console.
1976 		 */
1977 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1978 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1979 		else
1980 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1981 		break;
1982 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1983 		/* resid is ignored for this condition */
1984 		csio->resid = 0;
1985 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1986 		break;
1987 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1988 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1989 		/*
1990 		 * Since these are generally external (i.e. hopefully
1991 		 * transient transport-related) errors, retry these without
1992 		 * decrementing the retry count.
1993 		 */
1994 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
1995 		mpssas_log_command(cm,
1996 		    "terminated ioc %x scsi %x state %x xfer %u\n",
1997 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1998 		    rep->TransferCount);
1999 		break;
2000 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2001 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2002 	case MPI2_IOCSTATUS_INVALID_VPID:
2003 	case MPI2_IOCSTATUS_INVALID_FIELD:
2004 	case MPI2_IOCSTATUS_INVALID_STATE:
2005 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2006 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2007 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2008 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2009 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2010 	default:
2011 		mpssas_log_command(cm,
2012 		    "completed ioc %x scsi %x state %x xfer %u\n",
2013 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2014 		    rep->TransferCount);
2015 		csio->resid = cm->cm_length;
2016 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2017 		break;
2018 	}
2019 
2020 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2021 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2022 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2023 		mps_dprint(sc, MPS_INFO, "Command completed, "
2024 			   "unfreezing SIM queue\n");
2025 	}
2026 
2027 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2028 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2029 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2030 	}
2031 
2032 	mps_free_command(sc, cm);
2033 	xpt_done(ccb);
2034 }
2035 
2036 static void
2037 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2038     union ccb *ccb) {
2039 	pMpi2SCSIIORequest_t	pIO_req;
2040 	struct mps_softc	*sc = sassc->sc;
2041 	uint64_t		virtLBA;
2042 	uint32_t		physLBA, stripe_offset, stripe_unit;
2043 	uint32_t		io_size, column;
2044 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2045 
2046 	/*
2047 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2048 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2049 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2050 	 * bit different than the 10/16 CDBs, handle them separately.
2051 	 */
2052 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2053 	CDB = pIO_req->CDB.CDB32;
2054 
2055 	/*
2056 	 * Handle 6 byte CDBs.
2057 	 */
2058 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2059 	    (CDB[0] == WRITE_6))) {
2060 		/*
2061 		 * Get the transfer size in blocks.
2062 		 */
2063 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2064 
2065 		/*
2066 		 * Get virtual LBA given in the CDB.
2067 		 */
2068 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2069 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2070 
2071 		/*
2072 		 * Check that LBA range for I/O does not exceed volume's
2073 		 * MaxLBA.
2074 		 */
2075 		if ((virtLBA + (uint64_t)io_size - 1) <=
2076 		    sc->DD_max_lba) {
2077 			/*
2078 			 * Check if the I/O crosses a stripe boundary.  If not,
2079 			 * translate the virtual LBA to a physical LBA and set
2080 			 * the DevHandle for the PhysDisk to be used.  If it
2081 			 * does cross a boundry, do normal I/O.  To get the
2082 			 * right DevHandle to use, get the map number for the
2083 			 * column, then use that map number to look up the
2084 			 * DevHandle of the PhysDisk.
2085 			 */
2086 			stripe_offset = (uint32_t)virtLBA &
2087 			    (sc->DD_stripe_size - 1);
2088 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2089 				physLBA = (uint32_t)virtLBA >>
2090 				    sc->DD_stripe_exponent;
2091 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2092 				column = physLBA % sc->DD_num_phys_disks;
2093 				pIO_req->DevHandle =
2094 				    sc->DD_column_map[column].dev_handle;
2095 				cm->cm_desc.SCSIIO.DevHandle =
2096 				    pIO_req->DevHandle;
2097 
2098 				physLBA = (stripe_unit <<
2099 				    sc->DD_stripe_exponent) + stripe_offset;
2100 				ptrLBA = &pIO_req->CDB.CDB32[1];
2101 				physLBA_byte = (uint8_t)(physLBA >> 16);
2102 				*ptrLBA = physLBA_byte;
2103 				ptrLBA = &pIO_req->CDB.CDB32[2];
2104 				physLBA_byte = (uint8_t)(physLBA >> 8);
2105 				*ptrLBA = physLBA_byte;
2106 				ptrLBA = &pIO_req->CDB.CDB32[3];
2107 				physLBA_byte = (uint8_t)physLBA;
2108 				*ptrLBA = physLBA_byte;
2109 
2110 				/*
2111 				 * Set flag that Direct Drive I/O is
2112 				 * being done.
2113 				 */
2114 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2115 			}
2116 		}
2117 		return;
2118 	}
2119 
2120 	/*
2121 	 * Handle 10 or 16 byte CDBs.
2122 	 */
2123 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2124 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2125 	    (CDB[0] == WRITE_16))) {
2126 		/*
2127 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2128 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2129 		 * the else section.  10-byte CDB's are OK.
2130 		 */
2131 		if ((CDB[0] < READ_16) ||
2132 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2133 			/*
2134 			 * Get the transfer size in blocks.
2135 			 */
2136 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2137 
2138 			/*
2139 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2140 			 * LBA in the CDB depending on command.
2141 			 */
2142 			lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2143 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2144 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2145 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2146 			    (uint64_t)CDB[lba_idx + 3];
2147 
2148 			/*
2149 			 * Check that LBA range for I/O does not exceed volume's
2150 			 * MaxLBA.
2151 			 */
2152 			if ((virtLBA + (uint64_t)io_size - 1) <=
2153 			    sc->DD_max_lba) {
2154 				/*
2155 				 * Check if the I/O crosses a stripe boundary.
2156 				 * If not, translate the virtual LBA to a
2157 				 * physical LBA and set the DevHandle for the
2158 				 * PhysDisk to be used.  If it does cross a
2159 				 * boundry, do normal I/O.  To get the right
2160 				 * DevHandle to use, get the map number for the
2161 				 * column, then use that map number to look up
2162 				 * the DevHandle of the PhysDisk.
2163 				 */
2164 				stripe_offset = (uint32_t)virtLBA &
2165 				    (sc->DD_stripe_size - 1);
2166 				if ((stripe_offset + io_size) <=
2167 				    sc->DD_stripe_size) {
2168 					physLBA = (uint32_t)virtLBA >>
2169 					    sc->DD_stripe_exponent;
2170 					stripe_unit = physLBA /
2171 					    sc->DD_num_phys_disks;
2172 					column = physLBA %
2173 					    sc->DD_num_phys_disks;
2174 					pIO_req->DevHandle =
2175 					    sc->DD_column_map[column].
2176 					    dev_handle;
2177 					cm->cm_desc.SCSIIO.DevHandle =
2178 					    pIO_req->DevHandle;
2179 
2180 					physLBA = (stripe_unit <<
2181 					    sc->DD_stripe_exponent) +
2182 					    stripe_offset;
2183 					ptrLBA =
2184 					    &pIO_req->CDB.CDB32[lba_idx];
2185 					physLBA_byte = (uint8_t)(physLBA >> 24);
2186 					*ptrLBA = physLBA_byte;
2187 					ptrLBA =
2188 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2189 					physLBA_byte = (uint8_t)(physLBA >> 16);
2190 					*ptrLBA = physLBA_byte;
2191 					ptrLBA =
2192 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2193 					physLBA_byte = (uint8_t)(physLBA >> 8);
2194 					*ptrLBA = physLBA_byte;
2195 					ptrLBA =
2196 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2197 					physLBA_byte = (uint8_t)physLBA;
2198 					*ptrLBA = physLBA_byte;
2199 
2200 					/*
2201 					 * Set flag that Direct Drive I/O is
2202 					 * being done.
2203 					 */
2204 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2205 				}
2206 			}
2207 		} else {
2208 			/*
2209 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2210 			 * 0.  Get the transfer size in blocks.
2211 			 */
2212 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2213 
2214 			/*
2215 			 * Get virtual LBA.
2216 			 */
2217 			virtLBA = ((uint64_t)CDB[2] << 54) |
2218 			    ((uint64_t)CDB[3] << 48) |
2219 			    ((uint64_t)CDB[4] << 40) |
2220 			    ((uint64_t)CDB[5] << 32) |
2221 			    ((uint64_t)CDB[6] << 24) |
2222 			    ((uint64_t)CDB[7] << 16) |
2223 			    ((uint64_t)CDB[8] << 8) |
2224 			    (uint64_t)CDB[9];
2225 
2226 			/*
2227 			 * Check that LBA range for I/O does not exceed volume's
2228 			 * MaxLBA.
2229 			 */
2230 			if ((virtLBA + (uint64_t)io_size - 1) <=
2231 			    sc->DD_max_lba) {
2232 				/*
2233 				 * Check if the I/O crosses a stripe boundary.
2234 				 * If not, translate the virtual LBA to a
2235 				 * physical LBA and set the DevHandle for the
2236 				 * PhysDisk to be used.  If it does cross a
2237 				 * boundry, do normal I/O.  To get the right
2238 				 * DevHandle to use, get the map number for the
2239 				 * column, then use that map number to look up
2240 				 * the DevHandle of the PhysDisk.
2241 				 */
2242 				stripe_offset = (uint32_t)virtLBA &
2243 				    (sc->DD_stripe_size - 1);
2244 				if ((stripe_offset + io_size) <=
2245 				    sc->DD_stripe_size) {
2246 					physLBA = (uint32_t)(virtLBA >>
2247 					    sc->DD_stripe_exponent);
2248 					stripe_unit = physLBA /
2249 					    sc->DD_num_phys_disks;
2250 					column = physLBA %
2251 					    sc->DD_num_phys_disks;
2252 					pIO_req->DevHandle =
2253 					    sc->DD_column_map[column].
2254 					    dev_handle;
2255 					cm->cm_desc.SCSIIO.DevHandle =
2256 					    pIO_req->DevHandle;
2257 
2258 					physLBA = (stripe_unit <<
2259 					    sc->DD_stripe_exponent) +
2260 					    stripe_offset;
2261 
2262 					/*
2263 					 * Set upper 4 bytes of LBA to 0.  We
2264 					 * assume that the phys disks are less
2265 					 * than 2 TB's in size.  Then, set the
2266 					 * lower 4 bytes.
2267 					 */
2268 					pIO_req->CDB.CDB32[2] = 0;
2269 					pIO_req->CDB.CDB32[3] = 0;
2270 					pIO_req->CDB.CDB32[4] = 0;
2271 					pIO_req->CDB.CDB32[5] = 0;
2272 					ptrLBA = &pIO_req->CDB.CDB32[6];
2273 					physLBA_byte = (uint8_t)(physLBA >> 24);
2274 					*ptrLBA = physLBA_byte;
2275 					ptrLBA = &pIO_req->CDB.CDB32[7];
2276 					physLBA_byte = (uint8_t)(physLBA >> 16);
2277 					*ptrLBA = physLBA_byte;
2278 					ptrLBA = &pIO_req->CDB.CDB32[8];
2279 					physLBA_byte = (uint8_t)(physLBA >> 8);
2280 					*ptrLBA = physLBA_byte;
2281 					ptrLBA = &pIO_req->CDB.CDB32[9];
2282 					physLBA_byte = (uint8_t)physLBA;
2283 					*ptrLBA = physLBA_byte;
2284 
2285 					/*
2286 					 * Set flag that Direct Drive I/O is
2287 					 * being done.
2288 					 */
2289 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2290 				}
2291 			}
2292 		}
2293 	}
2294 }
2295 
2296 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands issued by
 * mpssas_send_smpcmd().  Maps the IOC/SAS status from the reply frame to a
 * CAM status, syncs and unloads the bidirectional data DMA map, frees the
 * command, and completes the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
        }

	/* No reply frame; fail the CCB rather than dereference NULL. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/*
	 * Reassemble the 64-bit target SAS address from the little-endian
	 * halves in the request, for logging below.
	 */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, rpl->IOCStatus, rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame carries the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2359 
/*
 * Build and submit an SMP passthrough request to the device at 'sasaddr'.
 * The request and response buffers from the CCB are passed to the chip as a
 * two-element uio (one segment each); completion is handled by
 * mpssas_smpio_complete().  On any setup failure, the CCB is completed with
 * an appropriate error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mps_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_printf(sc, "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		/* Plain single buffers; use them directly. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = ccb->smpio.smp_request_len;
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outgoing request, iovec 1 the incoming response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = req->RequestDataLength;
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will ensure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2527 
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the device itself or, more commonly, its parent expander) and
 * dispatch the request via mpssas_send_smpcmd().  The CCB is completed
 * with an error status if no usable SAS address can be found.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_printf(sc, "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to figure out what the expander's
	 * address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Use the parent device info cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_printf(sc, "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2654 #endif //__FreeBSD_version >= 900026
2655 
2656 static void
2657 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2658 {
2659 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2660 	struct mps_softc *sc;
2661 	struct mps_command *tm;
2662 	struct mpssas_target *targ;
2663 
2664 	mps_dprint(sassc->sc, MPS_TRACE, __func__);
2665 	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2666 
2667 	sc = sassc->sc;
2668 	tm = mps_alloc_command(sc);
2669 	if (tm == NULL) {
2670 		mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
2671 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2672 		xpt_done(ccb);
2673 		return;
2674 	}
2675 
2676 	targ = &sassc->targets[ccb->ccb_h.target_id];
2677 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2678 	req->DevHandle = targ->handle;
2679 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2680 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2681 
2682 	/* SAS Hard Link Reset / SATA Link Reset */
2683 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2684 
2685 	tm->cm_data = NULL;
2686 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2687 	tm->cm_complete = mpssas_resetdev_complete;
2688 	tm->cm_complete_data = ccb;
2689 	mps_map_command(sc, tm);
2690 }
2691 
2692 static void
2693 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2694 {
2695 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2696 	union ccb *ccb;
2697 
2698 	mps_dprint(sc, MPS_TRACE, __func__);
2699 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2700 
2701 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2702 	ccb = tm->cm_complete_data;
2703 
2704 	/*
2705 	 * Currently there should be no way we can hit this case.  It only
2706 	 * happens when we have a failure to allocate chain frames, and
2707 	 * task management commands don't have S/G lists.
2708 	 */
2709 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2710 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2711 
2712 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2713 
2714 		mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2715 			   "This should not happen!\n", __func__, tm->cm_flags,
2716 			   req->DevHandle);
2717 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2718 		goto bailout;
2719 	}
2720 
2721 	kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2722 	    resp->IOCStatus, resp->ResponseCode);
2723 
2724 	if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2725 		ccb->ccb_h.status = CAM_REQ_CMP;
2726 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2727 		    CAM_LUN_WILDCARD);
2728 	}
2729 	else
2730 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2731 
2732 bailout:
2733 
2734 	mpssas_free_tm(sc, tm);
2735 	xpt_done(ccb);
2736 }
2737 
2738 static void
2739 mpssas_poll(struct cam_sim *sim)
2740 {
2741 	struct mpssas_softc *sassc;
2742 
2743 	sassc = cam_sim_softc(sim);
2744 
2745 	if (sassc->sc->mps_debug & MPS_TRACE) {
2746 		/* frequent debug messages during a panic just slow
2747 		 * everything down too much.
2748 		 */
2749 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2750 		sassc->sc->mps_debug &= ~MPS_TRACE;
2751 	}
2752 
2753 	mps_intr_locked(sassc->sc);
2754 }
2755 
/*
 * Completion callback for rescan CCBs queued via mpssas_rescan().  Frees
 * the CCB and its path; on older CAM versions it also gathers EEDP
 * information for existing targets before the scan is considered done.
 */
static void
mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{
	struct mpssas_softc *sassc;
	char path_str[64];

	if (done_ccb == NULL)
		return;

	/* The softc was stashed in ppriv_ptr1 by mpssas_rescan(). */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;

	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
	mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);

	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);

#if __FreeBSD_version < 1000006
	/*
	 * Before completing scan, get EEDP stuff for all of the existing
	 * targets.
	 */
	mpssas_check_eedp(sassc);
#endif

}
2784 
/*
 * Kernel thread that services the bus-rescan queue.  Sleeps on
 * sassc->ccb_scanq until mpssas_rescan() queues a CCB (or shutdown is
 * requested), then dispatches queued CCBs via xpt_action().
 */
static void
mpssas_scanner_thread(void *arg)
{
	struct mpssas_softc *sassc;
	struct mps_softc *sc;
	union ccb	*ccb;

	sassc = (struct mpssas_softc *)arg;
	sc = sassc->sc;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	for (;;) {
		/* Wait (lock dropped while asleep) for queued work. */
		lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 0);
		if (sassc->flags & MPSSAS_SHUTDOWN) {
			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
			break;
		}
		ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
		if (ccb == NULL)
			continue;
		TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
		xpt_action(ccb);
	}

	/* Signal anyone waiting on sassc->flags that the thread is gone. */
	sassc->flags &= ~MPSSAS_SCANTHREAD;
	wakeup(&sassc->flags);
	mps_unlock(sc);
	mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
	mps_kproc_exit(0);
}
2818 
/*
 * Queue a rescan CCB for the scanner thread and wake it.  The CCB is
 * completed through mpssas_rescan_done() once the scan has been
 * dispatched.  Must be called with the controller lock held (asserted
 * below); the scan queue is protected by that lock.
 */
static void
mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
{
	char path_str[64];

	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);

	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	if (ccb == NULL)
		return;

	xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
	mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);

	/* Prepare request */
	/*
	 * NOTE(review): ppriv_ptr1 and cbfcnp are assigned before
	 * xpt_setup_ccb() re-initializes the CCB header -- verify that
	 * xpt_setup_ccb() does not clobber these two fields.
	 */
	ccb->ccb_h.ppriv_ptr1 = sassc;
	ccb->ccb_h.cbfcnp = mpssas_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
	TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	wakeup(&sassc->ccb_scanq);
}
2841 
#if __FreeBSD_version >= 1000006
/*
 * CAM asynchronous event callback.  Only AC_ADVINFO_CHANGED is handled:
 * when the long read-capacity data for a device on this controller
 * changes, re-fetch it with an XPT_DEV_ADVINFO CCB and update the
 * per-LUN EEDP (end-to-end data protection) state kept in the target's
 * LUN list.
 *
 * callback_arg is the mps_softc registered with xpt_register_async();
 * for AC_ADVINFO_CHANGED, arg carries the advinfo buffer type.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We're only interested in devices that are attached to
		 * this controller.
		 */
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* Reuse an existing LUN entry for this lunid if present. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			/*
			 * M_NOWAIT: this runs in callback context, so we
			 * must not sleep; just skip the update on failure.
			 */
			lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_NOWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Fetch the cached long read-capacity data for this device
		 * via an immediate XPT_DEV_ADVINFO request.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		/* Release the device queue if the request froze it. */
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * SRC16_PROT_EN set in the capacity data means the medium
		 * is formatted with protection information (EEDP).
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
2935 #else /* __FreeBSD_version >= 1000006 */
2936 
/*
 * Pre-1000006 fallback for EEDP detection: walk every LUN of every
 * known target and issue a READ CAPACITY(16) to each existing LUN.
 * The completion routine, mpssas_read_cap_done(), parses the result
 * and records the LUN's EEDP state; it also frees the rcap_buf, path
 * and CCB allocated here (ownership transfers on xpt_action()).
 */
static void
mpssas_check_eedp(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	union ccb *ccb;
	path_id_t pathid = cam_sim_path(sassc->sim);
	target_id_t targetid;
	lun_id_t lunid;
	struct cam_periph *found_periph;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	uint8_t	found_lun;

	/*
	 * Issue a READ CAPACITY 16 command to each LUN of each target.  This
	 * info is used to determine if the LUN is formatted for EEDP support.
	 */
	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
		target = &sassc->targets[targetid];
		/* handle == 0 means no device attached at this target ID. */
		if (target->handle == 0x0) {
			continue;
		}

		/*
		 * Probe LUNs in ascending order until cam_periph_find()
		 * reports no periph (i.e. the LUN does not exist).
		 */
		lunid = 0;
		do {
			rcap_buf =
			    kmalloc(sizeof(struct scsi_read_capacity_eedp),
			    M_MPT2, M_NOWAIT | M_ZERO);
			if (rcap_buf == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
				    "capacity buffer for EEDP support.\n");
				return;
			}

			/*
			 * NOTE(review): the CCB is allocated with M_TEMP but
			 * released via xpt_free_ccb() here and in the
			 * completion path -- confirm xpt_free_ccb() pairs
			 * with this allocation on DragonFly.
			 */
			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    pathid, targetid, lunid) != CAM_REQ_CMP) {
				mps_dprint(sc, MPS_FAULT, "Unable to create "
				    "path for EEDP support\n");
				kfree(rcap_buf, M_MPT2);
				xpt_free_ccb(ccb);
				return;
			}

			/*
			 * If a periph is returned, the LUN exists.  Create an
			 * entry in the target's LUN list.
			 */
			if ((found_periph = cam_periph_find(ccb->ccb_h.path,
			    NULL)) != NULL) {
				/*
				 * If LUN is already in list, don't create a new
				 * one.
				 */
				found_lun = FALSE;
				SLIST_FOREACH(lun, &target->luns, lun_link) {
					if (lun->lun_id == lunid) {
						found_lun = TRUE;
						break;
					}
				}
				if (!found_lun) {
					lun = kmalloc(sizeof(struct mpssas_lun),
					    M_MPT2, M_WAITOK | M_ZERO);
					/*
					 * NOTE(review): kmalloc(M_WAITOK)
					 * does not return NULL; this error
					 * branch is dead code.
					 */
					if (lun == NULL) {
						mps_dprint(sc, MPS_FAULT,
						    "Unable to alloc LUN for "
						    "EEDP support.\n");
						kfree(rcap_buf, M_MPT2);
						xpt_free_path(ccb->ccb_h.path);
						xpt_free_ccb(ccb);
						return;
					}
					lun->lun_id = lunid;
					SLIST_INSERT_HEAD(&target->luns, lun,
					    lun_link);
				}
				lunid++;

				/*
				 * Issue a READ CAPACITY 16 command for the LUN.
				 * The mpssas_read_cap_done function will load
				 * the read cap info into the LUN struct.
				 */
				csio = &ccb->csio;
				csio->ccb_h.func_code = XPT_SCSI_IO;
				csio->ccb_h.flags = CAM_DIR_IN;
				csio->ccb_h.retry_count = 4;
				csio->ccb_h.cbfcnp = mpssas_read_cap_done;
				csio->ccb_h.timeout = 60000;
				csio->data_ptr = (uint8_t *)rcap_buf;
				csio->dxfer_len = sizeof(struct
				    scsi_read_capacity_eedp);
				csio->sense_len = MPS_SENSE_LEN;
				csio->cdb_len = sizeof(*scsi_cmd);
				csio->tag_action = MSG_SIMPLE_Q_TAG;

				/* Build the READ CAPACITY(16) CDB by hand. */
				scsi_cmd = (struct scsi_read_capacity_16 *)
				    &csio->cdb_io.cdb_bytes;
				bzero(scsi_cmd, sizeof(*scsi_cmd));
				scsi_cmd->opcode = 0x9E;
				scsi_cmd->service_action = SRC16_SERVICE_ACTION;
				/* Byte 13 of the CDB is the allocation length. */
				((uint8_t *)scsi_cmd)[13] = sizeof(struct
				    scsi_read_capacity_eedp);

				/*
				 * Set the path, target and lun IDs for the READ
				 * CAPACITY request.
				 */
				ccb->ccb_h.path_id =
				    xpt_path_path_id(ccb->ccb_h.path);
				ccb->ccb_h.target_id =
				    xpt_path_target_id(ccb->ccb_h.path);
				ccb->ccb_h.target_lun =
				    xpt_path_lun_id(ccb->ccb_h.path);

				/* Completion routine frees buf/path/ccb. */
				ccb->ccb_h.ppriv_ptr1 = sassc;
				xpt_action(ccb);
			} else {
				/* LUN does not exist; release this probe's
				 * resources and stop scanning this target. */
				kfree(rcap_buf, M_MPT2);
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
			}
		} while (found_periph);
	}
}
3068 
3069 
3070 static void
3071 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3072 {
3073 	struct mpssas_softc *sassc;
3074 	struct mpssas_target *target;
3075 	struct mpssas_lun *lun;
3076 	struct scsi_read_capacity_eedp *rcap_buf;
3077 
3078 	if (done_ccb == NULL)
3079 		return;
3080 
3081 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3082 
3083 	/*
3084 	 * Get the LUN ID for the path and look it up in the LUN list for the
3085 	 * target.
3086 	 */
3087 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3088 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3089 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3090 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3091 			continue;
3092 
3093 		/*
3094 		 * Got the LUN in the target's LUN list.  Fill it in
3095 		 * with EEDP info.  If the READ CAP 16 command had some
3096 		 * SCSI error (common if command is not supported), mark
3097 		 * the lun as not supporting EEDP and set the block size
3098 		 * to 0.
3099 		 */
3100 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3101 		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3102 			lun->eedp_formatted = FALSE;
3103 			lun->eedp_block_size = 0;
3104 			break;
3105 		}
3106 
3107 		if (rcap_buf->protect & 0x01) {
3108 			lun->eedp_formatted = TRUE;
3109 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3110 		}
3111 		break;
3112 	}
3113 
3114 	// Finished with this CCB and path.
3115 	kfree(rcap_buf, M_MPT2);
3116 	xpt_free_path(done_ccb->ccb_h.path);
3117 	xpt_free_ccb(done_ccb);
3118 }
3119 #endif /* __FreeBSD_version >= 1000006 */
3120 
3121 int
3122 mpssas_startup(struct mps_softc *sc)
3123 {
3124 	struct mpssas_softc *sassc;
3125 
3126 	/*
3127 	 * Send the port enable message and set the wait_for_port_enable flag.
3128 	 * This flag helps to keep the simq frozen until all discovery events
3129 	 * are processed.
3130 	 */
3131 	sassc = sc->sassc;
3132 	mpssas_startup_increment(sassc);
3133 	sc->wait_for_port_enable = 1;
3134 	mpssas_send_portenable(sc);
3135 	return (0);
3136 }
3137 
3138 static int
3139 mpssas_send_portenable(struct mps_softc *sc)
3140 {
3141 	MPI2_PORT_ENABLE_REQUEST *request;
3142 	struct mps_command *cm;
3143 
3144 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3145 
3146 	if ((cm = mps_alloc_command(sc)) == NULL)
3147 		return (EBUSY);
3148 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3149 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3150 	request->MsgFlags = 0;
3151 	request->VP_ID = 0;
3152 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3153 	cm->cm_complete = mpssas_portenable_complete;
3154 	cm->cm_data = NULL;
3155 	cm->cm_sge = NULL;
3156 
3157 	mps_map_command(sc, cm);
3158 	mps_dprint(sc, MPS_TRACE,
3159 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3160 	    cm, cm->cm_req, cm->cm_complete);
3161 	return (0);
3162 }
3163 
/*
 * Completion handler for the PORT ENABLE request sent by
 * mpssas_send_portenable().  Logs failures, tears down the config
 * intrhook, applies WarpDrive hide/expose policy to the target array,
 * and finally releases the startup refcount and the explicitly frozen
 * simq so the initial bus scan can proceed.
 */
static void
mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	int i;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for port enable! "
			   "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* Failures are logged only; startup still proceeds below. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
	else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");

	mps_free_command(sc, cm);
	/* Boot-time attach is done; let the system finish booting. */
	if (sc->mps_ich.ich_arg != NULL) {
		mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
		config_intrhook_disestablish(&sc->mps_ich);
		sc->mps_ich.ich_arg = NULL;
	}

	/*
	 * Get WarpDrive info after discovery is complete but before the scan
	 * starts.  At this point, all devices are ready to be exposed to the
	 * OS.  If devices should be hidden instead, take them out of the
	 * 'targets' array before the scan.  The devinfo for a disk will have
	 * some info and a volume's will be 0.  Use that to remove disks.
	 */
	mps_wd_config_pages(sc);
	if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
	  && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
	 || (sc->WD_valid_config && (sc->WD_hide_expose ==
	    MPS_WD_HIDE_IF_VOLUME))) {
		for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
			target = &sassc->targets[i];
			/* Non-zero devinfo identifies a disk; clear the
			 * entry so the scan does not expose it. */
			if (target->devinfo) {
				target->devinfo = 0x0;
				target->encl_handle = 0x0;
				target->encl_slot = 0x0;
				target->handle = 0x0;
				target->tid = 0x0;
				target->linkrate = 0x0;
				target->flags = 0x0;
			}
		}
	}

	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.  Since the simq was explicitly frozen before port
	 * enable, it must be explicitly released here to keep the
	 * freeze/release count in sync.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	mpssas_startup_decrement(sassc);
	xpt_release_simq(sassc->sim, 1);
}
3237