xref: /dragonfly/sys/dev/raid/mps/mps_sas.c (revision d4ef6694)
1 /*-
2  * Copyright (c) 2009 Yahoo! Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2011 LSI Corp.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms, with or without
31  * modification, are permitted provided that the following conditions
32  * are met:
33  * 1. Redistributions of source code must retain the above copyright
34  *    notice, this list of conditions and the following disclaimer.
35  * 2. Redistributions in binary form must reproduce the above copyright
36  *    notice, this list of conditions and the following disclaimer in the
37  *    documentation and/or other materials provided with the distribution.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49  * SUCH DAMAGE.
50  *
51  * LSI MPT-Fusion Host Adapter FreeBSD
52  *
53  * $FreeBSD: src/sys/dev/mps/mps_sas.c,v 1.16 2012/01/26 18:17:21 ken Exp $
54  */
55 
56 /* Communications core for LSI MPT2 */
57 
58 /* TODO Move headers to mpsvar */
59 #include <sys/types.h>
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/module.h>
64 #include <sys/bus.h>
65 #include <sys/conf.h>
66 #include <sys/eventhandler.h>
67 #include <sys/globaldata.h>
68 #include <sys/bio.h>
69 #include <sys/malloc.h>
70 #include <sys/uio.h>
71 #include <sys/sysctl.h>
72 #include <sys/endian.h>
73 #include <sys/queue.h>
74 #include <sys/kthread.h>
75 #include <sys/taskqueue.h>
76 #include <sys/sbuf.h>
77 
78 #include <sys/rman.h>
79 
80 #include <machine/stdarg.h>
81 
82 #include <bus/cam/cam.h>
83 #include <bus/cam/cam_ccb.h>
84 #include <bus/cam/cam_xpt.h>
85 #include <bus/cam/cam_debug.h>
86 #include <bus/cam/cam_sim.h>
87 #include <bus/cam/cam_xpt_sim.h>
88 #include <bus/cam/cam_xpt_periph.h>
89 #include <bus/cam/cam_periph.h>
90 #include <bus/cam/scsi/scsi_all.h>
91 #include <bus/cam/scsi/scsi_message.h>
92 #if 0 /* XXX __FreeBSD_version >= 900026 */
93 #include <bus/cam/scsi/smp_all.h>
94 #endif
95 
96 #include <dev/raid/mps/mpi/mpi2_type.h>
97 #include <dev/raid/mps/mpi/mpi2.h>
98 #include <dev/raid/mps/mpi/mpi2_ioc.h>
99 #include <dev/raid/mps/mpi/mpi2_sas.h>
100 #include <dev/raid/mps/mpi/mpi2_cnfg.h>
101 #include <dev/raid/mps/mpi/mpi2_init.h>
102 #include <dev/raid/mps/mpi/mpi2_tool.h>
103 #include <dev/raid/mps/mps_ioctl.h>
104 #include <dev/raid/mps/mpsvar.h>
105 #include <dev/raid/mps/mps_table.h>
106 #include <dev/raid/mps/mps_sas.h>
107 
108 #define MPSSAS_DISCOVERY_TIMEOUT	20
109 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
110 
/*
 * Static lookup table mapping a SCSI opcode (0x00-0xFF) to the MPI2 EEDP
 * (End-to-End Data Protection) flag the firmware needs for that command.
 * Non-zero entries correspond to the data-in/data-out/verify commands that
 * carry protection information; everything else gets no EEDP handling.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE&VERIFY(10), 0x2F VERIFY(10) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x41 WRITE SAME(10) */
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE&VERIFY(16), 0x8F VERIFY(16) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	/* 0x93 WRITE SAME(16) */
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE&VERIFY(12), 0xAF VERIFY(12) */
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
135 
136 static void mpssas_log_command(struct mps_command *, const char *, ...)
137 		__printflike(2, 3);
138 #if 0 /* XXX unused */
139 static void mpssas_discovery_timeout(void *data);
140 #endif
141 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
142 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
143 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
144 static void mpssas_poll(struct cam_sim *sim);
145 static void mpssas_scsiio_timeout(void *data);
146 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
147 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
148     struct mps_command *cm, union ccb *ccb);
149 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
150 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
151 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
152 #if __FreeBSD_version >= 900026
153 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
154 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
155 			       uint64_t sasaddr);
156 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
157 #endif //FreeBSD_version >= 900026
158 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
159 static int  mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
160 static int  mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
161 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
162 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
163 static void mpssas_scanner_thread(void *arg);
164 #if __FreeBSD_version >= 1000006
165 static void mpssas_async(void *callback_arg, uint32_t code,
166 			 struct cam_path *path, void *arg);
167 #else
168 static void mpssas_check_eedp(struct mpssas_softc *sassc);
169 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
170 #endif
171 static int mpssas_send_portenable(struct mps_softc *sc);
172 static void mpssas_portenable_complete(struct mps_softc *sc,
173     struct mps_command *cm);
174 
175 struct mpssas_target *
176 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
177 {
178 	struct mpssas_target *target;
179 	int i;
180 
181 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
182 		target = &sassc->targets[i];
183 		if (target->handle == handle)
184 			return (target);
185 	}
186 
187 	return (NULL);
188 }
189 
190 /* we need to freeze the simq during attach and diag reset, to avoid failing
191  * commands before device handles have been found by discovery.  Since
192  * discovery involves reading config pages and possibly sending commands,
193  * discovery actions may continue even after we receive the end of discovery
194  * event, so refcount discovery actions instead of assuming we can unfreeze
195  * the simq when we get the event.
196  */
197 void
198 mpssas_startup_increment(struct mpssas_softc *sassc)
199 {
200 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
201 		if (sassc->startup_refcount++ == 0) {
202 			/* just starting, freeze the simq */
203 			mps_dprint(sassc->sc, MPS_INFO,
204 			    "%s freezing simq\n", __func__);
205 			xpt_freeze_simq(sassc->sim, 1);
206 		}
207 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
208 		    sassc->startup_refcount);
209 	}
210 }
211 
212 void
213 mpssas_startup_decrement(struct mpssas_softc *sassc)
214 {
215 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
216 		if (--sassc->startup_refcount == 0) {
217 			/* finished all discovery-related actions, release
218 			 * the simq and rescan for the latest topology.
219 			 */
220 			mps_dprint(sassc->sc, MPS_INFO,
221 			    "%s releasing simq\n", __func__);
222 			sassc->flags &= ~MPSSAS_IN_STARTUP;
223 			xpt_release_simq(sassc->sim, 1);
224 			mpssas_rescan_target(sassc->sc, NULL);
225 		}
226 		mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
227 		    sassc->startup_refcount);
228 	}
229 }
230 
231 /* LSI's firmware requires us to stop sending commands when we're doing task
232  * management, so refcount the TMs and keep the simq frozen when any are in
233  * use.
234  */
235 struct mps_command *
236 mpssas_alloc_tm(struct mps_softc *sc)
237 {
238 	struct mps_command *tm;
239 
240 	tm = mps_alloc_high_priority_command(sc);
241 	if (tm != NULL) {
242 		if (sc->sassc->tm_count++ == 0) {
243 			mps_printf(sc, "%s freezing simq\n", __func__);
244 			xpt_freeze_simq(sc->sassc->sim, 1);
245 		}
246 		mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
247 		    sc->sassc->tm_count);
248 	}
249 	return tm;
250 }
251 
252 void
253 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
254 {
255 	if (tm == NULL)
256 		return;
257 
258 	/* if there are no TMs in use, we can release the simq.  We use our
259 	 * own refcount so that it's easier for a diag reset to cleanup and
260 	 * release the simq.
261 	 */
262 	if (--sc->sassc->tm_count == 0) {
263 		mps_printf(sc, "%s releasing simq\n", __func__);
264 		xpt_release_simq(sc->sassc->sim, 1);
265 	}
266 	mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
267 	    sc->sassc->tm_count);
268 
269 	mps_free_high_priority_command(sc, tm);
270 }
271 
272 
273 void
274 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
275 {
276 	struct mpssas_softc *sassc = sc->sassc;
277 	path_id_t pathid;
278 	target_id_t targetid;
279 	union ccb *ccb;
280 
281 	pathid = cam_sim_path(sassc->sim);
282 	if (targ == NULL)
283 		targetid = CAM_TARGET_WILDCARD;
284 	else
285 		targetid = targ - sassc->targets;
286 
287 	/*
288 	 * Allocate a CCB and schedule a rescan.
289 	 */
290 	ccb = xpt_alloc_ccb();
291 
292 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
293 		            targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
294 		mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
295 		xpt_free_ccb(ccb);
296 		return;
297 	}
298 
299 	/* XXX Hardwired to scan the bus for now */
300 	ccb->ccb_h.func_code = XPT_SCAN_BUS;
301 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
302 	mpssas_rescan(sassc, ccb);
303 }
304 
/*
 * Log a printf-style message for a command, prefixed with identifying
 * information: the CAM path (plus CDB and transfer length for SCSI I/O)
 * when a CCB is attached, otherwise a "noperiph" sim/bus/target/lun tag,
 * followed by the command's SMID.  No-op when cm is NULL.
 */
static void
mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
{
	struct sbuf sb;
	__va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* Build the whole line in a fixed-size sbuf so it is emitted with a
	 * single kprintf() and cannot interleave with other console output. */
	sbuf_new(&sb, str, sizeof(str), 0);

	__va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
				sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			/* include the decoded CDB and data length */
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
				    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		/* no CCB attached: identify the command by sim/target/lun */
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
		    cm->cm_lun);
	}

	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
	sbuf_vprintf(&sb, fmt, ap);
	sbuf_finish(&sb);
	kprintf("%s", sbuf_data(&sb));

	__va_end(ap);
}
346 
347 static void
348 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
349 {
350 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
351 	struct mpssas_target *targ;
352 	uint16_t handle;
353 
354 	mps_dprint(sc, MPS_INFO, "%s\n", __func__);
355 
356 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
357 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
358 	targ = tm->cm_targ;
359 
360 	if (reply == NULL) {
361 		/* XXX retry the remove after the diag reset completes? */
362 		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
363 			   __func__, handle);
364 		mpssas_free_tm(sc, tm);
365 		return;
366 	}
367 
368 	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
369 		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
370 			   reply->IOCStatus, handle);
371 		mpssas_free_tm(sc, tm);
372 		return;
373 	}
374 
375 	mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount);
376 	mps_free_reply(sc, tm->cm_reply_data);
377 	tm->cm_reply = NULL;    /* Ensures the reply won't get re-freed */
378 
379 	mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle);
380 
381 	/*
382 	 * Don't clear target if remove fails because things will get confusing.
383 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
384 	 * this target id if possible, and so we can assign the same target id
385 	 * to this device if it comes back in the future.
386 	 */
387 	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
388 		targ = tm->cm_targ;
389 		targ->handle = 0x0;
390 		targ->encl_handle = 0x0;
391 		targ->encl_slot = 0x0;
392 		targ->exp_dev_handle = 0x0;
393 		targ->phy_num = 0x0;
394 		targ->linkrate = 0x0;
395 		targ->devinfo = 0x0;
396 		targ->flags = 0x0;
397 	}
398 
399 	mpssas_free_tm(sc, tm);
400 }
401 
402 /*
403  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
404  * Otherwise Volume Delete is same as Bare Drive Removal.
405  */
406 void
407 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
408 {
409 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
410 	struct mps_softc *sc;
411 	struct mps_command *cm;
412 	struct mpssas_target *targ = NULL;
413 
414 	mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
415 	sc = sassc->sc;
416 
417 #ifdef WD_SUPPORT
418 	/*
419 	 * If this is a WD controller, determine if the disk should be exposed
420 	 * to the OS or not.  If disk should be exposed, return from this
421 	 * function without doing anything.
422 	 */
423 	if (sc->WD_available && (sc->WD_hide_expose ==
424 	    MPS_WD_EXPOSE_ALWAYS)) {
425 		return;
426 	}
427 #endif
428 
429 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
430 	if (targ == NULL) {
431 		/* FIXME: what is the action? */
432 		/* We don't know about this device? */
433 		kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
434 		return;
435 	}
436 
437 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
438 
439 	cm = mpssas_alloc_tm(sc);
440 	if (cm == NULL) {
441 		mps_printf(sc, "%s: command alloc failure\n", __func__);
442 		return;
443 	}
444 
445 	mpssas_rescan_target(sc, targ);
446 
447 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
448 	req->DevHandle = targ->handle;
449 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
450 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
451 
452 	/* SAS Hard Link Reset / SATA Link Reset */
453 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
454 
455 	cm->cm_targ = targ;
456 	cm->cm_data = NULL;
457 	cm->cm_desc.HighPriority.RequestFlags =
458 		MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
459 	cm->cm_complete = mpssas_remove_volume;
460 	cm->cm_complete_data = (void *)(uintptr_t)handle;
461 	mps_map_command(sc, cm);
462 }
463 
464 /*
465  * The MPT2 firmware performs debounce on the link to avoid transient link
466  * errors and false removals.  When it does decide that link has been lost
467  * and a device need to go away, it expects that the host will perform a
468  * target reset and then an op remove.  The reset has the side-effect of
469  * aborting any outstanding requests for the device, which is required for
470  * the op-remove to succeed.  It's not clear if the host should check for
471  * the device coming back alive after the reset.
472  */
473 void
474 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
475 {
476 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
477 	struct mps_softc *sc;
478 	struct mps_command *cm;
479 	struct mpssas_target *targ = NULL;
480 
481 	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
482 
483 	/*
484 	 * If this is a WD controller, determine if the disk should be exposed
485 	 * to the OS or not.  If disk should be exposed, return from this
486 	 * function without doing anything.
487 	 */
488 	sc = sassc->sc;
489 	if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
490 	    MPS_WD_EXPOSE_ALWAYS)) {
491 		return;
492 	}
493 
494 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
495 	if (targ == NULL) {
496 		/* FIXME: what is the action? */
497 		/* We don't know about this device? */
498 		kprintf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
499 		return;
500 	}
501 
502 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
503 
504 	cm = mpssas_alloc_tm(sc);
505 	if (cm == NULL) {
506 		mps_printf(sc, "%s: command alloc failure\n", __func__);
507 		return;
508 	}
509 
510 	mpssas_rescan_target(sc, targ);
511 
512 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
513 	memset(req, 0, sizeof(*req));
514 	req->DevHandle = targ->handle;
515 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
516 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
517 
518 	/* SAS Hard Link Reset / SATA Link Reset */
519 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
520 
521 	cm->cm_targ = targ;
522 	cm->cm_data = NULL;
523 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
524 	cm->cm_complete = mpssas_remove_device;
525 	cm->cm_complete_data = (void *)(uintptr_t)handle;
526 	mps_map_command(sc, cm);
527 }
528 
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On success it reuses the same command frame
 * to send the SAS_IO_UNIT_CONTROL "remove device" operation, then fails
 * any commands still queued to the departing target with
 * CAM_DEV_NOT_THERE.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes? */
		mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
		mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
		   reply->IOCStatus, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
	    reply->TerminationCount);
	/* Free the reply now; the command itself is being reused below. */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = handle;
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
		   targ->tid, handle);
	/*
	 * Complete everything still queued to this target; the reset above
	 * aborted them in the firmware.  NOTE(review): this assumes
	 * cm_complete_data of a queued SCSI I/O command holds its CCB --
	 * confirm against mpssas_action_scsiio().
	 */
	TAILQ_FOREACH_MUTABLE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}
601 
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE operation sent by
 * mpssas_remove_device().  On success, clears the target's volatile
 * state so its slot can be matched or reused later.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
			   "This should not happen!\n", __func__, tm->cm_flags,
			   handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
		    __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, reply->IOCStatus);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;
	}

	mpssas_free_tm(sc, tm);
}
658 
659 static int
660 mpssas_register_events(struct mps_softc *sc)
661 {
662 	uint8_t events[16];
663 
664 	bzero(events, 16);
665 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
666 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
667 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
668 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
669 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
670 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
671 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
672 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
673 	setbit(events, MPI2_EVENT_IR_VOLUME);
674 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
675 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
676 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
677 
678 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
679 	    &sc->sassc->mpssas_eh);
680 
681 	return (0);
682 }
683 
/*
 * Attach the SAS/CAM layer to the controller: allocate the per-adapter
 * SAS state and target table, create the CAM sim/simq, start the event
 * taskqueue and rescan thread, register the bus and freeze the simq
 * until initial discovery finishes.  On any failure, mps_detach_sas()
 * tears down whatever was set up.  Returns 0 or an errno.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
#if __FreeBSD_version >= 1000006
	cam_status status;
#endif
	int unit, error = 0;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	/* Per-adapter SAS state plus one target slot per possible device. */
	sassc = kmalloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	sassc->targets = kmalloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_lock, sc->num_reqs, sc->num_reqs, sassc->devq);
	/* NOTE(review): presumably cam_sim_alloc() took its own reference on
	 * the simq, so our reference is dropped here -- confirm against
	 * DragonFly's CAM refcounting. */
	cam_simq_release(sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_INTWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, -1, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	/* Dedicated kernel thread that services queued rescan CCBs. */
	TAILQ_INIT(&sassc->ccb_scanq);
	error = mps_kproc_create(mpssas_scanner_thread, sassc,
	    &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
	if (error) {
		mps_printf(sc, "Error %d starting rescan thread\n", error);
		goto out;
	}

	mps_lock(sc);
	sassc->flags |= MPSSAS_SCANTHREAD;

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, 0)) != 0) {
		mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.  Freezing
	 * the simq will prevent the CAM boottime scanner from running
	 * before discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sassc->sim, 1);
	sc->sassc->startup_refcount = 0;

	callout_init_mp(&sassc->discovery_callout);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

#if __FreeBSD_version >= 1000006
	/* Registration failure is non-fatal; only advanced-info updates
	 * are lost. */
	status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x registering async handler for "
			   "AC_ADVINFO_CHANGED events\n", status);
	}
#endif

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}
781 
/*
 * Tear down the SAS/CAM attachment created by mps_attach_sas().  Safe to
 * call on a partially-attached instance (it is the error-path cleanup
 * for attach as well).  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;

	mps_dprint(sc, MPS_INFO, "%s\n", __func__);

	/* Nothing to do if attach never got far enough to allocate state. */
	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */
	mps_lock(sc);

	/* Deregister our async handler */
#if __FreeBSD_version >= 1000006
	xpt_register_async(0, mpssas_async, sc, NULL);
#endif

	/* Undo the startup freeze if discovery never completed. */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim);
	}

	/* Ask the scanner thread to exit and wait (bounded) for it; the
	 * thread clears MPSSAS_SCANTHREAD and wakes &sassc->flags when it
	 * shuts down. */
	if (sassc->flags & MPSSAS_SCANTHREAD) {
		sassc->flags |= MPSSAS_SHUTDOWN;
		wakeup(&sassc->ccb_scanq);

		if (sassc->flags & MPSSAS_SCANTHREAD) {
			lksleep(&sassc->flags, &sc->mps_lock, 0,
			       "mps_shutdown", 30 * hz);
		}
	}
	mps_unlock(sc);

	kfree(sassc->targets, M_MPT2);
	kfree(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}
836 
837 void
838 mpssas_discovery_end(struct mpssas_softc *sassc)
839 {
840 	struct mps_softc *sc = sassc->sc;
841 
842 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
843 
844 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
845 		callout_stop(&sassc->discovery_callout);
846 
847 }
848 
#if 0 /* XXX unused */
/*
 * Callout handler fired when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds.  Polls the hardware in case
 * interrupts are broken, then either ends discovery, re-arms the
 * timeout (up to MPSSAS_MAX_DISCOVERY_TIMEOUTS times), or gives up and
 * forces discovery to end.  Currently compiled out.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);

	mps_lock(sc);
	mps_printf(sc,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_printf(sassc->sc,
	    "Finished polling after discovery timeout at %d\n", ticks);

	/* Polling may have delivered the end-of-discovery event. */
	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			/* still discovering: re-arm the timeout */
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}
#endif
890 
/*
 * CAM action entry point for the SAS SIM.  Dispatch the CCB on its
 * function code.  Requests satisfied inline fall through to the common
 * xpt_done() at the bottom; SCSI I/O, device reset and SMP passthrough
 * requests are handed to helpers that complete the CCB themselves,
 * hence the early returns.
 *
 * The controller lock must be held by the caller (asserted below).
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
	    ccb->ccb_h.func_code);
	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Report the HBA's capabilities and limits to CAM. */
		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 8;
		/* The initiator occupies the highest target id. */
		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		/*
		 * NOTE(review): target_id indexes targets[] without a range
		 * check -- presumably CAM never passes an id above the
		 * max_target reported in XPT_PATH_INQ; verify.
		 */
		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			/* No device attached at this target id. */
			cts->ccb_h.status = CAM_TID_INVALID;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		/* Map the negotiated PHY link rate code to a bitrate. */
		sas->valid = CTS_SAS_VALID_SPEED;
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			/* Unknown rate code: mark the speed as not valid. */
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		break;
	case XPT_RESET_DEV:
		mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
		/* CCB is completed asynchronously by the reset path. */
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		/* Not implemented; report success so CAM can proceed. */
		mps_printf(sassc->sc, "mpssas_action faking success for "
			   "abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		/* CCB is completed asynchronously on I/O completion. */
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}
1006 
1007 static void
1008 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1009     target_id_t target_id, lun_id_t lun_id)
1010 {
1011 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1012 	struct cam_path *path;
1013 
1014 	mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
1015 	    ac_code, target_id, lun_id);
1016 
1017 	if (xpt_create_path(&path, NULL,
1018 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1019 		mps_printf(sc, "unable to create path for reset "
1020 			   "notification\n");
1021 		return;
1022 	}
1023 
1024 	xpt_async(ac_code, path, NULL);
1025 	xpt_free_path(path);
1026 }
1027 
1028 static void
1029 mpssas_complete_all_commands(struct mps_softc *sc)
1030 {
1031 	struct mps_command *cm;
1032 	int i;
1033 	int completed;
1034 
1035 	mps_printf(sc, "%s\n", __func__);
1036 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1037 
1038 	/* complete all commands with a NULL reply */
1039 	for (i = 1; i < sc->num_reqs; i++) {
1040 		cm = &sc->commands[i];
1041 		cm->cm_reply = NULL;
1042 		completed = 0;
1043 
1044 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1045 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1046 
1047 		if (cm->cm_complete != NULL) {
1048 			mpssas_log_command(cm,
1049 			    "completing cm %p state %x ccb %p for diag reset\n",
1050 			    cm, cm->cm_state, cm->cm_ccb);
1051 
1052 			cm->cm_complete(sc, cm);
1053 			completed = 1;
1054 		}
1055 
1056 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1057 			mpssas_log_command(cm,
1058 			    "waking up cm %p state %x ccb %p for diag reset\n",
1059 			    cm, cm->cm_state, cm->cm_ccb);
1060 			wakeup(cm);
1061 			completed = 1;
1062 		}
1063 
1064 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1065 			/* this should never happen, but if it does, log */
1066 			mpssas_log_command(cm,
1067 			    "cm %p state %x flags 0x%x ccb %p during diag "
1068 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1069 			    cm->cm_ccb);
1070 		}
1071 	}
1072 }
1073 
/*
 * Called after a controller diag reset/reinit.  Puts the driver back
 * into startup/discovery mode, announces a bus reset to CAM, flushes
 * all outstanding commands, and invalidates every cached target handle
 * so that rediscovery can repopulate them.
 */
void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_printf(sc, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	xpt_freeze_simq(sc->sassc->sim, 1);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_printf(sc, "%s startup %u tm %u after command completion\n",
	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);

	/*
	 * The simq was explicitly frozen above, so set the refcount to 0.
	 * The simq will be explicitly released after port enable completes.
	 */
	sc->sassc->startup_refcount = 0;

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->facts->MaxTargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_printf(sc, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
	}
}
1122 static void
1123 mpssas_tm_timeout(void *data)
1124 {
1125 	struct mps_command *tm = data;
1126 	struct mps_softc *sc = tm->cm_sc;
1127 
1128 	mps_lock(sc);
1129 	mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1130 	mps_reinit(sc);
1131 	mps_unlock(sc);
1132 }
1133 
/*
 * Completion handler for a LOGICAL_UNIT_RESET task management command.
 * If no commands remain queued on the LUN, the reset worked: announce
 * it to CAM and either start aborting the next timed-out command on
 * this target or end recovery.  If commands remain, the reset
 * effectively failed and recovery escalates to a target reset.
 */
static void
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	unsigned int cm_count = 0;
	struct mps_command *cm;
	struct mpssas_target *targ;

	/* Cancel the escalation timer armed by mpssas_send_reset(). */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "logical unit reset status 0x%x code 0x%x count %u\n",
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	/* See if there are any outstanding commands for this LUN.
	 * This could be made more efficient by using a per-LU data
	 * structure of some sort.
	 */
	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
		if (cm->cm_lun == tm->cm_lun)
			cm_count++;
	}

	if (cm_count == 0) {
		mpssas_log_command(tm,
		    "logical unit %u finished recovery after reset\n",
		    tm->cm_lun);

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    tm->cm_lun);

		/* we've finished recovery for this logical unit.  check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			/* Reuse this TM slot to abort the next victim. */
			mpssas_send_abort(sc, tm, cm);
		}
		else {
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	}
	else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpssas_log_command(tm,
		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
		    tm, cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}
1221 
/*
 * Completion handler for a TARGET_RESET task management command.  If
 * the target has no outstanding commands left, recovery is complete
 * and an AC_SENT_BDR is announced for the whole target; otherwise the
 * reset effectively failed and we escalate to a controller reinit.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mpssas_target *targ;

	/* Cancel the escalation timer armed by mpssas_send_reset(). */
	callout_stop(&tm->cm_callout);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for target reset! "
			   "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "target reset status 0x%x code 0x%x count %u\n",
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}
1291 
1292 #define MPS_RESET_TIMEOUT 30
1293 
1294 static int
1295 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1296 {
1297 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1298 	struct mpssas_target *target;
1299 	int err;
1300 
1301 	target = tm->cm_targ;
1302 	if (target->handle == 0) {
1303 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1304 		    __func__, target->tid);
1305 		return -1;
1306 	}
1307 
1308 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1309 	req->DevHandle = target->handle;
1310 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1311 	req->TaskType = type;
1312 
1313 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1314 		/* XXX Need to handle invalid LUNs */
1315 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1316 		tm->cm_targ->logical_unit_resets++;
1317 		mpssas_log_command(tm, "sending logical unit reset\n");
1318 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1319 	}
1320 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1321 		/* Target reset method =  SAS Hard Link Reset / SATA Link Reset */
1322 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1323 		tm->cm_targ->target_resets++;
1324 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1325 		mpssas_log_command(tm, "sending target reset\n");
1326 		tm->cm_complete = mpssas_target_reset_complete;
1327 	}
1328 	else {
1329 		mps_printf(sc, "unexpected reset type 0x%x\n", type);
1330 		return -1;
1331 	}
1332 
1333 	tm->cm_data = NULL;
1334 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1335 	tm->cm_complete_data = (void *)tm;
1336 
1337 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1338 	    mpssas_tm_timeout, tm);
1339 
1340 	err = mps_map_command(sc, tm);
1341 	if (err)
1342 		mpssas_log_command(tm,
1343 		    "error %d sending reset type %u\n",
1344 		    err, type);
1345 
1346 	return err;
1347 }
1348 
1349 
/*
 * Completion handler for an ABORT_TASK task management command.  Based
 * on the head of the target's timed-out queue: if the queue is empty,
 * recovery is finished; if the head is a different command, the abort
 * worked and the next victim is aborted; if the head is still the
 * command we just tried to abort, the abort failed and recovery
 * escalates to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	/* Cancel the escalation timer armed by mpssas_send_abort(). */
	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, req->TaskMID);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, req->TaskMID);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    req->TaskMID,
	    reply->IOCStatus, reply->ResponseCode,
	    reply->TerminationCount);

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm,
		    "finished recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (req->TaskMID != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm,
		    "continuing recovery after aborting TaskMID %u\n",
		    req->TaskMID);

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
		 */
		mpssas_log_command(tm,
		    "abort failed for TaskMID %u tm %p\n",
		    req->TaskMID, tm);

		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
	}
}
1431 
1432 #define MPS_ABORT_TIMEOUT 5
1433 
1434 static int
1435 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1436 {
1437 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1438 	struct mpssas_target *targ;
1439 	int err;
1440 
1441 	targ = cm->cm_targ;
1442 	if (targ->handle == 0) {
1443 		mps_printf(sc, "%s null devhandle for target_id %d\n",
1444 		    __func__, cm->cm_ccb->ccb_h.target_id);
1445 		return -1;
1446 	}
1447 
1448 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1449 	req->DevHandle = targ->handle;
1450 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1451 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1452 
1453 	/* XXX Need to handle invalid LUNs */
1454 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1455 
1456 	req->TaskMID = cm->cm_desc.Default.SMID;
1457 
1458 	tm->cm_data = NULL;
1459 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1460 	tm->cm_complete = mpssas_abort_complete;
1461 	tm->cm_complete_data = (void *)tm;
1462 	tm->cm_targ = cm->cm_targ;
1463 	tm->cm_lun = cm->cm_lun;
1464 
1465 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1466 	    mpssas_tm_timeout, tm);
1467 
1468 	targ->aborts++;
1469 
1470 	err = mps_map_command(sc, tm);
1471 	if (err)
1472 		mpssas_log_command(tm,
1473 		    "error %d sending abort for cm %p SMID %u\n",
1474 		    err, cm, req->TaskMID);
1475 	return err;
1476 }
1477 
1478 
/*
 * Callout handler for a SCSI I/O that has exceeded its CAM timeout.
 * Marks the command as timed out, queues it on the target's recovery
 * list, and starts recovery by sending an abort -- unless the target
 * already has a TM in flight, in which case this command simply waits
 * its turn on the timedout queue.
 */
static void
mpssas_scsiio_timeout(void *data)
{
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ;

	cm = (struct mps_command *)data;
	sc = cm->cm_sc;

	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);

	/*
	 * Run the interrupt handler to make sure it's not pending.  This
	 * isn't perfect because the command could have already completed
	 * and been re-used, though this is unlikely.
	 */
	mps_intr_locked(sc);
	if (cm->cm_state == MPS_CM_STATE_FREE) {
		/* The interrupt pass just completed it: nothing to do. */
		mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
		return;
	}

	if (cm->cm_ccb == NULL) {
		mps_printf(sc, "command timeout with NULL ccb\n");
		return;
	}

	mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
	    cm, cm->cm_ccb);

	targ = cm->cm_targ;
	targ->timeouts++;

	/* XXX first, check the firmware state, to see if it's still
	 * operational.  if not, do a diag reset.
	 */

	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);

	if (targ->tm != NULL) {
		/* target already in recovery, just queue up another
		 * timedout command to be processed later.
		 */
		mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
		    cm, targ->tm);
	}
	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
		mps_printf(sc, "timedout cm %p allocated tm %p\n",
		    cm, targ->tm);

		/* start recovery by aborting the first timedout command */
		mpssas_send_abort(sc, targ->tm, cm);
	}
	else {
		/* XXX queue this target up for recovery once a TM becomes
		 * available.  The firmware only has a limited number of
		 * HighPriority credits for the high priority requests used
		 * for task management, and we ran out.
		 *
		 * Isilon: don't worry about this for now, since we have
		 * more credits than disks in an enclosure, and limit
		 * ourselves to one TM per target for recovery.
		 */
		mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
		    cm);
	}

}
1552 
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, build the MPI2
 * SCSI_IO request (direction, tag controls, LUN, CDB, optional EEDP
 * protection fields), optionally convert it to a WarpDrive direct-drive
 * I/O, arm the per-command timeout, and hand the command to the
 * hardware.  The CCB is completed later by mpssas_scsiio_complete(),
 * or immediately via xpt_done() on any of the early-exit error paths.
 *
 * Must be called with the controller lock held (asserted below).
 */
static void
mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_IO_REQUEST *req;
	struct ccb_scsiio *csio;
	struct mps_softc *sc;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;
	struct mps_command *cm;
	uint8_t i, lba_byte, *ref_tag_addr;
	uint16_t eedp_flags;

	sc = sassc->sc;
	mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);

	csio = &ccb->csio;
	targ = &sassc->targets[csio->ccb_h.target_id];
	if (targ->handle == 0x0) {
		/* No device attached at this target id. */
		mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
		    __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}
	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
		/* RAID members only accept I/O through the volume. */
		mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n",
			   __func__, csio->ccb_h.target_id);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
	 * that the volume has timed out.  We want volumes to be enumerated
	 * until they are deleted/removed, not just failed.
	 */
	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
		if (targ->devinfo == 0)
			csio->ccb_h.status = CAM_REQ_CMP;
		else
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
		mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
		csio->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		/* Out of command slots: freeze the queue and ask CAM to
		 * requeue this CCB later.
		 */
		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
			xpt_freeze_simq(sassc->sim, 1);
			sassc->flags |= MPSSAS_QUEUE_FROZEN;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}

	/* Fill out the MPI2 SCSI I/O request frame. */
	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	req->MsgFlags = 0;
	req->SenseBufferLowAddress = cm->cm_sense_busaddr;
	req->SenseBufferLength = MPS_SENSE_LEN;
	req->SGLFlags = 0;
	req->ChainOffset = 0;
	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
	req->SGLOffset1= 0;
	req->SGLOffset2= 0;
	req->SGLOffset3= 0;
	req->SkipCount = 0;
	req->DataLength = csio->dxfer_len;
	req->BidirectionalDataLength = 0;
	req->IoFlags = csio->cdb_len;
	req->EEDPFlags = 0;

	/* Note: BiDirectional transfers are not supported */
	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
	case CAM_DIR_IN:
		req->Control = MPI2_SCSIIO_CONTROL_READ;
		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
		break;
	case CAM_DIR_OUT:
		req->Control = MPI2_SCSIIO_CONTROL_WRITE;
		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
		break;
	case CAM_DIR_NONE:
	default:
		req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
		break;
	}

	/*
	 * It looks like the hardware doesn't require an explicit tag
	 * number for each transaction.  SAM Task Management not supported
	 * at the moment.
	 */
	switch (csio->tag_action) {
	case MSG_HEAD_OF_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
		break;
	case MSG_ORDERED_Q_TAG:
		req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
		break;
	case MSG_ACA_TASK:
		req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
		break;
	case CAM_TAG_ACTION_NONE:
	case MSG_SIMPLE_Q_TAG:
	default:
		req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		break;
	}
	/* Merge in the per-target TLR (transport layer retry) setting. */
	req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;

	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
		mps_free_command(sc, cm);
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return;
	}

	if (csio->ccb_h.flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
	/*
	 * NOTE(review): IoFlags was already assigned csio->cdb_len above;
	 * this repeat is redundant but harmless.
	 */
	req->IoFlags = csio->cdb_len;

	/*
	 * Check if EEDP is supported and enabled.  If it is then check if the
	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
	 * is formatted for EEDP support.  If all of this is true, set CDB up
	 * for EEDP transfer.
	 */
	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
	if (sc->eedp_enabled && eedp_flags) {
		/* lun is NULL after the loop if no matching LUN is found. */
		SLIST_FOREACH(lun, &targ->luns, lun_link) {
			if (lun->lun_id == csio->ccb_h.target_lun) {
				break;
			}
		}

		if ((lun != NULL) && (lun->eedp_formatted)) {
			req->EEDPBlockSize = lun->eedp_block_size;
			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
			req->EEDPFlags = eedp_flags;

			/*
			 * If CDB less than 32, fill in Primary Ref Tag with
			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
			 * already there.  Also, set protection bit.  FreeBSD
			 * currently does not support CDBs bigger than 16, but
			 * the code doesn't hurt, and will be here for the
			 * future.
			 */
			if (csio->cdb_len != 32) {
				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
				    PrimaryReferenceTag;
				for (i = 0; i < 4; i++) {
					*ref_tag_addr =
					    req->CDB.CDB32[lba_byte + i];
					ref_tag_addr++;
				}
				req->CDB.EEDP32.PrimaryApplicationTagMask =
				    0xFFFF;
				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
				    0x20;
			} else {
				eedp_flags |=
				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
				req->EEDPFlags = eedp_flags;
				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
				    0x1F) | 0x20;
			}
		}
	}

	cm->cm_data = csio->data_ptr;
	cm->cm_length = csio->dxfer_len;
	cm->cm_sge = &req->SGL;
	cm->cm_sglsize = (32 - 24) * 4;
	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	cm->cm_desc.SCSIIO.DevHandle = targ->handle;
	cm->cm_complete = mpssas_scsiio_complete;
	cm->cm_complete_data = ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* CAM timeout is in milliseconds; convert to ticks. */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	   mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);

	if ((sc->mps_debug & MPS_TRACE) != 0)
		mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
		    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}
1781 
1782 static void
1783 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1784 {
1785 	MPI2_SCSI_IO_REPLY *rep;
1786 	union ccb *ccb;
1787 	struct ccb_scsiio *csio;
1788 	struct mpssas_softc *sassc;
1789 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
1790 	u8 *TLR_bits, TLR_on;
1791 	int dir = 0, i;
1792 	u16 alloc_len;
1793 
1794 	mps_dprint(sc, MPS_TRACE,
1795 	    "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1796 	    __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1797 	    cm->cm_targ->outstanding);
1798 
1799 	callout_stop(&cm->cm_callout);
1800 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
1801 
1802 	sassc = sc->sassc;
1803 	ccb = cm->cm_complete_data;
1804 	csio = &ccb->csio;
1805 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1806 	/*
1807 	 * XXX KDM if the chain allocation fails, does it matter if we do
1808 	 * the sync and unload here?  It is simpler to do it in every case,
1809 	 * assuming it doesn't cause problems.
1810 	 */
1811 	if (cm->cm_data != NULL) {
1812 		if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1813 			dir = BUS_DMASYNC_POSTREAD;
1814 		else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1815 			dir = BUS_DMASYNC_POSTWRITE;
1816 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1817 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1818 	}
1819 
1820 	cm->cm_targ->completed++;
1821 	cm->cm_targ->outstanding--;
1822 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
1823 
1824 	if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1825 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1826 		if (cm->cm_reply != NULL)
1827 			mpssas_log_command(cm,
1828 			    "completed timedout cm %p ccb %p during recovery "
1829 			    "ioc %x scsi %x state %x xfer %u\n",
1830 			    cm, cm->cm_ccb,
1831 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1832 			    rep->TransferCount);
1833 		else
1834 			mpssas_log_command(cm,
1835 			    "completed timedout cm %p ccb %p during recovery\n",
1836 			    cm, cm->cm_ccb);
1837 	} else if (cm->cm_targ->tm != NULL) {
1838 		if (cm->cm_reply != NULL)
1839 			mpssas_log_command(cm,
1840 			    "completed cm %p ccb %p during recovery "
1841 			    "ioc %x scsi %x state %x xfer %u\n",
1842 			    cm, cm->cm_ccb,
1843 			    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1844 			    rep->TransferCount);
1845 		else
1846 			mpssas_log_command(cm,
1847 			    "completed cm %p ccb %p during recovery\n",
1848 			    cm, cm->cm_ccb);
1849 	} else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1850 		mpssas_log_command(cm,
1851 		    "reset completed cm %p ccb %p\n",
1852 		    cm, cm->cm_ccb);
1853 	}
1854 
1855 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1856 		/*
1857 		 * We ran into an error after we tried to map the command,
1858 		 * so we're getting a callback without queueing the command
1859 		 * to the hardware.  So we set the status here, and it will
1860 		 * be retained below.  We'll go through the "fast path",
1861 		 * because there can be no reply when we haven't actually
1862 		 * gone out to the hardware.
1863 		 */
1864 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1865 
1866 		/*
1867 		 * Currently the only error included in the mask is
1868 		 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1869 		 * chain frames.  We need to freeze the queue until we get
1870 		 * a command that completed without this error, which will
1871 		 * hopefully have some chain frames attached that we can
1872 		 * use.  If we wanted to get smarter about it, we would
1873 		 * only unfreeze the queue in this condition when we're
1874 		 * sure that we're getting some chain frames back.  That's
1875 		 * probably unnecessary.
1876 		 */
1877 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1878 			xpt_freeze_simq(sassc->sim, 1);
1879 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1880 			mps_dprint(sc, MPS_INFO, "Error sending command, "
1881 				   "freezing SIM queue\n");
1882 		}
1883 	}
1884 
1885 	/* Take the fast path to completion */
1886 	if (cm->cm_reply == NULL) {
1887 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1888 			if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1889 				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1890 			else {
1891 				ccb->ccb_h.status = CAM_REQ_CMP;
1892 				ccb->csio.scsi_status = SCSI_STATUS_OK;
1893 			}
1894 			if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1895 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1896 				sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1897 				mps_dprint(sc, MPS_INFO,
1898 					   "Unfreezing SIM queue\n");
1899 			}
1900 		}
1901 
1902 		/*
1903 		 * There are two scenarios where the status won't be
1904 		 * CAM_REQ_CMP.  The first is if MPS_CM_FLAGS_ERROR_MASK is
1905 		 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1906 		 */
1907 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1908 			/*
1909 			 * Freeze the dev queue so that commands are
1910 			 * executed in the correct order with after error
1911 			 * recovery.
1912 			 */
1913 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
1914 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1915 		}
1916 		mps_free_command(sc, cm);
1917 		xpt_done(ccb);
1918 		return;
1919 	}
1920 
1921 	if (sc->mps_debug & MPS_TRACE)
1922 		mpssas_log_command(cm,
1923 		    "ioc %x scsi %x state %x xfer %u\n",
1924 		    rep->IOCStatus, rep->SCSIStatus,
1925 		    rep->SCSIState, rep->TransferCount);
1926 
1927 	/*
1928 	 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1929 	 * Volume if an error occurred (normal I/O retry).  Use the original
1930 	 * CCB, but set a flag that this will be a retry so that it's sent to
1931 	 * the original volume.  Free the command but reuse the CCB.
1932 	 */
1933 	if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1934 		mps_free_command(sc, cm);
1935 		ccb->ccb_h.status = MPS_WD_RETRY;
1936 		mpssas_action_scsiio(sassc, ccb);
1937 		return;
1938 	}
1939 
1940 	switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1941 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1942 		csio->resid = cm->cm_length - rep->TransferCount;
1943 		/* FALLTHROUGH */
1944 	case MPI2_IOCSTATUS_SUCCESS:
1945 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1946 
1947 		if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1948 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1949 			mpssas_log_command(cm, "recovered error\n");
1950 
1951 		/* Completion failed at the transport level. */
1952 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1953 		    MPI2_SCSI_STATE_TERMINATED)) {
1954 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1955 			break;
1956 		}
1957 
1958 		/* In a modern packetized environment, an autosense failure
1959 		 * implies that there's not much else that can be done to
1960 		 * recover the command.
1961 		 */
1962 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1963 			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1964 			break;
1965 		}
1966 
1967 		/*
1968 		 * CAM doesn't care about SAS Response Info data, but if this is
1969 		 * the state check if TLR should be done.  If not, clear the
1970 		 * TLR_bits for the target.
1971 		 */
1972 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1973 		    ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1974 		    MPS_SCSI_RI_INVALID_FRAME)) {
1975 			sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1976 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1977 		}
1978 
1979 		/*
1980 		 * Intentionally override the normal SCSI status reporting
1981 		 * for these two cases.  These are likely to happen in a
1982 		 * multi-initiator environment, and we want to make sure that
1983 		 * CAM retries these commands rather than fail them.
1984 		 */
1985 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1986 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1987 			ccb->ccb_h.status = CAM_REQ_ABORTED;
1988 			break;
1989 		}
1990 
1991 		/* Handle normal status and sense */
1992 		csio->scsi_status = rep->SCSIStatus;
1993 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1994 			ccb->ccb_h.status = CAM_REQ_CMP;
1995 		else
1996 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1997 
1998 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1999 			int sense_len, returned_sense_len;
2000 
2001 			returned_sense_len = min(rep->SenseCount,
2002 			    sizeof(struct scsi_sense_data));
2003 			if (returned_sense_len < ccb->csio.sense_len)
2004 				ccb->csio.sense_resid = ccb->csio.sense_len -
2005 					returned_sense_len;
2006 			else
2007 				ccb->csio.sense_resid = 0;
2008 
2009 			sense_len = min(returned_sense_len,
2010 			    ccb->csio.sense_len - ccb->csio.sense_resid);
2011 			bzero(&ccb->csio.sense_data,
2012 			      sizeof(ccb->csio.sense_data));
2013 			bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2014 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2015 		}
2016 
2017 		/*
2018 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2019 		 * and it's page code 0 (Supported Page List), and there is
2020 		 * inquiry data, and this is for a sequential access device, and
2021 		 * the device is an SSP target, and TLR is supported by the
2022 		 * controller, turn the TLR_bits value ON if page 0x90 is
2023 		 * supported.
2024 		 */
2025 		if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2026 		    (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2027 		    (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2028 		    (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2029 		    T_SEQUENTIAL) && (sc->control_TLR) &&
2030 		    (sc->mapping_table[csio->ccb_h.target_id].device_info &
2031 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2032 			vpd_list = (struct scsi_vpd_supported_page_list *)
2033 			    csio->data_ptr;
2034 			TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2035 			    TLR_bits;
2036 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2037 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2038 			alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2039 			    csio->cdb_io.cdb_bytes[4];
2040 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2041 				if (vpd_list->list[i] == 0x90) {
2042 					*TLR_bits = TLR_on;
2043 					break;
2044 				}
2045 			}
2046 		}
2047 		break;
2048 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2049 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2050 		/*
2051 		 * If devinfo is 0 this will be a volume.  In that case don't
2052 		 * tell CAM that the volume is not there.  We want volumes to
2053 		 * be enumerated until they are deleted/removed, not just
2054 		 * failed.
2055 		 */
2056 		if (cm->cm_targ->devinfo == 0)
2057 			ccb->ccb_h.status = CAM_REQ_CMP;
2058 		else
2059 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2060 		break;
2061 	case MPI2_IOCSTATUS_INVALID_SGL:
2062 		mps_print_scsiio_cmd(sc, cm);
2063 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2064 		break;
2065 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2066 		/*
2067 		 * This is one of the responses that comes back when an I/O
2068 		 * has been aborted.  If it is because of a timeout that we
2069 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2070 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2071 		 * command is the same (it gets retried, subject to the
2072 		 * retry counter), the only difference is what gets printed
2073 		 * on the console.
2074 		 */
2075 		if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2076 			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2077 		else
2078 			ccb->ccb_h.status = CAM_REQ_ABORTED;
2079 		break;
2080 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2081 		/* resid is ignored for this condition */
2082 		csio->resid = 0;
2083 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2084 		break;
2085 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2086 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2087 		/*
2088 		 * Since these are generally external (i.e. hopefully
2089 		 * transient transport-related) errors, retry these without
2090 		 * decrementing the retry count.
2091 		 */
2092 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
2093 		mpssas_log_command(cm,
2094 		    "terminated ioc %x scsi %x state %x xfer %u\n",
2095 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2096 		    rep->TransferCount);
2097 		break;
2098 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2099 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2100 	case MPI2_IOCSTATUS_INVALID_VPID:
2101 	case MPI2_IOCSTATUS_INVALID_FIELD:
2102 	case MPI2_IOCSTATUS_INVALID_STATE:
2103 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2104 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2105 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2106 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2107 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2108 	default:
2109 		mpssas_log_command(cm,
2110 		    "completed ioc %x scsi %x state %x xfer %u\n",
2111 		    rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2112 		    rep->TransferCount);
2113 		csio->resid = cm->cm_length;
2114 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2115 		break;
2116 	}
2117 
2118 	if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2119 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2120 		sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2121 		mps_dprint(sc, MPS_INFO, "Command completed, "
2122 			   "unfreezing SIM queue\n");
2123 	}
2124 
2125 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2126 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2127 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2128 	}
2129 
2130 	mps_free_command(sc, cm);
2131 	xpt_done(ccb);
2132 }
2133 
2134 static void
2135 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2136     union ccb *ccb) {
2137 	pMpi2SCSIIORequest_t	pIO_req;
2138 	struct mps_softc	*sc = sassc->sc;
2139 	uint64_t		virtLBA;
2140 	uint32_t		physLBA, stripe_offset, stripe_unit;
2141 	uint32_t		io_size, column;
2142 	uint8_t			*ptrLBA, lba_idx, physLBA_byte, *CDB;
2143 
2144 	/*
2145 	 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2146 	 * Write10, or Write16), build a direct I/O message.  Otherwise, the I/O
2147 	 * will be sent to the IR volume itself.  Since Read6 and Write6 are a
2148 	 * bit different than the 10/16 CDBs, handle them separately.
2149 	 */
2150 	pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2151 	CDB = pIO_req->CDB.CDB32;
2152 
2153 	/*
2154 	 * Handle 6 byte CDBs.
2155 	 */
2156 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2157 	    (CDB[0] == WRITE_6))) {
2158 		/*
2159 		 * Get the transfer size in blocks.
2160 		 */
2161 		io_size = (cm->cm_length >> sc->DD_block_exponent);
2162 
2163 		/*
2164 		 * Get virtual LBA given in the CDB.
2165 		 */
2166 		virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2167 		    ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2168 
2169 		/*
2170 		 * Check that LBA range for I/O does not exceed volume's
2171 		 * MaxLBA.
2172 		 */
2173 		if ((virtLBA + (uint64_t)io_size - 1) <=
2174 		    sc->DD_max_lba) {
2175 			/*
2176 			 * Check if the I/O crosses a stripe boundary.  If not,
2177 			 * translate the virtual LBA to a physical LBA and set
2178 			 * the DevHandle for the PhysDisk to be used.  If it
2179 			 * does cross a boundry, do normal I/O.  To get the
2180 			 * right DevHandle to use, get the map number for the
2181 			 * column, then use that map number to look up the
2182 			 * DevHandle of the PhysDisk.
2183 			 */
2184 			stripe_offset = (uint32_t)virtLBA &
2185 			    (sc->DD_stripe_size - 1);
2186 			if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2187 				physLBA = (uint32_t)virtLBA >>
2188 				    sc->DD_stripe_exponent;
2189 				stripe_unit = physLBA / sc->DD_num_phys_disks;
2190 				column = physLBA % sc->DD_num_phys_disks;
2191 				pIO_req->DevHandle =
2192 				    sc->DD_column_map[column].dev_handle;
2193 				cm->cm_desc.SCSIIO.DevHandle =
2194 				    pIO_req->DevHandle;
2195 
2196 				physLBA = (stripe_unit <<
2197 				    sc->DD_stripe_exponent) + stripe_offset;
2198 				ptrLBA = &pIO_req->CDB.CDB32[1];
2199 				physLBA_byte = (uint8_t)(physLBA >> 16);
2200 				*ptrLBA = physLBA_byte;
2201 				ptrLBA = &pIO_req->CDB.CDB32[2];
2202 				physLBA_byte = (uint8_t)(physLBA >> 8);
2203 				*ptrLBA = physLBA_byte;
2204 				ptrLBA = &pIO_req->CDB.CDB32[3];
2205 				physLBA_byte = (uint8_t)physLBA;
2206 				*ptrLBA = physLBA_byte;
2207 
2208 				/*
2209 				 * Set flag that Direct Drive I/O is
2210 				 * being done.
2211 				 */
2212 				cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2213 			}
2214 		}
2215 		return;
2216 	}
2217 
2218 	/*
2219 	 * Handle 10 or 16 byte CDBs.
2220 	 */
2221 	if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2222 	    (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2223 	    (CDB[0] == WRITE_16))) {
2224 		/*
2225 		 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2226 		 * are 0.  If not, this is accessing beyond 2TB so handle it in
2227 		 * the else section.  10-byte CDB's are OK.
2228 		 */
2229 		if ((CDB[0] < READ_16) ||
2230 		    !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2231 			/*
2232 			 * Get the transfer size in blocks.
2233 			 */
2234 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2235 
2236 			/*
2237 			 * Get virtual LBA.  Point to correct lower 4 bytes of
2238 			 * LBA in the CDB depending on command.
2239 			 */
2240 			lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2241 			virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2242 			    ((uint64_t)CDB[lba_idx + 1] << 16) |
2243 			    ((uint64_t)CDB[lba_idx + 2] << 8) |
2244 			    (uint64_t)CDB[lba_idx + 3];
2245 
2246 			/*
2247 			 * Check that LBA range for I/O does not exceed volume's
2248 			 * MaxLBA.
2249 			 */
2250 			if ((virtLBA + (uint64_t)io_size - 1) <=
2251 			    sc->DD_max_lba) {
2252 				/*
2253 				 * Check if the I/O crosses a stripe boundary.
2254 				 * If not, translate the virtual LBA to a
2255 				 * physical LBA and set the DevHandle for the
2256 				 * PhysDisk to be used.  If it does cross a
2257 				 * boundry, do normal I/O.  To get the right
2258 				 * DevHandle to use, get the map number for the
2259 				 * column, then use that map number to look up
2260 				 * the DevHandle of the PhysDisk.
2261 				 */
2262 				stripe_offset = (uint32_t)virtLBA &
2263 				    (sc->DD_stripe_size - 1);
2264 				if ((stripe_offset + io_size) <=
2265 				    sc->DD_stripe_size) {
2266 					physLBA = (uint32_t)virtLBA >>
2267 					    sc->DD_stripe_exponent;
2268 					stripe_unit = physLBA /
2269 					    sc->DD_num_phys_disks;
2270 					column = physLBA %
2271 					    sc->DD_num_phys_disks;
2272 					pIO_req->DevHandle =
2273 					    sc->DD_column_map[column].
2274 					    dev_handle;
2275 					cm->cm_desc.SCSIIO.DevHandle =
2276 					    pIO_req->DevHandle;
2277 
2278 					physLBA = (stripe_unit <<
2279 					    sc->DD_stripe_exponent) +
2280 					    stripe_offset;
2281 					ptrLBA =
2282 					    &pIO_req->CDB.CDB32[lba_idx];
2283 					physLBA_byte = (uint8_t)(physLBA >> 24);
2284 					*ptrLBA = physLBA_byte;
2285 					ptrLBA =
2286 					    &pIO_req->CDB.CDB32[lba_idx + 1];
2287 					physLBA_byte = (uint8_t)(physLBA >> 16);
2288 					*ptrLBA = physLBA_byte;
2289 					ptrLBA =
2290 					    &pIO_req->CDB.CDB32[lba_idx + 2];
2291 					physLBA_byte = (uint8_t)(physLBA >> 8);
2292 					*ptrLBA = physLBA_byte;
2293 					ptrLBA =
2294 					    &pIO_req->CDB.CDB32[lba_idx + 3];
2295 					physLBA_byte = (uint8_t)physLBA;
2296 					*ptrLBA = physLBA_byte;
2297 
2298 					/*
2299 					 * Set flag that Direct Drive I/O is
2300 					 * being done.
2301 					 */
2302 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2303 				}
2304 			}
2305 		} else {
2306 			/*
2307 			 * 16-byte CDB and the upper 4 bytes of the CDB are not
2308 			 * 0.  Get the transfer size in blocks.
2309 			 */
2310 			io_size = (cm->cm_length >> sc->DD_block_exponent);
2311 
2312 			/*
2313 			 * Get virtual LBA.
2314 			 */
2315 			virtLBA = ((uint64_t)CDB[2] << 54) |
2316 			    ((uint64_t)CDB[3] << 48) |
2317 			    ((uint64_t)CDB[4] << 40) |
2318 			    ((uint64_t)CDB[5] << 32) |
2319 			    ((uint64_t)CDB[6] << 24) |
2320 			    ((uint64_t)CDB[7] << 16) |
2321 			    ((uint64_t)CDB[8] << 8) |
2322 			    (uint64_t)CDB[9];
2323 
2324 			/*
2325 			 * Check that LBA range for I/O does not exceed volume's
2326 			 * MaxLBA.
2327 			 */
2328 			if ((virtLBA + (uint64_t)io_size - 1) <=
2329 			    sc->DD_max_lba) {
2330 				/*
2331 				 * Check if the I/O crosses a stripe boundary.
2332 				 * If not, translate the virtual LBA to a
2333 				 * physical LBA and set the DevHandle for the
2334 				 * PhysDisk to be used.  If it does cross a
2335 				 * boundry, do normal I/O.  To get the right
2336 				 * DevHandle to use, get the map number for the
2337 				 * column, then use that map number to look up
2338 				 * the DevHandle of the PhysDisk.
2339 				 */
2340 				stripe_offset = (uint32_t)virtLBA &
2341 				    (sc->DD_stripe_size - 1);
2342 				if ((stripe_offset + io_size) <=
2343 				    sc->DD_stripe_size) {
2344 					physLBA = (uint32_t)(virtLBA >>
2345 					    sc->DD_stripe_exponent);
2346 					stripe_unit = physLBA /
2347 					    sc->DD_num_phys_disks;
2348 					column = physLBA %
2349 					    sc->DD_num_phys_disks;
2350 					pIO_req->DevHandle =
2351 					    sc->DD_column_map[column].
2352 					    dev_handle;
2353 					cm->cm_desc.SCSIIO.DevHandle =
2354 					    pIO_req->DevHandle;
2355 
2356 					physLBA = (stripe_unit <<
2357 					    sc->DD_stripe_exponent) +
2358 					    stripe_offset;
2359 
2360 					/*
2361 					 * Set upper 4 bytes of LBA to 0.  We
2362 					 * assume that the phys disks are less
2363 					 * than 2 TB's in size.  Then, set the
2364 					 * lower 4 bytes.
2365 					 */
2366 					pIO_req->CDB.CDB32[2] = 0;
2367 					pIO_req->CDB.CDB32[3] = 0;
2368 					pIO_req->CDB.CDB32[4] = 0;
2369 					pIO_req->CDB.CDB32[5] = 0;
2370 					ptrLBA = &pIO_req->CDB.CDB32[6];
2371 					physLBA_byte = (uint8_t)(physLBA >> 24);
2372 					*ptrLBA = physLBA_byte;
2373 					ptrLBA = &pIO_req->CDB.CDB32[7];
2374 					physLBA_byte = (uint8_t)(physLBA >> 16);
2375 					*ptrLBA = physLBA_byte;
2376 					ptrLBA = &pIO_req->CDB.CDB32[8];
2377 					physLBA_byte = (uint8_t)(physLBA >> 8);
2378 					*ptrLBA = physLBA_byte;
2379 					ptrLBA = &pIO_req->CDB.CDB32[9];
2380 					physLBA_byte = (uint8_t)physLBA;
2381 					*ptrLBA = physLBA_byte;
2382 
2383 					/*
2384 					 * Set flag that Direct Drive I/O is
2385 					 * being done.
2386 					 */
2387 					cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2388 				}
2389 			}
2390 		}
2391 	}
2392 }
2393 
2394 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request queued by
 * mpssas_send_smpcmd().  Maps the MPI reply (and the SMP function
 * result byte in the response buffer) to a CAM status on the CCB,
 * then tears down the DMA mapping, frees the command, and completes
 * the CCB.
 */
static void
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	uint64_t sasaddr;
	union ccb *ccb;

	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
			   __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
        }

	/* No reply frame means the firmware never completed the request. */
	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the little-endian request. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, rpl->IOCStatus, rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
		   "%#jx completed successfully\n", __func__,
		   (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the SMP function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}
2457 
/*
 * Build and queue an SMP passthrough request to the given SAS address,
 * using the request/response buffers supplied in the XPT_SMP_IO CCB.
 * The request and response are mapped as a single bidirectional uio
 * (one S/G segment each); completion is handled asynchronously by
 * mpssas_smpio_complete().  On any setup failure the CCB is completed
 * immediately with an error status.
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	int error;

	sc = sassc->sc;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
		mps_printf(sc, "%s: physical addresses not supported\n",
			   __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	/*
	 * If the user wants to send an S/G list, check to make sure they
	 * have single buffers.
	 */
	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_printf(sc, "%s: multiple request or response "
				   "buffer segments not supported for SMP\n",
				   __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
	} else {
		/* Plain single virtual-address buffers. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_printf(sc, "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = ccb->smpio.smp_request_len;
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
		   "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the SMP request, iovec 1 the response buffer. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = req->RequestDataLength;
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
			       cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
			MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
			   __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}
2623 
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address the SMP request
 * should be routed to (either the target's own embedded SMP target or
 * its parent expander), then hand off to mpssas_send_smpcmd().  On any
 * resolution failure the CCB is completed with an error status.
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_printf(sc, "%s: target %d does not exist!\n", __func__,
			   ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		/* Old method: look the parent up in the target table. */
		parent_target = mpssas_find_target_by_handle(sassc, 0,
			targ->parent_handle);

		if (parent_target == NULL) {
			mps_printf(sc, "%s: handle %d does not have a valid "
				   "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Use the parent info cached on the target itself. */
		if ((targ->parent_devinfo &
		     MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_printf(sc, "%s: handle %d parent %d does not "
				   "have an SMP target!\n", __func__,
				   targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_printf(sc, "%s: handle %d parent handle %d does "
				   "not have a valid SAS address!\n",
				   __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
			   __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
2750 #endif //__FreeBSD_version >= 900026
2751 
/*
 * Handle an XPT_RESET_DEV CCB by issuing a SCSI task management
 * Target Reset (with a SAS hard link reset / SATA link reset) to the
 * device's handle.  Completion is handled asynchronously by
 * mpssas_resetdev_complete(); if no command can be allocated, the CCB
 * is failed immediately with CAM_RESRC_UNAVAIL.
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	mps_dprint(sassc->sc, MPS_TRACE, __func__);
	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);

	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_printf(sc, "command alloc failure in mpssas_action_resetdev\n");
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* TM requests carry no data; queue on the high-priority path. */
	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	/* NOTE(review): mps_map_command() return value is ignored here —
	 * confirm the data-less TM path cannot fail to queue. */
	mps_map_command(sc, tm);
}
2788 
2789 static void
2790 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2791 {
2792 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2793 	union ccb *ccb;
2794 
2795 	mps_dprint(sc, MPS_TRACE, __func__);
2796 	KKASSERT(lockstatus(&sc->mps_lock, curthread) != 0);
2797 
2798 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2799 	ccb = tm->cm_complete_data;
2800 
2801 	/*
2802 	 * Currently there should be no way we can hit this case.  It only
2803 	 * happens when we have a failure to allocate chain frames, and
2804 	 * task management commands don't have S/G lists.
2805 	 */
2806 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2807 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2808 
2809 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2810 
2811 		mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2812 			   "This should not happen!\n", __func__, tm->cm_flags,
2813 			   req->DevHandle);
2814 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2815 		goto bailout;
2816 	}
2817 
2818 	kprintf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2819 	    resp->IOCStatus, resp->ResponseCode);
2820 
2821 	if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2822 		ccb->ccb_h.status = CAM_REQ_CMP;
2823 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2824 		    CAM_LUN_WILDCARD);
2825 	}
2826 	else
2827 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2828 
2829 bailout:
2830 
2831 	mpssas_free_tm(sc, tm);
2832 	xpt_done(ccb);
2833 }
2834 
2835 static void
2836 mpssas_poll(struct cam_sim *sim)
2837 {
2838 	struct mpssas_softc *sassc;
2839 
2840 	sassc = cam_sim_softc(sim);
2841 
2842 	if (sassc->sc->mps_debug & MPS_TRACE) {
2843 		/* frequent debug messages during a panic just slow
2844 		 * everything down too much.
2845 		 */
2846 		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2847 		sassc->sc->mps_debug &= ~MPS_TRACE;
2848 	}
2849 
2850 	mps_intr_locked(sassc->sc);
2851 }
2852 
2853 static void
2854 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2855 {
2856 	struct mpssas_softc *sassc;
2857 	char path_str[64];
2858 
2859 	if (done_ccb == NULL)
2860 		return;
2861 
2862 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2863 
2864 	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2865 
2866 	xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2867 	mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2868 
2869 	xpt_free_path(done_ccb->ccb_h.path);
2870 	xpt_free_ccb(done_ccb);
2871 
2872 #if __FreeBSD_version < 1000006
2873 	/*
2874 	 * Before completing scan, get EEDP stuff for all of the existing
2875 	 * targets.
2876 	 */
2877 	mpssas_check_eedp(sassc);
2878 #endif
2879 
2880 }
2881 
2882 /* thread to handle bus rescans */
2883 static void
2884 mpssas_scanner_thread(void *arg)
2885 {
2886 	struct mpssas_softc *sassc;
2887 	struct mps_softc *sc;
2888 	union ccb	*ccb;
2889 
2890 	sassc = (struct mpssas_softc *)arg;
2891 	sc = sassc->sc;
2892 
2893 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2894 
2895 	mps_lock(sc);
2896 	for (;;) {
2897 		/* Sleep for 1 second and check the queue status*/
2898 		lksleep(&sassc->ccb_scanq, &sc->mps_lock, 0, "mps_scanq", 1 * hz);
2899 		if (sassc->flags & MPSSAS_SHUTDOWN) {
2900 			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2901 			break;
2902 		}
2903 next_work:
2904 		/* Get first work */
2905 		ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2906 		if (ccb == NULL)
2907 			continue;
2908 		/* Got first work */
2909 		TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2910 		xpt_action(ccb);
2911 		if (sassc->flags & MPSSAS_SHUTDOWN) {
2912 			mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2913 			break;
2914 		}
2915 		goto next_work;
2916 	}
2917 
2918 	sassc->flags &= ~MPSSAS_SCANTHREAD;
2919 	wakeup(&sassc->flags);
2920 	mps_unlock(sc);
2921 	mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
2922 	mps_kproc_exit(0);
2923 }
2924 
2925 static void
2926 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2927 {
2928 	char path_str[64];
2929 
2930 	mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2931 
2932 	KKASSERT(lockstatus(&sassc->sc->mps_lock, curthread) != 0);
2933 
2934 	if (ccb == NULL)
2935 		return;
2936 
2937 	xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2938 	mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2939 
2940 	/* Prepare request */
2941 	ccb->ccb_h.ppriv_ptr1 = sassc;
2942 	ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2943 	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2944 	TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2945 	wakeup(&sassc->ccb_scanq);
2946 }
2947 
2948 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback.  Handles AC_ADVINFO_CHANGED by refreshing
 * the EEDP (protection information) state for the affected LUN from the
 * cached READ CAPACITY(16) data.  All other event codes are ignored.
 */
static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	     void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We're only interested in devices that are attached to
		 * this controller.
		 */
		if (xpt_path_path_id(path) != sassc->sim->path_id)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)
			break;

		lunid = xpt_path_lun_id(path);

		/* See whether this LUN is already in the target's list. */
		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {
				found_lun = 1;
				break;
			}
		}

		if (found_lun == 0) {
			/*
			 * NOTE(review): with M_INTWAIT this allocation
			 * presumably cannot return NULL on DragonFly, so
			 * the check below looks defensive — confirm.
			 */
			lun = kmalloc(sizeof(struct mpssas_lun), M_MPT2,
				     M_INTWAIT | M_ZERO);
			if (lun == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc "
					   "LUN for EEDP support.\n");
				break;
			}
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		}

		/*
		 * Issue a synchronous XPT_DEV_ADVINFO request to fetch the
		 * cached READ CAPACITY(16) data for this device.
		 */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = 0;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,
					 0, 0, 0, FALSE);

		/*
		 * Record whether the LUN has protection information enabled
		 * and its block length; otherwise clear any previous state.
		 */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
		} else {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;
		}
		break;
	}
	default:
		break;
	}
}
3041 #else /* __FreeBSD_version >= 1000006 */
3042 
/*
 * Probe every LUN of every known target with a READ CAPACITY(16) command
 * to learn whether the LUN is formatted for EEDP (protection information).
 * Used on CAM stacks without AC_ADVINFO_CHANGED support.  Results are
 * recorded by the mpssas_read_cap_done() completion handler, which also
 * frees the rcap_buf, path and CCB allocated here.
 */
static void
mpssas_check_eedp(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	union ccb *ccb;
	path_id_t pathid = cam_sim_path(sassc->sim);
	target_id_t targetid;
	lun_id_t lunid;
	struct cam_periph *found_periph;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	uint8_t	found_lun;

	/*
	 * Issue a READ CAPACITY 16 command to each LUN of each target.  This
	 * info is used to determine if the LUN is formatted for EEDP support.
	 */
	for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
		target = &sassc->targets[targetid];
		if (target->handle == 0x0) {
			/* No device at this target ID. */
			continue;
		}

		/* Walk LUN IDs upward until cam_periph_find() misses. */
		lunid = 0;
		do {
			rcap_buf =
			    kmalloc(sizeof(struct scsi_read_capacity_eedp),
			    M_MPT2, M_INTWAIT | M_ZERO);
			if (rcap_buf == NULL) {
				mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
				    "capacity buffer for EEDP support.\n");
				return;
			}

			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			    M_WAITOK | M_ZERO);

			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    pathid, targetid, lunid) != CAM_REQ_CMP) {
				mps_dprint(sc, MPS_FAULT, "Unable to create "
				    "path for EEDP support\n");
				kfree(rcap_buf, M_MPT2);
				xpt_free_ccb(ccb);
				return;
			}

			/*
			 * If a periph is returned, the LUN exists.  Create an
			 * entry in the target's LUN list.
			 */
			if ((found_periph = cam_periph_find(ccb->ccb_h.path,
			    NULL)) != NULL) {
				/*
				 * If LUN is already in list, don't create a new
				 * one.
				 */
				found_lun = FALSE;
				SLIST_FOREACH(lun, &target->luns, lun_link) {
					if (lun->lun_id == lunid) {
						found_lun = TRUE;
						break;
					}
				}
				if (!found_lun) {
					lun = kmalloc(sizeof(struct mpssas_lun),
					    M_MPT2, M_INTWAIT | M_ZERO);
					lun->lun_id = lunid;
					SLIST_INSERT_HEAD(&target->luns, lun,
					    lun_link);
				}
				lunid++;

				/*
				 * Issue a READ CAPACITY 16 command for the LUN.
				 * The mpssas_read_cap_done function will load
				 * the read cap info into the LUN struct.
				 */
				csio = &ccb->csio;
				csio->ccb_h.func_code = XPT_SCSI_IO;
				csio->ccb_h.flags = CAM_DIR_IN;
				csio->ccb_h.retry_count = 4;
				csio->ccb_h.cbfcnp = mpssas_read_cap_done;
				csio->ccb_h.timeout = 60000;
				csio->data_ptr = (uint8_t *)rcap_buf;
				csio->dxfer_len = sizeof(struct
				    scsi_read_capacity_eedp);
				csio->sense_len = MPS_SENSE_LEN;
				csio->cdb_len = sizeof(*scsi_cmd);
				csio->tag_action = MSG_SIMPLE_Q_TAG;

				/*
				 * Build the CDB: SERVICE ACTION IN(16) /
				 * READ CAPACITY(16), with the allocation
				 * length in byte 13.
				 */
				scsi_cmd = (struct scsi_read_capacity_16 *)
				    &csio->cdb_io.cdb_bytes;
				bzero(scsi_cmd, sizeof(*scsi_cmd));
				scsi_cmd->opcode = 0x9E;
				scsi_cmd->service_action = SRC16_SERVICE_ACTION;
				((uint8_t *)scsi_cmd)[13] = sizeof(struct
				    scsi_read_capacity_eedp);

				/*
				 * Set the path, target and lun IDs for the READ
				 * CAPACITY request.
				 */
				ccb->ccb_h.path_id =
				    xpt_path_path_id(ccb->ccb_h.path);
				ccb->ccb_h.target_id =
				    xpt_path_target_id(ccb->ccb_h.path);
				ccb->ccb_h.target_lun =
				    xpt_path_lun_id(ccb->ccb_h.path);

				/* Completion handler frees buf/path/ccb. */
				ccb->ccb_h.ppriv_ptr1 = sassc;
				xpt_action(ccb);
			} else {
				/* No periph: end of LUNs for this target. */
				kfree(rcap_buf, M_MPT2);
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
			}
		} while (found_periph);
	}
}
3165 
3166 
3167 static void
3168 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3169 {
3170 	struct mpssas_softc *sassc;
3171 	struct mpssas_target *target;
3172 	struct mpssas_lun *lun;
3173 	struct scsi_read_capacity_eedp *rcap_buf;
3174 
3175 	if (done_ccb == NULL)
3176 		return;
3177 
3178 	/*
3179 	 * Driver need to release devq, it Scsi command is
3180 	 * generated by driver internally.
3181 	 * Currently there is a single place where driver
3182 	 * calls scsi command internally. In future if driver
3183 	 * calls more scsi command internally, it needs to release
3184 	 * devq internally, since those command will not go back to
3185 	 * cam_periph.
3186 	 */
3187 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3188 		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3189 		xpt_release_devq(done_ccb->ccb_h.path,
3190 				 /*count*/ 1, /*run_queue*/TRUE);
3191 	}
3192 
3193 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3194 
3195 	/*
3196 	 * Get the LUN ID for the path and look it up in the LUN list for the
3197 	 * target.
3198 	 */
3199 	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3200 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3201 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3202 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3203 			continue;
3204 
3205 		/*
3206 		 * Got the LUN in the target's LUN list.  Fill it in
3207 		 * with EEDP info.  If the READ CAP 16 command had some
3208 		 * SCSI error (common if command is not supported), mark
3209 		 * the lun as not supporting EEDP and set the block size
3210 		 * to 0.
3211 		 */
3212 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3213 		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3214 			lun->eedp_formatted = FALSE;
3215 			lun->eedp_block_size = 0;
3216 			break;
3217 		}
3218 
3219 		if (rcap_buf->protect & 0x01) {
3220 			lun->eedp_formatted = TRUE;
3221 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3222 		}
3223 		break;
3224 	}
3225 
3226 	// Finished with this CCB and path.
3227 	kfree(rcap_buf, M_MPT2);
3228 	xpt_free_path(done_ccb->ccb_h.path);
3229 	xpt_free_ccb(done_ccb);
3230 }
3231 #endif /* __FreeBSD_version >= 1000006 */
3232 
3233 int
3234 mpssas_startup(struct mps_softc *sc)
3235 {
3236 	struct mpssas_softc *sassc;
3237 
3238 	/*
3239 	 * Send the port enable message and set the wait_for_port_enable flag.
3240 	 * This flag helps to keep the simq frozen until all discovery events
3241 	 * are processed.
3242 	 */
3243 	sassc = sc->sassc;
3244 	mpssas_startup_increment(sassc);
3245 	sc->wait_for_port_enable = 1;
3246 	mpssas_send_portenable(sc);
3247 	return (0);
3248 }
3249 
3250 static int
3251 mpssas_send_portenable(struct mps_softc *sc)
3252 {
3253 	MPI2_PORT_ENABLE_REQUEST *request;
3254 	struct mps_command *cm;
3255 
3256 	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3257 
3258 	if ((cm = mps_alloc_command(sc)) == NULL)
3259 		return (EBUSY);
3260 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3261 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3262 	request->MsgFlags = 0;
3263 	request->VP_ID = 0;
3264 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3265 	cm->cm_complete = mpssas_portenable_complete;
3266 	cm->cm_data = NULL;
3267 	cm->cm_sge = NULL;
3268 
3269 	mps_map_command(sc, cm);
3270 	mps_dprint(sc, MPS_TRACE,
3271 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3272 	    cm, cm->cm_req, cm->cm_complete);
3273 	return (0);
3274 }
3275 
/*
 * Completion handler for the PORT ENABLE request.  Discovery is done at
 * this point: tear down the config intrhook, optionally hide WarpDrive
 * member disks, and release the simq freeze taken before port enable.
 */
static void
mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
{
	MPI2_PORT_ENABLE_REPLY *reply;
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	int i;

	mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
	sassc = sc->sassc;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * port enable commands don't have S/G lists.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_printf(sc, "%s: cm_flags = %#x for port enable! "
			   "This should not happen!\n", __func__, cm->cm_flags);
	}

	/* A missing or unsuccessful reply is logged but not fatal here. */
	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
	if (reply == NULL)
		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
	else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS)
		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");

	mps_free_command(sc, cm);
	/* Boot-time attach is done; let the system proceed past the hook. */
	if (sc->mps_ich.ich_arg != NULL) {
		mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
		config_intrhook_disestablish(&sc->mps_ich);
		sc->mps_ich.ich_arg = NULL;
	}

	/*
	 * Get WarpDrive info after discovery is complete but before the scan
	 * starts.  At this point, all devices are ready to be exposed to the
	 * OS.  If devices should be hidden instead, take them out of the
	 * 'targets' array before the scan.  The devinfo for a disk will have
	 * some info and a volume's will be 0.  Use that to remove disks.
	 */
	mps_wd_config_pages(sc);
	if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
	  && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
	 || (sc->WD_valid_config && (sc->WD_hide_expose ==
	    MPS_WD_HIDE_IF_VOLUME))) {
		for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
			target = &sassc->targets[i];
			if (target->devinfo) {
				/* Hide the disk by blanking its entry. */
				target->devinfo = 0x0;
				target->encl_handle = 0x0;
				target->encl_slot = 0x0;
				target->handle = 0x0;
				target->tid = 0x0;
				target->linkrate = 0x0;
				target->flags = 0x0;
			}
		}
	}

	/*
	 * Done waiting for port enable to complete.  Decrement the refcount.
	 * If refcount is 0, discovery is complete and a rescan of the bus can
	 * take place.  Since the simq was explicitly frozen before port
	 * enable, it must be explicitly released here to keep the
	 * freeze/release count in sync.
	 */
	sc->wait_for_port_enable = 0;
	sc->port_enable_complete = 1;
	mpssas_startup_decrement(sassc);
	xpt_release_simq(sassc->sim, 1);
}
3349