/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
 *
 * Copyright (c) 2006 IronPort Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2007 LSI Corp.
 * Copyright (c) 2007 Rajesh Prabhakaran.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>
#include <sys/bio.h>
#include <sys/ioccom.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mfi/mfireg.h>
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
#include <sys/interrupt.h>
#include <sys/priority.h>

static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int,
		    struct thread *);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc,
		    struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc,
		    struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc,
		    struct mfi_command *cm);

SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
	    0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
	    0, "event message class");

static int	mfi_max_cmds = 128;
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
	    0, "Max commands limit (-1 = controller limit)");

static int	mfi_detect_jbod_change = 1;
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
	    &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
	    &mfi_polled_cmd_timeout, 0,
	    "Polled command timeout - used for firmware flash etc (in seconds)");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
	    0, "Command timeout (in seconds)");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_poll_t		mfi_poll;

static struct cdevsw mfi_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_poll =	mfi_poll,
	.d_name =	"mfi",
};

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;

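/*
 * Register-level interface, selected per controller family in mfi_attach():
 * the xscale variants serve 1064R parts, while the ppc variants serve
 * 1078, GEN2, and SKINNY parts with per-flag special cases.
 */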
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}

static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	}
}

static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return (MFI_READ4(sc, MFI_OMSG0));
}

static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return (MFI_READ4(sc, MFI_OSP0));
}

static int
mfi_check_clear_intr_xscale(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if ((status & MFI_OSTS_INTR_VALID) == 0)
		return (1);

	MFI_WRITE4(sc, MFI_OSTS, status);
	return (0);
}

static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		if (!(status & MFI_1078_RM))
			return (1);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		if (!(status & MFI_GEN2_RM))
			return (1);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		if (!(status & MFI_SKINNY_RM))
			return (1);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return (0);
}

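/*
 * Post a command to the controller's inbound queue port.  On xscale parts
 * the frame's physical address (8-byte aligned, hence the >> 3) is OR'd
 * with the count of extra frames; on ppc/skinny parts the extra-frame
 * count is shifted left by one and bit 0 is always set.
 */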
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
}

static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
	}
}

int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			    "become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB,
				    MFI_FWINIT_CLEAR_HANDSHAKE);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME;	/* wait for 180 seconds */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY ||
			    sc->mfi_flags & MFI_FLAGS_TBOLT)
				MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
				MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
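		/*
		 * Poll the status register every 100ms, for up to max_wait
		 * seconds, waiting for the state to change.
		 */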
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}

static void
mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

int
mfi_attach(struct mfi_softc *sc)
{
	uint32_t status;
	int error, commsz, framessz, sensesz;
	int frames, unit, max_fw_sge, max_fw_cmds;
	uint32_t tb_mem_size = 0;
	struct cdev *dev_t;

	if (sc == NULL)
		return (EINVAL);

	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s\n",
	    MEGASAS_VERSION);

	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
	sx_init(&sc->mfi_config_lock, "MFI config");
	TAILQ_INIT(&sc->mfi_ld_tqh);
	TAILQ_INIT(&sc->mfi_syspd_tqh);
	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
	TAILQ_INIT(&sc->mfi_evt_queue);
	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
	TAILQ_INIT(&sc->mfi_aen_pids);
	TAILQ_INIT(&sc->mfi_cam_ccbq);

	mfi_initq_free(sc);
	mfi_initq_ready(sc);
	mfi_initq_busy(sc);
	mfi_initq_bio(sc);

	sc->adpreset = 0;
	sc->last_seq_num = 0;
	sc->disableOnlineCtrlReset = 1;
	sc->issuepend_done = 1;
	sc->hw_crit_error = 0;

	if (sc->mfi_flags & MFI_FLAGS_1064R) {
		sc->mfi_enable_intr = mfi_enable_intr_xscale;
		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
		sc->mfi_tbolt = 1;
		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
	} else {
		sc->mfi_enable_intr = mfi_enable_intr_ppc;
		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
	}

	/* Before we get too far, see if the firmware is working */
	if ((error = mfi_transition_firmware(sc)) != 0) {
		device_printf(sc->mfi_dev, "Firmware not in READY state, "
		    "error %d\n", error);
		return (ENXIO);
	}

	/* Start: LSIP200113393 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t), /* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->verbuf_h_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t));
	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
	    sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t),
	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
	/* End: LSIP200113393 */

	/*
	 * Get information needed for sizing the contiguous memory for the
	 * frame pool.  Size down the sgl parameter since we know that
	 * we will never need more than what's required for MAXPHYS.
	 * It would be nice if these constants were available at runtime
	 * instead of compile time.
	 */
	status = sc->mfi_read_fw_status(sc);
	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
		    max_fw_cmds, mfi_max_cmds);
		sc->mfi_max_fw_cmds = mfi_max_cmds;
	} else {
		sc->mfi_max_fw_cmds = max_fw_cmds;
	}
	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
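	/*
	 * A transfer of MFI_MAXPHYS bytes spans at most MFI_MAXPHYS /
	 * PAGE_SIZE pages, plus one extra segment when the buffer is not
	 * page-aligned, hence the "+ 1" in the clamp above.
	 */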

	/* ThunderBolt Support get the contiguous memory */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mfi_tbolt_init_globals(sc);
		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);

		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat,
		    (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);

		/* For ThunderBolt memory init */
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				0x100, 0,		/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
				MFI_FRAME_SIZE,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_init_dmat)) {
			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
		    tb_mem_size)) {
			device_printf(sc->mfi_dev,
			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);
		}

		/*
		 * Allocate a DMA memory mapping for the MPI2 IOC Init
		 * descriptor.  It is kept separate from the request and
		 * reply descriptor allocations above to avoid confusion
		 * later.
		 */
		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
				tb_mem_size,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_tb_ioc_init_dmat)) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate comms DMA tag\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
		    (void **)&sc->mfi_tb_ioc_init_desc,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
			return (ENOMEM);
		}
		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
	}
	/*
	 * Create the dma tag for data buffers.  Used both for block I/O
	 * and for various internal data queries.
	 */
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				sc->mfi_max_sge,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mfi_io_lock,	/* lockfuncarg */
				&sc->mfi_buffer_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
	 */
	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
	    sizeof(struct mfi_hwcomms);
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				commsz,			/* maxsize */
				1,			/* nsegments */
				commsz,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_comms_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_comms, commsz);
	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
	/*
	 * Allocate DMA memory for the command frames.  Keep them in the
	 * lower 4GB for efficiency.  Calculate the size of the commands at
	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
	 * The assumption here is that the SG list will start at the second
	 * frame and not use the unused bytes in the first frame.  While this
	 * isn't technically correct, it simplifies the calculation and allows
	 * for command frames that might be larger than an mfi_io_frame.
	 */
	if (sizeof(bus_addr_t) == 8) {
		sc->mfi_sge_size = sizeof(struct mfi_sg64);
		sc->mfi_flags |= MFI_FLAGS_SG64;
	} else {
		sc->mfi_sge_size = sizeof(struct mfi_sg32);
	}
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
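	/*
	 * One frame for the command header plus enough whole frames to hold a
	 * worst-case S/G list: the "- 1 ... + 2" below rounds the S/G bytes
	 * up to the next MFI_FRAME_SIZE boundary and adds the header frame.
	 */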
	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				64, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				framessz,		/* maxsize */
				1,			/* nsegments */
				framessz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_frames_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
		return (ENOMEM);
	}
	bzero(sc->mfi_frames, framessz);
	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr, 0);
	/*
	 * Allocate DMA memory for the frame sense data.  Keep them in the
	 * lower 4GB for efficiency
	 */
	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				sensesz,		/* maxsize */
				1,			/* nsegments */
				sensesz,		/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->mfi_sense_dmat)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
	if ((error = mfi_alloc_commands(sc)) != 0)
		return (error);

	/*
	 * Before moving the FW to operational state, check whether host
	 * memory is required by the FW or not.
	 */

	/* ThunderBolt MFI_IOC2 INIT */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		sc->mfi_disable_intr(sc);
		mtx_lock(&sc->mfi_io_lock);
		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB Init has failed with error %d\n", error);
			mtx_unlock(&sc->mfi_io_lock);
			return (error);
		}
		mtx_unlock(&sc->mfi_io_lock);

		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE | INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr_tbolt;
		sc->mfi_enable_intr(sc);
	} else {
		if ((error = mfi_comms_init(sc)) != 0)
			return (error);

		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
		    INTR_MPSAFE | INTR_TYPE_BIO, NULL, mfi_intr, sc,
		    &sc->mfi_intr)) {
			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
			return (EINVAL);
		}
		sc->mfi_intr_ptr = mfi_intr;
		sc->mfi_enable_intr(sc);
	}
	if ((error = mfi_get_controller_info(sc)) != 0)
		return (error);
	sc->disableOnlineCtrlReset = 0;

	/* Register a config hook to probe the bus for arrays */
	sc->mfi_ich.ich_func = mfi_startup;
	sc->mfi_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
		device_printf(sc->mfi_dev, "Cannot establish configuration "
		    "hook\n");
		return (EINVAL);
	}
	mtx_lock(&sc->mfi_io_lock);
	if ((error = mfi_aen_setup(sc, 0)) != 0) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Register a shutdown handler.
	 */
	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
		device_printf(sc->mfi_dev, "Warning: shutdown event "
		    "registration failed\n");
	}

	/*
	 * Create the control device for doing management
	 */
	unit = device_get_unit(sc->mfi_dev);
	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
	    0640, "mfi%d", unit);
	if (unit == 0)
		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
	if (sc->mfi_cdev != NULL)
		sc->mfi_cdev->si_drv1 = sc;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
	    &sc->mfi_keep_deleted_volumes, 0,
	    "Don't detach the mfid device for a busy volume that is deleted");

	device_add_child(sc->mfi_dev, "mfip", -1);
	bus_generic_attach(sc->mfi_dev);

	/* Start the timeout watchdog */
	callout_init(&sc->mfi_watchdog_callout, 1);
	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
	    mfi_timeout, sc);

	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		mtx_lock(&sc->mfi_io_lock);
		mfi_tbolt_sync_map_info(sc);
		mtx_unlock(&sc->mfi_io_lock);
	}

	return (0);
}

static int
mfi_alloc_commands(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i, j;

	/*
	 * XXX Should we allocate all the commands up front, or allocate on
	 * demand later like 'aac' does?
	 */
	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
		cm = &sc->mfi_commands[i];
		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
		    sc->mfi_cmd_size * i);
		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
		    sc->mfi_cmd_size * i;
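		/*
		 * The frame's context is echoed back in the reply queue;
		 * mfi_intr() uses it as an index into mfi_commands[].
		 */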
		cm->cm_frame->header.context = i;
		cm->cm_sense = &sc->mfi_sense[i];
		cm->cm_sense_busaddr = sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
		cm->cm_sc = sc;
		cm->cm_index = i;
		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
		    &cm->cm_dmamap) == 0) {
			mtx_lock(&sc->mfi_io_lock);
			mfi_release_command(cm);
			mtx_unlock(&sc->mfi_io_lock);
		} else {
			device_printf(sc->mfi_dev, "Failed to allocate %d "
			    "command blocks, only allocated %d\n",
			    sc->mfi_max_fw_cmds, i);
			for (j = 0; j < i; j++) {
				cm = &sc->mfi_commands[j];
				bus_dmamap_destroy(sc->mfi_buffer_dmat,
				    cm->cm_dmamap);
			}
			free(sc->mfi_commands, M_MFIBUF);
			sc->mfi_commands = NULL;

			return (ENOMEM);
		}
	}

	return (0);
}

void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * The command may be on other queues (e.g. the busy queue) depending
	 * on the flow of a previous call to mfi_mapcmd, so ensure it's
	 * dequeued properly.
	 */
	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
		mfi_remove_busy(cm);
	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
		mfi_remove_ready(cm);

	/* We're not expecting it to be on any other queue but check */
	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
		panic("Command %p is still on another queue, flags = %#x",
		    cm, cm->cm_flags);
	}

	/* tbolt cleanup */
	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
		mfi_tbolt_return_cmd(cm->cm_sc,
		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
		    cm);
	}

	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = NULL;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}

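/*
 * Allocate a free command and set up a DCMD frame for the given opcode.
 * If bufsize is non-zero, *bufp supplies the data buffer; when *bufp is
 * NULL a zeroed buffer is allocated and returned through it.
 */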
int
mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
    uint32_t opcode, void **bufp, size_t bufsize)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *buf = NULL;
	uint32_t context = 0;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	cm = mfi_dequeue_free(sc);
	if (cm == NULL)
		return (EBUSY);

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	if ((bufsize > 0) && (bufp != NULL)) {
		if (*bufp == NULL) {
			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT | M_ZERO);
			if (buf == NULL) {
				mfi_release_command(cm);
				return (ENOMEM);
			}
			*bufp = buf;
		} else {
			buf = *bufp;
		}
	}

	dcmd = &cm->cm_frame->dcmd;
	bzero(dcmd->mbox, MFI_MBOX_SIZE);
	dcmd->header.cmd = MFI_CMD_DCMD;
	dcmd->header.timeout = 0;
	dcmd->header.flags = 0;
	dcmd->header.data_len = bufsize;
	dcmd->header.scsi_status = 0;
	dcmd->opcode = opcode;
	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_flags = 0;
	cm->cm_data = buf;
	cm->cm_private = buf;
	cm->cm_len = bufsize;

	*cmp = cm;
	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
		*bufp = buf;
	return (0);
}

static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	mtx_lock(&sc->mfi_io_lock);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		mtx_unlock(&sc->mfi_io_lock);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "failed to send init command\n");
	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);

	return (error);
}

static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		free(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);

	return (error);
}

int
mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
{
	struct mfi_evt_log_state *log_state = NULL;
	union mfi_evt class_locale;
	int error = 0;
	uint32_t seq;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class = mfi_event_class;

	if (seq_start == 0) {
		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
			goto out;
		sc->mfi_boot_seq_num = log_state->boot_seq_num;

		/*
		 * Walk through any events that fired since the last
		 * shutdown.
		 */
		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
		    log_state->newest_seq_num)) != 0)
			goto out;
		seq = log_state->newest_seq_num;
	} else
		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
out:
	free(log_state, M_MFIBUF);

	return (error);
}

int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	cm->cm_complete = NULL;

	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 as the status.
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
	return (cm->cm_error);
}

void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_drain(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);

	if (sc->mfi_commands != NULL) {
		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		free(sc->mfi_commands, M_MFIBUF);
		sc->mfi_commands = NULL;
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					free(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			free(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (mtx_initialized(&sc->mfi_io_lock)) {
		mtx_destroy(&sc->mfi_io_lock);
		sx_destroy(&sc->mfi_config_lock);
	}

	return;
}

static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	sc->mfi_enable_intr(sc);
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	mtx_unlock(&sc->mfi_io_lock);
	sx_xunlock(&sc->mfi_config_lock);

	config_intrhook_disestablish(&sc->mfi_ich);
}

static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
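	/*
	 * hw_pi is the producer index, advanced by the firmware as replies
	 * are posted; hw_ci is the consumer index, advanced here as replies
	 * are processed.  The reply queue holds mfi_max_fw_cmds + 1 entries.
	 */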
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	mtx_lock(&sc->mfi_io_lock);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		if (++ci == (sc->mfi_max_fw_cmds + 1))
			ci = 0;
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	mtx_unlock(&sc->mfi_io_lock);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}

int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

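	/*
	 * Abort any outstanding async-event or map-sync command so the
	 * controller is quiet before the shutdown DCMD is issued.
	 */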
	if (sc->mfi_aen_cm != NULL) {
		sc->cm_aen_abort = 1;
		mfi_abort(sc, &sc->mfi_aen_cm);
	}

	if (sc->mfi_map_sync_cm != NULL) {
		sc->cm_map_abort = 1;
		mfi_abort(sc, &sc->mfi_map_sync_cm);
	}

	mtx_lock(&sc->mfi_io_lock);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		mtx_unlock(&sc->mfi_io_lock);
		return (error);
	}

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0)
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");

	mfi_release_command(cm);
	mtx_unlock(&sc->mfi_io_lock);
	return (error);
}

static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	struct mfi_system_pending *syspd_pend;
	int error, i, found;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
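		/*
		 * Skip entries whose device id matches their enclosure id;
		 * these appear to be enclosure services devices rather than
		 * disks (an assumption based on the firmware's PD list).
		 */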
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			device_printf(sc->mfi_dev,
			    "syspd %d is no longer exported; deleting it\n",
			    syspd->pd_id);
			mtx_unlock(&sc->mfi_io_lock);
			mtx_lock(&Giant);
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			mtx_unlock(&Giant);
			mtx_lock(&sc->mfi_io_lock);
		}
	}
out:
	if (pdlist)
		free(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	struct mfi_disk_pending *ld_pend;
	int error, i;

	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
	mtx_assert(&sc->mfi_io_lock, MA_OWNED);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		free(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}

/*
 * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * the bits in 24-31 are all set, then it is the number of seconds since
 * boot.
 */
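/*
 * Note: format_timestamp() and format_class() return pointers to static
 * buffers.  They are only called while decoding events from the single
 * event taskqueue, which serializes callers (an assumption; these helpers
 * are not re-entrant).
 */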
static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
		    0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return (buffer);
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return ("debug");
	case MFI_EVT_CLASS_PROGRESS:
		return ("progress");
	case MFI_EVT_CLASS_INFO:
		return ("info");
	case MFI_EVT_CLASS_WARNING:
		return ("WARN");
	case MFI_EVT_CLASS_CRITICAL:
		return ("CRIT");
	case MFI_EVT_CLASS_FATAL:
		return ("FATAL");
	case MFI_EVT_CLASS_DEAD:
		return ("DEAD");
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return (buffer);
	}
}

static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
			device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and delete
				 * invalid SYSPD's
				 */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver replays all the events logged
		 * since the last shutdown, so avoid acting on these old
		 * events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
			/* Remove the LD */
			struct mfi_disk *ld;
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * the volume may legitimately be gone, so don't
			 * KASSERT(ld != NULL, ("volume disappeared")).
			 */
			if (ld != NULL) {
				mtx_lock(&Giant);
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				mtx_unlock(&Giant);
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						mtx_lock(&Giant);
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						mtx_unlock(&Giant);
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				sx_xlock(&sc->mfi_config_lock);
				mtx_lock(&sc->mfi_io_lock);
				mfi_syspdprobe(sc);
				mtx_unlock(&sc->mfi_io_lock);
				sx_xunlock(&sc->mfi_config_lock);
			}
		}
		if (sc->mfi_cam_rescan_cb != NULL &&
		    (detail->code == MR_EVT_PD_INSERTED ||
		    detail->code == MR_EVT_PD_REMOVED)) {
			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
		}
		break;
	}
}

static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}

static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(, mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	mtx_lock(&sc->mfi_io_lock);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	mtx_unlock(&sc->mfi_io_lock);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		free(elm, M_MFIBUF);
	}
}

1669 static int
1670 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1671 {
1672 	struct mfi_command *cm;
1673 	struct mfi_dcmd_frame *dcmd;
1674 	union mfi_evt current_aen, prior_aen;
1675 	struct mfi_evt_detail *ed = NULL;
1676 	int error = 0;
1677 
1678 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1679 
1680 	current_aen.word = locale;
1681 	if (sc->mfi_aen_cm != NULL) {
1682 		prior_aen.word =
1683 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
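		/*
		 * If the outstanding AEN already covers this event class and
		 * every requested locale bit, leave it in place; otherwise
		 * merge the locale and class settings into it and abort the
		 * old command before registering again below.
		 */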
1684 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1685 		    !((prior_aen.members.locale & current_aen.members.locale)
1686 		    ^ current_aen.members.locale)) {
1687 			return (0);
1688 		} else {
1689 			prior_aen.members.locale |= current_aen.members.locale;
1690 			if (prior_aen.members.evt_class
1691 			    < current_aen.members.evt_class)
1692 				current_aen.members.evt_class =
1693 				    prior_aen.members.evt_class;
1694 			mfi_abort(sc, &sc->mfi_aen_cm);
1695 		}
1696 	}
1697 
1698 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1699 	    (void **)&ed, sizeof(*ed));
1700 	if (error)
1701 		goto out;
1702 
1703 	dcmd = &cm->cm_frame->dcmd;
1704 	((uint32_t *)&dcmd->mbox)[0] = seq;
1705 	((uint32_t *)&dcmd->mbox)[1] = locale;
1706 	cm->cm_flags = MFI_CMD_DATAIN;
1707 	cm->cm_complete = mfi_aen_complete;
1708 
1709 	sc->last_seq_num = seq;
1710 	sc->mfi_aen_cm = cm;
1711 
1712 	mfi_enqueue_ready(cm);
1713 	mfi_startio(sc);
1714 
1715 out:
1716 	return (error);
1717 }
1718 
1719 static void
1720 mfi_aen_complete(struct mfi_command *cm)
1721 {
1722 	struct mfi_frame_header *hdr;
1723 	struct mfi_softc *sc;
1724 	struct mfi_evt_detail *detail;
1725 	struct mfi_aen *mfi_aen_entry, *tmp;
1726 	int seq = 0, aborted = 0;
1727 
1728 	sc = cm->cm_sc;
1729 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1730 
1731 	if (sc->mfi_aen_cm == NULL)
1732 		return;
1733 
1734 	hdr = &cm->cm_frame->header;
1735 
1736 	if (sc->cm_aen_abort ||
1737 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1738 		sc->cm_aen_abort = 0;
1739 		aborted = 1;
1740 	} else {
1741 		sc->mfi_aen_triggered = 1;
1742 		if (sc->mfi_poll_waiting) {
1743 			sc->mfi_poll_waiting = 0;
1744 			selwakeup(&sc->mfi_select);
1745 		}
1746 		detail = cm->cm_data;
1747 		mfi_queue_evt(sc, detail);
1748 		seq = detail->seq + 1;
1749 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1750 		    tmp) {
1751 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1752 			    aen_link);
1753 			PROC_LOCK(mfi_aen_entry->p);
1754 			kern_psignal(mfi_aen_entry->p, SIGIO);
1755 			PROC_UNLOCK(mfi_aen_entry->p);
1756 			free(mfi_aen_entry, M_MFIBUF);
1757 		}
1758 	}
1759 
1760 	free(cm->cm_data, M_MFIBUF);
1761 	wakeup(&sc->mfi_aen_cm);
1762 	sc->mfi_aen_cm = NULL;
1763 	mfi_release_command(cm);
1764 
1765 	/* set it up again so the driver can catch more events */
1766 	if (!aborted)
1767 		mfi_aen_setup(sc, seq);
1768 }
1769 
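/* Maximum number of event entries fetched per MFI_DCMD_CTRL_EVENT_GET. */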
1770 #define MAX_EVENTS 15
1771 
1772 static int
1773 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1774 {
1775 	struct mfi_command *cm;
1776 	struct mfi_dcmd_frame *dcmd;
1777 	struct mfi_evt_list *el;
1778 	union mfi_evt class_locale;
1779 	int error, i, seq, size;
1780 
1781 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1782 
1783 	class_locale.members.reserved = 0;
1784 	class_locale.members.locale = mfi_event_locale;
1785 	class_locale.members.evt_class  = mfi_event_class;
1786 
1787 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1788 		* (MAX_EVENTS - 1);
1789 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1790 	if (el == NULL)
1791 		return (ENOMEM);
1792 
1793 	for (seq = start_seq;;) {
1794 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1795 			free(el, M_MFIBUF);
1796 			return (EBUSY);
1797 		}
1798 
1799 		dcmd = &cm->cm_frame->dcmd;
1800 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1801 		dcmd->header.cmd = MFI_CMD_DCMD;
1802 		dcmd->header.timeout = 0;
1803 		dcmd->header.data_len = size;
1804 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1805 		((uint32_t *)&dcmd->mbox)[0] = seq;
1806 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1807 		cm->cm_sg = &dcmd->sgl;
1808 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1809 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1810 		cm->cm_data = el;
1811 		cm->cm_len = size;
1812 
1813 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1814 			device_printf(sc->mfi_dev,
1815 			    "Failed to get controller entries\n");
1816 			mfi_release_command(cm);
1817 			break;
1818 		}
1819 
1820 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1821 		    BUS_DMASYNC_POSTREAD);
1822 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1823 
1824 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1825 			mfi_release_command(cm);
1826 			break;
1827 		}
1828 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1829 			device_printf(sc->mfi_dev,
1830 			    "Error %d fetching controller entries\n",
1831 			    dcmd->header.cmd_status);
1832 			mfi_release_command(cm);
1833 			error = EIO;
1834 			break;
1835 		}
1836 		mfi_release_command(cm);
1837 
1838 		for (i = 0; i < el->count; i++) {
1839 			/*
1840 			 * If this event is newer than 'stop_seq' then
1841 			 * break out of the loop.  Note that the log
1842 			 * is a circular buffer so we have to handle
1843 			 * the case that our stop point is earlier in
1844 			 * the buffer than our start point.
1845 			 */
1846 			if (el->event[i].seq >= stop_seq) {
1847 				if (start_seq <= stop_seq)
1848 					break;
1849 				else if (el->event[i].seq < start_seq)
1850 					break;
1851 			}
1852 			mfi_queue_evt(sc, &el->event[i]);
1853 		}
1854 		seq = el->event[el->count - 1].seq + 1;
1855 	}
1856 
1857 	free(el, M_MFIBUF);
1858 	return (error);
1859 }
1860 
1861 static int
1862 mfi_add_ld(struct mfi_softc *sc, int id)
1863 {
1864 	struct mfi_command *cm;
1865 	struct mfi_dcmd_frame *dcmd = NULL;
1866 	struct mfi_ld_info *ld_info = NULL;
1867 	struct mfi_disk_pending *ld_pend;
1868 	int error;
1869 
1870 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1871 
1872 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1873 	if (ld_pend != NULL) {
1874 		ld_pend->ld_id = id;
1875 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1876 	}
1877 
1878 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1879 	    (void **)&ld_info, sizeof(*ld_info));
1880 	if (error) {
1881 		device_printf(sc->mfi_dev,
1882 		    "Failed to allocate MFI_DCMD_LD_GET_INFO command: %d\n", error);
1883 		if (ld_info)
1884 			free(ld_info, M_MFIBUF);
1885 		return (error);
1886 	}
1887 	cm->cm_flags = MFI_CMD_DATAIN;
1888 	dcmd = &cm->cm_frame->dcmd;
1889 	dcmd->mbox[0] = id;
1890 	if (mfi_wait_command(sc, cm) != 0) {
1891 		device_printf(sc->mfi_dev,
1892 		    "Failed to get logical drive: %d\n", id);
1893 		free(ld_info, M_MFIBUF);
1894 		return (0);
1895 	}
1896 	if (ld_info->ld_config.params.isSSCD != 1)
1897 		mfi_add_ld_complete(cm);
1898 	else {
1899 		mfi_release_command(cm);
1900 		if (ld_info)		/* ld_info for SSCD drives is freed here */
1901 			free(ld_info, M_MFIBUF);
1902 	}
1903 	return (0);
1904 }
1905 
1906 static void
1907 mfi_add_ld_complete(struct mfi_command *cm)
1908 {
1909 	struct mfi_frame_header *hdr;
1910 	struct mfi_ld_info *ld_info;
1911 	struct mfi_softc *sc;
1912 	device_t child;
1913 
1914 	sc = cm->cm_sc;
1915 	hdr = &cm->cm_frame->header;
1916 	ld_info = cm->cm_private;
1917 
1918 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1919 		free(ld_info, M_MFIBUF);
1920 		wakeup(&sc->mfi_map_sync_cm);
1921 		mfi_release_command(cm);
1922 		return;
1923 	}
1924 	wakeup(&sc->mfi_map_sync_cm);
1925 	mfi_release_command(cm);
1926 
1927 	mtx_unlock(&sc->mfi_io_lock);
1928 	mtx_lock(&Giant);
1929 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1930 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1931 		free(ld_info, M_MFIBUF);
1932 		mtx_unlock(&Giant);
1933 		mtx_lock(&sc->mfi_io_lock);
1934 		return;
1935 	}
1936 
1937 	device_set_ivars(child, ld_info);
1938 	device_set_desc(child, "MFI Logical Disk");
1939 	bus_generic_attach(sc->mfi_dev);
1940 	mtx_unlock(&Giant);
1941 	mtx_lock(&sc->mfi_io_lock);
1942 }
1943 
1944 static int
1945 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1945 {
1946 	struct mfi_command *cm;
1947 	struct mfi_dcmd_frame *dcmd = NULL;
1948 	struct mfi_pd_info *pd_info = NULL;
1949 	struct mfi_system_pending *syspd_pend;
1950 	int error;
1951 
1952 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1953 
1954 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1955 	if (syspd_pend != NULL) {
1956 		syspd_pend->pd_id = id;
1957 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1958 	}
1959 
1960 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1961 		(void **)&pd_info, sizeof(*pd_info));
1962 	if (error) {
1963 		device_printf(sc->mfi_dev,
1964 		    "Failed to allocate MFI_DCMD_PD_GET_INFO command: %d\n",
1965 		    error);
1966 		if (pd_info)
1967 			free(pd_info, M_MFIBUF);
1968 		return (error);
1969 	}
1970 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1971 	dcmd = &cm->cm_frame->dcmd;
1972 	dcmd->mbox[0] = id;
1973 	dcmd->header.scsi_status = 0;
1974 	dcmd->header.pad0 = 0;
1975 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1976 		device_printf(sc->mfi_dev,
1977 		    "Failed to get physical drive info %d\n", id);
1978 		free(pd_info, M_MFIBUF);
1979 		mfi_release_command(cm);
1980 		return (error);
1981 	}
1982 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1983 	    BUS_DMASYNC_POSTREAD);
1984 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1985 	mfi_add_sys_pd_complete(cm);
1986 	return (0);
1987 }
1988 
1989 static void
1990 mfi_add_sys_pd_complete(struct mfi_command *cm)
1991 {
1992 	struct mfi_frame_header *hdr;
1993 	struct mfi_pd_info *pd_info;
1994 	struct mfi_softc *sc;
1995 	device_t child;
1996 
1997 	sc = cm->cm_sc;
1998 	hdr = &cm->cm_frame->header;
1999 	pd_info = cm->cm_private;
2000 
2001 	if (hdr->cmd_status != MFI_STAT_OK) {
2002 		free(pd_info, M_MFIBUF);
2003 		mfi_release_command(cm);
2004 		return;
2005 	}
2006 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2007 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2008 		    pd_info->ref.v.device_id);
2009 		free(pd_info, M_MFIBUF);
2010 		mfi_release_command(cm);
2011 		return;
2012 	}
2013 	mfi_release_command(cm);
2014 
2015 	mtx_unlock(&sc->mfi_io_lock);
2016 	mtx_lock(&Giant);
2017 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2018 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2019 		free(pd_info, M_MFIBUF);
2020 		mtx_unlock(&Giant);
2021 		mtx_lock(&sc->mfi_io_lock);
2022 		return;
2023 	}
2024 
2025 	device_set_ivars(child, pd_info);
2026 	device_set_desc(child, "MFI System PD");
2027 	bus_generic_attach(sc->mfi_dev);
2028 	mtx_unlock(&Giant);
2029 	mtx_lock(&sc->mfi_io_lock);
2030 }
2031 
2032 static struct mfi_command *
2033 mfi_bio_command(struct mfi_softc *sc)
2034 {
2035 	struct bio *bio;
2036 	struct mfi_command *cm = NULL;
2037 
2038 	/* Reserve two commands to avoid starving ioctl requests */
2039 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2040 		return (NULL);
2041 	}
2042 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2043 		return (NULL);
2044 	}
2045 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2046 		cm = mfi_build_ldio(sc, bio);
2047 	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2048 		cm = mfi_build_syspdio(sc, bio);
2049 	}
2050 	if (cm == NULL)
2051 		mfi_enqueue_bio(sc, bio);
2052 	return (cm);
2053 }
2054 
2055 /*
2056  * Mostly copied from cam/scsi/scsi_all.c:scsi_read_write.
2057  */
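/*
 * CDB size selection: a 6 byte CDB covers LBAs below 2^21 with up to
 * 255 blocks, a 10 byte CDB covers 32-bit LBAs with up to 65535 blocks,
 * a 12 byte CDB extends the block count to 32 bits, and a 16 byte CDB
 * is needed once the LBA itself exceeds 32 bits.
 */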
2058 
2059 int
2060 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2061 {
2062 	int cdb_len;
2063 
2064 	if (((lba & 0x1fffff) == lba)
2065 	    && ((block_count & 0xff) == block_count)
2066 	    && (byte2 == 0)) {
2067 		/* We can fit in a 6 byte CDB */
2068 		struct scsi_rw_6 *scsi_cmd;
2069 
2070 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2071 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2072 		scsi_ulto3b(lba, scsi_cmd->addr);
2073 		scsi_cmd->length = block_count & 0xff;
2074 		scsi_cmd->control = 0;
2075 		cdb_len = sizeof(*scsi_cmd);
2076 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2077 		/* Need a 10 byte CDB */
2078 		struct scsi_rw_10 *scsi_cmd;
2079 
2080 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2081 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2082 		scsi_cmd->byte2 = byte2;
2083 		scsi_ulto4b(lba, scsi_cmd->addr);
2084 		scsi_cmd->reserved = 0;
2085 		scsi_ulto2b(block_count, scsi_cmd->length);
2086 		scsi_cmd->control = 0;
2087 		cdb_len = sizeof(*scsi_cmd);
2088 	} else if (((block_count & 0xffffffff) == block_count) &&
2089 	    ((lba & 0xffffffff) == lba)) {
2090 		/* Block count is too big for a 10 byte CDB; use a 12 byte CDB */
2091 		struct scsi_rw_12 *scsi_cmd;
2092 
2093 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2094 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2095 		scsi_cmd->byte2 = byte2;
2096 		scsi_ulto4b(lba, scsi_cmd->addr);
2097 		scsi_cmd->reserved = 0;
2098 		scsi_ulto4b(block_count, scsi_cmd->length);
2099 		scsi_cmd->control = 0;
2100 		cdb_len = sizeof(*scsi_cmd);
2101 	} else {
2102 		/*
2103 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2104 		 * than 2^32.
2105 		 */
2106 		struct scsi_rw_16 *scsi_cmd;
2107 
2108 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2109 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2110 		scsi_cmd->byte2 = byte2;
2111 		scsi_u64to8b(lba, scsi_cmd->addr);
2112 		scsi_cmd->reserved = 0;
2113 		scsi_ulto4b(block_count, scsi_cmd->length);
2114 		scsi_cmd->control = 0;
2115 		cdb_len = sizeof(*scsi_cmd);
2116 	}
2117 
2118 	return (cdb_len);
2119 }
2120 
2121 extern char *unmapped_buf;
2122 
2123 static struct mfi_command *
2124 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2125 {
2126 	struct mfi_command *cm;
2127 	struct mfi_pass_frame *pass;
2128 	uint32_t context = 0;
2129 	int flags = 0, blkcount = 0, readop;
2130 	uint8_t cdb_len;
2131 
2132 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2133 
2134 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2135 		return (NULL);
2136 
2137 	/* Zero out the MFI frame */
2138 	context = cm->cm_frame->header.context;
2139 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2140 	cm->cm_frame->header.context = context;
2141 	pass = &cm->cm_frame->pass;
2142 	bzero(pass->cdb, 16);
2143 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2144 	switch (bio->bio_cmd) {
2145 	case BIO_READ:
2146 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2147 		readop = 1;
2148 		break;
2149 	case BIO_WRITE:
2150 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2151 		readop = 0;
2152 		break;
2153 	default:
2154 		/* TODO: handle BIO_DELETE */
2155 		panic("Unsupported bio command 0x%x", bio->bio_cmd);
2156 	}
2157 
2158 	/* Cheat with the sector length to avoid a non-constant division */
2159 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2160 	/* Fill the LBA and Transfer length in CDB */
2161 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2162 	    pass->cdb);
2163 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2164 	pass->header.lun_id = 0;
2165 	pass->header.timeout = 0;
2166 	pass->header.flags = 0;
2167 	pass->header.scsi_status = 0;
2168 	pass->header.sense_len = MFI_SENSE_LEN;
2169 	pass->header.data_len = bio->bio_bcount;
2170 	pass->header.cdb_len = cdb_len;
2171 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2172 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2173 	cm->cm_complete = mfi_bio_complete;
2174 	cm->cm_private = bio;
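	/* The data pages are mapped later via bus_dmamap_load_bio(). */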
2175 	cm->cm_data = unmapped_buf;
2176 	cm->cm_len = bio->bio_bcount;
2177 	cm->cm_sg = &pass->sgl;
2178 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2179 	cm->cm_flags = flags;
2180 
2181 	return (cm);
2182 }
2183 
2184 static struct mfi_command *
2185 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2186 {
2187 	struct mfi_io_frame *io;
2188 	struct mfi_command *cm;
2189 	int flags;
2190 	uint32_t blkcount;
2191 	uint32_t context = 0;
2192 
2193 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2194 
2195 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2196 		return (NULL);
2197 
2198 	/* Zero out the MFI frame */
2199 	context = cm->cm_frame->header.context;
2200 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2201 	cm->cm_frame->header.context = context;
2202 	io = &cm->cm_frame->io;
2203 	switch (bio->bio_cmd) {
2204 	case BIO_READ:
2205 		io->header.cmd = MFI_CMD_LD_READ;
2206 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2207 		break;
2208 	case BIO_WRITE:
2209 		io->header.cmd = MFI_CMD_LD_WRITE;
2210 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2211 		break;
2212 	default:
2213 		/* TODO: handle BIO_DELETE */
2214 		panic("Unsupported bio command 0x%x", bio->bio_cmd);
2215 	}
2216 
2217 	/* Cheat with the sector length to avoid a non-constant division */
2218 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2219 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2220 	io->header.timeout = 0;
2221 	io->header.flags = 0;
2222 	io->header.scsi_status = 0;
2223 	io->header.sense_len = MFI_SENSE_LEN;
2224 	io->header.data_len = blkcount;
2225 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2226 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2227 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2228 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2229 	cm->cm_complete = mfi_bio_complete;
2230 	cm->cm_private = bio;
2231 	cm->cm_data = unmapped_buf;
2232 	cm->cm_len = bio->bio_bcount;
2233 	cm->cm_sg = &io->sgl;
2234 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2235 	cm->cm_flags = flags;
2236 
2237 	return (cm);
2238 }
2239 
2240 static void
2241 mfi_bio_complete(struct mfi_command *cm)
2242 {
2243 	struct bio *bio;
2244 	struct mfi_frame_header *hdr;
2245 	struct mfi_softc *sc;
2246 
2247 	bio = cm->cm_private;
2248 	hdr = &cm->cm_frame->header;
2249 	sc = cm->cm_sc;
2250 
2251 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2252 		bio->bio_flags |= BIO_ERROR;
2253 		bio->bio_error = EIO;
2254 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2255 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2256 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2257 	} else if (cm->cm_error != 0) {
2258 		bio->bio_flags |= BIO_ERROR;
2259 		bio->bio_error = cm->cm_error;
2260 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2261 		    cm, cm->cm_error);
2262 	}
2263 
2264 	mfi_release_command(cm);
2265 	mfi_disk_complete(bio);
2266 }
2267 
2268 void
2269 mfi_startio(struct mfi_softc *sc)
2270 {
2271 	struct mfi_command *cm;
2272 	struct ccb_hdr *ccbh;
2273 
2274 	for (;;) {
2275 		/* Don't bother if we're short on resources */
2276 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2277 			break;
2278 
2279 		/* Try a command that has already been prepared */
2280 		cm = mfi_dequeue_ready(sc);
2281 
2282 		if (cm == NULL) {
2283 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2284 				cm = sc->mfi_cam_start(ccbh);
2285 		}
2286 
2287 		/* Nope, so look for work on the bioq */
2288 		if (cm == NULL)
2289 			cm = mfi_bio_command(sc);
2290 
2291 		/* No work available, so exit */
2292 		if (cm == NULL)
2293 			break;
2294 
2295 		/* Send the command to the controller */
2296 		if (mfi_mapcmd(sc, cm) != 0) {
2297 			device_printf(sc->mfi_dev, "Failed to start I/O\n");
2298 			mfi_requeue_ready(cm);
2299 			break;
2300 		}
2301 	}
2302 }
2303 
2304 int
2305 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2306 {
2307 	int error, polled;
2308 
2309 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2310 
2311 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2312 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2313 		if (cm->cm_flags & MFI_CMD_CCB)
2314 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2315 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2316 			    polled);
2317 		else if (cm->cm_flags & MFI_CMD_BIO)
2318 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2319 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2320 			    polled);
2321 		else
2322 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2323 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2324 			    mfi_data_cb, cm, polled);
2325 		if (error == EINPROGRESS) {
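			/*
			 * Mapping was deferred; freeze the queue and let
			 * mfi_data_cb() issue the frame when it fires.
			 */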
2326 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2327 			return (0);
2328 		}
2329 	} else {
2330 		error = mfi_send_frame(sc, cm);
2331 	}
2332 
2333 	return (error);
2334 }
2335 
2336 static void
2337 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2338 {
2339 	struct mfi_frame_header *hdr;
2340 	struct mfi_command *cm;
2341 	union mfi_sgl *sgl;
2342 	struct mfi_softc *sc;
2343 	int i, j, first, dir;
2344 	int sge_size, locked;
2345 
2346 	cm = (struct mfi_command *)arg;
2347 	sc = cm->cm_sc;
2348 	hdr = &cm->cm_frame->header;
2349 	sgl = cm->cm_sg;
2350 
2351 	/*
2352 	 * Check whether we already hold the lock: this is an async
2353 	 * callback, so even though our caller mfi_mapcmd() asserts that
2354 	 * it holds the lock, the lock may have been dropped by the time
2355 	 * we run if bus_dmamap_load() returned before our completion.
2357 	 */
2358 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2359 		mtx_lock(&sc->mfi_io_lock);
2360 
2361 	if (error) {
2362 		printf("error %d in callback\n", error);
2363 		cm->cm_error = error;
2364 		mfi_complete(sc, cm);
2365 		goto out;
2366 	}
2367 	/* Use the IEEE SGL only for I/Os on a SKINNY controller.
2368 	 * For other commands on a SKINNY controller use either
2369 	 * sg32 or sg64 based on the sizeof(bus_addr_t).
2370 	 * Also calculate the total frame size based on the type
2371 	 * of SGL used.
2372 	 */
2373 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2374 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2375 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2376 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2377 		for (i = 0; i < nsegs; i++) {
2378 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2379 			sgl->sg_skinny[i].len = segs[i].ds_len;
2380 			sgl->sg_skinny[i].flag = 0;
2381 		}
2382 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2383 		sge_size = sizeof(struct mfi_sg_skinny);
2384 		hdr->sg_count = nsegs;
2385 	} else {
2386 		j = 0;
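		/*
		 * For MFI_CMD_STP the first cm_stp_len bytes of the first
		 * segment get their own SG entry; the loops below then
		 * skip past those bytes.
		 */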
2387 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2388 			first = cm->cm_stp_len;
2389 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2390 				sgl->sg32[j].addr = segs[0].ds_addr;
2391 				sgl->sg32[j++].len = first;
2392 			} else {
2393 				sgl->sg64[j].addr = segs[0].ds_addr;
2394 				sgl->sg64[j++].len = first;
2395 			}
2396 		} else
2397 			first = 0;
2398 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2399 			for (i = 0; i < nsegs; i++) {
2400 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2401 				sgl->sg32[j++].len = segs[i].ds_len - first;
2402 				first = 0;
2403 			}
2404 		} else {
2405 			for (i = 0; i < nsegs; i++) {
2406 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2407 				sgl->sg64[j++].len = segs[i].ds_len - first;
2408 				first = 0;
2409 			}
2410 			hdr->flags |= MFI_FRAME_SGL64;
2411 		}
2412 		hdr->sg_count = j;
2413 		sge_size = sc->mfi_sge_size;
2414 	}
2415 
2416 	dir = 0;
2417 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2418 		dir |= BUS_DMASYNC_PREREAD;
2419 		hdr->flags |= MFI_FRAME_DIR_READ;
2420 	}
2421 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2422 		dir |= BUS_DMASYNC_PREWRITE;
2423 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2424 	}
2425 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2426 	cm->cm_flags |= MFI_CMD_MAPPED;
2427 
2428 	/*
2429 	 * Instead of calculating the total number of frames in the
2430 	 * compound frame, it's already assumed that there will be at
2431 	 * least 1 frame, so don't compensate for the modulo of the
2432 	 * following division.
2433 	 */
2434 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2435 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2436 
2437 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2438 		printf("error %d in callback from mfi_send_frame\n", error);
2439 		cm->cm_error = error;
2440 		mfi_complete(sc, cm);
2441 		goto out;
2442 	}
2443 
2444 out:
2445 	/* leave the lock in the state we found it */
2446 	if (locked == 0)
2447 		mtx_unlock(&sc->mfi_io_lock);
2448 
2449 	return;
2450 }
2451 
2452 static int
2453 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2454 {
2455 	int error;
2456 
2457 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2458 
2459 	if (sc->MFA_enabled)
2460 		error = mfi_tbolt_send_frame(sc, cm);
2461 	else
2462 		error = mfi_std_send_frame(sc, cm);
2463 
2464 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2465 		mfi_remove_busy(cm);
2466 
2467 	return (error);
2468 }
2469 
2470 static int
2471 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2472 {
2473 	struct mfi_frame_header *hdr;
2474 	int tm = mfi_polled_cmd_timeout * 1000;
2475 
2476 	hdr = &cm->cm_frame->header;
2477 
2478 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2479 		cm->cm_timestamp = time_uptime;
2480 		mfi_enqueue_busy(cm);
2481 	} else {
2482 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2483 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2484 	}
2485 
2486 	/*
2487 	 * The bus address of the command is aligned on a 64 byte boundary,
2488 	 * leaving the low 6 bits zero.  For whatever reason, the
2489 	 * hardware wants the address shifted right by three, leaving just
2490 	 * 3 zero bits.  These three bits are then used as a prefetching
2491 	 * hint for the hardware to predict how many frames need to be
2492 	 * fetched across the bus.  If a command has more than 8 frames
2493 	 * then the 3 bits are set to 0x7 and the firmware uses other
2494 	 * information in the command to determine the total amount to fetch.
2495 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2496 	 * is enough for both 32bit and 64bit systems.
2497 	 */
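	/*
	 * Example: a frame at bus address 0x1040 with three extra frames
	 * is issued as (0x1040 >> 3) | 3 = 0x20b, hinting that four
	 * frames in total should be prefetched.
	 */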
2498 	if (cm->cm_extra_frames > 7)
2499 		cm->cm_extra_frames = 7;
2500 
2501 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2502 
2503 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2504 		return (0);
2505 
2506 	/* This is a polled command, so busy-wait for it to complete. */
2507 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2508 		DELAY(1000);
2509 		tm -= 1;
2510 		if (tm <= 0)
2511 			break;
2512 	}
2513 
2514 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2515 		device_printf(sc->mfi_dev, "Frame %p timed out "
2516 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2517 		return (ETIMEDOUT);
2518 	}
2519 
2520 	return (0);
2521 }
2522 
2524 void
2525 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2526 {
2527 	int dir;

2528 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2529 
2530 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2531 		dir = 0;
2532 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2533 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2534 			dir |= BUS_DMASYNC_POSTREAD;
2535 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2536 			dir |= BUS_DMASYNC_POSTWRITE;
2537 
2538 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2539 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2540 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2541 	}
2542 
2543 	cm->cm_flags |= MFI_CMD_COMPLETED;
2544 
2545 	if (cm->cm_complete != NULL)
2546 		cm->cm_complete(cm);
2547 	else
2548 		wakeup(cm);
2549 }
2550 
2551 static int
2552 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2553 {
2554 	struct mfi_command *cm;
2555 	struct mfi_abort_frame *abort;
2556 	int i = 0, error;
2557 	uint32_t context = 0;
2558 
2559 	mtx_lock(&sc->mfi_io_lock);
2560 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2561 		mtx_unlock(&sc->mfi_io_lock);
2562 		return (EBUSY);
2563 	}
2564 
2565 	/* Zero out the MFI frame */
2566 	context = cm->cm_frame->header.context;
2567 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2568 	cm->cm_frame->header.context = context;
2569 
2570 	abort = &cm->cm_frame->abort;
2571 	abort->header.cmd = MFI_CMD_ABORT;
2572 	abort->header.flags = 0;
2573 	abort->header.scsi_status = 0;
2574 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2575 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2576 	abort->abort_mfi_addr_hi =
2577 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2578 	cm->cm_data = NULL;
2579 	cm->cm_flags = MFI_CMD_POLLED;
2580 
2581 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2582 		device_printf(sc->mfi_dev, "failed to abort command\n");
2583 	mfi_release_command(cm);
2584 
2585 	mtx_unlock(&sc->mfi_io_lock);
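	/* Wait up to 25 seconds (5 x 5 s) for the aborted command to finish. */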
2586 	while (i < 5 && *cm_abort != NULL) {
2587 		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2589 		i++;
2590 	}
2591 	if (*cm_abort != NULL) {
2592 		/* Force a completion if the command didn't abort */
2593 		mtx_lock(&sc->mfi_io_lock);
2594 		(*cm_abort)->cm_complete(*cm_abort);
2595 		mtx_unlock(&sc->mfi_io_lock);
2596 	}
2597 
2598 	return (error);
2599 }
2600 
2601 int
2602 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2603      int len)
2604 {
2605 	struct mfi_command *cm;
2606 	struct mfi_io_frame *io;
2607 	int error;
2608 	uint32_t context = 0;
2609 
2610 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2611 		return (EBUSY);
2612 
2613 	/* Zero out the MFI frame */
2614 	context = cm->cm_frame->header.context;
2615 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2616 	cm->cm_frame->header.context = context;
2617 
2618 	io = &cm->cm_frame->io;
2619 	io->header.cmd = MFI_CMD_LD_WRITE;
2620 	io->header.target_id = id;
2621 	io->header.timeout = 0;
2622 	io->header.flags = 0;
2623 	io->header.scsi_status = 0;
2624 	io->header.sense_len = MFI_SENSE_LEN;
2625 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2626 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2627 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2628 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2629 	io->lba_lo = lba & 0xffffffff;
2630 	cm->cm_data = virt;
2631 	cm->cm_len = len;
2632 	cm->cm_sg = &io->sgl;
2633 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2634 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2635 
2636 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2637 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2638 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2639 	    BUS_DMASYNC_POSTWRITE);
2640 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2641 	mfi_release_command(cm);
2642 
2643 	return (error);
2644 }
2645 
2646 int
2647 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2648     int len)
2649 {
2650 	struct mfi_command *cm;
2651 	struct mfi_pass_frame *pass;
2652 	int error, readop, cdb_len;
2653 	uint32_t blkcount;
2654 
2655 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2656 		return (EBUSY);
2657 
2658 	pass = &cm->cm_frame->pass;
2659 	bzero(pass->cdb, 16);
2660 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2661 
2662 	readop = 0;
2663 	blkcount = howmany(len, MFI_SECTOR_LEN);
2664 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2665 	pass->header.target_id = id;
2666 	pass->header.timeout = 0;
2667 	pass->header.flags = 0;
2668 	pass->header.scsi_status = 0;
2669 	pass->header.sense_len = MFI_SENSE_LEN;
2670 	pass->header.data_len = len;
2671 	pass->header.cdb_len = cdb_len;
2672 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2673 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2674 	cm->cm_data = virt;
2675 	cm->cm_len = len;
2676 	cm->cm_sg = &pass->sgl;
2677 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2678 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2679 
2680 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2681 		device_printf(sc->mfi_dev, "failed to dump blocks\n");
2682 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2683 	    BUS_DMASYNC_POSTWRITE);
2684 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2685 	mfi_release_command(cm);
2686 
2687 	return (error);
2688 }
2689 
2690 static int
2691 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2692 {
2693 	struct mfi_softc *sc;
2694 	int error;
2695 
2696 	sc = dev->si_drv1;
2697 
2698 	mtx_lock(&sc->mfi_io_lock);
2699 	if (sc->mfi_detaching)
2700 		error = ENXIO;
2701 	else {
2702 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2703 		error = 0;
2704 	}
2705 	mtx_unlock(&sc->mfi_io_lock);
2706 
2707 	return (error);
2708 }
2709 
2710 static int
2711 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2712 {
2713 	struct mfi_softc *sc;
2714 	struct mfi_aen *mfi_aen_entry, *tmp;
2715 
2716 	sc = dev->si_drv1;
2717 
2718 	mtx_lock(&sc->mfi_io_lock);
2719 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2720 
2721 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2722 		if (mfi_aen_entry->p == curproc) {
2723 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2724 			    aen_link);
2725 			free(mfi_aen_entry, M_MFIBUF);
2726 		}
2727 	}
2728 	mtx_unlock(&sc->mfi_io_lock);
2729 	return (0);
2730 }
2731 
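/*
 * Serialize DCMDs that change the drive configuration; returns nonzero
 * when the config lock was taken so mfi_config_unlock() can drop it.
 */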
2732 static int
2733 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2734 {
2735 
2736 	switch (opcode) {
2737 	case MFI_DCMD_LD_DELETE:
2738 	case MFI_DCMD_CFG_ADD:
2739 	case MFI_DCMD_CFG_CLEAR:
2740 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2741 		sx_xlock(&sc->mfi_config_lock);
2742 		return (1);
2743 	default:
2744 		return (0);
2745 	}
2746 }
2747 
2748 static void
2749 mfi_config_unlock(struct mfi_softc *sc, int locked)
2750 {
2751 
2752 	if (locked)
2753 		sx_xunlock(&sc->mfi_config_lock);
2754 }
2755 
2756 /*
2757  * Perform pre-issue checks on commands from userland and possibly veto
2758  * them.
2759  */
2760 static int
2761 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2762 {
2763 	struct mfi_disk *ld, *ld2;
2764 	int error;
2765 	struct mfi_system_pd *syspd = NULL;
2766 	uint16_t syspd_id;
2767 	uint16_t *mbox;
2768 
2769 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2770 	error = 0;
2771 	switch (cm->cm_frame->dcmd.opcode) {
2772 	case MFI_DCMD_LD_DELETE:
2773 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2774 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2775 				break;
2776 		}
2777 		if (ld == NULL)
2778 			error = ENOENT;
2779 		else
2780 			error = mfi_disk_disable(ld);
2781 		break;
2782 	case MFI_DCMD_CFG_CLEAR:
2783 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2784 			error = mfi_disk_disable(ld);
2785 			if (error)
2786 				break;
2787 		}
2788 		if (error) {
2789 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2790 				if (ld2 == ld)
2791 					break;
2792 				mfi_disk_enable(ld2);
2793 			}
2794 		}
2795 		break;
2796 	case MFI_DCMD_PD_STATE_SET:
2797 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2798 		syspd_id = mbox[0];
2799 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2800 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2801 				if (syspd->pd_id == syspd_id)
2802 					break;
2803 			}
2804 		} else
2805 			break;
2807 		if (syspd)
2808 			error = mfi_syspd_disable(syspd);
2809 		break;
2810 	default:
2811 		break;
2812 	}
2813 	return (error);
2814 }
2815 
2816 /* Perform post-issue checks on commands from userland. */
2817 static void
2818 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2819 {
2820 	struct mfi_disk *ld, *ldn;
2821 	struct mfi_system_pd *syspd = NULL;
2822 	uint16_t syspd_id;
2823 	uint16_t *mbox;
2824 
2825 	switch (cm->cm_frame->dcmd.opcode) {
2826 	case MFI_DCMD_LD_DELETE:
2827 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2828 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2829 				break;
2830 		}
2831 		KASSERT(ld != NULL, ("volume disappeared"));
2832 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2833 			mtx_unlock(&sc->mfi_io_lock);
2834 			mtx_lock(&Giant);
2835 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2836 			mtx_unlock(&Giant);
2837 			mtx_lock(&sc->mfi_io_lock);
2838 		} else
2839 			mfi_disk_enable(ld);
2840 		break;
2841 	case MFI_DCMD_CFG_CLEAR:
2842 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2843 			mtx_unlock(&sc->mfi_io_lock);
2844 			mtx_lock(&Giant);
2845 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2846 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2847 			}
2848 			mtx_unlock(&Giant);
2849 			mtx_lock(&sc->mfi_io_lock);
2850 		} else {
2851 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2852 				mfi_disk_enable(ld);
2853 		}
2854 		break;
2855 	case MFI_DCMD_CFG_ADD:
2856 		mfi_ldprobe(sc);
2857 		break;
2858 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2859 		mfi_ldprobe(sc);
2860 		break;
2861 	case MFI_DCMD_PD_STATE_SET:
2862 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2863 		syspd_id = mbox[0];
2864 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2865 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2866 				if (syspd->pd_id == syspd_id)
2867 					break;
2868 			}
2869 		} else
2870 			break;
2872 		/* If the transition fails then enable the syspd again */
2873 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2874 			mfi_syspd_enable(syspd);
2875 		break;
2876 	}
2877 }
2878 
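/*
 * Decide whether a CFG_ADD or LD_DELETE DCMD targets an SSCD
 * (solid-state cache) volume; the ioctl path skips the usual pre/post
 * command checks for those.
 */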
2879 static int
2880 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2881 {
2882 	struct mfi_config_data *conf_data;
2883 	struct mfi_command *ld_cm = NULL;
2884 	struct mfi_ld_info *ld_info = NULL;
2885 	struct mfi_ld_config *ld;
2886 	char *p;
2887 	int error = 0;
2888 
2889 	conf_data = (struct mfi_config_data *)cm->cm_data;
2890 
2891 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2892 		p = (char *)conf_data->array;
2893 		p += conf_data->array_size * conf_data->array_count;
2894 		ld = (struct mfi_ld_config *)p;
2895 		if (ld->params.isSSCD == 1)
2896 			error = 1;
2897 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2898 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2899 		    (void **)&ld_info, sizeof(*ld_info));
2900 		if (error) {
2901 			device_printf(sc->mfi_dev, "Failed to allocate "
2902 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2903 			if (ld_info)
2904 				free(ld_info, M_MFIBUF);
2905 			return (0);
2906 		}
2907 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2908 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2909 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2910 		if (mfi_wait_command(sc, ld_cm) != 0) {
2911 			device_printf(sc->mfi_dev, "failed to get logical drive\n");
2912 			mfi_release_command(ld_cm);
2913 			free(ld_info, M_MFIBUF);
2914 			return (0);
2915 		}
2916 
2917 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2918 			free(ld_info, M_MFIBUF);
2919 			mfi_release_command(ld_cm);
2920 			return (0);
2921 		} else
2923 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2924 
2925 		if (ld_info->ld_config.params.isSSCD == 1)
2926 			error = 1;
2927 
2928 		mfi_release_command(ld_cm);
2929 		free(ld_info, M_MFIBUF);
2930 	}
2931 	return (error);
2933 }
2934 
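/*
 * Map the user iovecs of an MFI_CMD_STP ioctl into per-SGE kernel DMA
 * buffers, copy the user data in, and fill both the megasas-style SGE
 * array and the frame's own SG list.
 */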
2935 static int
2936 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2937 {
2938 	uint8_t i;
2939 	struct mfi_ioc_packet *ioc;
2940 	int sge_size, error;
2941 	struct megasas_sge *kern_sge;
2942 
2943 	ioc = (struct mfi_ioc_packet *)arg;
2944 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2945 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame +
2946 	    ioc->mfi_sgl_off);
2946 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2947 
2948 	if (sizeof(bus_addr_t) == 8) {
2949 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2950 		cm->cm_extra_frames = 2;
2951 		sge_size = sizeof(struct mfi_sg64);
2952 	} else {
2953 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2954 		sge_size = sizeof(struct mfi_sg32);
2955 	}
2956 
2957 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2958 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2959 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2960 			1, 0,			/* algnmnt, boundary */
2961 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2962 			BUS_SPACE_MAXADDR,	/* highaddr */
2963 			NULL, NULL,		/* filter, filterarg */
2964 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2965 			2,			/* nsegments */
2966 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2967 			BUS_DMA_ALLOCNOW,	/* flags */
2968 			NULL, NULL,		/* lockfunc, lockarg */
2969 			&sc->mfi_kbuff_arr_dmat[i])) {
2970 			device_printf(sc->mfi_dev,
2971 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2972 			return (ENOMEM);
2973 		}
2974 
2975 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2976 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2977 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2978 			device_printf(sc->mfi_dev,
2979 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2980 			return (ENOMEM);
2981 		}
2982 
2983 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2984 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2985 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2986 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2987 
2988 		if (!sc->kbuff_arr[i]) {
2989 			device_printf(sc->mfi_dev,
2990 			    "Could not allocate memory for kbuff_arr info\n");
2991 			return (-1);
2992 		}
2993 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2994 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2995 
2996 		if (sizeof(bus_addr_t) == 8) {
2997 			cm->cm_frame->stp.sgl.sg64[i].addr =
2998 			    kern_sge[i].phys_addr;
2999 			cm->cm_frame->stp.sgl.sg64[i].len =
3000 			    ioc->mfi_sgl[i].iov_len;
3001 		} else {
3002 			cm->cm_frame->stp.sgl.sg32[i].addr =
3003 			    kern_sge[i].phys_addr;
3004 			cm->cm_frame->stp.sgl.sg32[i].len =
3005 			    ioc->mfi_sgl[i].iov_len;
3006 		}
3007 
3008 		error = copyin(ioc->mfi_sgl[i].iov_base,
3009 		    sc->kbuff_arr[i],
3010 		    ioc->mfi_sgl[i].iov_len);
3011 		if (error != 0) {
3012 			device_printf(sc->mfi_dev, "Copy in failed\n");
3013 			return error;
3014 		}
3015 	}
3016 
3017 	cm->cm_flags |= MFI_CMD_MAPPED;
3018 	return (0);
3019 }
3020 
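/*
 * Handle an MFIIO_PASSTHRU request: stage the user buffer in kernel
 * memory, run the DCMD with the usual pre/post configuration checks,
 * then copy the frame and data back out to userland.
 */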
3021 static int
3022 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3023 {
3024 	struct mfi_command *cm;
3025 	struct mfi_dcmd_frame *dcmd;
3026 	void *ioc_buf = NULL;
3027 	uint32_t context;
3028 	int error = 0, locked;
3029 
3031 	if (ioc->buf_size > 0) {
3032 		if (ioc->buf_size > 1024 * 1024)
3033 			return (ENOMEM);
3034 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3035 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3036 		if (error) {
3037 			device_printf(sc->mfi_dev, "failed to copyin\n");
3038 			free(ioc_buf, M_MFIBUF);
3039 			return (error);
3040 		}
3041 	}
3042 
3043 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3044 
3045 	mtx_lock(&sc->mfi_io_lock);
3046 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3047 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3048 
3049 	/* Save context for later */
3050 	context = cm->cm_frame->header.context;
3051 
3052 	dcmd = &cm->cm_frame->dcmd;
3053 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3054 
3055 	cm->cm_sg = &dcmd->sgl;
3056 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3057 	cm->cm_data = ioc_buf;
3058 	cm->cm_len = ioc->buf_size;
3059 
3060 	/* restore context */
3061 	cm->cm_frame->header.context = context;
3062 
3063 	/* Cheat since we don't know if we're writing or reading */
3064 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3065 
3066 	error = mfi_check_command_pre(sc, cm);
3067 	if (error)
3068 		goto out;
3069 
3070 	error = mfi_wait_command(sc, cm);
3071 	if (error) {
3072 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3073 		goto out;
3074 	}
3075 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3076 	mfi_check_command_post(sc, cm);
3077 out:
3078 	mfi_release_command(cm);
3079 	mtx_unlock(&sc->mfi_io_lock);
3080 	mfi_config_unlock(sc, locked);
3081 	if (ioc->buf_size > 0)
3082 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3083 	if (ioc_buf)
3084 		free(ioc_buf, M_MFIBUF);
3085 	return (error);
3086 }
3087 
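/* Convert a user-supplied (possibly 32-bit) pointer value to a kernel pointer. */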
3088 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3089 
3090 static int
3091 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3092 {
3093 	struct mfi_softc *sc;
3094 	union mfi_statrequest *ms;
3095 	struct mfi_ioc_packet *ioc;
3096 #ifdef COMPAT_FREEBSD32
3097 	struct mfi_ioc_packet32 *ioc32;
3098 #endif
3099 	struct mfi_ioc_aen *aen;
3100 	struct mfi_command *cm = NULL;
3101 	uint32_t context = 0;
3102 	union mfi_sense_ptr sense_ptr;
3103 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3104 	size_t len;
3105 	int i, res;
3106 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3107 #ifdef COMPAT_FREEBSD32
3108 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3109 	struct mfi_ioc_passthru iop_swab;
3110 #endif
3111 	int error, locked;
3112 	union mfi_sgl *sgl;
3113 	sc = dev->si_drv1;
3114 	error = 0;
3115 
3116 	if (sc->adpreset)
3117 		return EBUSY;
3118 
3119 	if (sc->hw_crit_error)
3120 		return EBUSY;
3121 
3122 	if (sc->issuepend_done == 0)
3123 		return EBUSY;
3124 
3125 	switch (cmd) {
3126 	case MFIIO_STATS:
3127 		ms = (union mfi_statrequest *)arg;
3128 		switch (ms->ms_item) {
3129 		case MFIQ_FREE:
3130 		case MFIQ_BIO:
3131 		case MFIQ_READY:
3132 		case MFIQ_BUSY:
3133 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3134 			    sizeof(struct mfi_qstat));
3135 			break;
3136 		default:
3137 			error = ENOIOCTL;
3138 			break;
3139 		}
3140 		break;
3141 	case MFIIO_QUERY_DISK:
3142 	{
3143 		struct mfi_query_disk *qd;
3144 		struct mfi_disk *ld;
3145 
3146 		qd = (struct mfi_query_disk *)arg;
3147 		mtx_lock(&sc->mfi_io_lock);
3148 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3149 			if (ld->ld_id == qd->array_id)
3150 				break;
3151 		}
3152 		if (ld == NULL) {
3153 			qd->present = 0;
3154 			mtx_unlock(&sc->mfi_io_lock);
3155 			return (0);
3156 		}
3157 		qd->present = 1;
3158 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3159 			qd->open = 1;
3160 		bzero(qd->devname, SPECNAMELEN + 1);
3161 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3162 		mtx_unlock(&sc->mfi_io_lock);
3163 		break;
3164 	}
3165 	case MFI_CMD:
3166 #ifdef COMPAT_FREEBSD32
3167 	case MFI_CMD32:
3168 #endif
3169 		{
3170 		devclass_t devclass;
3171 		ioc = (struct mfi_ioc_packet *)arg;
3172 		int adapter;
3173 
3174 		adapter = ioc->mfi_adapter_no;
3175 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3176 			devclass = devclass_find("mfi");
3177 			sc = devclass_get_softc(devclass, adapter);
3178 		}
3179 		mtx_lock(&sc->mfi_io_lock);
3180 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3181 			mtx_unlock(&sc->mfi_io_lock);
3182 			return (EBUSY);
3183 		}
3184 		mtx_unlock(&sc->mfi_io_lock);
3185 		locked = 0;
3186 
3187 		/*
3188 		 * save off original context since copying from user
3189 		 * will clobber some data
3190 		 */
3191 		context = cm->cm_frame->header.context;
3192 		cm->cm_frame->header.context = cm->cm_index;
3193 
3194 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3195 		    2 * MEGAMFI_FRAME_SIZE);
3196 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3197 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3198 		cm->cm_frame->header.scsi_status = 0;
3199 		cm->cm_frame->header.pad0 = 0;
3200 		if (ioc->mfi_sge_count) {
3201 			cm->cm_sg =
3202 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3203 		}
3204 		sgl = cm->cm_sg;
3205 		cm->cm_flags = 0;
3206 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3207 			cm->cm_flags |= MFI_CMD_DATAIN;
3208 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3209 			cm->cm_flags |= MFI_CMD_DATAOUT;
3210 		/* Legacy app shim */
3211 		if (cm->cm_flags == 0)
3212 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3213 		cm->cm_len = cm->cm_frame->header.data_len;
3214 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3215 #ifdef COMPAT_FREEBSD32
3216 			if (cmd == MFI_CMD) {
3217 #endif
3218 				/* Native */
3219 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3220 #ifdef COMPAT_FREEBSD32
3221 			} else {
3222 				/* 32bit on 64bit */
3223 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3224 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3225 			}
3226 #endif
3227 			cm->cm_len += cm->cm_stp_len;
3228 		}
3229 		if (cm->cm_len &&
3230 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3231 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3232 			    M_WAITOK | M_ZERO);
3233 		} else {
3234 			cm->cm_data = 0;
3235 		}
3236 
3237 		/* restore header context */
3238 		cm->cm_frame->header.context = context;
3239 
3240 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3241 			res = mfi_stp_cmd(sc, cm, arg);
3242 			if (res != 0)
3243 				goto out;
3244 		} else {
3245 			temp = data;
3246 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3247 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3248 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3249 #ifdef COMPAT_FREEBSD32
3250 					if (cmd == MFI_CMD) {
3251 #endif
3252 						/* Native */
3253 						addr = ioc->mfi_sgl[i].iov_base;
3254 						len = ioc->mfi_sgl[i].iov_len;
3255 #ifdef COMPAT_FREEBSD32
3256 					} else {
3257 						/* 32bit on 64bit */
3258 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3259 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3260 						len = ioc32->mfi_sgl[i].iov_len;
3261 					}
3262 #endif
3263 					error = copyin(addr, temp, len);
3264 					if (error != 0) {
3265 						device_printf(sc->mfi_dev,
3266 						    "Copy in failed\n");
3267 						goto out;
3268 					}
3269 					temp = &temp[len];
3270 				}
3271 			}
3272 		}
3273 
3274 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3275 			locked = mfi_config_lock(sc,
3276 			     cm->cm_frame->dcmd.opcode);
3277 
3278 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3279 			cm->cm_frame->pass.sense_addr_lo =
3280 			    (uint32_t)cm->cm_sense_busaddr;
3281 			cm->cm_frame->pass.sense_addr_hi =
3282 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3283 		}
3284 		mtx_lock(&sc->mfi_io_lock);
3285 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3286 		if (!skip_pre_post) {
3287 			error = mfi_check_command_pre(sc, cm);
3288 			if (error) {
3289 				mtx_unlock(&sc->mfi_io_lock);
3290 				goto out;
3291 			}
3292 		}
3293 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3294 			device_printf(sc->mfi_dev,
3295 			    "Controller command failed\n");
3296 			mtx_unlock(&sc->mfi_io_lock);
3297 			goto out;
3298 		}
3299 		if (!skip_pre_post) {
3300 			mfi_check_command_post(sc, cm);
3301 		}
3302 		mtx_unlock(&sc->mfi_io_lock);
3303 
3304 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3305 			temp = data;
3306 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3307 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3308 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3309 #ifdef COMPAT_FREEBSD32
3310 					if (cmd == MFI_CMD) {
3311 #endif
3312 						/* Native */
3313 						addr = ioc->mfi_sgl[i].iov_base;
3314 						len = ioc->mfi_sgl[i].iov_len;
3315 #ifdef COMPAT_FREEBSD32
3316 					} else {
3317 						/* 32bit on 64bit */
3318 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3319 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3320 						len = ioc32->mfi_sgl[i].iov_len;
3321 					}
3322 #endif
3323 					error = copyout(temp, addr, len);
3324 					if (error != 0) {
3325 						device_printf(sc->mfi_dev,
3326 						    "Copy out failed\n");
3327 						goto out;
3328 					}
3329 					temp = &temp[len];
3330 				}
3331 			}
3332 		}
3333 
3334 		if (ioc->mfi_sense_len) {
3335 			/* get user-space sense ptr then copy out sense */
3336 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3337 			    &sense_ptr.sense_ptr_data[0],
3338 			    sizeof(sense_ptr.sense_ptr_data));
3339 #ifdef COMPAT_FREEBSD32
3340 			if (cmd != MFI_CMD) {
3341 				/*
3342 				 * Not 64bit native, so zero out any address
3343 				 * over 32bit.
3344 				 */
3344 				sense_ptr.addr.high = 0;
3345 			}
3346 #endif
3347 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3348 			    ioc->mfi_sense_len);
3349 			if (error != 0) {
3350 				device_printf(sc->mfi_dev,
3351 				    "Copy out failed\n");
3352 				goto out;
3353 			}
3354 		}
3355 
3356 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3357 out:
3358 		mfi_config_unlock(sc, locked);
3359 		if (data)
3360 			free(data, M_MFIBUF);
3361 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3362 			for (i = 0; i < 2; i++) {
3363 				if (sc->kbuff_arr[i]) {
3364 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3365 						bus_dmamap_unload(
3366 						    sc->mfi_kbuff_arr_dmat[i],
3367 						    sc->mfi_kbuff_arr_dmamap[i]
3368 						    );
3369 					if (sc->kbuff_arr[i] != NULL)
3370 						bus_dmamem_free(
3371 						    sc->mfi_kbuff_arr_dmat[i],
3372 						    sc->kbuff_arr[i],
3373 						    sc->mfi_kbuff_arr_dmamap[i]
3374 						    );
3375 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3376 						bus_dma_tag_destroy(
3377 						    sc->mfi_kbuff_arr_dmat[i]);
3378 				}
3379 			}
3380 		}
3381 		if (cm) {
3382 			mtx_lock(&sc->mfi_io_lock);
3383 			mfi_release_command(cm);
3384 			mtx_unlock(&sc->mfi_io_lock);
3385 		}
3386 
3387 		break;
3388 		}
3389 	case MFI_SET_AEN:
3390 		aen = (struct mfi_ioc_aen *)arg;
3391 		mtx_lock(&sc->mfi_io_lock);
3392 		error = mfi_aen_register(sc, aen->aen_seq_num,
3393 		    aen->aen_class_locale);
3394 		mtx_unlock(&sc->mfi_io_lock);
3395 
3396 		break;
3397 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3398 		{
3399 			devclass_t devclass;
3400 			struct mfi_linux_ioc_packet l_ioc;
3401 			int adapter;
3402 
3403 			devclass = devclass_find("mfi");
3404 			if (devclass == NULL)
3405 				return (ENOENT);
3406 
3407 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3408 			if (error)
3409 				return (error);
3410 			adapter = l_ioc.lioc_adapter_no;
3411 			sc = devclass_get_softc(devclass, adapter);
3412 			if (sc == NULL)
3413 				return (ENOENT);
3414 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3415 			    cmd, arg, flag, td));
3417 		}
3418 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3419 		{
3420 			devclass_t devclass;
3421 			struct mfi_linux_ioc_aen l_aen;
3422 			int adapter;
3423 
3424 			devclass = devclass_find("mfi");
3425 			if (devclass == NULL)
3426 				return (ENOENT);
3427 
3428 			error = copyin(arg, &l_aen, sizeof(l_aen));
3429 			if (error)
3430 				return (error);
3431 			adapter = l_aen.laen_adapter_no;
3432 			sc = devclass_get_softc(devclass, adapter);
3433 			if (sc == NULL)
3434 				return (ENOENT);
3435 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3436 			    cmd, arg, flag, td));
3438 		}
3439 #ifdef COMPAT_FREEBSD32
3440 	case MFIIO_PASSTHRU32:
3441 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3442 			error = ENOTTY;
3443 			break;
3444 		}
3445 		iop_swab.ioc_frame	= iop32->ioc_frame;
3446 		iop_swab.buf_size	= iop32->buf_size;
3447 		iop_swab.buf		= PTRIN(iop32->buf);
3448 		iop			= &iop_swab;
3449 		/* FALLTHROUGH */
3450 #endif
3451 	case MFIIO_PASSTHRU:
3452 		error = mfi_user_command(sc, iop);
3453 #ifdef COMPAT_FREEBSD32
3454 		if (cmd == MFIIO_PASSTHRU32)
3455 			iop32->ioc_frame = iop_swab.ioc_frame;
3456 #endif
3457 		break;
3458 	default:
3459 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3460 		error = ENOTTY;
3461 		break;
3462 	}
3463 
3464 	return (error);
3465 }
3466 
3467 static int
3468 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3469 {
3470 	struct mfi_softc *sc;
3471 	struct mfi_linux_ioc_packet l_ioc;
3472 	struct mfi_linux_ioc_aen l_aen;
3473 	struct mfi_command *cm = NULL;
3474 	struct mfi_aen *mfi_aen_entry;
3475 	union mfi_sense_ptr sense_ptr;
3476 	uint32_t context = 0;
3477 	uint8_t *data = NULL, *temp;
3478 	int i;
3479 	int error, locked;
3480 
3481 	sc = dev->si_drv1;
3482 	error = 0;
3483 	switch (cmd) {
3484 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3485 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3486 		if (error != 0)
3487 			return (error);
3488 
3489 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3490 			return (EINVAL);
3491 		}
3492 
3493 		mtx_lock(&sc->mfi_io_lock);
3494 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3495 			mtx_unlock(&sc->mfi_io_lock);
3496 			return (EBUSY);
3497 		}
3498 		mtx_unlock(&sc->mfi_io_lock);
3499 		locked = 0;
3500 
3501 		/*
3502 		 * save off original context since copying from user
3503 		 * will clobber some data
3504 		 */
3505 		context = cm->cm_frame->header.context;
3506 
3507 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3508 		      2 * MFI_DCMD_FRAME_SIZE);	/* XXX: always copies two frames */
3509 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3510 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3511 		cm->cm_frame->header.scsi_status = 0;
3512 		cm->cm_frame->header.pad0 = 0;
3513 		if (l_ioc.lioc_sge_count)
3514 			cm->cm_sg =
3515 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3516 		cm->cm_flags = 0;
3517 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3518 			cm->cm_flags |= MFI_CMD_DATAIN;
3519 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3520 			cm->cm_flags |= MFI_CMD_DATAOUT;
3521 		cm->cm_len = cm->cm_frame->header.data_len;
3522 		if (cm->cm_len &&
3523 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3524 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3525 			    M_WAITOK | M_ZERO);
3526 		} else {
3527 			cm->cm_data = NULL;
3528 		}
3529 
3530 		/* restore header context */
3531 		cm->cm_frame->header.context = context;
3532 
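		/*
		 * For DATAOUT commands, gather the user's scatter/gather
		 * segments into the single contiguous kernel buffer.
		 */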
3533 		temp = data;
3534 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3535 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3536 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3537 				       temp,
3538 				       l_ioc.lioc_sgl[i].iov_len);
3539 				if (error != 0) {
3540 					device_printf(sc->mfi_dev,
3541 					    "Copy in failed\n");
3542 					goto out;
3543 				}
3544 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3545 			}
3546 		}
3547 
3548 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3549 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3550 
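		/*
		 * For SCSI passthru, point the firmware at the command's
		 * preallocated DMA-able sense buffer.
		 */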
3551 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3552 			cm->cm_frame->pass.sense_addr_lo =
3553 			    (uint32_t)cm->cm_sense_busaddr;
3554 			cm->cm_frame->pass.sense_addr_hi =
3555 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3556 		}
3557 
3558 		mtx_lock(&sc->mfi_io_lock);
3559 		error = mfi_check_command_pre(sc, cm);
3560 		if (error) {
3561 			mtx_unlock(&sc->mfi_io_lock);
3562 			goto out;
3563 		}
3564 
3565 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3566 			device_printf(sc->mfi_dev,
3567 			    "Controller poll failed\n");
3568 			mtx_unlock(&sc->mfi_io_lock);
3569 			goto out;
3570 		}
3571 
3572 		mfi_check_command_post(sc, cm);
3573 		mtx_unlock(&sc->mfi_io_lock);
3574 
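		/*
		 * For DATAIN commands, scatter the kernel buffer back out
		 * to the user's scatter/gather segments.
		 */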
3575 		temp = data;
3576 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3577 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3578 				error = copyout(temp,
3579 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3580 					l_ioc.lioc_sgl[i].iov_len);
3581 				if (error != 0) {
3582 					device_printf(sc->mfi_dev,
3583 					    "Copy out failed\n");
3584 					goto out;
3585 				}
3586 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3587 			}
3588 		}
3589 
3590 		if (l_ioc.lioc_sense_len) {
3591 			/* get user-space sense ptr then copy out sense */
3592 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3593                             ->lioc_frame.raw[l_ioc.lioc_sense_off],
3594 			    &sense_ptr.sense_ptr_data[0],
3595 			    sizeof(sense_ptr.sense_ptr_data));
3596 #ifdef __amd64__
3597 			/*
3598 			 * Only 32-bit Linux binaries are supported, so
3599 			 * zero out any address bits above 32.
3600 			 */
3601 			sense_ptr.addr.high = 0;
3602 #endif
3603 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3604 			    l_ioc.lioc_sense_len);
3605 			if (error != 0) {
3606 				device_printf(sc->mfi_dev,
3607 				    "Copy out failed\n");
3608 				goto out;
3609 			}
3610 		}
3611 
3612 		error = copyout(&cm->cm_frame->header.cmd_status,
3613 			&((struct mfi_linux_ioc_packet*)arg)
3614 			->lioc_frame.hdr.cmd_status,
3615 			1);
3616 		if (error != 0) {
3617 			device_printf(sc->mfi_dev,
3618 				      "Copy out failed\n");
3619 			goto out;
3620 		}
3621 
3622 out:
3623 		mfi_config_unlock(sc, locked);
3624 		if (data)
3625 			free(data, M_MFIBUF);
3626 		if (cm) {
3627 			mtx_lock(&sc->mfi_io_lock);
3628 			mfi_release_command(cm);
3629 			mtx_unlock(&sc->mfi_io_lock);
3630 		}
3631 
3632 		return (error);
3633 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3634 		error = copyin(arg, &l_aen, sizeof(l_aen));
3635 		if (error != 0)
3636 			return (error);
3637 		printf("AEN registered for pid %d\n", curproc->p_pid);
3638 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3639 		    M_WAITOK);
3640 		mtx_lock(&sc->mfi_io_lock);
3641 		/* An M_WAITOK allocation cannot fail, so no NULL check. */
3642 		mfi_aen_entry->p = curproc;
3643 		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3644 		    aen_link);
3645 
3646 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3647 		    l_aen.laen_class_locale);
3648 
3649 		if (error != 0) {
3650 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3651 			    aen_link);
3652 			free(mfi_aen_entry, M_MFIBUF);
3653 		}
3654 		mtx_unlock(&sc->mfi_io_lock);
3655 
3656 		return (error);
3657 	default:
3658 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3659 		error = ENOTTY;
3660 		break;
3661 	}
3662 
3663 	return (error);
3664 }
3665 
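/*
 * poll(2) handler for the control device: report the device readable
 * once an AEN has fired, or POLLERR when no AEN command is outstanding
 * at the controller.
 */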
3666 static int
3667 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3668 {
3669 	struct mfi_softc *sc;
3670 	int revents = 0;
3671 
3672 	sc = dev->si_drv1;
3673 
3674 	if (poll_events & (POLLIN | POLLRDNORM)) {
3675 		if (sc->mfi_aen_triggered != 0) {
3676 			revents |= poll_events & (POLLIN | POLLRDNORM);
3677 			sc->mfi_aen_triggered = 0;
3678 		}
3679 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3680 			revents |= POLLERR;
3681 		}
3682 	}
3683 
3684 	if (revents == 0) {
3685 		if (poll_events & (POLLIN | POLLRDNORM)) {
3686 			sc->mfi_poll_waiting = 1;
3687 			selrecord(td, &sc->mfi_select);
3688 		}
3689 	}
3690 
3691 	return (revents);
3692 }
3693 
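/*
 * Debugging aid: walk every mfi adapter in the system and report any
 * busy command that has been outstanding longer than mfi_cmd_timeout.
 */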
3694 static void
3695 mfi_dump_all(void)
3696 {
3697 	struct mfi_softc *sc;
3698 	struct mfi_command *cm;
3699 	devclass_t dc;
3700 	time_t deadline;
3701 	int timedout;
3702 	int i;
3703 
3704 	dc = devclass_find("mfi");
3705 	if (dc == NULL) {
3706 		printf("No mfi dev class\n");
3707 		return;
3708 	}
3709 
3710 	for (i = 0; ; i++) {
3711 		sc = devclass_get_softc(dc, i);
3712 		if (sc == NULL)
3713 			break;
3714 		device_printf(sc->mfi_dev, "Dumping\n\n");
3715 		timedout = 0;
3716 		deadline = time_uptime - mfi_cmd_timeout;
3717 		mtx_lock(&sc->mfi_io_lock);
3718 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3719 			if (cm->cm_timestamp <= deadline) {
3720 				device_printf(sc->mfi_dev,
3721 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3722 				    cm, (int)(time_uptime - cm->cm_timestamp));
3723 				MFI_PRINT_CMD(cm);
3724 				timedout++;
3725 			}
3726 		}
3727 
3728 #if 0
3729 		if (timedout)
3730 			MFI_DUMP_CMDS(sc);
3731 #endif
3732 
3733 		mtx_unlock(&sc->mfi_io_lock);
3734 	}
3735 
3736 	return;
3737 }
3738 
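/*
 * Watchdog callout: runs every mfi_cmd_timeout seconds, logs busy
 * commands that have exceeded the timeout, and rearms itself.  Timed
 * out commands are reported but never forced to fail; see below.
 */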
3739 static void
3740 mfi_timeout(void *data)
3741 {
3742 	struct mfi_softc *sc = (struct mfi_softc *)data;
3743 	struct mfi_command *cm, *tmp;
3744 	time_t deadline;
3745 	int timedout = 0;
3746 
3747 	deadline = time_uptime - mfi_cmd_timeout;
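	/*
	 * If no adapter reset is in progress, give the Thunderbolt reset
	 * handler first crack; when mfi_tbolt_reset() returns zero there
	 * is nothing more to do this tick except rearm the watchdog.
	 */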
3748 	if (sc->adpreset == 0) {
3749 		if (!mfi_tbolt_reset(sc)) {
3750 			callout_reset(&sc->mfi_watchdog_callout,
3751 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3752 			return;
3753 		}
3754 	}
3755 	mtx_lock(&sc->mfi_io_lock);
3756 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
3757 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3758 			continue;
3759 		if (cm->cm_timestamp <= deadline) {
3760 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3761 				cm->cm_timestamp = time_uptime;
3762 			} else {
3763 				device_printf(sc->mfi_dev,
3764 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3765 				     cm, (int)(time_uptime - cm->cm_timestamp)
3766 				     );
3767 				MFI_PRINT_CMD(cm);
3768 				MFI_VALIDATE_CMD(sc, cm);
3769 				/*
3770 				 * While commands can get stuck forever, we
3771 				 * do not fail them, as there is no way to
3772 				 * tell whether the controller has actually
3773 				 * processed them.
3774 				 *
3775 				 * In addition, it is very likely that
3776 				 * force-failing a command here would cause
3777 				 * a panic, e.g. in UFS.
3778 				 */
3779 				timedout++;
3780 			}
3781 		}
3782 	}
3783 
3784 #if 0
3785 	if (timedout)
3786 		MFI_DUMP_CMDS(sc);
3787 #endif
3788 
3789 	mtx_unlock(&sc->mfi_io_lock);
3790 
3791 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3792 	    mfi_timeout, sc);
3793 
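	/* Debug hook: change the 0 to 1 to dump every adapter's queue. */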
3794 	if (0)
3795 		mfi_dump_all();
3796 	return;
3797 }
3798