xref: /freebsd/sys/dev/mfi/mfi.c (revision a3557ef0)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause
3  *
4  * Copyright (c) 2006 IronPort Systems
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2007 LSI Corp.
30  * Copyright (c) 2007 Rajesh Prabhakaran.
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce the above copyright
39  *    notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52  * SUCH DAMAGE.
53  */
54 
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
57 
58 #include "opt_mfi.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/sysctl.h>
63 #include <sys/malloc.h>
64 #include <sys/kernel.h>
65 #include <sys/poll.h>
66 #include <sys/selinfo.h>
67 #include <sys/bus.h>
68 #include <sys/conf.h>
69 #include <sys/eventhandler.h>
70 #include <sys/rman.h>
71 #include <sys/bio.h>
72 #include <sys/ioccom.h>
73 #include <sys/uio.h>
74 #include <sys/proc.h>
75 #include <sys/signalvar.h>
76 #include <sys/sysent.h>
77 #include <sys/taskqueue.h>
78 
79 #include <machine/bus.h>
80 #include <machine/resource.h>
81 
82 #include <dev/mfi/mfireg.h>
83 #include <dev/mfi/mfi_ioctl.h>
84 #include <dev/mfi/mfivar.h>
85 #include <sys/interrupt.h>
86 #include <sys/priority.h>
87 
88 static int	mfi_alloc_commands(struct mfi_softc *);
89 static int	mfi_comms_init(struct mfi_softc *);
90 static int	mfi_get_controller_info(struct mfi_softc *);
91 static int	mfi_get_log_state(struct mfi_softc *,
92 		    struct mfi_evt_log_state **);
93 static int	mfi_parse_entries(struct mfi_softc *, int, int);
94 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
95 static void	mfi_startup(void *arg);
96 static void	mfi_intr(void *arg);
97 static void	mfi_ldprobe(struct mfi_softc *sc);
98 static void	mfi_syspdprobe(struct mfi_softc *sc);
99 static void	mfi_handle_evt(void *context, int pending);
100 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
101 static void	mfi_aen_complete(struct mfi_command *);
102 static int	mfi_add_ld(struct mfi_softc *sc, int);
103 static void	mfi_add_ld_complete(struct mfi_command *);
104 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
105 static void	mfi_add_sys_pd_complete(struct mfi_command *);
106 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
107 static void	mfi_bio_complete(struct mfi_command *);
108 static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
109 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
110 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
111 static int	mfi_std_send_frame(struct mfi_softc *, struct mfi_command *);
112 static int	mfi_abort(struct mfi_softc *, struct mfi_command **);
113 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int, struct thread *);
114 static void	mfi_timeout(void *);
115 static int	mfi_user_command(struct mfi_softc *,
116 		    struct mfi_ioc_passthru *);
117 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
118 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
119 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
120 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
121 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
122 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
123 static void 	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
124 		    uint32_t frame_cnt);
125 static void 	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
126 		    uint32_t frame_cnt);
127 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
128 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
129 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
130 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
131 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
132 
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
134     "MFI driver parameters");
135 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RWTUN, &mfi_event_locale,
137            0, "event message locale");
138 
139 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RWTUN, &mfi_event_class,
141            0, "event message class");
142 
143 static int	mfi_max_cmds = 128;
144 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RDTUN, &mfi_max_cmds,
145 	   0, "Max commands limit (-1 = controller limit)");
146 
147 static int	mfi_detect_jbod_change = 1;
148 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RWTUN,
149 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
150 
151 int		mfi_polled_cmd_timeout = MFI_POLL_TIMEOUT_SECS;
152 SYSCTL_INT(_hw_mfi, OID_AUTO, polled_cmd_timeout, CTLFLAG_RWTUN,
153 	   &mfi_polled_cmd_timeout, 0,
154 	   "Polled command timeout - used for firmware flash etc (in seconds)");
155 
156 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
157 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RWTUN, &mfi_cmd_timeout,
158 	   0, "Command timeout (in seconds)");
159 
160 /* Management interface */
161 static d_open_t		mfi_open;
162 static d_close_t	mfi_close;
163 static d_ioctl_t	mfi_ioctl;
164 static d_poll_t		mfi_poll;
165 
166 static struct cdevsw mfi_cdevsw = {
167 	.d_version = 	D_VERSION,
168 	.d_flags =	0,
169 	.d_open = 	mfi_open,
170 	.d_close =	mfi_close,
171 	.d_ioctl =	mfi_ioctl,
172 	.d_poll =	mfi_poll,
173 	.d_name =	"mfi",
174 };
175 
176 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
177 
178 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
179 struct mfi_skinny_dma_info mfi_skinny;
180 
181 static void
182 mfi_enable_intr_xscale(struct mfi_softc *sc)
183 {
184 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
185 }
186 
187 static void
188 mfi_enable_intr_ppc(struct mfi_softc *sc)
189 {
190 	if (sc->mfi_flags & MFI_FLAGS_1078) {
191 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
192 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
193 	}
194 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
195 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
196 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
197 	}
198 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
200 	}
201 }
202 
203 static int32_t
204 mfi_read_fw_status_xscale(struct mfi_softc *sc)
205 {
206 	return MFI_READ4(sc, MFI_OMSG0);
207 }
208 
209 static int32_t
210 mfi_read_fw_status_ppc(struct mfi_softc *sc)
211 {
212 	return MFI_READ4(sc, MFI_OSP0);
213 }
214 
215 static int
216 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
217 {
218 	int32_t status;
219 
220 	status = MFI_READ4(sc, MFI_OSTS);
221 	if ((status & MFI_OSTS_INTR_VALID) == 0)
222 		return 1;
223 
224 	MFI_WRITE4(sc, MFI_OSTS, status);
225 	return 0;
226 }
227 
228 static int
229 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
230 {
231 	int32_t status;
232 
233 	status = MFI_READ4(sc, MFI_OSTS);
234 	if (sc->mfi_flags & MFI_FLAGS_1078) {
235 		if (!(status & MFI_1078_RM)) {
236 			return 1;
237 		}
238 	}
239 	else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
240 		if (!(status & MFI_GEN2_RM)) {
241 			return 1;
242 		}
243 	}
244 	else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
245 		if (!(status & MFI_SKINNY_RM)) {
246 			return 1;
247 		}
248 	}
249 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
250 		MFI_WRITE4(sc, MFI_OSTS, status);
251 	else
252 		MFI_WRITE4(sc, MFI_ODCR0, status);
253 	return 0;
254 }
255 
256 static void
257 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
258 {
259 	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
260 }
261 
262 static void
263 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
264 {
265 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
266 	    MFI_WRITE4(sc, MFI_IQPL, (bus_add | (frame_cnt << 1)) | 1);
267 	    MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
268 	} else {
269 	    MFI_WRITE4(sc, MFI_IQP, (bus_add | (frame_cnt << 1)) | 1);
270 	}
271 }
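/*
 * Illustration (not driver code): command frames are 64-byte aligned (see
 * the frame DMA tag below), so the low address bits are free to carry
 * metadata.  On xscale the frame address is shifted right by 3 and the
 * frame count occupies the freed low bits; on ppc/skinny the raw address
 * is used, the frame count sits in bits 1 and up, and bit 0 appears to
 * act as a "valid" flag.  For example, a frame at bus address 0x1000 with
 * a frame count of 2 would be posted as (0x1000 >> 3) | 2 = 0x202 on
 * xscale and as 0x1000 | (2 << 1) | 1 = 0x1005 on ppc.
 */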
272 
273 int
274 mfi_transition_firmware(struct mfi_softc *sc)
275 {
276 	uint32_t fw_state, cur_state;
277 	int max_wait, i;
278 	uint32_t cur_abs_reg_val = 0;
279 	uint32_t prev_abs_reg_val = 0;
280 
281 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
282 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
283 	while (fw_state != MFI_FWSTATE_READY) {
284 		if (bootverbose)
285 			device_printf(sc->mfi_dev, "Waiting for firmware to "
286 			    "become ready\n");
287 		cur_state = fw_state;
288 		switch (fw_state) {
289 		case MFI_FWSTATE_FAULT:
290 			device_printf(sc->mfi_dev, "Firmware fault\n");
291 			return (ENXIO);
292 		case MFI_FWSTATE_WAIT_HANDSHAKE:
293 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
294 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
295 			else
296 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
297 			max_wait = MFI_RESET_WAIT_TIME;
298 			break;
299 		case MFI_FWSTATE_OPERATIONAL:
300 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
301 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
302 			else
303 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
304 			max_wait = MFI_RESET_WAIT_TIME;
305 			break;
306 		case MFI_FWSTATE_UNDEFINED:
307 		case MFI_FWSTATE_BB_INIT:
308 			max_wait = MFI_RESET_WAIT_TIME;
309 			break;
310 		case MFI_FWSTATE_FW_INIT_2:
311 			max_wait = MFI_RESET_WAIT_TIME;
312 			break;
313 		case MFI_FWSTATE_FW_INIT:
314 		case MFI_FWSTATE_FLUSH_CACHE:
315 			max_wait = MFI_RESET_WAIT_TIME;
316 			break;
317 		case MFI_FWSTATE_DEVICE_SCAN:
318 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
319 			prev_abs_reg_val = cur_abs_reg_val;
320 			break;
321 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
322 			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
323 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
324 			else
325 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
326 			max_wait = MFI_RESET_WAIT_TIME;
327 			break;
328 		default:
329 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
330 			    fw_state);
331 			return (ENXIO);
332 		}
333 		for (i = 0; i < (max_wait * 10); i++) {
334 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
335 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
336 			if (fw_state == cur_state)
337 				DELAY(100000);
338 			else
339 				break;
340 		}
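		/*
		 * Illustrative timing: the loop above polls up to
		 * max_wait * 10 times with a 100 ms DELAY between reads of
		 * an unchanged state, so each state transition is given
		 * roughly max_wait seconds to complete.
		 */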
341 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
342 			/* Check the device scanning progress */
343 			if (prev_abs_reg_val != cur_abs_reg_val) {
344 				continue;
345 			}
346 		}
347 		if (fw_state == cur_state) {
348 			device_printf(sc->mfi_dev, "Firmware stuck in state "
349 			    "%#x\n", fw_state);
350 			return (ENXIO);
351 		}
352 	}
353 	return (0);
354 }
355 
356 static void
357 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
358 {
359 	bus_addr_t *addr;
360 
361 	addr = arg;
362 	*addr = segs[0].ds_addr;
363 }
364 
365 
366 int
367 mfi_attach(struct mfi_softc *sc)
368 {
369 	uint32_t status;
370 	int error, commsz, framessz, sensesz;
371 	int frames, unit, max_fw_sge, max_fw_cmds;
372 	uint32_t tb_mem_size = 0;
373 	struct cdev *dev_t;
374 
375 	if (sc == NULL)
376 		return EINVAL;
377 
378 	device_printf(sc->mfi_dev, "MegaRAID SAS driver Ver %s\n",
379 	    MEGASAS_VERSION);
380 
381 	mtx_init(&sc->mfi_io_lock, "MFI I/O lock", NULL, MTX_DEF);
382 	sx_init(&sc->mfi_config_lock, "MFI config");
383 	TAILQ_INIT(&sc->mfi_ld_tqh);
384 	TAILQ_INIT(&sc->mfi_syspd_tqh);
385 	TAILQ_INIT(&sc->mfi_ld_pend_tqh);
386 	TAILQ_INIT(&sc->mfi_syspd_pend_tqh);
387 	TAILQ_INIT(&sc->mfi_evt_queue);
388 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
389 	TASK_INIT(&sc->mfi_map_sync_task, 0, mfi_handle_map_sync, sc);
390 	TAILQ_INIT(&sc->mfi_aen_pids);
391 	TAILQ_INIT(&sc->mfi_cam_ccbq);
392 
393 	mfi_initq_free(sc);
394 	mfi_initq_ready(sc);
395 	mfi_initq_busy(sc);
396 	mfi_initq_bio(sc);
397 
398 	sc->adpreset = 0;
399 	sc->last_seq_num = 0;
400 	sc->disableOnlineCtrlReset = 1;
401 	sc->issuepend_done = 1;
402 	sc->hw_crit_error = 0;
403 
404 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
405 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
406 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
407 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
408 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
409 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
410 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
411 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
412 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
413 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
414 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
415 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
416 		sc->mfi_tbolt = 1;
417 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
418 	} else {
419 		sc->mfi_enable_intr = mfi_enable_intr_ppc;
420 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
421 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
422 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
423 	}
424 
425 
426 	/* Before we get too far, see if the firmware is working */
427 	if ((error = mfi_transition_firmware(sc)) != 0) {
428 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
429 		    "error %d\n", error);
430 		return (ENXIO);
431 	}
432 
433 	/* Start: LSIP200113393 */
434 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
435 				1, 0,			/* algnmnt, boundary */
436 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
437 				BUS_SPACE_MAXADDR,	/* highaddr */
438 				NULL, NULL,		/* filter, filterarg */
439 				MEGASAS_MAX_NAME * sizeof(bus_addr_t),	/* maxsize */
440 				1,			/* nsegments */
441 				MEGASAS_MAX_NAME * sizeof(bus_addr_t),	/* maxsegsize */
442 				0,			/* flags */
443 				NULL, NULL,		/* lockfunc, lockarg */
444 				&sc->verbuf_h_dmat)) {
445 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
446 		return (ENOMEM);
447 	}
448 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
449 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
450 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
451 		return (ENOMEM);
452 	}
453 	bzero(sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t));
454 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
455 	    sc->verbuf, MEGASAS_MAX_NAME * sizeof(bus_addr_t),
456 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
457 	/* End: LSIP200113393 */
458 
459 	/*
460 	 * Get information needed for sizing the contiguous memory for the
461 	 * frame pool.  Size down the sgl parameter since we know that
462 	 * we will never need more than what's required for MFI_MAXPHYS.
463 	 * It would be nice if these constants were available at runtime
464 	 * instead of compile time.
465 	 */
466 	status = sc->mfi_read_fw_status(sc);
467 	max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
468 	if (mfi_max_cmds > 0 && mfi_max_cmds < max_fw_cmds) {
469 		device_printf(sc->mfi_dev, "FW MaxCmds = %d, limiting to %d\n",
470 		    max_fw_cmds, mfi_max_cmds);
471 		sc->mfi_max_fw_cmds = mfi_max_cmds;
472 	} else {
473 		sc->mfi_max_fw_cmds = max_fw_cmds;
474 	}
475 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
476 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
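	/*
	 * Worked example (illustrative values): with a 128 KiB MFI_MAXPHYS
	 * and 4 KiB pages, an unaligned maximal transfer can touch
	 * 128 / 4 + 1 = 33 pages, so mfi_max_sge is capped at 33 even if
	 * the firmware advertises more S/G elements.
	 */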
477 
478 	/* ThunderBolt support: get the contiguous memory it requires */
479 
480 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
481 		mfi_tbolt_init_globals(sc);
482 		device_printf(sc->mfi_dev, "MaxCmd = %d, Drv MaxCmd = %d, "
483 		    "MaxSgl = %d, state = %#x\n", max_fw_cmds,
484 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
485 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
486 
487 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
488 				1, 0,			/* algnmnt, boundary */
489 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 				BUS_SPACE_MAXADDR,	/* highaddr */
491 				NULL, NULL,		/* filter, filterarg */
492 				tb_mem_size,		/* maxsize */
493 				1,			/* nsegments */
494 				tb_mem_size,		/* maxsegsize */
495 				0,			/* flags */
496 				NULL, NULL,		/* lockfunc, lockarg */
497 				&sc->mfi_tb_dmat)) {
498 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
499 			return (ENOMEM);
500 		}
501 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
502 		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
503 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
504 			return (ENOMEM);
505 		}
506 		bzero(sc->request_message_pool, tb_mem_size);
507 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
508 		    sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
509 
510 		/* For ThunderBolt memory init */
511 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
512 				0x100, 0,		/* algnmnt, boundary */
513 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
514 				BUS_SPACE_MAXADDR,	/* highaddr */
515 				NULL, NULL,		/* filter, filterarg */
516 				MFI_FRAME_SIZE,		/* maxsize */
517 				1,			/* nsegments */
518 				MFI_FRAME_SIZE,		/* maxsegsize */
519 				0,			/* flags */
520 				NULL, NULL,		/* lockfunc, lockarg */
521 				&sc->mfi_tb_init_dmat)) {
522 			device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
523 			return (ENOMEM);
524 		}
525 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
526 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
527 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
528 			return (ENOMEM);
529 		}
530 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
531 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
532 		sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
533 		    &sc->mfi_tb_init_busaddr, 0);
534 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
535 		    tb_mem_size)) {
536 			device_printf(sc->mfi_dev,
537 			    "Thunderbolt pool preparation error\n");
538 			return (ENOMEM);
539 		}
540 
541 		/*
542 		 * Allocate a separate DMA mapping for the MPI2 IOC Init
543 		 * descriptor, distinct from the request and reply descriptor
544 		 * allocations, to avoid confusion later.
545 		 */
546 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
547 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
548 				1, 0,			/* algnmnt, boundary */
549 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
550 				BUS_SPACE_MAXADDR,	/* highaddr */
551 				NULL, NULL,		/* filter, filterarg */
552 				tb_mem_size,		/* maxsize */
553 				1,			/* nsegments */
554 				tb_mem_size,		/* maxsegsize */
555 				0,			/* flags */
556 				NULL, NULL,		/* lockfunc, lockarg */
557 				&sc->mfi_tb_ioc_init_dmat)) {
558 			device_printf(sc->mfi_dev,
559 			    "Cannot allocate comms DMA tag\n");
560 			return (ENOMEM);
561 		}
562 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
563 		    (void **)&sc->mfi_tb_ioc_init_desc,
564 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
565 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
566 			return (ENOMEM);
567 		}
568 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
569 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
570 		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
571 		    &sc->mfi_tb_ioc_init_busaddr, 0);
572 	}
573 	/*
574 	 * Create the dma tag for data buffers.  Used both for block I/O
575 	 * and for various internal data queries.
576 	 */
577 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
578 				1, 0,			/* algnmnt, boundary */
579 				BUS_SPACE_MAXADDR,	/* lowaddr */
580 				BUS_SPACE_MAXADDR,	/* highaddr */
581 				NULL, NULL,		/* filter, filterarg */
582 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
583 				sc->mfi_max_sge,	/* nsegments */
584 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
585 				BUS_DMA_ALLOCNOW,	/* flags */
586 				busdma_lock_mutex,	/* lockfunc */
587 				&sc->mfi_io_lock,	/* lockfuncarg */
588 				&sc->mfi_buffer_dmat)) {
589 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
590 		return (ENOMEM);
591 	}
592 
593 	/*
594 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
595 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
596 	 * entry, so the calculated size here will be 1 more than
597 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
598 	 */
599 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
600 	    sizeof(struct mfi_hwcomms);
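	/*
	 * Example (assuming mfi_max_fw_cmds == 128): this adds 128 32-bit
	 * reply slots to the one already inside struct mfi_hwcomms, giving
	 * the 129-entry ring that is later advertised to the firmware as
	 * rq_entries = mfi_max_fw_cmds + 1.
	 */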
601 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
602 				1, 0,			/* algnmnt, boundary */
603 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
604 				BUS_SPACE_MAXADDR,	/* highaddr */
605 				NULL, NULL,		/* filter, filterarg */
606 				commsz,			/* maxsize */
607 				1,			/* nsegments */
608 				commsz,			/* maxsegsize */
609 				0,			/* flags */
610 				NULL, NULL,		/* lockfunc, lockarg */
611 				&sc->mfi_comms_dmat)) {
612 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
613 		return (ENOMEM);
614 	}
615 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
616 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
617 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
618 		return (ENOMEM);
619 	}
620 	bzero(sc->mfi_comms, commsz);
621 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
622 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
623 	/*
624 	 * Allocate DMA memory for the command frames.  Keep them in the
625 	 * lower 4GB for efficiency.  Calculate the size of the commands at
626 	 * the same time; each command is one 64 byte frame plus a set of
627 	 * additional frames for holding sg lists or other data.
628 	 * The assumption here is that the SG list will start at the second
629 	 * frame and not use the unused bytes in the first frame.  While this
630 	 * isn't technically correct, it simplifies the calculation and allows
631 	 * for command frames that might be larger than an mfi_io_frame.
632 	 */
633 	if (sizeof(bus_addr_t) == 8) {
634 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
635 		sc->mfi_flags |= MFI_FLAGS_SG64;
636 	} else {
637 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
638 	}
639 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
640 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
641 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
642 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
643 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
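	/*
	 * Worked example (illustrative sizes): with 64-bit addressing and
	 * assuming sizeof(struct mfi_sg64) is 12, a 33-element S/G list
	 * gives frames = (12 * 33 - 1) / 64 + 2 = 8, so each command
	 * occupies 8 * 64 = 512 bytes and the frame pool totals
	 * 512 * mfi_max_fw_cmds bytes.
	 */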
644 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
645 				64, 0,			/* algnmnt, boundary */
646 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
647 				BUS_SPACE_MAXADDR,	/* highaddr */
648 				NULL, NULL,		/* filter, filterarg */
649 				framessz,		/* maxsize */
650 				1,			/* nsegments */
651 				framessz,		/* maxsegsize */
652 				0,			/* flags */
653 				NULL, NULL,		/* lockfunc, lockarg */
654 				&sc->mfi_frames_dmat)) {
655 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
656 		return (ENOMEM);
657 	}
658 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
659 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
660 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
661 		return (ENOMEM);
662 	}
663 	bzero(sc->mfi_frames, framessz);
664 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
665 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
666 	/*
667 	 * Allocate DMA memory for the frame sense data.  Keep them in the
668 	 * lower 4GB for efficiency
669 	 */
670 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
671 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
672 				4, 0,			/* algnmnt, boundary */
673 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
674 				BUS_SPACE_MAXADDR,	/* highaddr */
675 				NULL, NULL,		/* filter, filterarg */
676 				sensesz,		/* maxsize */
677 				1,			/* nsegments */
678 				sensesz,		/* maxsegsize */
679 				0,			/* flags */
680 				NULL, NULL,		/* lockfunc, lockarg */
681 				&sc->mfi_sense_dmat)) {
682 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
683 		return (ENOMEM);
684 	}
685 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
686 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
687 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
688 		return (ENOMEM);
689 	}
690 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
691 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
692 	if ((error = mfi_alloc_commands(sc)) != 0)
693 		return (error);
694 
695 	/* Before moving the FW to operational state, check whether
696 	 * host memory is required by the FW or not.
697 	 */
698 
699 	/* ThunderBolt MFI_IOC2 INIT */
700 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
701 		sc->mfi_disable_intr(sc);
702 		mtx_lock(&sc->mfi_io_lock);
703 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
704 			device_printf(sc->mfi_dev,
705 			    "TB Init has failed with error %d\n", error);
706 			mtx_unlock(&sc->mfi_io_lock);
707 			return error;
708 		}
709 		mtx_unlock(&sc->mfi_io_lock);
710 
711 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
712 			return error;
713 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
714 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr_tbolt, sc,
715 		    &sc->mfi_intr)) {
716 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
717 			return (EINVAL);
718 		}
719 		sc->mfi_intr_ptr = mfi_intr_tbolt;
720 		sc->mfi_enable_intr(sc);
721 	} else {
722 		if ((error = mfi_comms_init(sc)) != 0)
723 			return (error);
724 
725 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq,
726 		    INTR_MPSAFE|INTR_TYPE_BIO, NULL, mfi_intr, sc, &sc->mfi_intr)) {
727 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
728 			return (EINVAL);
729 		}
730 		sc->mfi_intr_ptr = mfi_intr;
731 		sc->mfi_enable_intr(sc);
732 	}
733 	if ((error = mfi_get_controller_info(sc)) != 0)
734 		return (error);
735 	sc->disableOnlineCtrlReset = 0;
736 
737 	/* Register a config hook to probe the bus for arrays */
738 	sc->mfi_ich.ich_func = mfi_startup;
739 	sc->mfi_ich.ich_arg = sc;
740 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
741 		device_printf(sc->mfi_dev, "Cannot establish configuration "
742 		    "hook\n");
743 		return (EINVAL);
744 	}
745 	mtx_lock(&sc->mfi_io_lock);
746 	if ((error = mfi_aen_setup(sc, 0)) != 0) {
747 		mtx_unlock(&sc->mfi_io_lock);
748 		return (error);
749 	}
750 	mtx_unlock(&sc->mfi_io_lock);
751 
752 	/*
753 	 * Register a shutdown handler.
754 	 */
755 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
756 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
757 		device_printf(sc->mfi_dev, "Warning: shutdown event "
758 		    "registration failed\n");
759 	}
760 
761 	/*
762 	 * Create the control device for doing management
763 	 */
764 	unit = device_get_unit(sc->mfi_dev);
765 	sc->mfi_cdev = make_dev(&mfi_cdevsw, unit, UID_ROOT, GID_OPERATOR,
766 	    0640, "mfi%d", unit);
767 	if (unit == 0)
768 		make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev_t,
769 		    sc->mfi_cdev, "%s", "megaraid_sas_ioctl_node");
770 	if (sc->mfi_cdev != NULL)
771 		sc->mfi_cdev->si_drv1 = sc;
772 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
773 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
774 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
775 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
776 	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->mfi_dev),
777 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->mfi_dev)),
778 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
779 	    &sc->mfi_keep_deleted_volumes, 0,
780 	    "Don't detach the mfid device for a busy volume that is deleted");
781 
782 	device_add_child(sc->mfi_dev, "mfip", -1);
783 	bus_generic_attach(sc->mfi_dev);
784 
785 	/* Start the timeout watchdog */
786 	callout_init(&sc->mfi_watchdog_callout, 1);
787 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
788 	    mfi_timeout, sc);
789 
790 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
791 		mtx_lock(&sc->mfi_io_lock);
792 		mfi_tbolt_sync_map_info(sc);
793 		mtx_unlock(&sc->mfi_io_lock);
794 	}
795 
796 	return (0);
797 }
798 
799 static int
800 mfi_alloc_commands(struct mfi_softc *sc)
801 {
802 	struct mfi_command *cm;
803 	int i, j;
804 
805 	/*
806 	 * XXX Should we allocate all the commands up front, or allocate on
807 	 * demand later like 'aac' does?
808 	 */
809 	sc->mfi_commands = malloc(sizeof(sc->mfi_commands[0]) *
810 	    sc->mfi_max_fw_cmds, M_MFIBUF, M_WAITOK | M_ZERO);
811 
812 	for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
813 		cm = &sc->mfi_commands[i];
814 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
815 		    sc->mfi_cmd_size * i);
816 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
817 		    sc->mfi_cmd_size * i;
818 		cm->cm_frame->header.context = i;
819 		cm->cm_sense = &sc->mfi_sense[i];
820 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
821 		cm->cm_sc = sc;
822 		cm->cm_index = i;
823 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
824 		    &cm->cm_dmamap) == 0) {
825 			mtx_lock(&sc->mfi_io_lock);
826 			mfi_release_command(cm);
827 			mtx_unlock(&sc->mfi_io_lock);
828 		} else {
829 			device_printf(sc->mfi_dev, "Failed to allocate %d "
830 			   "command blocks, only allocated %d\n",
831 			    sc->mfi_max_fw_cmds, i);
832 			for (j = 0; j < i; j++) {
833 				cm = &sc->mfi_commands[j];
834 				bus_dmamap_destroy(sc->mfi_buffer_dmat,
835 				    cm->cm_dmamap);
836 			}
837 			free(sc->mfi_commands, M_MFIBUF);
838 			sc->mfi_commands = NULL;
839 
840 			return (ENOMEM);
841 		}
842 	}
843 
844 	return (0);
845 }
846 
847 void
848 mfi_release_command(struct mfi_command *cm)
849 {
850 	struct mfi_frame_header *hdr;
851 	uint32_t *hdr_data;
852 
853 	mtx_assert(&cm->cm_sc->mfi_io_lock, MA_OWNED);
854 
855 	/*
856 	 * Zero out the important fields of the frame, but make sure the
857 	 * context field is preserved.  For efficiency, handle the fields
858 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
859 	 */
860 	hdr = &cm->cm_frame->header;
861 	if (cm->cm_data != NULL && hdr->sg_count) {
862 		cm->cm_sg->sg32[0].len = 0;
863 		cm->cm_sg->sg32[0].addr = 0;
864 	}
865 
866 	/*
867 	 * Command may be on other queues e.g. busy queue depending on the
868 	 * flow of a previous call to mfi_mapcmd, so ensure its dequeued
869 	 * flow of a previous call to mfi_mapcmd, so ensure it's dequeued
870 	 */
871 	if ((cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
872 		mfi_remove_busy(cm);
873 	if ((cm->cm_flags & MFI_ON_MFIQ_READY) != 0)
874 		mfi_remove_ready(cm);
875 
876 	/* We're not expecting it to be on any other queue but check */
877 	if ((cm->cm_flags & MFI_ON_MFIQ_MASK) != 0) {
878 		panic("Command %p is still on another queue, flags = %#x",
879 		    cm, cm->cm_flags);
880 	}
881 
882 	/* tbolt cleanup */
883 	if ((cm->cm_flags & MFI_CMD_TBOLT) != 0) {
884 		mfi_tbolt_return_cmd(cm->cm_sc,
885 		    cm->cm_sc->mfi_cmd_pool_tbolt[cm->cm_extra_frames - 1],
886 		    cm);
887 	}
888 
889 	hdr_data = (uint32_t *)cm->cm_frame;
890 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
891 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
892 	hdr_data[4] = 0;	/* flags, timeout */
893 	hdr_data[5] = 0;	/* data_len */
894 
895 	cm->cm_extra_frames = 0;
896 	cm->cm_flags = 0;
897 	cm->cm_complete = NULL;
898 	cm->cm_private = NULL;
899 	cm->cm_data = NULL;
900 	cm->cm_sg = NULL;
901 	cm->cm_total_frame_size = 0;
902 	cm->retry_for_fw_reset = 0;
903 
904 	mfi_enqueue_free(cm);
905 }
906 
907 int
908 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
909     uint32_t opcode, void **bufp, size_t bufsize)
910 {
911 	struct mfi_command *cm;
912 	struct mfi_dcmd_frame *dcmd;
913 	void *buf = NULL;
914 	uint32_t context = 0;
915 
916 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
917 
918 	cm = mfi_dequeue_free(sc);
919 	if (cm == NULL)
920 		return (EBUSY);
921 
922 	/* Zero out the MFI frame */
923 	context = cm->cm_frame->header.context;
924 	bzero(cm->cm_frame, sizeof(union mfi_frame));
925 	cm->cm_frame->header.context = context;
926 
927 	if ((bufsize > 0) && (bufp != NULL)) {
928 		if (*bufp == NULL) {
929 			buf = malloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
930 			if (buf == NULL) {
931 				mfi_release_command(cm);
932 				return (ENOMEM);
933 			}
934 			*bufp = buf;
935 		} else {
936 			buf = *bufp;
937 		}
938 	}
939 
940 	dcmd = &cm->cm_frame->dcmd;
941 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
942 	dcmd->header.cmd = MFI_CMD_DCMD;
943 	dcmd->header.timeout = 0;
944 	dcmd->header.flags = 0;
945 	dcmd->header.data_len = bufsize;
946 	dcmd->header.scsi_status = 0;
947 	dcmd->opcode = opcode;
948 	cm->cm_sg = &dcmd->sgl;
949 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
950 	cm->cm_flags = 0;
951 	cm->cm_data = buf;
952 	cm->cm_private = buf;
953 	cm->cm_len = bufsize;
954 
955 	*cmp = cm;
956 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
957 		*bufp = buf;
958 	return (0);
959 }
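/*
 * Typical usage sketch (cf. mfi_get_controller_info() below): a caller
 * holds mfi_io_lock, asks mfi_dcmd_command() to build the DCMD frame and
 * allocate the data buffer, sets cm_flags (e.g. MFI_CMD_DATAIN |
 * MFI_CMD_POLLED), fires the command with mfi_mapcmd(), and releases it
 * with mfi_release_command() when done.
 */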
960 
961 static int
962 mfi_comms_init(struct mfi_softc *sc)
963 {
964 	struct mfi_command *cm;
965 	struct mfi_init_frame *init;
966 	struct mfi_init_qinfo *qinfo;
967 	int error;
968 	uint32_t context = 0;
969 
970 	mtx_lock(&sc->mfi_io_lock);
971 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
972 		mtx_unlock(&sc->mfi_io_lock);
973 		return (EBUSY);
974 	}
975 
976 	/* Zero out the MFI frame */
977 	context = cm->cm_frame->header.context;
978 	bzero(cm->cm_frame, sizeof(union mfi_frame));
979 	cm->cm_frame->header.context = context;
980 
981 	/*
982 	 * Abuse the SG list area of the frame to hold the init_qinfo
983 	 * object.
984 	 */
985 	init = &cm->cm_frame->init;
986 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
987 
988 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
989 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
990 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
991 	    offsetof(struct mfi_hwcomms, hw_reply_q);
992 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
993 	    offsetof(struct mfi_hwcomms, hw_pi);
994 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
995 	    offsetof(struct mfi_hwcomms, hw_ci);
996 
997 	init->header.cmd = MFI_CMD_INIT;
998 	init->header.data_len = sizeof(struct mfi_init_qinfo);
999 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
1000 	cm->cm_data = NULL;
1001 	cm->cm_flags = MFI_CMD_POLLED;
1002 
1003 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1004 		device_printf(sc->mfi_dev, "failed to send init command\n");
1005 	mfi_release_command(cm);
1006 	mtx_unlock(&sc->mfi_io_lock);
1007 
1008 	return (error);
1009 }
1010 
1011 static int
1012 mfi_get_controller_info(struct mfi_softc *sc)
1013 {
1014 	struct mfi_command *cm = NULL;
1015 	struct mfi_ctrl_info *ci = NULL;
1016 	uint32_t max_sectors_1, max_sectors_2;
1017 	int error;
1018 
1019 	mtx_lock(&sc->mfi_io_lock);
1020 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
1021 	    (void **)&ci, sizeof(*ci));
1022 	if (error)
1023 		goto out;
1024 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1025 
1026 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1027 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
1028 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
1029 		    MFI_SECTOR_LEN;
1030 		error = 0;
1031 		goto out;
1032 	}
1033 
1034 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1035 	    BUS_DMASYNC_POSTREAD);
1036 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1037 
1038 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
1039 	max_sectors_2 = ci->max_request_size;
1040 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
1041 	sc->disableOnlineCtrlReset =
1042 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
1043 
1044 out:
1045 	if (ci)
1046 		free(ci, M_MFIBUF);
1047 	if (cm)
1048 		mfi_release_command(cm);
1049 	mtx_unlock(&sc->mfi_io_lock);
1050 	return (error);
1051 }
1052 
1053 static int
1054 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
1055 {
1056 	struct mfi_command *cm = NULL;
1057 	int error;
1058 
1059 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1060 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
1061 	    (void **)log_state, sizeof(**log_state));
1062 	if (error)
1063 		goto out;
1064 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1065 
1066 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1067 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1068 		goto out;
1069 	}
1070 
1071 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1072 	    BUS_DMASYNC_POSTREAD);
1073 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1074 
1075 out:
1076 	if (cm)
1077 		mfi_release_command(cm);
1078 
1079 	return (error);
1080 }
1081 
1082 int
1083 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1084 {
1085 	struct mfi_evt_log_state *log_state = NULL;
1086 	union mfi_evt class_locale;
1087 	int error = 0;
1088 	uint32_t seq;
1089 
1090 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1091 
1092 	class_locale.members.reserved = 0;
1093 	class_locale.members.locale = mfi_event_locale;
1094 	class_locale.members.evt_class  = mfi_event_class;
1095 
1096 	if (seq_start == 0) {
1097 		if ((error = mfi_get_log_state(sc, &log_state)) != 0)
1098 			goto out;
1099 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1100 
1101 		/*
1102 		 * Walk through any events that fired since the last
1103 		 * shutdown.
1104 		 */
1105 		if ((error = mfi_parse_entries(sc, log_state->shutdown_seq_num,
1106 		    log_state->newest_seq_num)) != 0)
1107 			goto out;
1108 		seq = log_state->newest_seq_num;
1109 	} else
1110 		seq = seq_start;
1111 	error = mfi_aen_register(sc, seq, class_locale.word);
1112 out:
1113 	free(log_state, M_MFIBUF);
1114 
1115 	return (error);
1116 }
1117 
1118 int
1119 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1120 {
1121 
1122 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1123 	cm->cm_complete = NULL;
1124 
1125 	/*
1126 	 * MegaCli can issue a DCMD with opcode 0.  In this case, do nothing
1127 	 * and return MFI_STAT_OK as the status.
1128 	 */
1129 	if (cm->cm_frame->dcmd.opcode == 0) {
1130 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1131 		cm->cm_error = 0;
1132 		return (cm->cm_error);
1133 	}
1134 	mfi_enqueue_ready(cm);
1135 	mfi_startio(sc);
1136 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1137 		msleep(cm, &sc->mfi_io_lock, PRIBIO, "mfiwait", 0);
1138 	return (cm->cm_error);
1139 }
1140 
1141 void
1142 mfi_free(struct mfi_softc *sc)
1143 {
1144 	struct mfi_command *cm;
1145 	int i;
1146 
1147 	callout_drain(&sc->mfi_watchdog_callout);
1148 
1149 	if (sc->mfi_cdev != NULL)
1150 		destroy_dev(sc->mfi_cdev);
1151 
1152 	if (sc->mfi_commands != NULL) {
1153 		for (i = 0; i < sc->mfi_max_fw_cmds; i++) {
1154 			cm = &sc->mfi_commands[i];
1155 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1156 		}
1157 		free(sc->mfi_commands, M_MFIBUF);
1158 		sc->mfi_commands = NULL;
1159 	}
1160 
1161 	if (sc->mfi_intr)
1162 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1163 	if (sc->mfi_irq != NULL)
1164 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1165 		    sc->mfi_irq);
1166 
1167 	if (sc->mfi_sense_busaddr != 0)
1168 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1169 	if (sc->mfi_sense != NULL)
1170 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1171 		    sc->mfi_sense_dmamap);
1172 	if (sc->mfi_sense_dmat != NULL)
1173 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1174 
1175 	if (sc->mfi_frames_busaddr != 0)
1176 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1177 	if (sc->mfi_frames != NULL)
1178 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1179 		    sc->mfi_frames_dmamap);
1180 	if (sc->mfi_frames_dmat != NULL)
1181 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1182 
1183 	if (sc->mfi_comms_busaddr != 0)
1184 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1185 	if (sc->mfi_comms != NULL)
1186 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1187 		    sc->mfi_comms_dmamap);
1188 	if (sc->mfi_comms_dmat != NULL)
1189 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1190 
1191 	/* ThunderBolt contiguous memory free here */
1192 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1193 		if (sc->mfi_tb_busaddr != 0)
1194 			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1195 		if (sc->request_message_pool != NULL)
1196 			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1197 			    sc->mfi_tb_dmamap);
1198 		if (sc->mfi_tb_dmat != NULL)
1199 			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1200 
1201 		/* Version buffer memory free */
1202 		/* Start LSIP200113393 */
1203 		if (sc->verbuf_h_busaddr != 0)
1204 			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1205 		if (sc->verbuf != NULL)
1206 			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1207 			    sc->verbuf_h_dmamap);
1208 		if (sc->verbuf_h_dmat != NULL)
1209 			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1210 
1211 		/* End LSIP200113393 */
1212 		/* ThunderBolt INIT packet memory Free */
1213 		if (sc->mfi_tb_init_busaddr != 0)
1214 			bus_dmamap_unload(sc->mfi_tb_init_dmat,
1215 			    sc->mfi_tb_init_dmamap);
1216 		if (sc->mfi_tb_init != NULL)
1217 			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1218 			    sc->mfi_tb_init_dmamap);
1219 		if (sc->mfi_tb_init_dmat != NULL)
1220 			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1221 
1222 		/* ThunderBolt IOC Init Desc memory free here */
1223 		if (sc->mfi_tb_ioc_init_busaddr != 0)
1224 			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1225 			    sc->mfi_tb_ioc_init_dmamap);
1226 		if (sc->mfi_tb_ioc_init_desc != NULL)
1227 			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1228 			    sc->mfi_tb_ioc_init_desc,
1229 			    sc->mfi_tb_ioc_init_dmamap);
1230 		if (sc->mfi_tb_ioc_init_dmat != NULL)
1231 			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1232 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1233 			for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1234 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1235 					free(sc->mfi_cmd_pool_tbolt[i],
1236 					    M_MFIBUF);
1237 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1238 				}
1239 			}
1240 			free(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1241 			sc->mfi_cmd_pool_tbolt = NULL;
1242 		}
1243 		if (sc->request_desc_pool != NULL) {
1244 			free(sc->request_desc_pool, M_MFIBUF);
1245 			sc->request_desc_pool = NULL;
1246 		}
1247 	}
1248 	if (sc->mfi_buffer_dmat != NULL)
1249 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1250 	if (sc->mfi_parent_dmat != NULL)
1251 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1252 
1253 	if (mtx_initialized(&sc->mfi_io_lock)) {
1254 		mtx_destroy(&sc->mfi_io_lock);
1255 		sx_destroy(&sc->mfi_config_lock);
1256 	}
1257 
1258 	return;
1259 }
1260 
1261 static void
1262 mfi_startup(void *arg)
1263 {
1264 	struct mfi_softc *sc;
1265 
1266 	sc = (struct mfi_softc *)arg;
1267 
1268 	sc->mfi_enable_intr(sc);
1269 	sx_xlock(&sc->mfi_config_lock);
1270 	mtx_lock(&sc->mfi_io_lock);
1271 	mfi_ldprobe(sc);
1272 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1273 		mfi_syspdprobe(sc);
1274 	mtx_unlock(&sc->mfi_io_lock);
1275 	sx_xunlock(&sc->mfi_config_lock);
1276 
1277 	config_intrhook_disestablish(&sc->mfi_ich);
1278 }
1279 
1280 static void
1281 mfi_intr(void *arg)
1282 {
1283 	struct mfi_softc *sc;
1284 	struct mfi_command *cm;
1285 	uint32_t pi, ci, context;
1286 
1287 	sc = (struct mfi_softc *)arg;
1288 
1289 	if (sc->mfi_check_clear_intr(sc))
1290 		return;
1291 
1292 restart:
1293 	pi = sc->mfi_comms->hw_pi;
1294 	ci = sc->mfi_comms->hw_ci;
1295 	mtx_lock(&sc->mfi_io_lock);
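	/*
	 * Note (illustrative): the reply queue is a ring of
	 * mfi_max_fw_cmds + 1 slots.  The firmware advances the producer
	 * index (hw_pi) as it posts completed contexts; the driver advances
	 * the consumer index (hw_ci) as it reaps them, with both wrapping
	 * back to slot 0 past the last entry.
	 */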
1296 	while (ci != pi) {
1297 		context = sc->mfi_comms->hw_reply_q[ci];
1298 		if (context < sc->mfi_max_fw_cmds) {
1299 			cm = &sc->mfi_commands[context];
1300 			mfi_remove_busy(cm);
1301 			cm->cm_error = 0;
1302 			mfi_complete(sc, cm);
1303 		}
1304 		if (++ci == (sc->mfi_max_fw_cmds + 1))
1305 			ci = 0;
1306 	}
1307 
1308 	sc->mfi_comms->hw_ci = ci;
1309 
1310 	/* Give deferred I/O a chance to run */
1311 	sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1312 	mfi_startio(sc);
1313 	mtx_unlock(&sc->mfi_io_lock);
1314 
1315 	/*
1316 	 * Dummy read to flush the bus; this ensures that the indexes are up
1317 	 * to date.  Restart processing if more commands have come in.
1318 	 */
1319 	(void)sc->mfi_read_fw_status(sc);
1320 	if (pi != sc->mfi_comms->hw_pi)
1321 		goto restart;
1322 
1323 	return;
1324 }
1325 
1326 int
1327 mfi_shutdown(struct mfi_softc *sc)
1328 {
1329 	struct mfi_dcmd_frame *dcmd;
1330 	struct mfi_command *cm;
1331 	int error;
1332 
1333 
1334 	if (sc->mfi_aen_cm != NULL) {
1335 		sc->cm_aen_abort = 1;
1336 		mfi_abort(sc, &sc->mfi_aen_cm);
1337 	}
1338 
1339 	if (sc->mfi_map_sync_cm != NULL) {
1340 		sc->cm_map_abort = 1;
1341 		mfi_abort(sc, &sc->mfi_map_sync_cm);
1342 	}
1343 
1344 	mtx_lock(&sc->mfi_io_lock);
1345 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1346 	if (error) {
1347 		mtx_unlock(&sc->mfi_io_lock);
1348 		return (error);
1349 	}
1350 
1351 	dcmd = &cm->cm_frame->dcmd;
1352 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1353 	cm->cm_flags = MFI_CMD_POLLED;
1354 	cm->cm_data = NULL;
1355 
1356 	if ((error = mfi_mapcmd(sc, cm)) != 0)
1357 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1358 
1359 	mfi_release_command(cm);
1360 	mtx_unlock(&sc->mfi_io_lock);
1361 	return (error);
1362 }
1363 
1364 static void
1365 mfi_syspdprobe(struct mfi_softc *sc)
1366 {
1367 	struct mfi_frame_header *hdr;
1368 	struct mfi_command *cm = NULL;
1369 	struct mfi_pd_list *pdlist = NULL;
1370 	struct mfi_system_pd *syspd, *tmp;
1371 	struct mfi_system_pending *syspd_pend;
1372 	int error, i, found;
1373 
1374 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1375 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1376 	/* Add SYSTEM PDs */
1377 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1378 	    (void **)&pdlist, sizeof(*pdlist));
1379 	if (error) {
1380 		device_printf(sc->mfi_dev,
1381 		    "Error while forming SYSTEM PD list\n");
1382 		goto out;
1383 	}
1384 
1385 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1386 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1387 	cm->cm_frame->dcmd.mbox[1] = 0;
1388 	if (mfi_mapcmd(sc, cm) != 0) {
1389 		device_printf(sc->mfi_dev,
1390 		    "Failed to get syspd device listing\n");
1391 		goto out;
1392 	}
1393 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1394 	    BUS_DMASYNC_POSTREAD);
1395 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1396 	hdr = &cm->cm_frame->header;
1397 	if (hdr->cmd_status != MFI_STAT_OK) {
1398 		device_printf(sc->mfi_dev,
1399 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1400 		goto out;
1401 	}
1402 	/* Get each PD and add it to the system */
1403 	for (i = 0; i < pdlist->count; i++) {
1404 		if (pdlist->addr[i].device_id ==
1405 		    pdlist->addr[i].encl_device_id)
1406 			continue;
1407 		found = 0;
1408 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1409 			if (syspd->pd_id == pdlist->addr[i].device_id)
1410 				found = 1;
1411 		}
1412 		TAILQ_FOREACH(syspd_pend, &sc->mfi_syspd_pend_tqh, pd_link) {
1413 			if (syspd_pend->pd_id == pdlist->addr[i].device_id)
1414 				found = 1;
1415 		}
1416 		if (found == 0)
1417 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1418 	}
1419 	/* Delete SYSPDs whose state has been changed */
1420 	TAILQ_FOREACH_SAFE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1421 		found = 0;
1422 		for (i = 0; i < pdlist->count; i++) {
1423 			if (syspd->pd_id == pdlist->addr[i].device_id) {
1424 				found = 1;
1425 				break;
1426 			}
1427 		}
1428 		if (found == 0) {
1429 			device_printf(sc->mfi_dev, "Deleting stale SYSPD %d\n", syspd->pd_id);
1430 			mtx_unlock(&sc->mfi_io_lock);
1431 			mtx_lock(&Giant);
1432 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1433 			mtx_unlock(&Giant);
1434 			mtx_lock(&sc->mfi_io_lock);
1435 		}
1436 	}
1437 out:
1438 	if (pdlist)
1439 		free(pdlist, M_MFIBUF);
1440 	if (cm)
1441 		mfi_release_command(cm);
1442 
1443 	return;
1444 }
1445 
1446 static void
1447 mfi_ldprobe(struct mfi_softc *sc)
1448 {
1449 	struct mfi_frame_header *hdr;
1450 	struct mfi_command *cm = NULL;
1451 	struct mfi_ld_list *list = NULL;
1452 	struct mfi_disk *ld;
1453 	struct mfi_disk_pending *ld_pend;
1454 	int error, i;
1455 
1456 	sx_assert(&sc->mfi_config_lock, SA_XLOCKED);
1457 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1458 
1459 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1460 	    (void **)&list, sizeof(*list));
1461 	if (error)
1462 		goto out;
1463 
1464 	cm->cm_flags = MFI_CMD_DATAIN;
1465 	if (mfi_wait_command(sc, cm) != 0) {
1466 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1467 		goto out;
1468 	}
1469 
1470 	hdr = &cm->cm_frame->header;
1471 	if (hdr->cmd_status != MFI_STAT_OK) {
1472 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1473 		    hdr->cmd_status);
1474 		goto out;
1475 	}
1476 
1477 	for (i = 0; i < list->ld_count; i++) {
1478 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1479 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1480 				goto skip_add;
1481 		}
1482 		TAILQ_FOREACH(ld_pend, &sc->mfi_ld_pend_tqh, ld_link) {
1483 			if (ld_pend->ld_id == list->ld_list[i].ld.v.target_id)
1484 				goto skip_add;
1485 		}
1486 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1487 	skip_add:;
1488 	}
1489 out:
1490 	if (list)
1491 		free(list, M_MFIBUF);
1492 	if (cm)
1493 		mfi_release_command(cm);
1494 
1495 	return;
1496 }
1497 
1498 /*
1499  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1500  * bits 24-31 are all set, then it is the number of seconds since
1501  * boot.
1502  */
1503 static const char *
1504 format_timestamp(uint32_t timestamp)
1505 {
1506 	static char buffer[32];
1507 
1508 	if ((timestamp & 0xff000000) == 0xff000000)
1509 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1510 		    0x00ffffff);
1511 	else
1512 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
1513 	return (buffer);
1514 }
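/*
 * For example (per the encoding above), a timestamp of 0xff00003c formats
 * as "boot + 60s" because bits 24-31 are all set and the low 24 bits hold
 * 0x3c = 60, while a plain value of 60 formats as "60s".
 */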
1515 
1516 static const char *
1517 format_class(int8_t class)
1518 {
1519 	static char buffer[6];
1520 
1521 	switch (class) {
1522 	case MFI_EVT_CLASS_DEBUG:
1523 		return ("debug");
1524 	case MFI_EVT_CLASS_PROGRESS:
1525 		return ("progress");
1526 	case MFI_EVT_CLASS_INFO:
1527 		return ("info");
1528 	case MFI_EVT_CLASS_WARNING:
1529 		return ("WARN");
1530 	case MFI_EVT_CLASS_CRITICAL:
1531 		return ("CRIT");
1532 	case MFI_EVT_CLASS_FATAL:
1533 		return ("FATAL");
1534 	case MFI_EVT_CLASS_DEAD:
1535 		return ("DEAD");
1536 	default:
1537 		snprintf(buffer, sizeof(buffer), "%d", class);
1538 		return (buffer);
1539 	}
1540 }
1541 
1542 static void
1543 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1544 {
1545 	struct mfi_system_pd *syspd = NULL;
1546 
1547 	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1548 	    format_timestamp(detail->time), detail->evt_class.members.locale,
1549 	    format_class(detail->evt_class.members.evt_class),
1550 	    detail->description);
1551 
1552 	/* Don't act on old AENs or while shutting down */
1553 	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1554 		return;
1555 
1556 	switch (detail->arg_type) {
1557 	case MR_EVT_ARGS_NONE:
1558 		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1559 			device_printf(sc->mfi_dev, "HostBus scan raised\n");
1560 			if (mfi_detect_jbod_change) {
1561 				/*
1562 				 * Probe for new SYSPD's and Delete
1563 				 * invalid SYSPD's
1564 				 */
1565 				sx_xlock(&sc->mfi_config_lock);
1566 				mtx_lock(&sc->mfi_io_lock);
1567 				mfi_syspdprobe(sc);
1568 				mtx_unlock(&sc->mfi_io_lock);
1569 				sx_xunlock(&sc->mfi_config_lock);
1570 			}
1571 		}
1572 		break;
1573 	case MR_EVT_ARGS_LD_STATE:
1574 		/* At load time the driver replays every event logged since
1575 		 * the last shutdown, so take care not to act on these
1576 		 * stale events.
1577 		 */
1578 		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
1579 			/* Remove the LD */
1580 			struct mfi_disk *ld;
1581 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1582 				if (ld->ld_id ==
1583 				    detail->args.ld_state.ld.target_id)
1584 					break;
1585 			}
1586 			/*
1587 			 * KASSERT(ld != NULL, ("volume disappeared")) was
1588 			 * disabled to fix kernel panics when an SSCD is removed.
1589 			*/
1590 			if (ld != NULL) {
1591 				mtx_lock(&Giant);
1592 				device_delete_child(sc->mfi_dev, ld->ld_dev);
1593 				mtx_unlock(&Giant);
1594 			}
1595 		}
1596 		break;
1597 	case MR_EVT_ARGS_PD:
1598 		if (detail->code == MR_EVT_PD_REMOVED) {
1599 			if (mfi_detect_jbod_change) {
1600 				/*
1601 				 * If the removed device is a SYSPD then
1602 				 * delete it
1603 				 */
1604 				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1605 				    pd_link) {
1606 					if (syspd->pd_id ==
1607 					    detail->args.pd.device_id) {
1608 						mtx_lock(&Giant);
1609 						device_delete_child(
1610 						    sc->mfi_dev,
1611 						    syspd->pd_dev);
1612 						mtx_unlock(&Giant);
1613 						break;
1614 					}
1615 				}
1616 			}
1617 		}
1618 		if (detail->code == MR_EVT_PD_INSERTED) {
1619 			if (mfi_detect_jbod_change) {
1620 				/* Probe for new SYSPD's */
1621 				sx_xlock(&sc->mfi_config_lock);
1622 				mtx_lock(&sc->mfi_io_lock);
1623 				mfi_syspdprobe(sc);
1624 				mtx_unlock(&sc->mfi_io_lock);
1625 				sx_xunlock(&sc->mfi_config_lock);
1626 			}
1627 		}
1628 		if (sc->mfi_cam_rescan_cb != NULL &&
1629 		    (detail->code == MR_EVT_PD_INSERTED ||
1630 		    detail->code == MR_EVT_PD_REMOVED)) {
1631 			sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
1632 		}
1633 		break;
1634 	}
1635 }
1636 
1637 static void
1638 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1639 {
1640 	struct mfi_evt_queue_elm *elm;
1641 
1642 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1643 	elm = malloc(sizeof(*elm), M_MFIBUF, M_NOWAIT|M_ZERO);
1644 	if (elm == NULL)
1645 		return;
1646 	memcpy(&elm->detail, detail, sizeof(*detail));
1647 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1648 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1649 }
1650 
1651 static void
1652 mfi_handle_evt(void *context, int pending)
1653 {
1654 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1655 	struct mfi_softc *sc;
1656 	struct mfi_evt_queue_elm *elm;
1657 
1658 	sc = context;
1659 	TAILQ_INIT(&queue);
1660 	mtx_lock(&sc->mfi_io_lock);
1661 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1662 	mtx_unlock(&sc->mfi_io_lock);
1663 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1664 		TAILQ_REMOVE(&queue, elm, link);
1665 		mfi_decode_evt(sc, &elm->detail);
1666 		free(elm, M_MFIBUF);
1667 	}
1668 }
1669 
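/*
 * Register for asynchronous event notifications with an
 * MFI_DCMD_CTRL_EVENT_WAIT command.  If an AEN command is already
 * outstanding and its class/locale covers the new request, it is left
 * alone; otherwise it is aborted and reissued with the merged
 * class/locale.
 */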
1670 static int
1671 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1672 {
1673 	struct mfi_command *cm;
1674 	struct mfi_dcmd_frame *dcmd;
1675 	union mfi_evt current_aen, prior_aen;
1676 	struct mfi_evt_detail *ed = NULL;
1677 	int error = 0;
1678 
1679 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1680 
1681 	current_aen.word = locale;
1682 	if (sc->mfi_aen_cm != NULL) {
1683 		prior_aen.word =
1684 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1685 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1686 		    !((prior_aen.members.locale & current_aen.members.locale)
1687 		    ^current_aen.members.locale)) {
1688 			return (0);
1689 		} else {
1690 			prior_aen.members.locale |= current_aen.members.locale;
1691 			if (prior_aen.members.evt_class
1692 			    < current_aen.members.evt_class)
1693 				current_aen.members.evt_class =
1694 				    prior_aen.members.evt_class;
1695 			mfi_abort(sc, &sc->mfi_aen_cm);
1696 		}
1697 	}
1698 
1699 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1700 	    (void **)&ed, sizeof(*ed));
1701 	if (error)
1702 		goto out;
1703 
1704 	dcmd = &cm->cm_frame->dcmd;
1705 	((uint32_t *)&dcmd->mbox)[0] = seq;
1706 	((uint32_t *)&dcmd->mbox)[1] = locale;
1707 	cm->cm_flags = MFI_CMD_DATAIN;
1708 	cm->cm_complete = mfi_aen_complete;
1709 
1710 	sc->last_seq_num = seq;
1711 	sc->mfi_aen_cm = cm;
1712 
1713 	mfi_enqueue_ready(cm);
1714 	mfi_startio(sc);
1715 
1716 out:
1717 	return (error);
1718 }
1719 
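/*
 * Completion handler for the AEN command: queue the received event,
 * wake any poll(2) waiters, send SIGIO to registered processes, and
 * re-arm the AEN with the next sequence number unless it was aborted.
 */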
1720 static void
1721 mfi_aen_complete(struct mfi_command *cm)
1722 {
1723 	struct mfi_frame_header *hdr;
1724 	struct mfi_softc *sc;
1725 	struct mfi_evt_detail *detail;
1726 	struct mfi_aen *mfi_aen_entry, *tmp;
1727 	int seq = 0, aborted = 0;
1728 
1729 	sc = cm->cm_sc;
1730 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1731 
1732 	if (sc->mfi_aen_cm == NULL)
1733 		return;
1734 
1735 	hdr = &cm->cm_frame->header;
1736 
1737 	if (sc->cm_aen_abort ||
1738 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1739 		sc->cm_aen_abort = 0;
1740 		aborted = 1;
1741 	} else {
1742 		sc->mfi_aen_triggered = 1;
1743 		if (sc->mfi_poll_waiting) {
1744 			sc->mfi_poll_waiting = 0;
1745 			selwakeup(&sc->mfi_select);
1746 		}
1747 		detail = cm->cm_data;
1748 		mfi_queue_evt(sc, detail);
1749 		seq = detail->seq + 1;
1750 		TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link,
1751 		    tmp) {
1752 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1753 			    aen_link);
1754 			PROC_LOCK(mfi_aen_entry->p);
1755 			kern_psignal(mfi_aen_entry->p, SIGIO);
1756 			PROC_UNLOCK(mfi_aen_entry->p);
1757 			free(mfi_aen_entry, M_MFIBUF);
1758 		}
1759 	}
1760 
1761 	free(cm->cm_data, M_MFIBUF);
1762 	wakeup(&sc->mfi_aen_cm);
1763 	sc->mfi_aen_cm = NULL;
1764 	mfi_release_command(cm);
1765 
1766 	/* set it up again so the driver can catch more events */
1767 	if (!aborted)
1768 		mfi_aen_setup(sc, seq);
1769 }
1770 
1771 #define MAX_EVENTS 15
1772 
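/*
 * Replay the controller's event log from start_seq up to (but not
 * including) stop_seq, fetching up to MAX_EVENTS entries per polled
 * MFI_DCMD_CTRL_EVENT_GET command and queueing each one.  The log is a
 * circular buffer, so the stop point may be earlier than the start.
 */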
1773 static int
1774 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1775 {
1776 	struct mfi_command *cm;
1777 	struct mfi_dcmd_frame *dcmd;
1778 	struct mfi_evt_list *el;
1779 	union mfi_evt class_locale;
1780 	int error, i, seq, size;
1781 
1782 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1783 
1784 	class_locale.members.reserved = 0;
1785 	class_locale.members.locale = mfi_event_locale;
1786 	class_locale.members.evt_class  = mfi_event_class;
1787 
1788 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1789 		* (MAX_EVENTS - 1);
1790 	el = malloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1791 	if (el == NULL)
1792 		return (ENOMEM);
1793 
1794 	for (seq = start_seq;;) {
1795 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1796 			free(el, M_MFIBUF);
1797 			return (EBUSY);
1798 		}
1799 
1800 		dcmd = &cm->cm_frame->dcmd;
1801 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1802 		dcmd->header.cmd = MFI_CMD_DCMD;
1803 		dcmd->header.timeout = 0;
1804 		dcmd->header.data_len = size;
1805 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1806 		((uint32_t *)&dcmd->mbox)[0] = seq;
1807 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1808 		cm->cm_sg = &dcmd->sgl;
1809 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1810 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1811 		cm->cm_data = el;
1812 		cm->cm_len = size;
1813 
1814 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1815 			device_printf(sc->mfi_dev,
1816 			    "Failed to get controller entries\n");
1817 			mfi_release_command(cm);
1818 			break;
1819 		}
1820 
1821 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1822 		    BUS_DMASYNC_POSTREAD);
1823 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1824 
1825 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1826 			mfi_release_command(cm);
1827 			break;
1828 		}
1829 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1830 			device_printf(sc->mfi_dev,
1831 			    "Error %d fetching controller entries\n",
1832 			    dcmd->header.cmd_status);
1833 			mfi_release_command(cm);
1834 			error = EIO;
1835 			break;
1836 		}
1837 		mfi_release_command(cm);
1838 
1839 		for (i = 0; i < el->count; i++) {
1840 			/*
1841 			 * If this event is newer than 'stop_seq' then
1842 			 * break out of the loop.  Note that the log
1843 			 * is a circular buffer so we have to handle
1844 			 * the case that our stop point is earlier in
1845 			 * the buffer than our start point.
1846 			 */
1847 			if (el->event[i].seq >= stop_seq) {
1848 				if (start_seq <= stop_seq)
1849 					break;
1850 				else if (el->event[i].seq < start_seq)
1851 					break;
1852 			}
1853 			mfi_queue_evt(sc, &el->event[i]);
1854 		}
1855 		seq = el->event[el->count - 1].seq + 1;
1856 	}
1857 
1858 	free(el, M_MFIBUF);
1859 	return (error);
1860 }
1861 
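/*
 * Fetch info for logical drive 'id' and attach an mfid child device
 * for it.  SSCD volumes are skipped and no child is created for them.
 */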
1862 static int
1863 mfi_add_ld(struct mfi_softc *sc, int id)
1864 {
1865 	struct mfi_command *cm;
1866 	struct mfi_dcmd_frame *dcmd = NULL;
1867 	struct mfi_ld_info *ld_info = NULL;
1868 	struct mfi_disk_pending *ld_pend;
1869 	int error;
1870 
1871 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1872 
1873 	ld_pend = malloc(sizeof(*ld_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1874 	if (ld_pend != NULL) {
1875 		ld_pend->ld_id = id;
1876 		TAILQ_INSERT_TAIL(&sc->mfi_ld_pend_tqh, ld_pend, ld_link);
1877 	}
1878 
1879 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1880 	    (void **)&ld_info, sizeof(*ld_info));
1881 	if (error) {
1882 		device_printf(sc->mfi_dev,
1883 		    "Failed to allocate MFI_DCMD_LD_GET_INFO command %d\n", error);
1884 		if (ld_info)
1885 			free(ld_info, M_MFIBUF);
1886 		return (error);
1887 	}
1888 	cm->cm_flags = MFI_CMD_DATAIN;
1889 	dcmd = &cm->cm_frame->dcmd;
1890 	dcmd->mbox[0] = id;
1891 	if (mfi_wait_command(sc, cm) != 0) {
1892 		device_printf(sc->mfi_dev,
1893 		    "Failed to get logical drive: %d\n", id);
1894 		free(ld_info, M_MFIBUF);
1895 		return (0);
1896 	}
1897 	if (ld_info->ld_config.params.isSSCD != 1)
1898 		mfi_add_ld_complete(cm);
1899 	else {
1900 		mfi_release_command(cm);
1901 		if (ld_info)		/* for SSCD drives, free ld_info here */
1902 			free(ld_info, M_MFIBUF);
1903 	}
1904 	return (0);
1905 }
1906 
1907 static void
1908 mfi_add_ld_complete(struct mfi_command *cm)
1909 {
1910 	struct mfi_frame_header *hdr;
1911 	struct mfi_ld_info *ld_info;
1912 	struct mfi_softc *sc;
1913 	device_t child;
1914 
1915 	sc = cm->cm_sc;
1916 	hdr = &cm->cm_frame->header;
1917 	ld_info = cm->cm_private;
1918 
1919 	if (sc->cm_map_abort || hdr->cmd_status != MFI_STAT_OK) {
1920 		free(ld_info, M_MFIBUF);
1921 		wakeup(&sc->mfi_map_sync_cm);
1922 		mfi_release_command(cm);
1923 		return;
1924 	}
1925 	wakeup(&sc->mfi_map_sync_cm);
1926 	mfi_release_command(cm);
1927 
1928 	mtx_unlock(&sc->mfi_io_lock);
1929 	mtx_lock(&Giant);
1930 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1931 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1932 		free(ld_info, M_MFIBUF);
1933 		mtx_unlock(&Giant);
1934 		mtx_lock(&sc->mfi_io_lock);
1935 		return;
1936 	}
1937 
1938 	device_set_ivars(child, ld_info);
1939 	device_set_desc(child, "MFI Logical Disk");
1940 	bus_generic_attach(sc->mfi_dev);
1941 	mtx_unlock(&Giant);
1942 	mtx_lock(&sc->mfi_io_lock);
1943 }
1944 
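/*
 * Fetch info for system physical drive 'id' with a polled command and,
 * if the drive really is in the SYSTEM (JBOD) state, attach an
 * mfisyspd child device for it.
 */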
1945 static int
mfi_add_sys_pd(struct mfi_softc *sc, int id)
1946 {
1947 	struct mfi_command *cm;
1948 	struct mfi_dcmd_frame *dcmd = NULL;
1949 	struct mfi_pd_info *pd_info = NULL;
1950 	struct mfi_system_pending *syspd_pend;
1951 	int error;
1952 
1953 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
1954 
1955 	syspd_pend = malloc(sizeof(*syspd_pend), M_MFIBUF, M_NOWAIT | M_ZERO);
1956 	if (syspd_pend != NULL) {
1957 		syspd_pend->pd_id = id;
1958 		TAILQ_INSERT_TAIL(&sc->mfi_syspd_pend_tqh, syspd_pend, pd_link);
1959 	}
1960 
1961 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1962 		(void **)&pd_info, sizeof(*pd_info));
1963 	if (error) {
1964 		device_printf(sc->mfi_dev,
1965 		    "Failed to allocate MFI_DCMD_PD_GET_INFO command %d\n",
1966 		    error);
1967 		if (pd_info)
1968 			free(pd_info, M_MFIBUF);
1969 		return (error);
1970 	}
1971 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1972 	dcmd = &cm->cm_frame->dcmd;
1973 	dcmd->mbox[0] = id;
1974 	dcmd->header.scsi_status = 0;
1975 	dcmd->header.pad0 = 0;
1976 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1977 		device_printf(sc->mfi_dev,
1978 		    "Failed to get physical drive info %d\n", id);
1979 		free(pd_info, M_MFIBUF);
1980 		mfi_release_command(cm);
1981 		return (error);
1982 	}
1983 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1984 	    BUS_DMASYNC_POSTREAD);
1985 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1986 	mfi_add_sys_pd_complete(cm);
1987 	return (0);
1988 }
1989 
1990 static void
1991 mfi_add_sys_pd_complete(struct mfi_command *cm)
1992 {
1993 	struct mfi_frame_header *hdr;
1994 	struct mfi_pd_info *pd_info;
1995 	struct mfi_softc *sc;
1996 	device_t child;
1997 
1998 	sc = cm->cm_sc;
1999 	hdr = &cm->cm_frame->header;
2000 	pd_info = cm->cm_private;
2001 
2002 	if (hdr->cmd_status != MFI_STAT_OK) {
2003 		free(pd_info, M_MFIBUF);
2004 		mfi_release_command(cm);
2005 		return;
2006 	}
2007 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
2008 		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
2009 		    pd_info->ref.v.device_id);
2010 		free(pd_info, M_MFIBUF);
2011 		mfi_release_command(cm);
2012 		return;
2013 	}
2014 	mfi_release_command(cm);
2015 
2016 	mtx_unlock(&sc->mfi_io_lock);
2017 	mtx_lock(&Giant);
2018 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
2019 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
2020 		free(pd_info, M_MFIBUF);
2021 		mtx_unlock(&Giant);
2022 		mtx_lock(&sc->mfi_io_lock);
2023 		return;
2024 	}
2025 
2026 	device_set_ivars(child, pd_info);
2027 	device_set_desc(child, "MFI System PD");
2028 	bus_generic_attach(sc->mfi_dev);
2029 	mtx_unlock(&Giant);
2030 	mtx_lock(&sc->mfi_io_lock);
2031 }
2032 
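/*
 * Dequeue the next bio and turn it into a logical-drive or system-PD
 * command, leaving two free commands in reserve for ioctls.  The bio
 * is put back on the queue if no command could be built.
 */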
2033 static struct mfi_command *
2034 mfi_bio_command(struct mfi_softc *sc)
2035 {
2036 	struct bio *bio;
2037 	struct mfi_command *cm = NULL;
2038 
2039 	/* Reserve two commands to avoid starving ioctl requests */
2040 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2) {
2041 		return (NULL);
2042 	}
2043 	if ((bio = mfi_dequeue_bio(sc)) == NULL) {
2044 		return (NULL);
2045 	}
2046 	if ((uintptr_t)bio->bio_driver2 == MFI_LD_IO) {
2047 		cm = mfi_build_ldio(sc, bio);
2048 	} else if ((uintptr_t)bio->bio_driver2 == MFI_SYS_PD_IO) {
2049 		cm = mfi_build_syspdio(sc, bio);
2050 	}
2051 	if (cm == NULL)
2052 		mfi_enqueue_bio(sc, bio);
2053 	return (cm);
2054 }
2055 
2056 /*
2057  * mostly copied from cam/scsi/scsi_all.c:scsi_read_write
2058  */
2059 
2060 int
2061 mfi_build_cdb(int readop, uint8_t byte2, u_int64_t lba, u_int32_t block_count, uint8_t *cdb)
2062 {
2063 	int cdb_len;
2064 
2065 	if (((lba & 0x1fffff) == lba) &&
2066 	    ((block_count & 0xff) == block_count) &&
2067 	    (byte2 == 0)) {
2068 		/* We can fit in a 6 byte cdb */
2069 		struct scsi_rw_6 *scsi_cmd;
2070 
2071 		scsi_cmd = (struct scsi_rw_6 *)cdb;
2072 		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
2073 		scsi_ulto3b(lba, scsi_cmd->addr);
2074 		scsi_cmd->length = block_count & 0xff;
2075 		scsi_cmd->control = 0;
2076 		cdb_len = sizeof(*scsi_cmd);
2077 	} else if (((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) {
2078 		/* Need a 10 byte CDB */
2079 		struct scsi_rw_10 *scsi_cmd;
2080 
2081 		scsi_cmd = (struct scsi_rw_10 *)cdb;
2082 		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
2083 		scsi_cmd->byte2 = byte2;
2084 		scsi_ulto4b(lba, scsi_cmd->addr);
2085 		scsi_cmd->reserved = 0;
2086 		scsi_ulto2b(block_count, scsi_cmd->length);
2087 		scsi_cmd->control = 0;
2088 		cdb_len = sizeof(*scsi_cmd);
2089 	} else if (((block_count & 0xffffffff) == block_count) &&
2090 	    ((lba & 0xffffffff) == lba)) {
2091 		/* Block count is too big for 10 byte CDB use a 12 byte CDB */
2092 		struct scsi_rw_12 *scsi_cmd;
2093 
2094 		scsi_cmd = (struct scsi_rw_12 *)cdb;
2095 		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
2096 		scsi_cmd->byte2 = byte2;
2097 		scsi_ulto4b(lba, scsi_cmd->addr);
2098 		scsi_cmd->reserved = 0;
2099 		scsi_ulto4b(block_count, scsi_cmd->length);
2100 		scsi_cmd->control = 0;
2101 		cdb_len = sizeof(*scsi_cmd);
2102 	} else {
2103 		/*
2104 		 * 16 byte CDB.  We'll only get here if the LBA is larger
2105 		 * than 2^32.
2106 		 */
2107 		struct scsi_rw_16 *scsi_cmd;
2108 
2109 		scsi_cmd = (struct scsi_rw_16 *)cdb;
2110 		scsi_cmd->opcode = readop ? READ_16 : WRITE_16;
2111 		scsi_cmd->byte2 = byte2;
2112 		scsi_u64to8b(lba, scsi_cmd->addr);
2113 		scsi_cmd->reserved = 0;
2114 		scsi_ulto4b(block_count, scsi_cmd->length);
2115 		scsi_cmd->control = 0;
2116 		cdb_len = sizeof(*scsi_cmd);
2117 	}
2118 
2119 	return (cdb_len);
2120 }
2121 
2122 extern char *unmapped_buf;
2123 
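/*
 * Build a SCSI pass-through frame for a system-PD bio.  The CDB size is
 * chosen by mfi_build_cdb() from the LBA and block count; the data is
 * mapped later from the bio itself (MFI_CMD_BIO), so cm_data is just
 * the unmapped_buf placeholder.
 */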
2124 static struct mfi_command *
2125 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
2126 {
2127 	struct mfi_command *cm;
2128 	struct mfi_pass_frame *pass;
2129 	uint32_t context = 0;
2130 	int flags = 0, blkcount = 0, readop;
2131 	uint8_t cdb_len;
2132 
2133 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2134 
2135 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2136 		return (NULL);
2137 
2138 	/* Zero out the MFI frame */
2139 	context = cm->cm_frame->header.context;
2140 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2141 	cm->cm_frame->header.context = context;
2142 	pass = &cm->cm_frame->pass;
2143 	bzero(pass->cdb, 16);
2144 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2145 	switch (bio->bio_cmd) {
2146 	case BIO_READ:
2147 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2148 		readop = 1;
2149 		break;
2150 	case BIO_WRITE:
2151 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2152 		readop = 0;
2153 		break;
2154 	default:
2155 		/* TODO: BIO_DELETE is not yet handled */
2156 		biofinish(bio, NULL, EOPNOTSUPP);
2157 		mfi_enqueue_free(cm);
2158 		return (NULL);
2159 	}
2160 
2161 	/* Cheat with the sector length to avoid a non-constant division */
2162 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2163 	/* Fill the LBA and Transfer length in CDB */
2164 	cdb_len = mfi_build_cdb(readop, 0, bio->bio_pblkno, blkcount,
2165 	    pass->cdb);
2166 	pass->header.target_id = (uintptr_t)bio->bio_driver1;
2167 	pass->header.lun_id = 0;
2168 	pass->header.timeout = 0;
2169 	pass->header.flags = 0;
2170 	pass->header.scsi_status = 0;
2171 	pass->header.sense_len = MFI_SENSE_LEN;
2172 	pass->header.data_len = bio->bio_bcount;
2173 	pass->header.cdb_len = cdb_len;
2174 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2175 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2176 	cm->cm_complete = mfi_bio_complete;
2177 	cm->cm_private = bio;
2178 	cm->cm_data = unmapped_buf;
2179 	cm->cm_len = bio->bio_bcount;
2180 	cm->cm_sg = &pass->sgl;
2181 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2182 	cm->cm_flags = flags;
2183 
2184 	return (cm);
2185 }
2186 
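/*
 * Build a native MFI read/write frame for a logical-drive bio, with the
 * 64-bit LBA split across lba_hi/lba_lo.  As with mfi_build_syspdio(),
 * the data is mapped from the bio when the command is sent.
 */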
2187 static struct mfi_command *
2188 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2189 {
2190 	struct mfi_io_frame *io;
2191 	struct mfi_command *cm;
2192 	int flags;
2193 	uint32_t blkcount;
2194 	uint32_t context = 0;
2195 
2196 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2197 
2198 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2199 		return (NULL);
2200 
2201 	/* Zero out the MFI frame */
2202 	context = cm->cm_frame->header.context;
2203 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2204 	cm->cm_frame->header.context = context;
2205 	io = &cm->cm_frame->io;
2206 	switch (bio->bio_cmd) {
2207 	case BIO_READ:
2208 		io->header.cmd = MFI_CMD_LD_READ;
2209 		flags = MFI_CMD_DATAIN | MFI_CMD_BIO;
2210 		break;
2211 	case BIO_WRITE:
2212 		io->header.cmd = MFI_CMD_LD_WRITE;
2213 		flags = MFI_CMD_DATAOUT | MFI_CMD_BIO;
2214 		break;
2215 	default:
2216 		/* TODO: BIO_DELETE is not yet handled */
2217 		biofinish(bio, NULL, EOPNOTSUPP);
2218 		mfi_enqueue_free(cm);
2219 		return (NULL);
2220 	}
2221 
2222 	/* Cheat with the sector length to avoid a non-constant division */
2223 	blkcount = howmany(bio->bio_bcount, MFI_SECTOR_LEN);
2224 	io->header.target_id = (uintptr_t)bio->bio_driver1;
2225 	io->header.timeout = 0;
2226 	io->header.flags = 0;
2227 	io->header.scsi_status = 0;
2228 	io->header.sense_len = MFI_SENSE_LEN;
2229 	io->header.data_len = blkcount;
2230 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2231 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2232 	io->lba_hi = (bio->bio_pblkno & 0xffffffff00000000) >> 32;
2233 	io->lba_lo = bio->bio_pblkno & 0xffffffff;
2234 	cm->cm_complete = mfi_bio_complete;
2235 	cm->cm_private = bio;
2236 	cm->cm_data = unmapped_buf;
2237 	cm->cm_len = bio->bio_bcount;
2238 	cm->cm_sg = &io->sgl;
2239 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2240 	cm->cm_flags = flags;
2241 
2242 	return (cm);
2243 }
2244 
2245 static void
2246 mfi_bio_complete(struct mfi_command *cm)
2247 {
2248 	struct bio *bio;
2249 	struct mfi_frame_header *hdr;
2250 	struct mfi_softc *sc;
2251 
2252 	bio = cm->cm_private;
2253 	hdr = &cm->cm_frame->header;
2254 	sc = cm->cm_sc;
2255 
2256 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2257 		bio->bio_flags |= BIO_ERROR;
2258 		bio->bio_error = EIO;
2259 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, status=%#x, "
2260 		    "scsi_status=%#x\n", cm, hdr->cmd_status, hdr->scsi_status);
2261 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2262 	} else if (cm->cm_error != 0) {
2263 		bio->bio_flags |= BIO_ERROR;
2264 		bio->bio_error = cm->cm_error;
2265 		device_printf(sc->mfi_dev, "I/O error, cmd=%p, error=%#x\n",
2266 		    cm, cm->cm_error);
2267 	}
2268 
2269 	mfi_release_command(cm);
2270 	mfi_disk_complete(bio);
2271 }
2272 
2273 void
2274 mfi_startio(struct mfi_softc *sc)
2275 {
2276 	struct mfi_command *cm;
2277 	struct ccb_hdr *ccbh;
2278 
2279 	for (;;) {
2280 		/* Don't bother if we're short on resources */
2281 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2282 			break;
2283 
2284 		/* Try a command that has already been prepared */
2285 		cm = mfi_dequeue_ready(sc);
2286 
2287 		if (cm == NULL) {
2288 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2289 				cm = sc->mfi_cam_start(ccbh);
2290 		}
2291 
2292 		/* Nope, so look for work on the bioq */
2293 		if (cm == NULL)
2294 			cm = mfi_bio_command(sc);
2295 
2296 		/* No work available, so exit */
2297 		if (cm == NULL)
2298 			break;
2299 
2300 		/* Send the command to the controller */
2301 		if (mfi_mapcmd(sc, cm) != 0) {
2302 			device_printf(sc->mfi_dev, "Failed to startio\n");
2303 			mfi_requeue_ready(cm);
2304 			break;
2305 		}
2306 	}
2307 }
2308 
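/*
 * Map a command's data for DMA and hand it to the controller.  The
 * bus_dmamap_load variant is chosen from the command flags (CCB, bio,
 * or plain kernel buffer); a deferred load (EINPROGRESS) freezes the
 * queue until the callback completes.  Commands without data are sent
 * directly.
 */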
2309 int
2310 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2311 {
2312 	int error, polled;
2313 
2314 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2315 
2316 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2317 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2318 		if (cm->cm_flags & MFI_CMD_CCB)
2319 			error = bus_dmamap_load_ccb(sc->mfi_buffer_dmat,
2320 			    cm->cm_dmamap, cm->cm_data, mfi_data_cb, cm,
2321 			    polled);
2322 		else if (cm->cm_flags & MFI_CMD_BIO)
2323 			error = bus_dmamap_load_bio(sc->mfi_buffer_dmat,
2324 			    cm->cm_dmamap, cm->cm_private, mfi_data_cb, cm,
2325 			    polled);
2326 		else
2327 			error = bus_dmamap_load(sc->mfi_buffer_dmat,
2328 			    cm->cm_dmamap, cm->cm_data, cm->cm_len,
2329 			    mfi_data_cb, cm, polled);
2330 		if (error == EINPROGRESS) {
2331 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2332 			return (0);
2333 		}
2334 	} else {
2335 		error = mfi_send_frame(sc, cm);
2336 	}
2337 
2338 	return (error);
2339 }
2340 
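/*
 * bus_dma callback: build the frame's scatter/gather list from the DMA
 * segments, set the direction flags, account for the extra frames the
 * SGL occupies, and send the frame to the controller.
 */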
2341 static void
2342 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2343 {
2344 	struct mfi_frame_header *hdr;
2345 	struct mfi_command *cm;
2346 	union mfi_sgl *sgl;
2347 	struct mfi_softc *sc;
2348 	int i, j, first, dir;
2349 	int sge_size, locked;
2350 
2351 	cm = (struct mfi_command *)arg;
2352 	sc = cm->cm_sc;
2353 	hdr = &cm->cm_frame->header;
2354 	sgl = cm->cm_sg;
2355 
2356 	/*
2357 	 * Check whether we already hold the lock: this is an
2358 	 * asynchronous callback, so even though our caller
2359 	 * mfi_mapcmd asserts that it holds the lock, it may have
2360 	 * been dropped if bus_dmamap_load() returned before this
2361 	 * callback ran.
2362 	 */
2363 	if ((locked = mtx_owned(&sc->mfi_io_lock)) == 0)
2364 		mtx_lock(&sc->mfi_io_lock);
2365 
2366 	if (error) {
2367 		printf("error %d in callback\n", error);
2368 		cm->cm_error = error;
2369 		mfi_complete(sc, cm);
2370 		goto out;
2371 	}
2372 	/* Use an IEEE SGL only for I/Os on a SKINNY controller.
2373 	 * For other commands on a SKINNY controller use either
2374 	 * sg32 or sg64 based on the sizeof(bus_addr_t).
2375 	 * Also calculate the total frame size based on the type
2376 	 * of SGL used.
2377 	 */
2378 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2379 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2380 	    (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2381 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2382 		for (i = 0; i < nsegs; i++) {
2383 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2384 			sgl->sg_skinny[i].len = segs[i].ds_len;
2385 			sgl->sg_skinny[i].flag = 0;
2386 		}
2387 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2388 		sge_size = sizeof(struct mfi_sg_skinny);
2389 		hdr->sg_count = nsegs;
2390 	} else {
2391 		j = 0;
2392 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2393 			first = cm->cm_stp_len;
2394 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2395 				sgl->sg32[j].addr = segs[0].ds_addr;
2396 				sgl->sg32[j++].len = first;
2397 			} else {
2398 				sgl->sg64[j].addr = segs[0].ds_addr;
2399 				sgl->sg64[j++].len = first;
2400 			}
2401 		} else
2402 			first = 0;
2403 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2404 			for (i = 0; i < nsegs; i++) {
2405 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2406 				sgl->sg32[j++].len = segs[i].ds_len - first;
2407 				first = 0;
2408 			}
2409 		} else {
2410 			for (i = 0; i < nsegs; i++) {
2411 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2412 				sgl->sg64[j++].len = segs[i].ds_len - first;
2413 				first = 0;
2414 			}
2415 			hdr->flags |= MFI_FRAME_SGL64;
2416 		}
2417 		hdr->sg_count = j;
2418 		sge_size = sc->mfi_sge_size;
2419 	}
2420 
2421 	dir = 0;
2422 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2423 		dir |= BUS_DMASYNC_PREREAD;
2424 		hdr->flags |= MFI_FRAME_DIR_READ;
2425 	}
2426 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2427 		dir |= BUS_DMASYNC_PREWRITE;
2428 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2429 	}
2430 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2431 	cm->cm_flags |= MFI_CMD_MAPPED;
2432 
2433 	/*
2434 	 * Instead of calculating the total number of frames in the
2435 	 * compound frame, it's already assumed that there will be at
2436 	 * least 1 frame, so don't compensate for the modulo of the
2437 	 * following division.
2438 	 */
2439 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2440 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2441 
2442 	if ((error = mfi_send_frame(sc, cm)) != 0) {
2443 		printf("error %d in callback from mfi_send_frame\n", error);
2444 		cm->cm_error = error;
2445 		mfi_complete(sc, cm);
2446 		goto out;
2447 	}
2448 
2449 out:
2450 	/* leave the lock in the state we found it */
2451 	if (locked == 0)
2452 		mtx_unlock(&sc->mfi_io_lock);
2453 
2454 	return;
2455 }
2456 
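/*
 * Dispatch a fully built frame to the hardware, via the Thunderbolt
 * path when MFA is enabled and the standard path otherwise.  On
 * failure the command is removed from the busy queue.
 */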
2457 static int
2458 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2459 {
2460 	int error;
2461 
2462 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2463 
2464 	if (sc->MFA_enabled)
2465 		error = mfi_tbolt_send_frame(sc, cm);
2466 	else
2467 		error = mfi_std_send_frame(sc, cm);
2468 
2469 	if (error != 0 && (cm->cm_flags & MFI_ON_MFIQ_BUSY) != 0)
2470 		mfi_remove_busy(cm);
2471 
2472 	return (error);
2473 }
2474 
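/*
 * Standard frame submission: post the frame's bus address to the
 * hardware, then for polled commands busy-wait up to
 * mfi_polled_cmd_timeout seconds for a completion status.
 */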
2475 static int
2476 mfi_std_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2477 {
2478 	struct mfi_frame_header *hdr;
2479 	int tm = mfi_polled_cmd_timeout * 1000;
2480 
2481 	hdr = &cm->cm_frame->header;
2482 
2483 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2484 		cm->cm_timestamp = time_uptime;
2485 		mfi_enqueue_busy(cm);
2486 	} else {
2487 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2488 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2489 	}
2490 
2491 	/*
2492 	 * The bus address of the command is aligned on a 64 byte boundary,
2493 	 * leaving the low 6 bits zero.  For whatever reason, the
2494 	 * hardware wants the address shifted right by three, leaving just
2495 	 * 3 zero bits.  These three bits are then used as a prefetching
2496 	 * hint for the hardware to predict how many frames need to be
2497 	 * fetched across the bus.  If a command has more than 8 frames
2498 	 * then the 3 bits are set to 0x7 and the firmware uses other
2499 	 * information in the command to determine the total amount to fetch.
2500 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2501 	 * is enough for both 32bit and 64bit systems.
2502 	 */
2503 	if (cm->cm_extra_frames > 7)
2504 		cm->cm_extra_frames = 7;
2505 
2506 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2507 
2508 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2509 		return (0);
2510 
2511 	/* This is a polled command, so busy-wait for it to complete. */
2512 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2513 		DELAY(1000);
2514 		tm -= 1;
2515 		if (tm <= 0)
2516 			break;
2517 	}
2518 
2519 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2520 		device_printf(sc->mfi_dev, "Frame %p timed out "
2521 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2522 		return (ETIMEDOUT);
2523 	}
2524 
2525 	return (0);
2526 }
2527 
2529 void
2530 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2531 {
2532 	int dir;
2533 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2534 
2535 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2536 		dir = 0;
2537 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2538 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2539 			dir |= BUS_DMASYNC_POSTREAD;
2540 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2541 			dir |= BUS_DMASYNC_POSTWRITE;
2542 
2543 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2544 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2545 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2546 	}
2547 
2548 	cm->cm_flags |= MFI_CMD_COMPLETED;
2549 
2550 	if (cm->cm_complete != NULL)
2551 		cm->cm_complete(cm);
2552 	else
2553 		wakeup(cm);
2554 }
2555 
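/*
 * Issue a polled MFI_CMD_ABORT frame targeting *cm_abort and wait up
 * to 25 seconds for the aborted command to complete; if it never does,
 * its completion handler is forced to run.
 */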
2556 static int
2557 mfi_abort(struct mfi_softc *sc, struct mfi_command **cm_abort)
2558 {
2559 	struct mfi_command *cm;
2560 	struct mfi_abort_frame *abort;
2561 	int i = 0, error;
2562 	uint32_t context = 0;
2563 
2564 	mtx_lock(&sc->mfi_io_lock);
2565 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2566 		mtx_unlock(&sc->mfi_io_lock);
2567 		return (EBUSY);
2568 	}
2569 
2570 	/* Zero out the MFI frame */
2571 	context = cm->cm_frame->header.context;
2572 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2573 	cm->cm_frame->header.context = context;
2574 
2575 	abort = &cm->cm_frame->abort;
2576 	abort->header.cmd = MFI_CMD_ABORT;
2577 	abort->header.flags = 0;
2578 	abort->header.scsi_status = 0;
2579 	abort->abort_context = (*cm_abort)->cm_frame->header.context;
2580 	abort->abort_mfi_addr_lo = (uint32_t)(*cm_abort)->cm_frame_busaddr;
2581 	abort->abort_mfi_addr_hi =
2582 		(uint32_t)((uint64_t)(*cm_abort)->cm_frame_busaddr >> 32);
2583 	cm->cm_data = NULL;
2584 	cm->cm_flags = MFI_CMD_POLLED;
2585 
2586 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2587 		device_printf(sc->mfi_dev, "failed to abort command\n");
2588 	mfi_release_command(cm);
2589 
2590 	mtx_unlock(&sc->mfi_io_lock);
2591 	while (i < 5 && *cm_abort != NULL) {
2592 		tsleep(cm_abort, 0, "mfiabort", 5 * hz);
2594 		i++;
2595 	}
2596 	if (*cm_abort != NULL) {
2597 		/* Force a complete if command didn't abort */
2598 		mtx_lock(&sc->mfi_io_lock);
2599 		(*cm_abort)->cm_complete(*cm_abort);
2600 		mtx_unlock(&sc->mfi_io_lock);
2601 	}
2602 
2603 	return (error);
2604 }
2605 
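/*
 * Write a range of blocks to a logical drive with a polled command;
 * intended for the crash-dump path, where sleeping is not allowed.
 */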
2606 int
2607 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2608      int len)
2609 {
2610 	struct mfi_command *cm;
2611 	struct mfi_io_frame *io;
2612 	int error;
2613 	uint32_t context = 0;
2614 
2615 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2616 		return (EBUSY);
2617 
2618 	/* Zero out the MFI frame */
2619 	context = cm->cm_frame->header.context;
2620 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2621 	cm->cm_frame->header.context = context;
2622 
2623 	io = &cm->cm_frame->io;
2624 	io->header.cmd = MFI_CMD_LD_WRITE;
2625 	io->header.target_id = id;
2626 	io->header.timeout = 0;
2627 	io->header.flags = 0;
2628 	io->header.scsi_status = 0;
2629 	io->header.sense_len = MFI_SENSE_LEN;
2630 	io->header.data_len = howmany(len, MFI_SECTOR_LEN);
2631 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2632 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2633 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2634 	io->lba_lo = lba & 0xffffffff;
2635 	cm->cm_data = virt;
2636 	cm->cm_len = len;
2637 	cm->cm_sg = &io->sgl;
2638 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2639 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2640 
2641 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2642 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2643 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2644 	    BUS_DMASYNC_POSTWRITE);
2645 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2646 	mfi_release_command(cm);
2647 
2648 	return (error);
2649 }
2650 
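/*
 * As mfi_dump_blocks(), but the write goes to a system physical drive
 * as a polled SCSI pass-through command.
 */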
2651 int
2652 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2653     int len)
2654 {
2655 	struct mfi_command *cm;
2656 	struct mfi_pass_frame *pass;
2657 	int error, readop, cdb_len;
2658 	uint32_t blkcount;
2659 
2660 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2661 		return (EBUSY);
2662 
2663 	pass = &cm->cm_frame->pass;
2664 	bzero(pass->cdb, 16);
2665 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2666 
2667 	readop = 0;
2668 	blkcount = howmany(len, MFI_SECTOR_LEN);
2669 	cdb_len = mfi_build_cdb(readop, 0, lba, blkcount, pass->cdb);
2670 	pass->header.target_id = id;
2671 	pass->header.timeout = 0;
2672 	pass->header.flags = 0;
2673 	pass->header.scsi_status = 0;
2674 	pass->header.sense_len = MFI_SENSE_LEN;
2675 	pass->header.data_len = len;
2676 	pass->header.cdb_len = cdb_len;
2677 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2678 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2679 	cm->cm_data = virt;
2680 	cm->cm_len = len;
2681 	cm->cm_sg = &pass->sgl;
2682 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2683 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT | MFI_CMD_SCSI;
2684 
2685 	if ((error = mfi_mapcmd(sc, cm)) != 0)
2686 		device_printf(sc->mfi_dev, "failed dump blocks\n");
2687 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2688 	    BUS_DMASYNC_POSTWRITE);
2689 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2690 	mfi_release_command(cm);
2691 
2692 	return (error);
2693 }
2694 
2695 static int
2696 mfi_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2697 {
2698 	struct mfi_softc *sc;
2699 	int error;
2700 
2701 	sc = dev->si_drv1;
2702 
2703 	mtx_lock(&sc->mfi_io_lock);
2704 	if (sc->mfi_detaching)
2705 		error = ENXIO;
2706 	else {
2707 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2708 		error = 0;
2709 	}
2710 	mtx_unlock(&sc->mfi_io_lock);
2711 
2712 	return (error);
2713 }
2714 
2715 static int
2716 mfi_close(struct cdev *dev, int flags, int fmt, struct thread *td)
2717 {
2718 	struct mfi_softc *sc;
2719 	struct mfi_aen *mfi_aen_entry, *tmp;
2720 
2721 	sc = dev->si_drv1;
2722 
2723 	mtx_lock(&sc->mfi_io_lock);
2724 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2725 
2726 	TAILQ_FOREACH_SAFE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2727 		if (mfi_aen_entry->p == curproc) {
2728 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2729 			    aen_link);
2730 			free(mfi_aen_entry, M_MFIBUF);
2731 		}
2732 	}
2733 	mtx_unlock(&sc->mfi_io_lock);
2734 	return (0);
2735 }
2736 
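/*
 * Take the configuration sx lock for DCMDs that modify the array
 * configuration.  Returns nonzero if the lock was taken; the caller
 * passes that value to mfi_config_unlock().
 */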
2737 static int
2738 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2739 {
2740 
2741 	switch (opcode) {
2742 	case MFI_DCMD_LD_DELETE:
2743 	case MFI_DCMD_CFG_ADD:
2744 	case MFI_DCMD_CFG_CLEAR:
2745 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2746 		sx_xlock(&sc->mfi_config_lock);
2747 		return (1);
2748 	default:
2749 		return (0);
2750 	}
2751 }
2752 
2753 static void
2754 mfi_config_unlock(struct mfi_softc *sc, int locked)
2755 {
2756 
2757 	if (locked)
2758 		sx_xunlock(&sc->mfi_config_lock);
2759 }
2760 
2761 /*
2762  * Perform pre-issue checks on commands from userland and possibly veto
2763  * them.
2764  */
2765 static int
2766 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2767 {
2768 	struct mfi_disk *ld, *ld2;
2769 	int error;
2770 	struct mfi_system_pd *syspd = NULL;
2771 	uint16_t syspd_id;
2772 	uint16_t *mbox;
2773 
2774 	mtx_assert(&sc->mfi_io_lock, MA_OWNED);
2775 	error = 0;
2776 	switch (cm->cm_frame->dcmd.opcode) {
2777 	case MFI_DCMD_LD_DELETE:
2778 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2779 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2780 				break;
2781 		}
2782 		if (ld == NULL)
2783 			error = ENOENT;
2784 		else
2785 			error = mfi_disk_disable(ld);
2786 		break;
2787 	case MFI_DCMD_CFG_CLEAR:
2788 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2789 			error = mfi_disk_disable(ld);
2790 			if (error)
2791 				break;
2792 		}
2793 		if (error) {
2794 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2795 				if (ld2 == ld)
2796 					break;
2797 				mfi_disk_enable(ld2);
2798 			}
2799 		}
2800 		break;
2801 	case MFI_DCMD_PD_STATE_SET:
2802 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2803 		syspd_id = mbox[0];
2804 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2805 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2806 				if (syspd->pd_id == syspd_id)
2807 					break;
2808 			}
2809 		} else
2811 			break;
2812 		if (syspd)
2813 			error = mfi_syspd_disable(syspd);
2814 		break;
2815 	default:
2816 		break;
2817 	}
2818 	return (error);
2819 }
2820 
2821 /* Perform post-issue checks on commands from userland. */
2822 static void
2823 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2824 {
2825 	struct mfi_disk *ld, *ldn;
2826 	struct mfi_system_pd *syspd = NULL;
2827 	uint16_t syspd_id;
2828 	uint16_t *mbox;
2829 
2830 	switch (cm->cm_frame->dcmd.opcode) {
2831 	case MFI_DCMD_LD_DELETE:
2832 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2833 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2834 				break;
2835 		}
2836 		KASSERT(ld != NULL, ("volume disappeared"));
2837 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2838 			mtx_unlock(&sc->mfi_io_lock);
2839 			mtx_lock(&Giant);
2840 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2841 			mtx_unlock(&Giant);
2842 			mtx_lock(&sc->mfi_io_lock);
2843 		} else
2844 			mfi_disk_enable(ld);
2845 		break;
2846 	case MFI_DCMD_CFG_CLEAR:
2847 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2848 			mtx_unlock(&sc->mfi_io_lock);
2849 			mtx_lock(&Giant);
2850 			TAILQ_FOREACH_SAFE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2851 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2852 			}
2853 			mtx_unlock(&Giant);
2854 			mtx_lock(&sc->mfi_io_lock);
2855 		} else {
2856 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2857 				mfi_disk_enable(ld);
2858 		}
2859 		break;
2860 	case MFI_DCMD_CFG_ADD:
2861 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2862 		mfi_ldprobe(sc);
2863 		break;
2866 	case MFI_DCMD_PD_STATE_SET:
2867 		mbox = (uint16_t *) cm->cm_frame->dcmd.mbox;
2868 		syspd_id = mbox[0];
2869 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2870 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2871 				if (syspd->pd_id == syspd_id)
2872 					break;
2873 			}
2874 		} else
2876 			break;
2877 		/* If the transition fails then enable the syspd again */
2878 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2879 			mfi_syspd_enable(syspd);
2880 		break;
2881 	}
2882 }
2883 
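/*
 * Determine whether a userland config command targets an SSCD
 * (solid-state cache) volume.  Returns nonzero for SSCD targets, in
 * which case the caller skips the usual pre/post command checks.
 */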
2884 static int
2885 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2886 {
2887 	struct mfi_config_data *conf_data;
2888 	struct mfi_command *ld_cm = NULL;
2889 	struct mfi_ld_info *ld_info = NULL;
2890 	struct mfi_ld_config *ld;
2891 	char *p;
2892 	int error = 0;
2893 
2894 	conf_data = (struct mfi_config_data *)cm->cm_data;
2895 
2896 	if (cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) {
2897 		p = (char *)conf_data->array;
2898 		p += conf_data->array_size * conf_data->array_count;
2899 		ld = (struct mfi_ld_config *)p;
2900 		if (ld->params.isSSCD == 1)
2901 			error = 1;
2902 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2903 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2904 		    (void **)&ld_info, sizeof(*ld_info));
2905 		if (error) {
2906 			device_printf(sc->mfi_dev, "Failed to allocate "
2907 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2908 			if (ld_info)
2909 				free(ld_info, M_MFIBUF);
2910 			return (0);
2911 		}
2912 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2913 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2914 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2915 		if (mfi_wait_command(sc, ld_cm) != 0) {
2916 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2917 			mfi_release_command(ld_cm);
2918 			free(ld_info, M_MFIBUF);
2919 			return 0;
2920 			return (0);
2921 
2922 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2923 			free(ld_info, M_MFIBUF);
2924 			mfi_release_command(ld_cm);
2925 			return (0);
2926 		} else
2928 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2929 
2930 		if (ld_info->ld_config.params.isSSCD == 1)
2931 			error = 1;
2932 
2933 		mfi_release_command(ld_cm);
2934 		free(ld_info, M_MFIBUF);
2935 
2936 	}
2937 	return (error);
2938 }
2939 
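/*
 * Prepare a system pass-through (STP) command from userland: allocate
 * a DMA-able kernel buffer for each user SG element, copy the user
 * data in, and point both the kernel SGE array and the frame's SGL at
 * the new buffers.
 */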
2940 static int
2941 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2942 {
2943 	uint8_t i;
2944 	struct mfi_ioc_packet *ioc;
2945 	int sge_size, error;
2946 	struct megasas_sge *kern_sge;
2947 
2948 	ioc = (struct mfi_ioc_packet *)arg;
2949 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2950 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2951 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2952 
2953 	if (sizeof(bus_addr_t) == 8) {
2954 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2955 		cm->cm_extra_frames = 2;
2956 		sge_size = sizeof(struct mfi_sg64);
2957 	} else {
2958 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2959 		sge_size = sizeof(struct mfi_sg32);
2960 	}
2961 
2962 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2963 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2964 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2965 			1, 0,			/* algnmnt, boundary */
2966 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2967 			BUS_SPACE_MAXADDR,	/* highaddr */
2968 			NULL, NULL,		/* filter, filterarg */
2969 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2970 			2,			/* nsegments */
2971 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2972 			BUS_DMA_ALLOCNOW,	/* flags */
2973 			NULL, NULL,		/* lockfunc, lockarg */
2974 			&sc->mfi_kbuff_arr_dmat[i])) {
2975 			device_printf(sc->mfi_dev,
2976 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2977 			return (ENOMEM);
2978 		}
2979 
2980 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2981 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2982 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2983 			device_printf(sc->mfi_dev,
2984 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2985 			return (ENOMEM);
2986 		}
2987 
2988 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2989 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2990 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2991 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2992 
2993 		if (!sc->kbuff_arr[i]) {
2994 			device_printf(sc->mfi_dev,
2995 			    "Could not allocate memory for kbuff_arr info\n");
2996 			return (ENOMEM);
2997 		}
2998 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2999 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
3000 
3001 		if (sizeof(bus_addr_t) == 8) {
3002 			cm->cm_frame->stp.sgl.sg64[i].addr =
3003 			    kern_sge[i].phys_addr;
3004 			cm->cm_frame->stp.sgl.sg64[i].len =
3005 			    ioc->mfi_sgl[i].iov_len;
3006 		} else {
3007 			cm->cm_frame->stp.sgl.sg32[i].addr =
3008 			    kern_sge[i].phys_addr;
3009 			cm->cm_frame->stp.sgl.sg32[i].len =
3010 			    ioc->mfi_sgl[i].iov_len;
3011 		}
3012 
3013 		error = copyin(ioc->mfi_sgl[i].iov_base,
3014 		    sc->kbuff_arr[i],
3015 		    ioc->mfi_sgl[i].iov_len);
3016 		if (error != 0) {
3017 			device_printf(sc->mfi_dev, "Copy in failed\n");
3018 			return (error);
3019 		}
3020 	}
3021 
3022 	cm->cm_flags |= MFI_CMD_MAPPED;
3023 	return (0);
3024 }
3025 
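/*
 * Execute a user-supplied DCMD (MFIIO_PASSTHRU): copy in up to 1MB of
 * user data, run the command with both direction flags set since the
 * actual direction is unknown, and copy the data and frame back out.
 */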
3026 static int
3027 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
3028 {
3029 	struct mfi_command *cm;
3030 	struct mfi_dcmd_frame *dcmd;
3031 	void *ioc_buf = NULL;
3032 	uint32_t context;
3033 	int error = 0, locked;
3034 
3035 
3037 		if (ioc->buf_size > 1024 * 1024)
3038 			return (ENOMEM);
3039 		ioc_buf = malloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
3040 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
3041 		if (error) {
3042 			device_printf(sc->mfi_dev, "failed to copyin\n");
3043 			free(ioc_buf, M_MFIBUF);
3044 			return (error);
3045 		}
3046 	}
3047 
3048 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
3049 
3050 	mtx_lock(&sc->mfi_io_lock);
3051 	while ((cm = mfi_dequeue_free(sc)) == NULL)
3052 		msleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
3053 
3054 	/* Save context for later */
3055 	context = cm->cm_frame->header.context;
3056 
3057 	dcmd = &cm->cm_frame->dcmd;
3058 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
3059 
3060 	cm->cm_sg = &dcmd->sgl;
3061 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
3062 	cm->cm_data = ioc_buf;
3063 	cm->cm_len = ioc->buf_size;
3064 
3065 	/* restore context */
3066 	cm->cm_frame->header.context = context;
3067 
3068 	/* Cheat since we don't know if we're writing or reading */
3069 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3070 
3071 	error = mfi_check_command_pre(sc, cm);
3072 	if (error)
3073 		goto out;
3074 
3075 	error = mfi_wait_command(sc, cm);
3076 	if (error) {
3077 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
3078 		goto out;
3079 	}
3080 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
3081 	mfi_check_command_post(sc, cm);
3082 out:
3083 	mfi_release_command(cm);
3084 	mtx_unlock(&sc->mfi_io_lock);
3085 	mfi_config_unlock(sc, locked);
3086 	if (ioc->buf_size > 0)
3087 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
3088 	if (ioc_buf)
3089 		free(ioc_buf, M_MFIBUF);
3090 	return (error);
3091 }
3092 
3093 #define	PTRIN(p)		((void *)(uintptr_t)(p))
3094 
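/*
 * Control-device ioctl handler: services driver statistics and disk
 * queries, raw MFI command pass-through (native, 32-bit compat, and
 * Linux shims), and AEN registration.
 */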
3095 static int
3096 mfi_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3097 {
3098 	struct mfi_softc *sc;
3099 	union mfi_statrequest *ms;
3100 	struct mfi_ioc_packet *ioc;
3101 #ifdef COMPAT_FREEBSD32
3102 	struct mfi_ioc_packet32 *ioc32;
3103 #endif
3104 	struct mfi_ioc_aen *aen;
3105 	struct mfi_command *cm = NULL;
3106 	uint32_t context = 0;
3107 	union mfi_sense_ptr sense_ptr;
3108 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
3109 	size_t len;
3110 	int i, res;
3111 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
3112 #ifdef COMPAT_FREEBSD32
3113 	struct mfi_ioc_passthru32 *iop32 = (struct mfi_ioc_passthru32 *)arg;
3114 	struct mfi_ioc_passthru iop_swab;
3115 #endif
3116 	int error, locked;
3117 	union mfi_sgl *sgl;
3118 	sc = dev->si_drv1;
3119 	error = 0;
3120 
3121 	if (sc->adpreset)
3122 		return (EBUSY);
3123 
3124 	if (sc->hw_crit_error)
3125 		return (EBUSY);
3126 
3127 	if (sc->issuepend_done == 0)
3128 		return (EBUSY);
3129 
3130 	switch (cmd) {
3131 	case MFIIO_STATS:
3132 		ms = (union mfi_statrequest *)arg;
3133 		switch (ms->ms_item) {
3134 		case MFIQ_FREE:
3135 		case MFIQ_BIO:
3136 		case MFIQ_READY:
3137 		case MFIQ_BUSY:
3138 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
3139 			    sizeof(struct mfi_qstat));
3140 			break;
3141 		default:
3142 			error = ENOIOCTL;
3143 			break;
3144 		}
3145 		break;
3146 	case MFIIO_QUERY_DISK:
3147 	{
3148 		struct mfi_query_disk *qd;
3149 		struct mfi_disk *ld;
3150 
3151 		qd = (struct mfi_query_disk *)arg;
3152 		mtx_lock(&sc->mfi_io_lock);
3153 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
3154 			if (ld->ld_id == qd->array_id)
3155 				break;
3156 		}
3157 		if (ld == NULL) {
3158 			qd->present = 0;
3159 			mtx_unlock(&sc->mfi_io_lock);
3160 			return (0);
3161 		}
3162 		qd->present = 1;
3163 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
3164 			qd->open = 1;
3165 		bzero(qd->devname, SPECNAMELEN + 1);
3166 		snprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
3167 		mtx_unlock(&sc->mfi_io_lock);
3168 		break;
3169 	}
3170 	case MFI_CMD:
3171 #ifdef COMPAT_FREEBSD32
3172 	case MFI_CMD32:
3173 #endif
3174 		{
3175 		devclass_t devclass;
3176 		int adapter;
3177 
3178 		ioc = (struct mfi_ioc_packet *)arg;
3179 		adapter = ioc->mfi_adapter_no;
3180 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
3181 			devclass = devclass_find("mfi");
3182 			sc = devclass_get_softc(devclass, adapter);
3183 		}
3184 		mtx_lock(&sc->mfi_io_lock);
3185 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3186 			mtx_unlock(&sc->mfi_io_lock);
3187 			return (EBUSY);
3188 		}
3189 		mtx_unlock(&sc->mfi_io_lock);
3190 		locked = 0;
3191 
3192 		/*
3193 		 * save off original context since copying from user
3194 		 * will clobber some data
3195 		 */
3196 		context = cm->cm_frame->header.context;
3197 		cm->cm_frame->header.context = cm->cm_index;
3198 
3199 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
3200 		    2 * MEGAMFI_FRAME_SIZE);
3201 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3202 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
3203 		cm->cm_frame->header.scsi_status = 0;
3204 		cm->cm_frame->header.pad0 = 0;
3205 		if (ioc->mfi_sge_count) {
3206 			cm->cm_sg =
3207 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
3208 		}
3209 		sgl = cm->cm_sg;
3210 		cm->cm_flags = 0;
3211 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3212 			cm->cm_flags |= MFI_CMD_DATAIN;
3213 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3214 			cm->cm_flags |= MFI_CMD_DATAOUT;
3215 		/* Legacy app shim */
3216 		if (cm->cm_flags == 0)
3217 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3218 		cm->cm_len = cm->cm_frame->header.data_len;
3219 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3220 #ifdef COMPAT_FREEBSD32
3221 			if (cmd == MFI_CMD) {
3222 #endif
3223 				/* Native */
3224 				cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3225 #ifdef COMPAT_FREEBSD32
3226 			} else {
3227 				/* 32bit on 64bit */
3228 				ioc32 = (struct mfi_ioc_packet32 *)ioc;
3229 				cm->cm_stp_len = ioc32->mfi_sgl[0].iov_len;
3230 			}
3231 #endif
3232 			cm->cm_len += cm->cm_stp_len;
3233 		}
3234 		if (cm->cm_len &&
3235 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3236 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3237 			    M_WAITOK | M_ZERO);
3238 		} else {
3239 			cm->cm_data = NULL;
3240 		}
3241 
3242 		/* restore header context */
3243 		cm->cm_frame->header.context = context;
3244 
3245 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3246 			res = mfi_stp_cmd(sc, cm, arg);
3247 			if (res != 0)
3248 				goto out;
3249 		} else {
3250 			temp = data;
3251 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3252 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3253 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3254 #ifdef COMPAT_FREEBSD32
3255 					if (cmd == MFI_CMD) {
3256 #endif
3257 						/* Native */
3258 						addr = ioc->mfi_sgl[i].iov_base;
3259 						len = ioc->mfi_sgl[i].iov_len;
3260 #ifdef COMPAT_FREEBSD32
3261 					} else {
3262 						/* 32bit on 64bit */
3263 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3264 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3265 						len = ioc32->mfi_sgl[i].iov_len;
3266 					}
3267 #endif
3268 					error = copyin(addr, temp, len);
3269 					if (error != 0) {
3270 						device_printf(sc->mfi_dev,
3271 						    "Copy in failed\n");
3272 						goto out;
3273 					}
3274 					temp = &temp[len];
3275 				}
3276 			}
3277 		}
3278 
3279 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3280 			locked = mfi_config_lock(sc,
3281 			     cm->cm_frame->dcmd.opcode);
3282 
3283 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3284 			cm->cm_frame->pass.sense_addr_lo =
3285 			    (uint32_t)cm->cm_sense_busaddr;
3286 			cm->cm_frame->pass.sense_addr_hi =
3287 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3288 		}
3289 		mtx_lock(&sc->mfi_io_lock);
3290 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3291 		if (!skip_pre_post) {
3292 			error = mfi_check_command_pre(sc, cm);
3293 			if (error) {
3294 				mtx_unlock(&sc->mfi_io_lock);
3295 				goto out;
3296 			}
3297 		}
3298 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3299 			device_printf(sc->mfi_dev,
3300 			    "Controller polled command failed\n");
3301 			mtx_unlock(&sc->mfi_io_lock);
3302 			goto out;
3303 		}
3304 		if (!skip_pre_post) {
3305 			mfi_check_command_post(sc, cm);
3306 		}
3307 		mtx_unlock(&sc->mfi_io_lock);
3308 
3309 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3310 			temp = data;
3311 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3312 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3313 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3314 #ifdef COMPAT_FREEBSD32
3315 					if (cmd == MFI_CMD) {
3316 #endif
3317 						/* Native */
3318 						addr = ioc->mfi_sgl[i].iov_base;
3319 						len = ioc->mfi_sgl[i].iov_len;
3320 #ifdef COMPAT_FREEBSD32
3321 					} else {
3322 						/* 32bit on 64bit */
3323 						ioc32 = (struct mfi_ioc_packet32 *)ioc;
3324 						addr = PTRIN(ioc32->mfi_sgl[i].iov_base);
3325 						len = ioc32->mfi_sgl[i].iov_len;
3326 					}
3327 #endif
3328 					error = copyout(temp, addr, len);
3329 					if (error != 0) {
3330 						device_printf(sc->mfi_dev,
3331 						    "Copy out failed\n");
3332 						goto out;
3333 					}
3334 					temp = &temp[len];
3335 				}
3336 			}
3337 		}
3338 
3339 		if (ioc->mfi_sense_len) {
3340 			/* get user-space sense ptr then copy out sense */
3341 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3342 			    &sense_ptr.sense_ptr_data[0],
3343 			    sizeof(sense_ptr.sense_ptr_data));
3344 #ifdef COMPAT_FREEBSD32
3345 			if (cmd != MFI_CMD) {
3346 				/*
3347 				 * Not a 64-bit native caller, so zero out
3348 				 * any address above 32 bits. */
3349 				sense_ptr.addr.high = 0;
3350 			}
3351 #endif
3352 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3353 			    ioc->mfi_sense_len);
3354 			if (error != 0) {
3355 				device_printf(sc->mfi_dev,
3356 				    "Copy out failed\n");
3357 				goto out;
3358 			}
3359 		}
3360 
3361 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3362 out:
3363 		mfi_config_unlock(sc, locked);
3364 		if (data)
3365 			free(data, M_MFIBUF);
3366 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3367 			for (i = 0; i < 2; i++) {
3368 				if (sc->kbuff_arr[i]) {
3369 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3370 						bus_dmamap_unload(
3371 						    sc->mfi_kbuff_arr_dmat[i],
3372 						    sc->mfi_kbuff_arr_dmamap[i]
3373 						    );
3374 					if (sc->kbuff_arr[i] != NULL)
3375 						bus_dmamem_free(
3376 						    sc->mfi_kbuff_arr_dmat[i],
3377 						    sc->kbuff_arr[i],
3378 						    sc->mfi_kbuff_arr_dmamap[i]
3379 						    );
3380 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3381 						bus_dma_tag_destroy(
3382 						    sc->mfi_kbuff_arr_dmat[i]);
3383 				}
3384 			}
3385 		}
3386 		if (cm) {
3387 			mtx_lock(&sc->mfi_io_lock);
3388 			mfi_release_command(cm);
3389 			mtx_unlock(&sc->mfi_io_lock);
3390 		}
3391 
3392 		break;
3393 		}
3394 	case MFI_SET_AEN:
3395 		aen = (struct mfi_ioc_aen *)arg;
3396 		mtx_lock(&sc->mfi_io_lock);
3397 		error = mfi_aen_register(sc, aen->aen_seq_num,
3398 		    aen->aen_class_locale);
3399 		mtx_unlock(&sc->mfi_io_lock);
3400 
3401 		break;
3402 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3403 		{
3404 			devclass_t devclass;
3405 			struct mfi_linux_ioc_packet l_ioc;
3406 			int adapter;
3407 
3408 			devclass = devclass_find("mfi");
3409 			if (devclass == NULL)
3410 				return (ENOENT);
3411 
3412 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3413 			if (error)
3414 				return (error);
3415 			adapter = l_ioc.lioc_adapter_no;
3416 			sc = devclass_get_softc(devclass, adapter);
3417 			if (sc == NULL)
3418 				return (ENOENT);
3419 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3420 			    cmd, arg, flag, td));
3421 			break;
3422 		}
3423 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3424 		{
3425 			devclass_t devclass;
3426 			struct mfi_linux_ioc_aen l_aen;
3427 			int adapter;
3428 
3429 			devclass = devclass_find("mfi");
3430 			if (devclass == NULL)
3431 				return (ENOENT);
3432 
3433 			error = copyin(arg, &l_aen, sizeof(l_aen));
3434 			if (error)
3435 				return (error);
3436 			adapter = l_aen.laen_adapter_no;
3437 			sc = devclass_get_softc(devclass, adapter);
3438 			if (sc == NULL)
3439 				return (ENOENT);
3440 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3441 			    cmd, arg, flag, td));
3442 			break;
3443 		}
3444 #ifdef COMPAT_FREEBSD32
3445 	case MFIIO_PASSTHRU32:
3446 		if (!SV_CURPROC_FLAG(SV_ILP32)) {
3447 			error = ENOTTY;
3448 			break;
3449 		}
3450 		iop_swab.ioc_frame	= iop32->ioc_frame;
3451 		iop_swab.buf_size	= iop32->buf_size;
3452 		iop_swab.buf		= PTRIN(iop32->buf);
3453 		iop			= &iop_swab;
3454 		/* FALLTHROUGH */
3455 #endif
3456 	case MFIIO_PASSTHRU:
3457 		error = mfi_user_command(sc, iop);
3458 #ifdef COMPAT_FREEBSD32
3459 		if (cmd == MFIIO_PASSTHRU32)
3460 			iop32->ioc_frame = iop_swab.ioc_frame;
3461 #endif
3462 		break;
3463 	default:
3464 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3465 		error = ENOTTY;
3466 		break;
3467 	}
3468 
3469 	return (error);
3470 }
3471 
3472 static int
3473 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
3474 {
3475 	struct mfi_softc *sc;
3476 	struct mfi_linux_ioc_packet l_ioc;
3477 	struct mfi_linux_ioc_aen l_aen;
3478 	struct mfi_command *cm = NULL;
3479 	struct mfi_aen *mfi_aen_entry;
3480 	union mfi_sense_ptr sense_ptr;
3481 	uint32_t context = 0;
3482 	uint8_t *data = NULL, *temp;
3483 	int i;
3484 	int error, locked;
3485 
3486 	sc = dev->si_drv1;
3487 	error = 0;
3488 	switch (cmd) {
3489 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3490 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3491 		if (error != 0)
3492 			return (error);
3493 
3494 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3495 			return (EINVAL);
3496 		}
3497 
3498 		mtx_lock(&sc->mfi_io_lock);
3499 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3500 			mtx_unlock(&sc->mfi_io_lock);
3501 			return (EBUSY);
3502 		}
3503 		mtx_unlock(&sc->mfi_io_lock);
3504 		locked = 0;
3505 
3506 		/*
3507 		 * save off original context since copying from user
3508 		 * will clobber some data
3509 		 */
3510 		context = cm->cm_frame->header.context;
3511 
3512 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3513 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3514 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3515 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3516 		cm->cm_frame->header.scsi_status = 0;
3517 		cm->cm_frame->header.pad0 = 0;
3518 		if (l_ioc.lioc_sge_count)
3519 			cm->cm_sg =
3520 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3521 		cm->cm_flags = 0;
3522 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3523 			cm->cm_flags |= MFI_CMD_DATAIN;
3524 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3525 			cm->cm_flags |= MFI_CMD_DATAOUT;
3526 		cm->cm_len = cm->cm_frame->header.data_len;
3527 		if (cm->cm_len &&
3528 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3529 			cm->cm_data = data = malloc(cm->cm_len, M_MFIBUF,
3530 			    M_WAITOK | M_ZERO);
3531 		} else {
3532 			cm->cm_data = 0;
3533 		}
3534 
3535 		/* restore header context */
3536 		cm->cm_frame->header.context = context;
3537 
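		/*
		 * For DATAOUT commands, gather the user's scatter/gather
		 * segments into the single contiguous kernel buffer.
		 */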
3538 		temp = data;
3539 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3540 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3541 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3542 				       temp,
3543 				       l_ioc.lioc_sgl[i].iov_len);
3544 				if (error != 0) {
3545 					device_printf(sc->mfi_dev,
3546 					    "Copy in failed\n");
3547 					goto out;
3548 				}
3549 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3550 			}
3551 		}
3552 
3553 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3554 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3555 
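		/*
		 * For SCSI passthrough, point the firmware at the
		 * command's preallocated DMA-able sense buffer, split
		 * into 32-bit halves.
		 */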
3556 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3557 			cm->cm_frame->pass.sense_addr_lo =
3558 			    (uint32_t)cm->cm_sense_busaddr;
3559 			cm->cm_frame->pass.sense_addr_hi =
3560 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3561 		}
3562 
3563 		mtx_lock(&sc->mfi_io_lock);
3564 		error = mfi_check_command_pre(sc, cm);
3565 		if (error) {
3566 			mtx_unlock(&sc->mfi_io_lock);
3567 			goto out;
3568 		}
3569 
3570 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3571 			device_printf(sc->mfi_dev,
3572 			    "Controller polled failed\n");
3573 			mtx_unlock(&sc->mfi_io_lock);
3574 			goto out;
3575 		}
3576 
3577 		mfi_check_command_post(sc, cm);
3578 		mtx_unlock(&sc->mfi_io_lock);
3579 
3580 		temp = data;
3581 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3582 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3583 				error = copyout(temp,
3584 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3585 					l_ioc.lioc_sgl[i].iov_len);
3586 				if (error != 0) {
3587 					device_printf(sc->mfi_dev,
3588 					    "Copy out failed\n");
3589 					goto out;
3590 				}
3591 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3592 			}
3593 		}
3594 
3595 		if (l_ioc.lioc_sense_len) {
3596 			/* get user-space sense ptr then copy out sense */
3597 			bcopy(&((struct mfi_linux_ioc_packet*)arg)
3598 			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
3599 			    &sense_ptr.sense_ptr_data[0],
3600 			    sizeof(sense_ptr.sense_ptr_data));
3601 #ifdef __amd64__
3602 			/*
3603 			 * Only 32-bit Linux processes are supported, so
3604 			 * zero out any address bits above the low 32.
3605 			 */
3606 			sense_ptr.addr.high = 0;
3607 #endif
3608 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3609 			    l_ioc.lioc_sense_len);
3610 			if (error != 0) {
3611 				device_printf(sc->mfi_dev,
3612 				    "Copy out failed\n");
3613 				goto out;
3614 			}
3615 		}
3616 
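		/*
		 * Reflect the firmware's completion status back into the
		 * user's copy of the frame header.
		 */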
3617 		error = copyout(&cm->cm_frame->header.cmd_status,
3618 			&((struct mfi_linux_ioc_packet*)arg)
3619 			->lioc_frame.hdr.cmd_status,
3620 			1);
3621 		if (error != 0) {
3622 			device_printf(sc->mfi_dev,
3623 				      "Copy out failed\n");
3624 			goto out;
3625 		}
3626 
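		/*
		 * Common exit path: drop any config lock taken above,
		 * free the staging buffer, and return the command to the
		 * free pool.
		 */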
3627 out:
3628 		mfi_config_unlock(sc, locked);
3629 		if (data)
3630 			free(data, M_MFIBUF);
3631 		if (cm) {
3632 			mtx_lock(&sc->mfi_io_lock);
3633 			mfi_release_command(cm);
3634 			mtx_unlock(&sc->mfi_io_lock);
3635 		}
3636 
3637 		return (error);
3638 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3639 		error = copyin(arg, &l_aen, sizeof(l_aen));
3640 		if (error != 0)
3641 			return (error);
3642 		printf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
3643 		mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
3644 		    M_WAITOK);
3645 		mtx_lock(&sc->mfi_io_lock);
		/* An M_WAITOK allocation never returns NULL. */
		mfi_aen_entry->p = curproc;
		TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
		    aen_link);
3651 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3652 		    l_aen.laen_class_locale);
3653 
3654 		if (error != 0) {
3655 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3656 			    aen_link);
3657 			free(mfi_aen_entry, M_MFIBUF);
3658 		}
3659 		mtx_unlock(&sc->mfi_io_lock);
3660 
3661 		return (error);
3662 	default:
3663 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3664 		error = ENOENT;
3665 		break;
3666 	}
3667 
3668 	return (error);
3669 }
3670 
3671 static int
3672 mfi_poll(struct cdev *dev, int poll_events, struct thread *td)
3673 {
3674 	struct mfi_softc *sc;
3675 	int revents = 0;
3676 
3677 	sc = dev->si_drv1;
3678 
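	/*
	 * Readable once an AEN has fired; flag an error if no AEN command
	 * is outstanding to ever produce one; otherwise record the thread
	 * for a later wakeup.
	 */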
3679 	if (poll_events & (POLLIN | POLLRDNORM)) {
3680 		if (sc->mfi_aen_triggered != 0) {
3681 			revents |= poll_events & (POLLIN | POLLRDNORM);
3682 			sc->mfi_aen_triggered = 0;
3683 		}
3684 		if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL) {
3685 			revents |= POLLERR;
3686 		}
3687 	}
3688 
3689 	if (revents == 0) {
3690 		if (poll_events & (POLLIN | POLLRDNORM)) {
3691 			sc->mfi_poll_waiting = 1;
3692 			selrecord(td, &sc->mfi_select);
3693 		}
3694 	}
3695 
3696 	return (revents);
3697 }
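
/*
 * Usage sketch (userland, not part of this driver): a management tool
 * waiting for AENs would typically poll(2) the adapter's /dev node,
 * given some descriptor mfi_fd for it:
 *
 *	struct pollfd pfd = { .fd = mfi_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLIN) != 0)
 *		fetch_pending_event(mfi_fd);
 *
 * where fetch_pending_event() is a hypothetical stand-in for whatever
 * event-retrieval ioctl the tool issues next.
 */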
3698 
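/*
 * Debugging aid: walk every mfi adapter in the system and report any
 * busy command that has been outstanding longer than mfi_cmd_timeout
 * seconds.
 */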
3699 static void
3700 mfi_dump_all(void)
3701 {
3702 	struct mfi_softc *sc;
3703 	struct mfi_command *cm;
3704 	devclass_t dc;
3705 	time_t deadline;
3706 	int timedout;
3707 	int i;
3708 
3709 	dc = devclass_find("mfi");
3710 	if (dc == NULL) {
3711 		printf("No mfi dev class\n");
3712 		return;
3713 	}
3714 
3715 	for (i = 0; ; i++) {
3716 		sc = devclass_get_softc(dc, i);
3717 		if (sc == NULL)
3718 			break;
3719 		device_printf(sc->mfi_dev, "Dumping\n\n");
3720 		timedout = 0;
3721 		deadline = time_uptime - mfi_cmd_timeout;
3722 		mtx_lock(&sc->mfi_io_lock);
3723 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3724 			if (cm->cm_timestamp <= deadline) {
3725 				device_printf(sc->mfi_dev,
3726 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3727 				    cm, (int)(time_uptime - cm->cm_timestamp));
3728 				MFI_PRINT_CMD(cm);
3729 				timedout++;
3730 			}
3731 		}
3732 
3733 #if 0
3734 		if (timedout)
3735 			MFI_DUMP_CMDS(sc);
3736 #endif
3737 
3738 		mtx_unlock(&sc->mfi_io_lock);
3739 	}
3740 
3742 }
3743 
3744 static void
3745 mfi_timeout(void *data)
3746 {
3747 	struct mfi_softc *sc = (struct mfi_softc *)data;
3748 	struct mfi_command *cm, *tmp;
3749 	time_t deadline;
3750 	int timedout = 0;
3751 
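	/*
	 * Any busy command stamped at or before this deadline has been
	 * outstanding for at least mfi_cmd_timeout seconds.
	 */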
3752 	deadline = time_uptime - mfi_cmd_timeout;
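	/*
	 * If no adapter reset is already in progress, give the Thunderbolt
	 * fault handler first crack; a zero return from mfi_tbolt_reset()
	 * means the reset path is handling things, so just re-arm the
	 * watchdog and skip this scan.
	 */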
3753 	if (sc->adpreset == 0) {
3754 		if (!mfi_tbolt_reset(sc)) {
3755 			callout_reset(&sc->mfi_watchdog_callout,
3756 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3757 			return;
3758 		}
3759 	}
3760 	mtx_lock(&sc->mfi_io_lock);
3761 	TAILQ_FOREACH_SAFE(cm, &sc->mfi_busy, cm_link, tmp) {
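		/*
		 * The AEN and LD map sync commands are expected to stay
		 * outstanding indefinitely; never treat them as timed out.
		 */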
3762 		if (sc->mfi_aen_cm == cm || sc->mfi_map_sync_cm == cm)
3763 			continue;
3764 		if (cm->cm_timestamp <= deadline) {
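			/*
			 * While an adapter reset is pending and the busy
			 * queue has not yet been re-issued, refresh the
			 * timestamp rather than declare a timeout.
			 */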
3765 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3766 				cm->cm_timestamp = time_uptime;
3767 			} else {
3768 				device_printf(sc->mfi_dev,
3769 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3770 				     cm, (int)(time_uptime - cm->cm_timestamp)
3771 				     );
3772 				MFI_PRINT_CMD(cm);
3773 				MFI_VALIDATE_CMD(sc, cm);
3774 				/*
3775 				 * While commands can get stuck forever, we
3776 				 * do not fail them, as there is no way to
3777 				 * tell whether the controller has actually
3778 				 * processed them or not.
3779 				 *
3780 				 * In addition, it is very likely that
3781 				 * force-failing a command here would cause
3782 				 * a panic, e.g. in UFS.
3783 				 */
3784 				timedout++;
3785 			}
3786 		}
3787 	}
3788 
3789 #if 0
3790 	if (timedout)
3791 		MFI_DUMP_CMDS(sc);
3792 #endif
3793 
3794 	mtx_unlock(&sc->mfi_io_lock);
3795 
3796 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3797 	    mfi_timeout, sc);
3798 
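	/* Debugging hook, normally disabled; enable by hand when needed. */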
3799 	if (0)
3800 		mfi_dump_all();
3802 }
3803