1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2007 LSI Corp.
28  * Copyright (c) 2007 Rajesh Prabhakaran.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  *
52  * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
53  * FreeBSD projects/head_mfi/ r233016
54  */
55 
56 #include "opt_mfi.h"
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
63 #include <sys/bus.h>
64 #include <sys/eventhandler.h>
65 #include <sys/rman.h>
66 #include <sys/bus_dma.h>
67 #include <sys/buf2.h>
68 #include <sys/uio.h>
69 #include <sys/proc.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
73 #include <sys/taskqueue.h>
74 
75 #include <bus/cam/scsi/scsi_all.h>
76 
77 #include <bus/pci/pcivar.h>
78 
79 #include <dev/raid/mfi/mfireg.h>
80 #include <dev/raid/mfi/mfi_ioctl.h>
81 #include <dev/raid/mfi/mfivar.h>
82 
83 static int	mfi_alloc_commands(struct mfi_softc *);
84 static int	mfi_comms_init(struct mfi_softc *);
85 static int	mfi_get_controller_info(struct mfi_softc *);
86 static int	mfi_get_log_state(struct mfi_softc *,
87 		    struct mfi_evt_log_state **);
88 static int	mfi_parse_entries(struct mfi_softc *, int, int);
89 static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
90 		    uint32_t, void **, size_t);
91 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void	mfi_startup(void *arg);
93 static void	mfi_intr(void *arg);
94 static void	mfi_ldprobe(struct mfi_softc *sc);
95 static void	mfi_syspdprobe(struct mfi_softc *sc);
96 static void	mfi_handle_evt(void *context, int pending);
97 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void	mfi_aen_complete(struct mfi_command *);
99 static int	mfi_add_ld(struct mfi_softc *sc, int);
100 static void	mfi_add_ld_complete(struct mfi_command *);
101 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void	mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
104 static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *, struct bio *);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *, struct bio *);
107 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
109 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
110 static void	mfi_timeout(void *);
111 static int	mfi_user_command(struct mfi_softc *,
112 		    struct mfi_ioc_passthru *);
113 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
114 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
115 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
116 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
117 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
118 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
119 static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
120 		    uint32_t frame_cnt);
121 static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
122 		    uint32_t frame_cnt);
static int	mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void	mfi_config_unlock(struct mfi_softc *sc, int locked);
static int	mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void	mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int	mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
128 
129 static void	mfi_filter_detach(struct knote *);
130 static int	mfi_filter_read(struct knote *, long);
131 static int	mfi_filter_write(struct knote *, long);
132 
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
	   0, "event message locale");
138 
139 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
	   0, "event message class");
143 
144 static int	mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
147 	   0, "Max commands");
148 
149 static int	mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
152 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
153 
154 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
155 TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RW, &mfi_cmd_timeout,
157 	   0, "Command timeout (in seconds)");
158 
159 /* Management interface */
160 static d_open_t		mfi_open;
161 static d_close_t	mfi_close;
162 static d_ioctl_t	mfi_ioctl;
163 static d_kqfilter_t	mfi_kqfilter;
164 
165 static struct dev_ops mfi_ops = {
166 	{ "mfi", 0, 0 },
167 	.d_open =	mfi_open,
168 	.d_close =	mfi_close,
169 	.d_ioctl =	mfi_ioctl,
170 	.d_kqfilter =	mfi_kqfilter,
171 };
172 
173 static struct filterops mfi_read_filterops =
174 	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
175 static struct filterops mfi_write_filterops =
176 	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
177 
178 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
179 
180 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
181 struct mfi_skinny_dma_info mfi_skinny;
182 
183 static void
184 mfi_enable_intr_xscale(struct mfi_softc *sc)
185 {
186 	MFI_WRITE4(sc, MFI_OMSK, 0x01);
187 }
188 
189 static void
190 mfi_enable_intr_ppc(struct mfi_softc *sc)
191 {
192 	if (sc->mfi_flags & MFI_FLAGS_1078) {
193 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
194 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
195 	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
196 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
200 	} else {
201 		panic("unknown adapter type");
202 	}
203 }
204 
205 static int32_t
206 mfi_read_fw_status_xscale(struct mfi_softc *sc)
207 {
208 	return MFI_READ4(sc, MFI_OMSG0);
209 }
210 
211 static int32_t
212 mfi_read_fw_status_ppc(struct mfi_softc *sc)
213 {
214 	return MFI_READ4(sc, MFI_OSP0);
215 }
216 
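/*
 * The check_clear handlers return 1 when the pending interrupt was not
 * raised by this adapter, and 0 after acknowledging (clearing) a valid
 * one.
 */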
217 static int
218 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
219 {
220 	int32_t status;
221 
222 	status = MFI_READ4(sc, MFI_OSTS);
223 	if ((status & MFI_OSTS_INTR_VALID) == 0)
224 		return 1;
225 
226 	MFI_WRITE4(sc, MFI_OSTS, status);
227 	return 0;
228 }
229 
230 static int
231 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
232 {
233 	int32_t status;
234 
235 	status = MFI_READ4(sc, MFI_OSTS);
236 	if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
237 	    ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
238 	    ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
239 		return 1;
240 
241 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 		MFI_WRITE4(sc, MFI_OSTS, status);
243 	else
244 		MFI_WRITE4(sc, MFI_ODCR0, status);
245 	return 0;
246 }
247 
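/*
 * Command frames are 64-byte aligned, so the low bits of the frame bus
 * address are free to carry the frame count (plus a low flag bit on the
 * ppc flavor) when a command is posted to the inbound queue port.
 */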
248 static void
249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
250 {
	MFI_WRITE4(sc, MFI_IQP, (bus_add >> 3) | frame_cnt);
252 }
253 
254 static void
255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
256 {
257 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
259 		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
260 	} else {
261 		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
262 	}
263 }
264 
265 int
266 mfi_transition_firmware(struct mfi_softc *sc)
267 {
268 	uint32_t fw_state, cur_state;
269 	int max_wait, i;
270 	uint32_t cur_abs_reg_val = 0;
271 	uint32_t prev_abs_reg_val = 0;
272 
273 	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
274 	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
275 	while (fw_state != MFI_FWSTATE_READY) {
276 		if (bootverbose)
277 			device_printf(sc->mfi_dev, "Waiting for firmware to "
278 			"become ready\n");
279 		cur_state = fw_state;
280 		switch (fw_state) {
281 		case MFI_FWSTATE_FAULT:
282 			device_printf(sc->mfi_dev, "Firmware fault\n");
283 			return (ENXIO);
284 		case MFI_FWSTATE_WAIT_HANDSHAKE:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
286 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
287 			else
288 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
289 			max_wait = MFI_RESET_WAIT_TIME;
290 			break;
291 		case MFI_FWSTATE_OPERATIONAL:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
293 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
294 			else
295 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
296 			max_wait = MFI_RESET_WAIT_TIME;
297 			break;
298 		case MFI_FWSTATE_UNDEFINED:
299 		case MFI_FWSTATE_BB_INIT:
300 			max_wait = MFI_RESET_WAIT_TIME;
301 			break;
302 		case MFI_FWSTATE_FW_INIT_2:
303 			max_wait = MFI_RESET_WAIT_TIME;
304 			break;
305 		case MFI_FWSTATE_FW_INIT:
306 		case MFI_FWSTATE_FLUSH_CACHE:
307 			max_wait = MFI_RESET_WAIT_TIME;
308 			break;
309 		case MFI_FWSTATE_DEVICE_SCAN:
310 			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
311 			prev_abs_reg_val = cur_abs_reg_val;
312 			break;
313 		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & (MFI_FLAGS_SKINNY | MFI_FLAGS_TBOLT))
315 			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
316 			else
317 			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
318 			max_wait = MFI_RESET_WAIT_TIME;
319 			break;
320 		default:
321 			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
322 			    fw_state);
323 			return (ENXIO);
324 		}
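		/*
		 * Poll in 100ms steps: max_wait is in seconds, so
		 * max_wait * 10 iterations of DELAY(100000) give the
		 * intended timeout.
		 */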
325 		for (i = 0; i < (max_wait * 10); i++) {
326 			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
327 			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
328 			if (fw_state == cur_state)
329 				DELAY(100000);
330 			else
331 				break;
332 		}
333 		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
334 			/* Check the device scanning progress */
335 			if (prev_abs_reg_val != cur_abs_reg_val)
336 				continue;
337 		}
338 		if (fw_state == cur_state) {
339 			device_printf(sc->mfi_dev, "Firmware stuck in state "
340 			    "%#x\n", fw_state);
341 			return (ENXIO);
342 		}
343 	}
344 	return (0);
345 }
346 
347 static void
348 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
349 {
350 	bus_addr_t *addr;
351 
352 	addr = arg;
353 	*addr = segs[0].ds_addr;
354 }
355 
356 int
357 mfi_attach(struct mfi_softc *sc)
358 {
359 	uint32_t status;
360 	int error, commsz, framessz, sensesz;
361 	int frames, unit, max_fw_sge;
362 	uint32_t tb_mem_size = 0;
363 
364 	if (sc == NULL)
		return (EINVAL);
366 
367 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
368 	    MEGASAS_VERSION);
369 
370 	lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
371 	lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
372 	TAILQ_INIT(&sc->mfi_ld_tqh);
373 	TAILQ_INIT(&sc->mfi_syspd_tqh);
374 	TAILQ_INIT(&sc->mfi_evt_queue);
375 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
376 	TAILQ_INIT(&sc->mfi_aen_pids);
377 	TAILQ_INIT(&sc->mfi_cam_ccbq);
378 
379 	mfi_initq_free(sc);
380 	mfi_initq_ready(sc);
381 	mfi_initq_busy(sc);
382 	mfi_initq_bio(sc);
383 
384 	sc->adpreset = 0;
385 	sc->last_seq_num = 0;
386 	sc->disableOnlineCtrlReset = 1;
387 	sc->issuepend_done = 1;
388 	sc->hw_crit_error = 0;
389 
390 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
391 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
392 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
393 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
394 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
395 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
396 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
397 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
398 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
399 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
400 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
401 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
402 		sc->mfi_tbolt = 1;
403 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
404 	} else {
405 		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
406 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
407 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
408 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
409 	}
412 	/* Before we get too far, see if the firmware is working */
413 	if ((error = mfi_transition_firmware(sc)) != 0) {
414 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
415 		    "error %d\n", error);
416 		return (ENXIO);
417 	}
418 
419 	/* Start: LSIP200113393 */
420 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
421 				1, 0,			/* algnmnt, boundary */
422 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 				BUS_SPACE_MAXADDR,	/* highaddr */
424 				NULL, NULL,		/* filter, filterarg */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t),	/* maxsize */
				1,			/* nsegments */
				MEGASAS_MAX_NAME * sizeof(bus_addr_t),	/* maxsegsize */
428 				0,			/* flags */
429 				&sc->verbuf_h_dmat)) {
430 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
431 		return (ENOMEM);
432 	}
433 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
434 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
435 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
436 		return (ENOMEM);
437 	}
438 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
439 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
440 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
441 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
442 	/* End: LSIP200113393 */
443 
444 	/*
445 	 * Get information needed for sizing the contiguous memory for the
446 	 * frame pool.  Size down the sgl parameter since we know that
447 	 * we will never need more than what's required for MAXPHYS.
448 	 * It would be nice if these constants were available at runtime
449 	 * instead of compile time.
450 	 */
451 	status = sc->mfi_read_fw_status(sc);
452 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
453 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
454 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
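	/*
	 * Example (values depend on the platform): with 4KiB pages and a
	 * 128KiB MFI_MAXPHYS, at most 33 S/G entries are needed (32 pages
	 * plus one extra for a transfer that is not page aligned), so the
	 * firmware's limit is clamped to 33.
	 */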
455 
	/* For ThunderBolt support, get the contiguous memory */
457 
458 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
459 		mfi_tbolt_init_globals(sc);
460 		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
461 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
462 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
463 
464 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
465 				1, 0,			/* algnmnt, boundary */
466 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
467 				BUS_SPACE_MAXADDR,	/* highaddr */
468 				NULL, NULL,		/* filter, filterarg */
469 				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
471 				tb_mem_size,		/* maxsegsize */
472 				0,			/* flags */
473 				&sc->mfi_tb_dmat)) {
474 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
475 			return (ENOMEM);
476 		}
		if (bus_dmamem_alloc(sc->mfi_tb_dmat,
		    (void **)&sc->request_message_pool,
		    BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
479 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
480 			return (ENOMEM);
481 		}
482 		bzero(sc->request_message_pool, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
		    sc->request_message_pool, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_busaddr, 0);
485 
486 		/* For ThunderBolt memory init */
487 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
488 				0x100, 0,		/* alignmnt, boundary */
489 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 				BUS_SPACE_MAXADDR,	/* highaddr */
491 				NULL, NULL,		/* filter, filterarg */
492 				MFI_FRAME_SIZE,		/* maxsize */
				1,			/* nsegments */
494 				MFI_FRAME_SIZE,		/* maxsegsize */
495 				0,			/* flags */
496 				&sc->mfi_tb_init_dmat)) {
497 		device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
498 		return (ENOMEM);
499 		}
500 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
501 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
502 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
503 			return (ENOMEM);
504 		}
505 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
		    sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
		    &sc->mfi_tb_init_busaddr, 0);
509 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
510 		    tb_mem_size)) {
511 			device_printf(sc->mfi_dev,
512 			    "Thunderbolt pool preparation error\n");
			return (ENOMEM);	/* was "return 0", masking the failure */
514 		}
515 
		/*
		 * Allocate a DMA mapping for the MPI2 IOC Init descriptor.
		 * It is kept separate from the request and reply descriptor
		 * allocations above to avoid confusion later.
		 */
521 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
522 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
523 				1, 0,			/* algnmnt, boundary */
524 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
525 				BUS_SPACE_MAXADDR,	/* highaddr */
526 				NULL, NULL,		/* filter, filterarg */
527 				tb_mem_size,		/* maxsize */
				1,			/* nsegments */
529 				tb_mem_size,		/* maxsegsize */
530 				0,			/* flags */
531 				&sc->mfi_tb_ioc_init_dmat)) {
532 			device_printf(sc->mfi_dev,
533 			    "Cannot allocate comms DMA tag\n");
534 			return (ENOMEM);
535 		}
536 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
537 		    (void **)&sc->mfi_tb_ioc_init_desc,
538 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
539 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
540 			return (ENOMEM);
541 		}
542 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat,
		    sc->mfi_tb_ioc_init_dmamap,
		    sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
		    &sc->mfi_tb_ioc_init_busaddr, 0);
546 	}
547 	/*
	 * Create the DMA tag for data buffers.  Used both for block I/O
549 	 * and for various internal data queries.
550 	 */
551 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
552 				1, 0,			/* algnmnt, boundary */
553 				BUS_SPACE_MAXADDR,	/* lowaddr */
554 				BUS_SPACE_MAXADDR,	/* highaddr */
555 				NULL, NULL,		/* filter, filterarg */
556 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
557 				sc->mfi_max_sge,	/* nsegments */
558 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
559 				BUS_DMA_ALLOCNOW,	/* flags */
560 				&sc->mfi_buffer_dmat)) {
561 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
562 		return (ENOMEM);
563 	}
564 
565 	/*
566 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
567 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
	 * entry, so the calculated size here will be 1 more than
569 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
570 	 */
571 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
572 	    sizeof(struct mfi_hwcomms);
573 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
574 				1, 0,			/* algnmnt, boundary */
575 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
576 				BUS_SPACE_MAXADDR,	/* highaddr */
577 				NULL, NULL,		/* filter, filterarg */
578 				commsz,			/* maxsize */
				1,			/* nsegments */
580 				commsz,			/* maxsegsize */
581 				0,			/* flags */
582 				&sc->mfi_comms_dmat)) {
583 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
584 		return (ENOMEM);
585 	}
586 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
587 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
588 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
589 		return (ENOMEM);
590 	}
591 	bzero(sc->mfi_comms, commsz);
592 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
593 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
594 	/*
595 	 * Allocate DMA memory for the command frames.  Keep them in the
596 	 * lower 4GB for efficiency.  Calculate the size of the commands at
597 	 * the same time; each command is one 64 byte frame plus a set of
	 * additional frames for holding sg lists or other data.
599 	 * The assumption here is that the SG list will start at the second
600 	 * frame and not use the unused bytes in the first frame.  While this
601 	 * isn't technically correct, it simplifies the calculation and allows
602 	 * for command frames that might be larger than an mfi_io_frame.
603 	 */
604 	if (sizeof(bus_addr_t) == 8) {
605 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
606 		sc->mfi_flags |= MFI_FLAGS_SG64;
607 	} else {
608 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
609 	}
610 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
611 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
612 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
613 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
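	/*
	 * Worked example (numbers depend on the build): with 12-byte
	 * 64-bit S/G entries and 33 entries per command, the list needs
	 * 396 bytes, so frames = (396 - 1) / 64 + 2 = 8, i.e. one 64-byte
	 * command frame plus seven frames of S/G space (512 bytes total).
	 */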
614 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
615 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
616 				64, 0,			/* algnmnt, boundary */
617 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
618 				BUS_SPACE_MAXADDR,	/* highaddr */
619 				NULL, NULL,		/* filter, filterarg */
620 				framessz,		/* maxsize */
621 				1,			/* nsegments */
622 				framessz,		/* maxsegsize */
623 				0,			/* flags */
624 				&sc->mfi_frames_dmat)) {
625 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
626 		return (ENOMEM);
627 	}
628 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
629 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
630 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
631 		return (ENOMEM);
632 	}
633 	bzero(sc->mfi_frames, framessz);
634 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
635 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
636 	/*
637 	 * Allocate DMA memory for the frame sense data.  Keep them in the
638 	 * lower 4GB for efficiency
639 	 */
640 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
641 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
642 				4, 0,			/* algnmnt, boundary */
643 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
644 				BUS_SPACE_MAXADDR,	/* highaddr */
645 				NULL, NULL,		/* filter, filterarg */
646 				sensesz,		/* maxsize */
647 				1,			/* nsegments */
648 				sensesz,		/* maxsegsize */
649 				0,			/* flags */
650 				&sc->mfi_sense_dmat)) {
651 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
652 		return (ENOMEM);
653 	}
654 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
655 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
656 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
657 		return (ENOMEM);
658 	}
659 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
660 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
661 	if ((error = mfi_alloc_commands(sc)) != 0)
662 		return (error);
663 
664 	/*
665 	 * Before moving the FW to operational state, check whether
666 	 * hostmemory is required by the FW or not
667 	 */
668 
669 	/* ThunderBolt MFI_IOC2 INIT */
670 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
671 		sc->mfi_disable_intr(sc);
672 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
			device_printf(sc->mfi_dev,
			    "TB init failed with error %d\n", error);
			return (error);
676 		}
677 
678 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
			return (error);
680 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
681 			mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
682 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
683 			return (EINVAL);
684 		}
685 		sc->mfi_enable_intr(sc);
686 		sc->map_id = 0;
687 	} else {
688 		if ((error = mfi_comms_init(sc)) != 0)
689 			return (error);
690 
691 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
692 			mfi_intr, sc, &sc->mfi_intr, NULL)) {
693 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
694 			return (EINVAL);
695 		}
696 		sc->mfi_enable_intr(sc);
697 	}
698 	if ((error = mfi_get_controller_info(sc)) != 0)
699 		return (error);
700 	sc->disableOnlineCtrlReset = 0;
701 
702 	/* Register a config hook to probe the bus for arrays */
703 	sc->mfi_ich.ich_func = mfi_startup;
704 	sc->mfi_ich.ich_arg = sc;
705 	sc->mfi_ich.ich_desc = "mfi";
706 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
707 		device_printf(sc->mfi_dev, "Cannot establish configuration "
708 		    "hook\n");
709 		return (EINVAL);
710 	}
	/*
	 * mfi_aen_setup() handles its own locking; the io lock is not
	 * held here.
	 */
	if ((error = mfi_aen_setup(sc, 0)) != 0)
		return (error);
715 
716 	/*
717 	 * Register a shutdown handler.
718 	 */
719 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
720 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
721 		device_printf(sc->mfi_dev, "Warning: shutdown event "
722 		    "registration failed\n");
723 	}
724 
725 	/*
726 	 * Create the control device for doing management
727 	 */
728 	unit = device_get_unit(sc->mfi_dev);
729 	sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
730 	    0640, "mfi%d", unit);
731 	if (unit == 0)
732 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
733 	if (sc->mfi_cdev != NULL)
734 		sc->mfi_cdev->si_drv1 = sc;
735 	sysctl_ctx_init(&sc->mfi_sysctl_ctx);
736 	sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
737 	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
738 	    device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
739 	if (sc->mfi_sysctl_tree == NULL) {
740 		device_printf(sc->mfi_dev, "can't add sysctl node\n");
741 		return (EINVAL);
742 	}
743 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
744 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
745 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
746 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
747 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
748 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
749 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
750 	    &sc->mfi_keep_deleted_volumes, 0,
751 	    "Don't detach the mfid device for a busy volume that is deleted");
752 
753 	device_add_child(sc->mfi_dev, "mfip", -1);
754 	bus_generic_attach(sc->mfi_dev);
755 
756 	/* Start the timeout watchdog */
757 	callout_init_mp(&sc->mfi_watchdog_callout);
758 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
759 	    mfi_timeout, sc);
760 
761 	return (0);
762 }
763 
764 static int
765 mfi_alloc_commands(struct mfi_softc *sc)
766 {
767 	struct mfi_command *cm;
768 	int i, ncmds;
769 
770 	/*
771 	 * XXX Should we allocate all the commands up front, or allocate on
772 	 * demand later like 'aac' does?
773 	 */
774 	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
775 	if (bootverbose)
776 		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
777 		   "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
778 
779 	sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
780 	    M_WAITOK | M_ZERO);
781 
782 	for (i = 0; i < ncmds; i++) {
783 		cm = &sc->mfi_commands[i];
784 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
785 		    sc->mfi_cmd_size * i);
786 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
787 		    sc->mfi_cmd_size * i;
788 		cm->cm_frame->header.context = i;
789 		cm->cm_sense = &sc->mfi_sense[i];
790 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
791 		cm->cm_sc = sc;
792 		cm->cm_index = i;
793 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
794 		    &cm->cm_dmamap) == 0) {
795 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
796 			mfi_release_command(cm);
797 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		} else {
			break;
		}
801 		sc->mfi_total_cmds++;
802 	}
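
	/*
	 * A dmamap_create failure simply ends the loop early;
	 * mfi_total_cmds records how many commands were actually set up.
	 */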
803 
804 	return (0);
805 }
806 
807 void
808 mfi_release_command(struct mfi_command *cm)
809 {
810 	struct mfi_frame_header *hdr;
811 	uint32_t *hdr_data;
812 
813 	mfi_lockassert(&cm->cm_sc->mfi_io_lock);
814 
815 	/*
816 	 * Zero out the important fields of the frame, but make sure the
817 	 * context field is preserved.  For efficiency, handle the fields
818 	 * as 32 bit words.  Clear out the first S/G entry too for safety.
819 	 */
820 	hdr = &cm->cm_frame->header;
821 	if (cm->cm_data != NULL && hdr->sg_count) {
822 		cm->cm_sg->sg32[0].len = 0;
823 		cm->cm_sg->sg32[0].addr = 0;
824 	}
825 
826 	hdr_data = (uint32_t *)cm->cm_frame;
827 	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
828 	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
829 	hdr_data[4] = 0;	/* flags, timeout */
830 	hdr_data[5] = 0;	/* data_len */
831 
832 	cm->cm_extra_frames = 0;
833 	cm->cm_flags = 0;
834 	cm->cm_complete = NULL;
835 	cm->cm_private = NULL;
836 	cm->cm_data = NULL;
	cm->cm_sg = NULL;
838 	cm->cm_total_frame_size = 0;
839 	cm->retry_for_fw_reset = 0;
840 
841 	mfi_enqueue_free(cm);
842 }
843 
844 static int
845 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
846     uint32_t opcode, void **bufp, size_t bufsize)
847 {
848 	struct mfi_command *cm;
849 	struct mfi_dcmd_frame *dcmd;
850 	void *buf = NULL;
851 	uint32_t context = 0;
852 
853 	mfi_lockassert(&sc->mfi_io_lock);
854 
855 	cm = mfi_dequeue_free(sc);
856 	if (cm == NULL)
857 		return (EBUSY);
858 
859 	/* Zero out the MFI frame */
860 	context = cm->cm_frame->header.context;
861 	bzero(cm->cm_frame, sizeof(union mfi_frame));
862 	cm->cm_frame->header.context = context;
863 
864 	if ((bufsize > 0) && (bufp != NULL)) {
865 		if (*bufp == NULL) {
866 			buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
867 			if (buf == NULL) {
868 				mfi_release_command(cm);
869 				return (ENOMEM);
870 			}
871 			*bufp = buf;
872 		} else {
873 			buf = *bufp;
874 		}
875 	}
876 
877 	dcmd =  &cm->cm_frame->dcmd;
878 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
879 	dcmd->header.cmd = MFI_CMD_DCMD;
880 	dcmd->header.timeout = 0;
881 	dcmd->header.flags = 0;
882 	dcmd->header.data_len = bufsize;
883 	dcmd->header.scsi_status = 0;
884 	dcmd->opcode = opcode;
885 	cm->cm_sg = &dcmd->sgl;
886 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
887 	cm->cm_flags = 0;
888 	cm->cm_data = buf;
889 	cm->cm_private = buf;
890 	cm->cm_len = bufsize;
891 
892 	*cmp = cm;
893 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
894 		*bufp = buf;
895 	return (0);
896 }
897 
898 static int
899 mfi_comms_init(struct mfi_softc *sc)
900 {
901 	struct mfi_command *cm;
902 	struct mfi_init_frame *init;
903 	struct mfi_init_qinfo *qinfo;
904 	int error;
905 	uint32_t context = 0;
906 
907 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (EBUSY);
	}
910 
911 	/* Zero out the MFI frame */
912 	context = cm->cm_frame->header.context;
913 	bzero(cm->cm_frame, sizeof(union mfi_frame));
914 	cm->cm_frame->header.context = context;
915 
916 	/*
917 	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object.
919 	 */
920 	init = &cm->cm_frame->init;
921 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
922 
923 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
924 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
925 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
926 	    offsetof(struct mfi_hwcomms, hw_reply_q);
927 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
928 	    offsetof(struct mfi_hwcomms, hw_pi);
929 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
930 	    offsetof(struct mfi_hwcomms, hw_ci);
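
	/*
	 * The firmware posts completed command contexts into hw_reply_q
	 * and advances the producer index hw_pi; the driver consumes
	 * entries and advances hw_ci (see mfi_intr()).
	 */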
931 
932 	init->header.cmd = MFI_CMD_INIT;
933 	init->header.data_len = sizeof(struct mfi_init_qinfo);
934 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
935 	cm->cm_data = NULL;
936 	cm->cm_flags = MFI_CMD_POLLED;
937 
938 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
939 		device_printf(sc->mfi_dev, "failed to send init command\n");
940 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
941 		return (error);
942 	}
943 	mfi_release_command(cm);
944 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
945 
946 	return (0);
947 }
948 
949 static int
950 mfi_get_controller_info(struct mfi_softc *sc)
951 {
952 	struct mfi_command *cm = NULL;
953 	struct mfi_ctrl_info *ci = NULL;
954 	uint32_t max_sectors_1, max_sectors_2;
955 	int error;
956 
957 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
958 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
959 	    (void **)&ci, sizeof(*ci));
960 	if (error)
961 		goto out;
962 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
963 
964 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
965 		device_printf(sc->mfi_dev, "Failed to get controller info\n");
966 		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
967 		    MFI_SECTOR_LEN;
968 		error = 0;
969 		goto out;
970 	}
971 
972 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
973 	    BUS_DMASYNC_POSTREAD);
974 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
975 
976 	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
977 	max_sectors_2 = ci->max_request_size;
978 	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
979 	sc->disableOnlineCtrlReset =
980 	    ci->properties.OnOffProperties.disableOnlineCtrlReset;
981 
982 out:
983 	if (ci)
984 		kfree(ci, M_MFIBUF);
985 	if (cm)
986 		mfi_release_command(cm);
987 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
988 	return (error);
989 }
990 
991 static int
992 mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
993 {
994 	struct mfi_command *cm = NULL;
995 	int error;
996 
997 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
998 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
999 	    (void **)log_state, sizeof(**log_state));
1000 	if (error)
1001 		goto out;
1002 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1003 
1004 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1005 		device_printf(sc->mfi_dev, "Failed to get log state\n");
1006 		goto out;
1007 	}
1008 
1009 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1010 	    BUS_DMASYNC_POSTREAD);
1011 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1012 
1013 out:
1014 	if (cm)
1015 		mfi_release_command(cm);
1016 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1017 
1018 	return (error);
1019 }
1020 
1021 int
1022 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1023 {
1024 	struct mfi_evt_log_state *log_state = NULL;
1025 	union mfi_evt class_locale;
1026 	int error = 0;
1027 	uint32_t seq;
1028 
1029 	class_locale.members.reserved = 0;
1030 	class_locale.members.locale = mfi_event_locale;
1031 	class_locale.members.evt_class  = mfi_event_class;
1032 
1033 	if (seq_start == 0) {
		error = mfi_get_log_state(sc, &log_state);
		if (error) {
			if (log_state)
				kfree(log_state, M_MFIBUF);
			return (error);
		}
		/* Only dereference log_state once we know the call worked */
		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1041 
1042 		/*
1043 		 * Walk through any events that fired since the last
1044 		 * shutdown.
1045 		 */
1046 		mfi_parse_entries(sc, log_state->shutdown_seq_num,
1047 		    log_state->newest_seq_num);
1048 		seq = log_state->newest_seq_num;
1049 	} else
1050 		seq = seq_start;
	error = mfi_aen_register(sc, seq, class_locale.word);
1052 	if (log_state != NULL)
1053 		kfree(log_state, M_MFIBUF);
1054 
	return (error);
1056 }
1057 
1058 int
1059 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1060 {
1061 
1062 	mfi_lockassert(&sc->mfi_io_lock);
	cm->cm_complete = NULL;

1066 	/*
1067 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status.
1069 	 */
1070 	if (cm->cm_frame->dcmd.opcode == 0) {
1071 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1072 		cm->cm_error = 0;
1073 		return (cm->cm_error);
1074 	}
1075 	mfi_enqueue_ready(cm);
1076 	mfi_startio(sc);
1077 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1078 		lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
1079 	return (cm->cm_error);
1080 }
1081 
1082 void
1083 mfi_free(struct mfi_softc *sc)
1084 {
1085 	struct mfi_command *cm;
1086 	int i;
1087 
1088 	callout_stop(&sc->mfi_watchdog_callout); /* XXX callout_drain() */
1089 
1090 	if (sc->mfi_cdev != NULL)
1091 		destroy_dev(sc->mfi_cdev);
1092 	dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));
1093 
1094 	if (sc->mfi_total_cmds != 0) {
1095 		for (i = 0; i < sc->mfi_total_cmds; i++) {
1096 			cm = &sc->mfi_commands[i];
1097 			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
1098 		}
1099 		kfree(sc->mfi_commands, M_MFIBUF);
1100 	}
1101 
1102 	if (sc->mfi_intr)
1103 		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
1104 	if (sc->mfi_irq != NULL)
1105 		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
1106 		    sc->mfi_irq);
1107 
1108 	if (sc->mfi_sense_busaddr != 0)
1109 		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
1110 	if (sc->mfi_sense != NULL)
1111 		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
1112 		    sc->mfi_sense_dmamap);
1113 	if (sc->mfi_sense_dmat != NULL)
1114 		bus_dma_tag_destroy(sc->mfi_sense_dmat);
1115 
1116 	if (sc->mfi_frames_busaddr != 0)
1117 		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
1118 	if (sc->mfi_frames != NULL)
1119 		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
1120 		    sc->mfi_frames_dmamap);
1121 	if (sc->mfi_frames_dmat != NULL)
1122 		bus_dma_tag_destroy(sc->mfi_frames_dmat);
1123 
1124 	if (sc->mfi_comms_busaddr != 0)
1125 		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
1126 	if (sc->mfi_comms != NULL)
1127 		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
1128 		    sc->mfi_comms_dmamap);
1129 	if (sc->mfi_comms_dmat != NULL)
1130 		bus_dma_tag_destroy(sc->mfi_comms_dmat);
1131 
1132 	/* ThunderBolt contiguous memory free here */
1133 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
1134 		if (sc->mfi_tb_busaddr != 0)
1135 			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
1136 		if (sc->request_message_pool != NULL)
1137 			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
1138 			    sc->mfi_tb_dmamap);
1139 		if (sc->mfi_tb_dmat != NULL)
1140 			bus_dma_tag_destroy(sc->mfi_tb_dmat);
1141 
1142 		/* Version buffer memory free */
1143 		/* Start LSIP200113393 */
1144 		if (sc->verbuf_h_busaddr != 0)
1145 			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
1146 		if (sc->verbuf != NULL)
1147 			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
1148 			    sc->verbuf_h_dmamap);
1149 		if (sc->verbuf_h_dmat != NULL)
1150 			bus_dma_tag_destroy(sc->verbuf_h_dmat);
1151 
1152 		/* End LSIP200113393 */
1153 		/* ThunderBolt INIT packet memory Free */
1154 		if (sc->mfi_tb_init_busaddr != 0)
1155 			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
1156 		if (sc->mfi_tb_init != NULL)
1157 			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
1158 			    sc->mfi_tb_init_dmamap);
1159 		if (sc->mfi_tb_init_dmat != NULL)
1160 			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);
1161 
1162 		/* ThunderBolt IOC Init Desc memory free here */
1163 		if (sc->mfi_tb_ioc_init_busaddr != 0)
1164 			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
1165 			    sc->mfi_tb_ioc_init_dmamap);
1166 		if (sc->mfi_tb_ioc_init_desc != NULL)
1167 			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
1168 			    sc->mfi_tb_ioc_init_desc,
1169 			    sc->mfi_tb_ioc_init_dmamap);
1170 		if (sc->mfi_tb_ioc_init_dmat != NULL)
1171 			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
1172 		for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
1173 			if (sc->mfi_cmd_pool_tbolt != NULL) {
1174 				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
1175 					kfree(sc->mfi_cmd_pool_tbolt[i],
1176 					    M_MFIBUF);
1177 					sc->mfi_cmd_pool_tbolt[i] = NULL;
1178 				}
1179 			}
1180 		}
1181 		if (sc->mfi_cmd_pool_tbolt != NULL) {
1182 			kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
1183 			sc->mfi_cmd_pool_tbolt = NULL;
1184 		}
1185 		if (sc->request_desc_pool != NULL) {
1186 			kfree(sc->request_desc_pool, M_MFIBUF);
1187 			sc->request_desc_pool = NULL;
1188 		}
1189 	}
1190 	if (sc->mfi_buffer_dmat != NULL)
1191 		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
1192 	if (sc->mfi_parent_dmat != NULL)
1193 		bus_dma_tag_destroy(sc->mfi_parent_dmat);
1194 
1195 	if (sc->mfi_sysctl_tree != NULL)
1196 		sysctl_ctx_free(&sc->mfi_sysctl_ctx);
1197 
1198 #if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
1199 	if (mtx_initialized(&sc->mfi_io_lock))
1200 #endif
1201 	{
		lockuninit(&sc->mfi_io_lock);
		lockuninit(&sc->mfi_config_lock);
1204 	}
1205 
1206 	return;
1207 }
1208 
1209 static void
1210 mfi_startup(void *arg)
1211 {
1212 	struct mfi_softc *sc;
1213 
1214 	sc = (struct mfi_softc *)arg;
1215 
1216 	config_intrhook_disestablish(&sc->mfi_ich);
1217 
1218 	sc->mfi_enable_intr(sc);
1219 	lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1220 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1221 	mfi_ldprobe(sc);
1222 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1223 		mfi_syspdprobe(sc);
1224 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1225 	lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1226 }
1227 
1228 static void
1229 mfi_intr(void *arg)
1230 {
1231 	struct mfi_softc *sc;
1232 	struct mfi_command *cm;
1233 	uint32_t pi, ci, context;
1234 
1235 	sc = (struct mfi_softc *)arg;
1236 
1237 	if (sc->mfi_check_clear_intr(sc))
1238 		return;
1239 
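/*
 * hw_pi is the firmware's producer index and hw_ci the driver's consumer
 * index into the reply queue of mfi_max_fw_cmds + 1 entries; each entry
 * holds the context (command index) of a completed command.
 */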
1240 restart:
1241 	pi = sc->mfi_comms->hw_pi;
1242 	ci = sc->mfi_comms->hw_ci;
1243 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1244 	while (ci != pi) {
1245 		context = sc->mfi_comms->hw_reply_q[ci];
1246 		if (context < sc->mfi_max_fw_cmds) {
1247 			cm = &sc->mfi_commands[context];
1248 			mfi_remove_busy(cm);
1249 			cm->cm_error = 0;
1250 			mfi_complete(sc, cm);
1251 		}
1252 		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
1253 			ci = 0;
1254 		}
1255 	}
1256 
1257 	sc->mfi_comms->hw_ci = ci;
1258 
	/* Give deferred I/O a chance to run */
1260 	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
1261 		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
1262 	mfi_startio(sc);
1263 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1264 
1265 	/*
1266 	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
1268 	 */
1269 	(void)sc->mfi_read_fw_status(sc);
1270 	if (pi != sc->mfi_comms->hw_pi)
1271 		goto restart;
1272 
1273 	return;
1274 }
1275 
1276 int
1277 mfi_shutdown(struct mfi_softc *sc)
1278 {
1279 	struct mfi_dcmd_frame *dcmd;
1280 	struct mfi_command *cm;
1281 	int error;
1282 
1283 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1284 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
1285 	if (error) {
1286 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1287 		return (error);
1288 	}
1289 
1290 	if (sc->mfi_aen_cm != NULL)
1291 		mfi_abort(sc, sc->mfi_aen_cm);
1292 
1293 	if (sc->map_update_cmd != NULL)
1294 		mfi_abort(sc, sc->map_update_cmd);
1295 
1296 	dcmd = &cm->cm_frame->dcmd;
1297 	dcmd->header.flags = MFI_FRAME_DIR_NONE;
1298 	cm->cm_flags = MFI_CMD_POLLED;
1299 	cm->cm_data = NULL;
1300 
1301 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
1302 		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
1303 	}
1304 
1305 	mfi_release_command(cm);
1306 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1307 	return (error);
1308 }
1309 
1310 static void
1311 mfi_syspdprobe(struct mfi_softc *sc)
1312 {
1313 	struct mfi_frame_header *hdr;
1314 	struct mfi_command *cm = NULL;
1315 	struct mfi_pd_list *pdlist = NULL;
1316 	struct mfi_system_pd *syspd, *tmp;
1317 	int error, i, found;
1318 
1319 	mfi_lockassert(&sc->mfi_config_lock);
1320 	mfi_lockassert(&sc->mfi_io_lock);
1321 	/* Add SYSTEM PD's */
1322 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1323 	    (void **)&pdlist, sizeof(*pdlist));
1324 	if (error) {
1325 		device_printf(sc->mfi_dev,
1326 		    "Error while forming SYSTEM PD list\n");
1327 		goto out;
1328 	}
1329 
1330 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1331 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1332 	cm->cm_frame->dcmd.mbox[1] = 0;
1333 	if (mfi_mapcmd(sc, cm) != 0) {
1334 		device_printf(sc->mfi_dev,
1335 		    "Failed to get syspd device listing\n");
1336 		goto out;
1337 	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1339 	    BUS_DMASYNC_POSTREAD);
1340 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1341 	hdr = &cm->cm_frame->header;
1342 	if (hdr->cmd_status != MFI_STAT_OK) {
1343 		device_printf(sc->mfi_dev,
1344 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1345 		goto out;
1346 	}
1347 	/* Get each PD and add it to the system */
1348 	for (i = 0; i < pdlist->count; i++) {
1349 		if (pdlist->addr[i].device_id ==
1350 		    pdlist->addr[i].encl_device_id)
1351 			continue;
1352 		found = 0;
1353 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1354 			if (syspd->pd_id == pdlist->addr[i].device_id)
1355 				found = 1;
1356 		}
1357 		if (found == 0)
1358 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1359 	}
1360 	/* Delete SYSPD's whose state has been changed */
1361 	TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1362 		found = 0;
1363 		for (i = 0; i < pdlist->count; i++) {
1364 			if (syspd->pd_id == pdlist->addr[i].device_id)
1365 				found = 1;
1366 		}
1367 		if (found == 0) {
1368 			kprintf("DELETE\n");
1369 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1370 			get_mplock();
1371 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1372 			rel_mplock();
1373 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1374 		}
1375 	}
1376 out:
1377 	if (pdlist)
1378 		kfree(pdlist, M_MFIBUF);
1379 	if (cm)
1380 		mfi_release_command(cm);
1381 }
1382 
1383 static void
1384 mfi_ldprobe(struct mfi_softc *sc)
1385 {
1386 	struct mfi_frame_header *hdr;
1387 	struct mfi_command *cm = NULL;
1388 	struct mfi_ld_list *list = NULL;
1389 	struct mfi_disk *ld;
1390 	int error, i;
1391 
1392 	mfi_lockassert(&sc->mfi_config_lock);
1393 	mfi_lockassert(&sc->mfi_io_lock);
1394 
1395 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
1396 	    (void **)&list, sizeof(*list));
1397 	if (error)
1398 		goto out;
1399 
1400 	cm->cm_flags = MFI_CMD_DATAIN;
1401 	if (mfi_wait_command(sc, cm) != 0) {
1402 		device_printf(sc->mfi_dev, "Failed to get device listing\n");
1403 		goto out;
1404 	}
1405 
1406 	hdr = &cm->cm_frame->header;
1407 	if (hdr->cmd_status != MFI_STAT_OK) {
1408 		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
1409 		    hdr->cmd_status);
1410 		goto out;
1411 	}
1412 
1413 	for (i = 0; i < list->ld_count; i++) {
1414 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1415 			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
1416 				goto skip_add;
1417 		}
1418 		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
1419 	skip_add:;
1420 	}
1421 out:
1422 	if (list)
1423 		kfree(list, M_MFIBUF);
1424 	if (cm)
1425 		mfi_release_command(cm);
1426 
1427 	return;
1428 }
1429 
1430 /*
1431  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
 * bits 24-31 are all set, then it is the number of seconds since
1433  * boot.
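 * For example, 0xff00003c formats as "boot + 60s" and 0x0000003c as "60s".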
1434  */
1435 static const char *
1436 format_timestamp(uint32_t timestamp)
1437 {
1438 	static char buffer[32];
1439 
1440 	if ((timestamp & 0xff000000) == 0xff000000)
1441 		ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1442 		    0x00ffffff);
1443 	else
1444 		ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
1445 	return (buffer);
1446 }
1447 
1448 static const char *
1449 format_class(int8_t class)
1450 {
1451 	static char buffer[6];
1452 
1453 	switch (class) {
1454 	case MFI_EVT_CLASS_DEBUG:
1455 		return ("debug");
1456 	case MFI_EVT_CLASS_PROGRESS:
1457 		return ("progress");
1458 	case MFI_EVT_CLASS_INFO:
1459 		return ("info");
1460 	case MFI_EVT_CLASS_WARNING:
1461 		return ("WARN");
1462 	case MFI_EVT_CLASS_CRITICAL:
1463 		return ("CRIT");
1464 	case MFI_EVT_CLASS_FATAL:
1465 		return ("FATAL");
1466 	case MFI_EVT_CLASS_DEAD:
1467 		return ("DEAD");
1468 	default:
1469 		ksnprintf(buffer, sizeof(buffer), "%d", class);
1470 		return (buffer);
1471 	}
1472 }
1473 
1474 static void
1475 mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1476 {
1477 	struct mfi_system_pd *syspd = NULL;
1478 
1479 	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
1480 	    format_timestamp(detail->time), detail->evt_class.members.locale,
1481 	    format_class(detail->evt_class.members.evt_class),
1482 	    detail->description);
1483 
1484 	/* Don't act on old AEN's or while shutting down */
1485 	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
1486 		return;
1487 
1488 	switch (detail->arg_type) {
1489 	case MR_EVT_ARGS_NONE:
1490 		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
1491 		    device_printf(sc->mfi_dev, "HostBus scan raised\n");
1492 			if (mfi_detect_jbod_change) {
1493 				/*
1494 				 * Probe for new SYSPD's and Delete
1495 				 * invalid SYSPD's
1496 				 */
1497 				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1498 				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1499 				mfi_syspdprobe(sc);
1500 				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1501 				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1502 			}
1503 		}
1504 		break;
1505 	case MR_EVT_ARGS_LD_STATE:
		/*
		 * At load time the driver reads all the events starting
		 * from the one logged after the last shutdown.  Avoid
		 * acting on these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE) {
1512 			/* Remove the LD */
1513 			struct mfi_disk *ld;
1514 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
1515 				if (ld->ld_id ==
1516 				    detail->args.ld_state.ld.target_id)
1517 					break;
1518 			}
			/*
			 * Fix for kernel panics when an SSCD is removed:
			 * KASSERT(ld != NULL, ("volume disappeared"));
			 */
1523 			if (ld != NULL) {
1524 				get_mplock();
1525 				device_delete_child(sc->mfi_dev, ld->ld_dev);
1526 				rel_mplock();
1527 			}
1528 		}
1529 		break;
1530 	case MR_EVT_ARGS_PD:
1531 		if (detail->code == MR_EVT_PD_REMOVED) {
1532 			if (mfi_detect_jbod_change) {
1533 				/*
1534 				 * If the removed device is a SYSPD then
1535 				 * delete it
1536 				 */
1537 				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
1538 				    pd_link) {
1539 					if (syspd->pd_id ==
1540 					    detail->args.pd.device_id) {
1541 						get_mplock();
1542 						device_delete_child(
1543 						    sc->mfi_dev,
1544 						    syspd->pd_dev);
1545 						rel_mplock();
1546 						break;
1547 					}
1548 				}
1549 			}
1550 		}
1551 		if (detail->code == MR_EVT_PD_INSERTED) {
1552 			if (mfi_detect_jbod_change) {
1553 				/* Probe for new SYSPD's */
1554 				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1555 				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1556 				mfi_syspdprobe(sc);
1557 				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1558 				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1559 			}
1560 		}
1561 		break;
1562 	}
1563 }
1564 
1565 static void
1566 mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
1567 {
1568 	struct mfi_evt_queue_elm *elm;
1569 
1570 	mfi_lockassert(&sc->mfi_io_lock);
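	/*
	 * Runs with the io lock held from the completion path; copy the
	 * event and defer decoding (which may attach or detach children
	 * and must be able to block) to the taskqueue.
	 */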
1571 	elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
1572 	if (elm == NULL)
1573 		return;
1574 	memcpy(&elm->detail, detail, sizeof(*detail));
1575 	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
1576 	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
1577 }
1578 
1579 static void
1580 mfi_handle_evt(void *context, int pending)
1581 {
1582 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1583 	struct mfi_softc *sc;
1584 	struct mfi_evt_queue_elm *elm;
1585 
1586 	sc = context;
1587 	TAILQ_INIT(&queue);
1588 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1589 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1590 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1591 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1592 		TAILQ_REMOVE(&queue, elm, link);
1593 		mfi_decode_evt(sc, &elm->detail);
1594 		kfree(elm, M_MFIBUF);
1595 	}
1596 }
1597 
1598 static int
1599 mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
1600 {
1601 	struct mfi_command *cm;
1602 	struct mfi_dcmd_frame *dcmd;
1603 	union mfi_evt current_aen, prior_aen;
1604 	struct mfi_evt_detail *ed = NULL;
1605 	int error = 0;
1606 
1607 	current_aen.word = locale;
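	/*
	 * If an AEN command is already outstanding, keep it when it
	 * already covers the requested class and locale; otherwise fold
	 * the old locale mask into the new one, keep the more verbose
	 * (lower) class, and abort the old command before re-registering.
	 */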
1608 	if (sc->mfi_aen_cm != NULL) {
1609 		prior_aen.word =
1610 		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
1611 		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
1612 		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^ current_aen.members.locale)) {
1614 			return (0);
1615 		} else {
1616 			prior_aen.members.locale |= current_aen.members.locale;
1617 			if (prior_aen.members.evt_class
1618 			    < current_aen.members.evt_class)
1619 				current_aen.members.evt_class =
1620 				    prior_aen.members.evt_class;
1621 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1622 			mfi_abort(sc, sc->mfi_aen_cm);
1623 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1624 		}
1625 	}
1626 
1627 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1628 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
1629 	    (void **)&ed, sizeof(*ed));
1630 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1631 	if (error) {
1632 		goto out;
1633 	}
1634 
1635 	dcmd = &cm->cm_frame->dcmd;
1636 	((uint32_t *)&dcmd->mbox)[0] = seq;
1637 	((uint32_t *)&dcmd->mbox)[1] = locale;
1638 	cm->cm_flags = MFI_CMD_DATAIN;
1639 	cm->cm_complete = mfi_aen_complete;
1640 
1641 	sc->last_seq_num = seq;
1642 	sc->mfi_aen_cm = cm;
1643 
1644 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1645 	mfi_enqueue_ready(cm);
1646 	mfi_startio(sc);
1647 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1648 
1649 out:
1650 	return (error);
1651 }
1652 
1653 static void
1654 mfi_aen_complete(struct mfi_command *cm)
1655 {
1656 	struct mfi_frame_header *hdr;
1657 	struct mfi_softc *sc;
1658 	struct mfi_evt_detail *detail;
1659 	struct mfi_aen *mfi_aen_entry, *tmp;
1660 	int seq = 0, aborted = 0;
1661 
1662 	sc = cm->cm_sc;
1663 	mfi_lockassert(&sc->mfi_io_lock);
1664 
1665 	hdr = &cm->cm_frame->header;
1666 
1667 	if (sc->mfi_aen_cm == NULL)
1668 		return;
1669 
1670 	if (sc->mfi_aen_cm->cm_aen_abort ||
1671 	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
1672 		sc->mfi_aen_cm->cm_aen_abort = 0;
1673 		aborted = 1;
1674 	} else {
1675 		sc->mfi_aen_triggered = 1;
1676 		if (sc->mfi_poll_waiting) {
1677 			sc->mfi_poll_waiting = 0;
1678 			KNOTE(&sc->mfi_kq.ki_note, 0);
1679 		}
1680 		detail = cm->cm_data;
1681 		mfi_queue_evt(sc, detail);
1682 		seq = detail->seq + 1;
1683 		TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
1684 		    aen_link, tmp) {
1685 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
1686 			    aen_link);
1687 			lwkt_gettoken(&proc_token);
1688 			ksignal(mfi_aen_entry->p, SIGIO);
1689 			lwkt_reltoken(&proc_token);
1690 			kfree(mfi_aen_entry, M_MFIBUF);
1691 		}
1692 	}
1693 
1694 	kfree(cm->cm_data, M_MFIBUF);
1695 	sc->mfi_aen_cm = NULL;
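	/* Wake up any thread sleeping on the AEN command in mfi_abort(). */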
1696 	wakeup(&sc->mfi_aen_cm);
1697 	mfi_release_command(cm);
1698 
1699 	/* set it up again so the driver can catch more events */
1700 	if (!aborted) {
1701 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1702 		mfi_aen_setup(sc, seq);
1703 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1704 	}
1705 }
1706 
1707 #define MAX_EVENTS 15
1708 
1709 static int
1710 mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
1711 {
1712 	struct mfi_command *cm;
1713 	struct mfi_dcmd_frame *dcmd;
1714 	struct mfi_evt_list *el;
1715 	union mfi_evt class_locale;
1716 	int error, i, seq, size;
1717 
1718 	class_locale.members.reserved = 0;
1719 	class_locale.members.locale = mfi_event_locale;
1720 	class_locale.members.evt_class = mfi_event_class;
1721 
1722 	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
1723 		* (MAX_EVENTS - 1);
1724 	el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
1725 	if (el == NULL)
1726 		return (ENOMEM);
1727 
1728 	for (seq = start_seq;;) {
1729 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1730 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
1731 			kfree(el, M_MFIBUF);
1732 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1733 			return (EBUSY);
1734 		}
1735 
1736 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1737 
1738 		dcmd = &cm->cm_frame->dcmd;
1739 		bzero(dcmd->mbox, MFI_MBOX_SIZE);
1740 		dcmd->header.cmd = MFI_CMD_DCMD;
1741 		dcmd->header.timeout = 0;
1742 		dcmd->header.data_len = size;
1743 		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
1744 		((uint32_t *)&dcmd->mbox)[0] = seq;
1745 		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
1746 		cm->cm_sg = &dcmd->sgl;
1747 		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
1748 		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1749 		cm->cm_data = el;
1750 		cm->cm_len = size;
1751 
1752 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1753 		if ((error = mfi_mapcmd(sc, cm)) != 0) {
1754 			device_printf(sc->mfi_dev,
1755 			    "Failed to get controller entries\n");
1756 			mfi_release_command(cm);
1757 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1758 			break;
1759 		}
1760 
1761 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1762 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1763 		    BUS_DMASYNC_POSTREAD);
1764 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1765 
1766 		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
1767 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1768 			mfi_release_command(cm);
1769 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1770 			break;
1771 		}
1772 		if (dcmd->header.cmd_status != MFI_STAT_OK) {
1773 			device_printf(sc->mfi_dev,
1774 			    "Error %d fetching controller entries\n",
1775 			    dcmd->header.cmd_status);
1776 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1777 			mfi_release_command(cm);
1778 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1779 			break;
1780 		}
1781 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1782 		mfi_release_command(cm);
1783 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1784 
1785 		for (i = 0; i < el->count; i++) {
1786 			/*
1787 			 * If this event is newer than 'stop_seq' then
1788 			 * break out of the loop.  Note that the log
1789 			 * is a circular buffer so we have to handle
1790 			 * the case that our stop point is earlier in
1791 			 * the buffer than our start point.
1792 			 */
1793 			if (el->event[i].seq >= stop_seq) {
1794 				if (start_seq <= stop_seq)
1795 					break;
1796 				else if (el->event[i].seq < start_seq)
1797 					break;
1798 			}
1799 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1800 			mfi_queue_evt(sc, &el->event[i]);
1801 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1802 		}
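		/* Resume the scan at the entry after the last one returned. */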
1803 		seq = el->event[el->count - 1].seq + 1;
1804 	}
1805 
1806 	kfree(el, M_MFIBUF);
1807 	return (0);
1808 }
1809 
1810 static int
1811 mfi_add_ld(struct mfi_softc *sc, int id)
1812 {
1813 	struct mfi_command *cm;
1814 	struct mfi_dcmd_frame *dcmd = NULL;
1815 	struct mfi_ld_info *ld_info = NULL;
1816 	int error;
1817 
1818 	mfi_lockassert(&sc->mfi_io_lock);
1819 
1820 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1821 	    (void **)&ld_info, sizeof(*ld_info));
1822 	if (error) {
1823 		device_printf(sc->mfi_dev,
1824 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1825 		if (ld_info)
1826 			kfree(ld_info, M_MFIBUF);
1827 		return (error);
1828 	}
1829 	cm->cm_flags = MFI_CMD_DATAIN;
1830 	dcmd = &cm->cm_frame->dcmd;
1831 	dcmd->mbox[0] = id;
1832 	if (mfi_wait_command(sc, cm) != 0) {
1833 		device_printf(sc->mfi_dev,
1834 		    "Failed to get logical drive: %d\n", id);
1835 		kfree(ld_info, M_MFIBUF);
1836 		return (0);
1837 	}
1838 	if (ld_info->ld_config.params.isSSCD != 1) {
1839 		mfi_add_ld_complete(cm);
1840 	} else {
1841 		mfi_release_command(cm);
1842 		if (ld_info)		/* for SSCD volumes, free ld_info here */
1843 			kfree(ld_info, M_MFIBUF);
1844 	}
1845 	return (0);
1846 }
1847 
1848 static void
1849 mfi_add_ld_complete(struct mfi_command *cm)
1850 {
1851 	struct mfi_frame_header *hdr;
1852 	struct mfi_ld_info *ld_info;
1853 	struct mfi_softc *sc;
1854 	device_t child;
1855 
1856 	sc = cm->cm_sc;
1857 	hdr = &cm->cm_frame->header;
1858 	ld_info = cm->cm_private;
1859 
1860 	if (hdr->cmd_status != MFI_STAT_OK) {
1861 		kfree(ld_info, M_MFIBUF);
1862 		mfi_release_command(cm);
1863 		return;
1864 	}
1865 	mfi_release_command(cm);
1866 
1867 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1868 	get_mplock();
1869 	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
1870 		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
1871 		kfree(ld_info, M_MFIBUF);
1872 		rel_mplock();
1873 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1874 		return;
1875 	}
1876 
1877 	device_set_ivars(child, ld_info);
1878 	device_set_desc(child, "MFI Logical Disk");
1879 	bus_generic_attach(sc->mfi_dev);
1880 	rel_mplock();
1881 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1882 }
1883 
1884 static int
1885 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1886 {
1887 	struct mfi_command *cm;
1888 	struct mfi_dcmd_frame *dcmd = NULL;
1889 	struct mfi_pd_info *pd_info = NULL;
1890 	int error;
1891 
1892 	mfi_lockassert(&sc->mfi_io_lock);
1893 
1894 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1895 	    (void **)&pd_info, sizeof(*pd_info));
1896 	if (error) {
1897 		device_printf(sc->mfi_dev,
1898 		    "Failed to allocate for MFI_DCMD_PD_GET_INFO %d\n",
1899 		    error);
1900 		if (pd_info)
1901 			kfree(pd_info, M_MFIBUF);
1902 		return (error);
1903 	}
1904 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1905 	dcmd = &cm->cm_frame->dcmd;
1906 	dcmd->mbox[0] = id;
1907 	dcmd->header.scsi_status = 0;
1908 	dcmd->header.pad0 = 0;
1909 	if (mfi_mapcmd(sc, cm) != 0) {
1910 		device_printf(sc->mfi_dev,
1911 		    "Failed to get physical drive info %d\n", id);
1912 		kfree(pd_info, M_MFIBUF);
1913 		return (0);
1914 	}
1915 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1916 	    BUS_DMASYNC_POSTREAD);
1917 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1918 	mfi_add_sys_pd_complete(cm);
1919 	return (0);
1920 }
1921 
1922 static void
1923 mfi_add_sys_pd_complete(struct mfi_command *cm)
1924 {
1925 	struct mfi_frame_header *hdr;
1926 	struct mfi_pd_info *pd_info;
1927 	struct mfi_softc *sc;
1928 	device_t child;
1929 
1930 	sc = cm->cm_sc;
1931 	hdr = &cm->cm_frame->header;
1932 	pd_info = cm->cm_private;
1933 
1934 	if (hdr->cmd_status != MFI_STAT_OK) {
1935 		kfree(pd_info, M_MFIBUF);
1936 		mfi_release_command(cm);
1937 		return;
1938 	}
1939 	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
1940 		device_printf(sc->mfi_dev, "PD %x is not a SYSTEM PD\n",
1941 		    pd_info->ref.v.device_id);
1942 		kfree(pd_info, M_MFIBUF);
1943 		mfi_release_command(cm);
1944 		return;
1945 	}
1946 	mfi_release_command(cm);
1947 
1948 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1949 	get_mplock();
1950 	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
1951 		device_printf(sc->mfi_dev, "Failed to add system pd\n");
1952 		kfree(pd_info, M_MFIBUF);
1953 		rel_mplock();
1954 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1955 		return;
1956 	}
1957 
1958 	device_set_ivars(child, pd_info);
1959 	device_set_desc(child, "MFI System PD");
1960 	bus_generic_attach(sc->mfi_dev);
1961 	rel_mplock();
1962 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1963 }
1964 
1965 static struct mfi_command *
1966 mfi_bio_command(struct mfi_softc *sc)
1967 {
1968 	struct bio *bio;
1969 	struct mfi_command *cm = NULL;
1970 	struct mfi_disk *mfid;
1971 
1972 	/* Reserve two commands to avoid starving ioctl requests */
1973 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1974 		return (NULL);
1975 	if ((bio = mfi_dequeue_bio(sc)) == NULL)
1976 		return (NULL);
1977 	mfid = bio->bio_driver_info;
1978 	if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1979 		cm = mfi_build_syspdio(sc, bio);
1980 	else
1981 		cm = mfi_build_ldio(sc, bio);
1982 	if (!cm)
1983 		mfi_enqueue_bio(sc, bio);
1984 	return cm;
1985 }
1986 
1987 static struct mfi_command *
1988 mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
1989 {
1990 	struct mfi_command *cm;
1991 	struct buf *bp;
1992 	struct mfi_system_pd *disk;
1993 	struct mfi_pass_frame *pass;
1994 	int flags = 0, blkcount = 0;
1995 	uint32_t context = 0;
1996 
1997 	if ((cm = mfi_dequeue_free(sc)) == NULL)
1998 		return (NULL);
1999 
2000 	/* Zero out the MFI frame */
2001 	context = cm->cm_frame->header.context;
2002 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2003 	cm->cm_frame->header.context = context;
2004 	bp = bio->bio_buf;
2005 	pass = &cm->cm_frame->pass;
2006 	bzero(pass->cdb, 16);
2007 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2008 	switch (bp->b_cmd & 0x03) {
2009 	case BUF_CMD_READ:
2010 		pass->cdb[0] = READ_10;
2011 		flags = MFI_CMD_DATAIN;
2012 		break;
2013 	case BUF_CMD_WRITE:
2014 		pass->cdb[0] = WRITE_10;
2015 		flags = MFI_CMD_DATAOUT;
2016 		break;
2017 	default:
2018 		panic("Invalid bio command");
2019 	}
2020 
2021 	/* Cheat with the sector length to avoid a non-constant division */
2022 	blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2023 	disk = bio->bio_driver_info;
2024 	/* Fill in the LBA and transfer length in the CDB */
2025 	pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
2026 	pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
2027 	pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
2028 	pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
2029 	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2030 	pass->cdb[8] = (blkcount & 0x00ff);
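	/*
	 * Note that a 10-byte CDB carries only a 32-bit LBA and a 16-bit
	 * transfer length, so this path assumes requests that fit below
	 * 2TB on 512-byte-sector drives.
	 */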
2031 	pass->header.target_id = disk->pd_id;
2032 	pass->header.timeout = 0;
2033 	pass->header.flags = 0;
2034 	pass->header.scsi_status = 0;
2035 	pass->header.sense_len = MFI_SENSE_LEN;
2036 	pass->header.data_len = bp->b_bcount;
2037 	pass->header.cdb_len = 10;
2038 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2039 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2040 	cm->cm_complete = mfi_bio_complete;
2041 	cm->cm_private = bio;
2042 	cm->cm_data = bp->b_data;
2043 	cm->cm_len = bp->b_bcount;
2044 	cm->cm_sg = &pass->sgl;
2045 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2046 	cm->cm_flags = flags;
2047 	return (cm);
2048 }
2049 
2050 static struct mfi_command *
2051 mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
2052 {
2053 	struct mfi_io_frame *io;
2054 	struct buf *bp;
2055 	struct mfi_disk *disk;
2056 	struct mfi_command *cm;
2057 	int flags, blkcount;
2058 	uint32_t context = 0;
2059 
2060 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2061 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2062 		return (NULL);
2063 	/* Zero out the MFI frame */
2064 	context = cm->cm_frame->header.context;
2065 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2066 	cm->cm_frame->header.context = context;
2067 	bp = bio->bio_buf;
2068 	io = &cm->cm_frame->io;
2069 	switch (bp->b_cmd & 0x03) {
2070 	case BUF_CMD_READ:
2071 		io->header.cmd = MFI_CMD_LD_READ;
2072 		flags = MFI_CMD_DATAIN;
2073 		break;
2074 	case BUF_CMD_WRITE:
2075 		io->header.cmd = MFI_CMD_LD_WRITE;
2076 		flags = MFI_CMD_DATAOUT;
2077 		break;
2078 	default:
2079 		panic("Invalid bio command");
2080 	}
2081 
2082 	/* Cheat with the sector length to avoid a non-constant division */
2083 	blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2084 	disk = bio->bio_driver_info;
2085 	io->header.target_id = disk->ld_id;
2086 	io->header.timeout = 0;
2087 	io->header.flags = 0;
2088 	io->header.scsi_status = 0;
2089 	io->header.sense_len = MFI_SENSE_LEN;
2090 	io->header.data_len = blkcount;
2091 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2092 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2093 	io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
2094 	io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
2095 	cm->cm_complete = mfi_bio_complete;
2096 	cm->cm_private = bio;
2097 	cm->cm_data = bp->b_data;
2098 	cm->cm_len = bp->b_bcount;
2099 	cm->cm_sg = &io->sgl;
2100 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2101 	cm->cm_flags = flags;
2102 	return (cm);
2103 }
2104 
2105 static void
2106 mfi_bio_complete(struct mfi_command *cm)
2107 {
2108 	struct bio *bio;
2109 	struct buf *bp;
2110 	struct mfi_frame_header *hdr;
2111 	struct mfi_softc *sc;
2112 
2113 	bio = cm->cm_private;
2114 	bp = bio->bio_buf;
2115 	hdr = &cm->cm_frame->header;
2116 	sc = cm->cm_sc;
2117 
2118 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2119 		bp->b_flags |= B_ERROR;
2120 		bp->b_error = EIO;
2121 		device_printf(sc->mfi_dev, "I/O error, cmd_status=%d "
2122 		    "scsi_status=%d\n", hdr->cmd_status, hdr->scsi_status);
2123 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2124 	} else if (cm->cm_error != 0) {
2125 		bp->b_flags |= B_ERROR;
2126 	}
2127 
2128 	mfi_release_command(cm);
2129 	mfi_disk_complete(bio);
2130 }
2131 
2132 void
2133 mfi_startio(struct mfi_softc *sc)
2134 {
2135 	struct mfi_command *cm;
2136 	struct ccb_hdr *ccbh;
2137 
2138 	for (;;) {
2139 		/* Don't bother if we're short on resources */
2140 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2141 			break;
2142 
2143 		/* Try a command that has already been prepared */
2144 		cm = mfi_dequeue_ready(sc);
2145 
2146 		if (cm == NULL) {
2147 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2148 				cm = sc->mfi_cam_start(ccbh);
2149 		}
2150 
2151 		/* Nope, so look for work on the bioq */
2152 		if (cm == NULL)
2153 			cm = mfi_bio_command(sc);
2154 
2155 		/* No work available, so exit */
2156 		if (cm == NULL)
2157 			break;
2158 
2159 		/* Send the command to the controller */
2160 		if (mfi_mapcmd(sc, cm) != 0) {
2161 			mfi_requeue_ready(cm);
2162 			break;
2163 		}
2164 	}
2165 }
2166 
2167 int
2168 mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
2169 {
2170 	int error, polled;
2171 
2172 	mfi_lockassert(&sc->mfi_io_lock);
2173 
2174 	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
2175 		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
2176 		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
2177 		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
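		/*
		 * EINPROGRESS means the mapping was deferred; mfi_data_cb
		 * will send the frame once resources are available, so
		 * freeze the queue until then.
		 */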
2178 		if (error == EINPROGRESS) {
2179 			sc->mfi_flags |= MFI_FLAGS_QFRZN;
2180 			return (0);
2181 		}
2182 	} else {
2183 		if (sc->MFA_enabled)
2184 			error = mfi_tbolt_send_frame(sc, cm);
2185 		else
2186 			error = mfi_send_frame(sc, cm);
2187 	}
2188 
2189 	return (error);
2190 }
2191 
2192 static void
2193 mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2194 {
2195 	struct mfi_frame_header *hdr;
2196 	struct mfi_command *cm;
2197 	union mfi_sgl *sgl;
2198 	struct mfi_softc *sc;
2199 	int i, j, first, dir;
2200 
2201 	cm = (struct mfi_command *)arg;
2202 	sc = cm->cm_sc;
2203 	hdr = &cm->cm_frame->header;
2204 	sgl = cm->cm_sg;
2205 
2206 	if (error) {
2207 		kprintf("error %d in callback\n", error);
2208 		cm->cm_error = error;
2209 		mfi_complete(sc, cm);
2210 		return;
2211 	}
2212 
2213 	/*
2214 	 * Use the IEEE SGL only for I/Os on a SKINNY controller.  For
2215 	 * other commands on a SKINNY controller use either sg32 or
2216 	 * sg64, based on sizeof(bus_addr_t).  Also calculate the total
2217 	 * frame size based on the type of SGL used.
2218 	 */
2219 	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
2220 	     (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
2221 	     (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
2222 	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
2223 		for (i = 0; i < nsegs; i++) {
2224 			sgl->sg_skinny[i].addr = segs[i].ds_addr;
2225 			sgl->sg_skinny[i].len = segs[i].ds_len;
2226 			sgl->sg_skinny[i].flag = 0;
2227 		}
2228 		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
2229 		hdr->sg_count = nsegs;
2230 	} else {
2231 		j = 0;
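		/*
		 * For STP commands the first cm_stp_len bytes get their
		 * own SG entry; the remainder of segment 0 is added in
		 * the loop below at an offset of 'first'.
		 */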
2232 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
2233 			first = cm->cm_stp_len;
2234 			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2235 				sgl->sg32[j].addr = segs[0].ds_addr;
2236 				sgl->sg32[j++].len = first;
2237 			} else {
2238 				sgl->sg64[j].addr = segs[0].ds_addr;
2239 				sgl->sg64[j++].len = first;
2240 			}
2241 		} else
2242 			first = 0;
2243 		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
2244 			for (i = 0; i < nsegs; i++) {
2245 				sgl->sg32[j].addr = segs[i].ds_addr + first;
2246 				sgl->sg32[j++].len = segs[i].ds_len - first;
2247 				first = 0;
2248 			}
2249 		} else {
2250 			for (i = 0; i < nsegs; i++) {
2251 				sgl->sg64[j].addr = segs[i].ds_addr + first;
2252 				sgl->sg64[j++].len = segs[i].ds_len - first;
2253 				first = 0;
2254 			}
2255 			hdr->flags |= MFI_FRAME_SGL64;
2256 		}
2257 		hdr->sg_count = j;
2258 	}
2259 
2260 	dir = 0;
2261 	if (cm->cm_flags & MFI_CMD_DATAIN) {
2262 		dir |= BUS_DMASYNC_PREREAD;
2263 		hdr->flags |= MFI_FRAME_DIR_READ;
2264 	}
2265 	if (cm->cm_flags & MFI_CMD_DATAOUT) {
2266 		dir |= BUS_DMASYNC_PREWRITE;
2267 		hdr->flags |= MFI_FRAME_DIR_WRITE;
2268 	}
2269 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2270 	cm->cm_flags |= MFI_CMD_MAPPED;
2271 
2272 	/*
2273 	 * Instead of calculating the exact number of frames in the
2274 	 * compound frame, assume there is always at least one frame,
2275 	 * and so don't compensate for the remainder of the following
2276 	 * division.
2277 	 */
2278 	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
2279 	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2280 
2281 	if (sc->MFA_enabled)
2282 		mfi_tbolt_send_frame(sc, cm);
2283 	else
2284 		mfi_send_frame(sc, cm);
2285 }
2286 
2287 static int
2288 mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
2289 {
2290 	struct mfi_frame_header *hdr;
2291 	int tm = MFI_POLL_TIMEOUT_SECS * 1000;
2292 
2293 	hdr = &cm->cm_frame->header;
2294 
2295 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
2296 		cm->cm_timestamp = time_second;
2297 		mfi_enqueue_busy(cm);
2298 	} else {
2299 		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2300 		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2301 	}
2302 
2303 	/*
2304 	 * The bus address of the command is aligned on a 64 byte boundary,
2305 	 * leaving the least significant 6 bits as zero.  For whatever
2306 	 * reason, the hardware wants the address shifted right by
2307 	 * three, leaving just 3 zero bits.  These bits are used as a prefetching
2308 	 * hint for the hardware to predict how many frames need to be
2309 	 * fetched across the bus.  If a command has more than 8 frames
2310 	 * then the 3 bits are set to 0x7 and the firmware uses other
2311 	 * information in the command to determine the total amount to fetch.
2312 	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
2313 	 * is enough for both 32bit and 64bit systems.
2314 	 */
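	/*
	 * For example (illustrative values): a frame at bus address
	 * 0x12340 with 2 extra frames is issued as (0x12340 >> 3) | 2 =
	 * 0x246a on adapters whose issue routine folds the count into
	 * the low bits.
	 */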
2315 	if (cm->cm_extra_frames > 7)
2316 		cm->cm_extra_frames = 7;
2317 
2318 	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);
2319 
2320 	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
2321 		return (0);
2322 
2323 	/* This is a polled command, so busy-wait for it to complete. */
2324 	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2325 		DELAY(1000);
2326 		tm -= 1;
2327 		if (tm <= 0)
2328 			break;
2329 	}
2330 
2331 	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
2332 		device_printf(sc->mfi_dev, "Frame %p timed out "
2333 		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
2334 		return (ETIMEDOUT);
2335 	}
2336 
2337 	return (0);
2338 }
2339 
2340 void
2341 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2342 {
2343 	int dir;
2344 
2345 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2346 		dir = 0;
2347 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2348 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2349 			dir |= BUS_DMASYNC_POSTREAD;
2350 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2351 			dir |= BUS_DMASYNC_POSTWRITE;
2352 
2353 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2354 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2355 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2356 	}
2357 
2358 	cm->cm_flags |= MFI_CMD_COMPLETED;
2359 
2360 	if (cm->cm_complete != NULL)
2361 		cm->cm_complete(cm);
2362 	else
2363 		wakeup(cm);
2364 }
2365 
2366 static int
2367 mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
2368 {
2369 	struct mfi_command *cm;
2370 	struct mfi_abort_frame *abort;
2371 	int i = 0;
2372 	uint32_t context = 0;
2373 
2374 	mfi_lockassert(&sc->mfi_io_lock);
2375 
2376 	if ((cm = mfi_dequeue_free(sc)) == NULL) {
2377 		return (EBUSY);
2378 	}
2379 
2380 	/* Zero out the MFI frame */
2381 	context = cm->cm_frame->header.context;
2382 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2383 	cm->cm_frame->header.context = context;
2384 
2385 	abort = &cm->cm_frame->abort;
2386 	abort->header.cmd = MFI_CMD_ABORT;
2387 	abort->header.flags = 0;
2388 	abort->header.scsi_status = 0;
2389 	abort->abort_context = cm_abort->cm_frame->header.context;
2390 	abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
2391 	abort->abort_mfi_addr_hi =
2392 	    (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
2393 	cm->cm_data = NULL;
2394 	cm->cm_flags = MFI_CMD_POLLED;
2395 
2396 	if (sc->mfi_aen_cm)
2397 		sc->mfi_aen_cm->cm_aen_abort = 1;
2398 	mfi_mapcmd(sc, cm);
2399 	mfi_release_command(cm);
2400 
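	/*
	 * Wait up to five 5-second intervals for mfi_aen_complete() to
	 * clear mfi_aen_cm and wake us up.
	 */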
2401 	while (i < 5 && sc->mfi_aen_cm != NULL) {
2402 		lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
2403 		    5 * hz);
2404 		i++;
2405 	}
2406 
2407 	return (0);
2408 }
2409 
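/*
 * Write a run of blocks to a logical disk with a polled command.  This
 * is meant for dump-style contexts where the system cannot sleep or
 * take interrupts.
 */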
2410 int
2411 mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2412     int len)
2413 {
2414 	struct mfi_command *cm;
2415 	struct mfi_io_frame *io;
2416 	int error;
2417 	uint32_t context = 0;
2418 
2419 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2420 		return (EBUSY);
2421 
2422 	/* Zero out the MFI frame */
2423 	context = cm->cm_frame->header.context;
2424 	bzero(cm->cm_frame, sizeof(union mfi_frame));
2425 	cm->cm_frame->header.context = context;
2426 
2427 	io = &cm->cm_frame->io;
2428 	io->header.cmd = MFI_CMD_LD_WRITE;
2429 	io->header.target_id = id;
2430 	io->header.timeout = 0;
2431 	io->header.flags = 0;
2432 	io->header.scsi_status = 0;
2433 	io->header.sense_len = MFI_SENSE_LEN;
2434 	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2435 	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2436 	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2437 	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
2438 	io->lba_lo = lba & 0xffffffff;
2439 	cm->cm_data = virt;
2440 	cm->cm_len = len;
2441 	cm->cm_sg = &io->sgl;
2442 	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
2443 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2444 
2445 	error = mfi_mapcmd(sc, cm);
2446 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2447 	    BUS_DMASYNC_POSTWRITE);
2448 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2449 	mfi_release_command(cm);
2450 
2451 	return (error);
2452 }
2453 
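/*
 * As above, but write to a system (JBOD) physical disk via a 10-byte
 * WRITE CDB in a pass-through frame.
 */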
2454 int
2455 mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
2456     int len)
2457 {
2458 	struct mfi_command *cm;
2459 	struct mfi_pass_frame *pass;
2460 	int error;
2461 	int blkcount = 0;
2462 
2463 	if ((cm = mfi_dequeue_free(sc)) == NULL)
2464 		return (EBUSY);
2465 
2466 	pass = &cm->cm_frame->pass;
2467 	bzero(pass->cdb, 16);
2468 	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
2469 	pass->cdb[0] = WRITE_10;
2470 	pass->cdb[2] = (lba & 0xff000000) >> 24;
2471 	pass->cdb[3] = (lba & 0x00ff0000) >> 16;
2472 	pass->cdb[4] = (lba & 0x0000ff00) >> 8;
2473 	pass->cdb[5] = (lba & 0x000000ff);
2474 	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
2475 	pass->cdb[7] = (blkcount & 0xff00) >> 8;
2476 	pass->cdb[8] = (blkcount & 0x00ff);
2477 	pass->header.target_id = id;
2478 	pass->header.timeout = 0;
2479 	pass->header.flags = 0;
2480 	pass->header.scsi_status = 0;
2481 	pass->header.sense_len = MFI_SENSE_LEN;
2482 	pass->header.data_len = len;
2483 	pass->header.cdb_len = 10;
2484 	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
2485 	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
2486 	cm->cm_data = virt;
2487 	cm->cm_len = len;
2488 	cm->cm_sg = &pass->sgl;
2489 	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
2490 	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;
2491 
2492 	error = mfi_mapcmd(sc, cm);
2493 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
2494 	    BUS_DMASYNC_POSTWRITE);
2495 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2496 	mfi_release_command(cm);
2497 
2498 	return (error);
2499 }
2500 
2501 static int
2502 mfi_open(struct dev_open_args *ap)
2503 {
2504 	cdev_t dev = ap->a_head.a_dev;
2505 	struct mfi_softc *sc;
2506 	int error;
2507 
2508 	sc = dev->si_drv1;
2509 
2510 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2511 	if (sc->mfi_detaching)
2512 		error = ENXIO;
2513 	else {
2514 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2515 		error = 0;
2516 	}
2517 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2518 
2519 	return (error);
2520 }
2521 
2522 static int
2523 mfi_close(struct dev_close_args *ap)
2524 {
2525 	cdev_t dev = ap->a_head.a_dev;
2526 	struct mfi_softc *sc;
2527 	struct mfi_aen *mfi_aen_entry, *tmp;
2528 
2529 	sc = dev->si_drv1;
2530 
2531 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2532 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2533 
2534 	TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2535 		if (mfi_aen_entry->p == curproc) {
2536 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2537 			    aen_link);
2538 			kfree(mfi_aen_entry, M_MFIBUF);
2539 		}
2540 	}
2541 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2542 	return (0);
2543 }
2544 
2545 static int
2546 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2547 {
2548 
2549 	switch (opcode) {
2550 	case MFI_DCMD_LD_DELETE:
2551 	case MFI_DCMD_CFG_ADD:
2552 	case MFI_DCMD_CFG_CLEAR:
2553 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2554 		lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
2555 		return (1);
2556 	default:
2557 		return (0);
2558 	}
2559 }
2560 
2561 static void
2562 mfi_config_unlock(struct mfi_softc *sc, int locked)
2563 {
2564 
2565 	if (locked)
2566 		lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2567 }
2568 
2569 /*
2570  * Perform pre-issue checks on commands from userland and possibly veto
2571  * them.
2572  */
2573 static int
2574 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2575 {
2576 	struct mfi_disk *ld, *ld2;
2577 	int error;
2578 	struct mfi_system_pd *syspd = NULL;
2579 	uint16_t syspd_id;
2580 	uint16_t *mbox;
2581 
2582 	mfi_lockassert(&sc->mfi_io_lock);
2583 	error = 0;
2584 	switch (cm->cm_frame->dcmd.opcode) {
2585 	case MFI_DCMD_LD_DELETE:
2586 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2587 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2588 				break;
2589 		}
2590 		if (ld == NULL)
2591 			error = ENOENT;
2592 		else
2593 			error = mfi_disk_disable(ld);
2594 		break;
2595 	case MFI_DCMD_CFG_CLEAR:
2596 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2597 			error = mfi_disk_disable(ld);
2598 			if (error)
2599 				break;
2600 		}
2601 		if (error) {
2602 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2603 				if (ld2 == ld)
2604 					break;
2605 				mfi_disk_enable(ld2);
2606 			}
2607 		}
2608 		break;
2609 	case MFI_DCMD_PD_STATE_SET:
2610 		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2611 		syspd_id = mbox[0];
2612 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2613 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2614 				if (syspd->pd_id == syspd_id)
2615 					break;
2616 			}
2617 		} else {
2618 			break;
2619 		}
2620 		if (syspd)
2621 			error = mfi_syspd_disable(syspd);
2622 		break;
2623 	default:
2624 		break;
2625 	}
2626 	return (error);
2627 }
2628 
2629 /* Perform post-issue checks on commands from userland. */
2630 static void
2631 mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
2632 {
2633 	struct mfi_disk *ld, *ldn;
2634 	struct mfi_system_pd *syspd = NULL;
2635 	uint16_t syspd_id;
2636 	uint16_t *mbox;
2637 
2638 	switch (cm->cm_frame->dcmd.opcode) {
2639 	case MFI_DCMD_LD_DELETE:
2640 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2641 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2642 				break;
2643 		}
2644 		KASSERT(ld != NULL, ("volume disappeared"));
2645 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2646 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2647 			get_mplock();
2648 			device_delete_child(sc->mfi_dev, ld->ld_dev);
2649 			rel_mplock();
2650 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2651 		} else
2652 			mfi_disk_enable(ld);
2653 		break;
2654 	case MFI_DCMD_CFG_CLEAR:
2655 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
2656 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2657 			get_mplock();
2658 			TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
2659 				device_delete_child(sc->mfi_dev, ld->ld_dev);
2660 			}
2661 			rel_mplock();
2662 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2663 		} else {
2664 			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
2665 				mfi_disk_enable(ld);
2666 		}
2667 		break;
2668 	case MFI_DCMD_CFG_ADD:
2669 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2670 		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK)
2671 			mfi_ldprobe(sc);
2672 		break;
2673 	case MFI_DCMD_PD_STATE_SET:
2674 		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2675 		syspd_id = mbox[0];
2676 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2677 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2678 				if (syspd->pd_id == syspd_id)
2679 					break;
2680 			}
2681 		} else {
2682 			break;
2683 		}
2684 		/* If the transition fails then enable the syspd again */
2685 		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
2686 			mfi_syspd_enable(syspd);
2687 		break;
2688 	}
2689 }
2690 
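/*
 * Check whether a userland command creates or deletes an SSCD (solid
 * state cache) volume.  Such volumes never have an mfid child attached,
 * so the usual pre/post disable/enable checks must be skipped for them.
 */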
2691 static int
2692 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2693 {
2694 	struct mfi_config_data *conf_data = cm->cm_data;
2695 	struct mfi_command *ld_cm = NULL;
2696 	struct mfi_ld_info *ld_info = NULL;
2697 	int error = 0;
2698 
2699 	if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2700 	    (conf_data->ld[0].params.isSSCD == 1)) {
2701 		error = 1;
2702 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2703 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2704 		    (void **)&ld_info, sizeof(*ld_info));
2705 		if (error) {
2706 			device_printf(sc->mfi_dev, "Failed to allocate "
2707 			    "MFI_DCMD_LD_GET_INFO %d\n", error);
2708 			if (ld_info)
2709 				kfree(ld_info, M_MFIBUF);
2710 			return 0;
2711 		}
2712 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2713 		ld_cm->cm_frame->dcmd.mbox[0] = cm->cm_frame->dcmd.mbox[0];
2714 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2715 		if (mfi_wait_command(sc, ld_cm) != 0) {
2716 			device_printf(sc->mfi_dev, "failed to get logical drive\n");
2717 			mfi_release_command(ld_cm);
2718 			kfree(ld_info, M_MFIBUF);
2719 			return 0;
2720 		}
2721 
2722 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2723 			kfree(ld_info, M_MFIBUF);
2724 			mfi_release_command(ld_cm);
2725 			return 0;
2726 		} else {
2727 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2728 		}
2729 
2730 		if (ld_info->ld_config.params.isSSCD == 1)
2731 			error = 1;
2732 
2733 		mfi_release_command(ld_cm);
2734 		kfree(ld_info, M_MFIBUF);
2735 	}
2736 	return error;
2737 }
2738 
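/*
 * Set up kernel bounce buffers for an MFI_CMD_STP frame issued from
 * userland: allocate a DMA-able buffer for each user SG element, point
 * the frame's SG list at those buffers, and copy the user data in.
 */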
2739 static int
2740 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm, caddr_t arg)
2741 {
2742 	uint8_t i;
2743 	struct mfi_ioc_packet *ioc;
2744 	struct megasas_sge *kern_sge;
2745 	int sge_size, error;
2746 
2747 	ioc = (struct mfi_ioc_packet *)arg;
2748 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2749 	kern_sge = (struct megasas_sge *)((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2750 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2751 
2752 	if (sizeof(bus_addr_t) == 8) {
2753 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2754 		cm->cm_extra_frames = 2;
2755 		sge_size = sizeof(struct mfi_sg64);
2756 	} else {
2757 		cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2758 		sge_size = sizeof(struct mfi_sg32);
2759 	}
2760 
2761 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2762 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2763 		if (bus_dma_tag_create(sc->mfi_parent_dmat,	/* parent */
2764 		    1, 0,			/* algnmnt, boundary */
2765 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2766 		    BUS_SPACE_MAXADDR,		/* highaddr */
2767 		    NULL, NULL,			/* filter, filterarg */
2768 		    ioc->mfi_sgl[i].iov_len,	/* maxsize */
2769 		    2,				/* nsegments */
2770 		    ioc->mfi_sgl[i].iov_len,	/* maxsegsize */
2771 		    BUS_DMA_ALLOCNOW,		/* flags */
2772 		    &sc->mfi_kbuff_arr_dmat[i])) {
2773 			device_printf(sc->mfi_dev,
2774 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2775 			return (ENOMEM);
2776 		}
2777 
2778 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2779 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2780 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2781 			device_printf(sc->mfi_dev,
2782 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2783 			return (ENOMEM);
2784 		}
2785 
2786 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2787 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2788 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2789 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2790 
2791 		if (!sc->kbuff_arr[i]) {
2792 			device_printf(sc->mfi_dev,
2793 			    "Could not allocate memory for kbuff_arr info\n");
2794 			return -1;
2795 		}
2796 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2797 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2798 
2799 		if (sizeof(bus_addr_t) == 8) {
2800 			cm->cm_frame->stp.sgl.sg64[i].addr =
2801 			    kern_sge[i].phys_addr;
2802 			cm->cm_frame->stp.sgl.sg64[i].len =
2803 			    ioc->mfi_sgl[i].iov_len;
2804 		} else {
2805 			cm->cm_frame->stp.sgl.sg32[i].addr =
2806 			    kern_sge[i].phys_addr;
2807 			cm->cm_frame->stp.sgl.sg32[i].len =
2808 			    ioc->mfi_sgl[i].iov_len;
2809 		}
2810 
2811 		error = copyin(ioc->mfi_sgl[i].iov_base,
2812 		    sc->kbuff_arr[i],
2813 		    ioc->mfi_sgl[i].iov_len);
2814 		if (error != 0) {
2815 			device_printf(sc->mfi_dev, "Copy in failed\n");
2816 			return error;
2817 		}
2818 	}
2819 
2820 	cm->cm_flags |= MFI_CMD_MAPPED;
2821 	return 0;
2822 }
2823 
2824 static int
2825 mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
2826 {
2827 	struct mfi_command *cm;
2828 	struct mfi_dcmd_frame *dcmd;
2829 	void *ioc_buf = NULL;
2830 	uint32_t context;
2831 	int error = 0, locked;
2832 
2834 	if (ioc->buf_size > 0) {
2835 		ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
2836 		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
2837 		if (error) {
2838 			device_printf(sc->mfi_dev, "failed to copyin\n");
2839 			kfree(ioc_buf, M_MFIBUF);
2840 			return (error);
2841 		}
2842 	}
2843 
2844 	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);
2845 
2846 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
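	/* No free commands; sleep up to a second at a time and retry. */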
2847 	while ((cm = mfi_dequeue_free(sc)) == NULL)
2848 		lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);
2849 
2850 	/* Save context for later */
2851 	context = cm->cm_frame->header.context;
2852 
2853 	dcmd = &cm->cm_frame->dcmd;
2854 	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));
2855 
2856 	cm->cm_sg = &dcmd->sgl;
2857 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
2858 	cm->cm_data = ioc_buf;
2859 	cm->cm_len = ioc->buf_size;
2860 
2861 	/* restore context */
2862 	cm->cm_frame->header.context = context;
2863 
2864 	/* Cheat since we don't know if we're writing or reading */
2865 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
2866 
2867 	error = mfi_check_command_pre(sc, cm);
2868 	if (error)
2869 		goto out;
2870 
2871 	error = mfi_wait_command(sc, cm);
2872 	if (error) {
2873 		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
2874 		goto out;
2875 	}
2876 	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
2877 	mfi_check_command_post(sc, cm);
2878 out:
2879 	mfi_release_command(cm);
2880 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2881 	mfi_config_unlock(sc, locked);
2882 	if (ioc->buf_size > 0)
2883 		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
2884 	if (ioc_buf)
2885 		kfree(ioc_buf, M_MFIBUF);
2886 	return (error);
2887 }
2888 
2889 #define	PTRIN(p)		((void *)(uintptr_t)(p))
2890 
2891 static int
2892 mfi_ioctl(struct dev_ioctl_args *ap)
2893 {
2894 	cdev_t dev = ap->a_head.a_dev;
2895 	u_long cmd = ap->a_cmd;
2896 	int flag = ap->a_fflag;
2897 	caddr_t arg = ap->a_data;
2898 	struct mfi_softc *sc;
2899 	union mfi_statrequest *ms;
2900 	struct mfi_ioc_packet *ioc;
2901 	struct mfi_ioc_aen *aen;
2902 	struct mfi_command *cm = NULL;
2903 	uint32_t context;
2904 	union mfi_sense_ptr sense_ptr;
2905 	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
2906 	size_t len;
2907 	int i, res;
2908 	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
2909 	int error, locked;
2910 
2911 	sc = dev->si_drv1;
2912 	error = 0;
2913 
2914 	if (sc->adpreset)
2915 		return EBUSY;
2916 
2917 	if (sc->hw_crit_error)
2918 		return EBUSY;
2919 
2920 	if (sc->issuepend_done == 0)
2921 		return EBUSY;
2922 
2923 	switch (cmd) {
2924 	case MFIIO_STATS:
2925 		ms = (union mfi_statrequest *)arg;
2926 		switch (ms->ms_item) {
2927 		case MFIQ_FREE:
2928 		case MFIQ_BIO:
2929 		case MFIQ_READY:
2930 		case MFIQ_BUSY:
2931 			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
2932 			    sizeof(struct mfi_qstat));
2933 			break;
2934 		default:
2935 			error = ENOIOCTL;
2936 			break;
2937 		}
2938 		break;
2939 	case MFIIO_QUERY_DISK:
2940 	{
2941 		struct mfi_query_disk *qd;
2942 		struct mfi_disk *ld;
2943 
2944 		qd = (struct mfi_query_disk *)arg;
2945 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2946 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2947 			if (ld->ld_id == qd->array_id)
2948 				break;
2949 		}
2950 		if (ld == NULL) {
2951 			qd->present = 0;
2952 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2953 			return (0);
2954 		}
2955 		qd->present = 1;
2956 		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
2957 			qd->open = 1;
2958 		bzero(qd->devname, SPECNAMELEN + 1);
2959 		ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
2960 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2961 		break;
2962 	}
2963 	case MFI_CMD:
2964 		{
2965 		devclass_t devclass;
2966 		ioc = (struct mfi_ioc_packet *)arg;
2967 		int adapter;
2968 
2969 		adapter = ioc->mfi_adapter_no;
2970 		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
2971 			devclass = devclass_find("mfi");
2972 			sc = devclass_get_softc(devclass, adapter);
2973 		}
2974 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2975 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
2976 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2977 			return (EBUSY);
2978 		}
2979 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2980 		locked = 0;
2981 
2982 		/*
2983 		 * save off original context since copying from user
2984 		 * will clobber some data
2985 		 */
2986 		context = cm->cm_frame->header.context;
2987 		cm->cm_frame->header.context = cm->cm_index;
2988 
2989 		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
2990 		    2 * MEGAMFI_FRAME_SIZE);
2991 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
2992 		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
2993 		cm->cm_frame->header.scsi_status = 0;
2994 		cm->cm_frame->header.pad0 = 0;
2995 		if (ioc->mfi_sge_count) {
2996 			cm->cm_sg =
2997 			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
2998 		}
2999 		cm->cm_flags = 0;
3000 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3001 			cm->cm_flags |= MFI_CMD_DATAIN;
3002 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3003 			cm->cm_flags |= MFI_CMD_DATAOUT;
3004 		/* Legacy app shim */
3005 		if (cm->cm_flags == 0)
3006 			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
3007 		cm->cm_len = cm->cm_frame->header.data_len;
3008 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3009 			cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
3010 			cm->cm_len += cm->cm_stp_len;
3011 		}
3012 		if (cm->cm_len &&
3013 		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3014 			cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3015 			    M_WAITOK | M_ZERO);
3016 		} else {
3017 			cm->cm_data = 0;
3018 		}
3019 
3020 		/* restore header context */
3021 		cm->cm_frame->header.context = context;
3022 
3023 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3024 			res = mfi_stp_cmd(sc, cm, arg);
3025 			if (res != 0)
3026 				goto out;
3027 		} else {
3028 			temp = data;
3029 			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
3030 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3031 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3032 					addr = ioc->mfi_sgl[i].iov_base;
3033 					len = ioc->mfi_sgl[i].iov_len;
3034 					error = copyin(addr, temp, len);
3035 					if (error != 0) {
3036 						device_printf(sc->mfi_dev,
3037 						    "Copy in failed\n");
3038 						goto out;
3039 					}
3040 					temp = &temp[len];
3041 				}
3042 			}
3043 		}
3044 
3045 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3046 			locked = mfi_config_lock(sc,
3047 			     cm->cm_frame->dcmd.opcode);
3048 
3049 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3050 			cm->cm_frame->pass.sense_addr_lo =
3051 			    (uint32_t)cm->cm_sense_busaddr;
3052 			cm->cm_frame->pass.sense_addr_hi =
3053 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3054 		}
3055 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3056 		skip_pre_post = mfi_check_for_sscd(sc, cm);
3057 		if (!skip_pre_post) {
3058 			error = mfi_check_command_pre(sc, cm);
3059 			if (error) {
3060 				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3061 				goto out;
3062 			}
3063 		}
3064 
3065 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3066 			device_printf(sc->mfi_dev,
3067 			    "Controller command failed\n");
3068 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3069 			goto out;
3070 		}
3071 
3072 		if (!skip_pre_post)
3073 			mfi_check_command_post(sc, cm);
3074 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3075 
3076 		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
3077 			temp = data;
3078 			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
3079 			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
3080 				for (i = 0; i < ioc->mfi_sge_count; i++) {
3081 					addr = ioc->mfi_sgl[i].iov_base;
3082 					len = ioc->mfi_sgl[i].iov_len;
3083 					error = copyout(temp, addr, len);
3084 					if (error != 0) {
3085 						device_printf(sc->mfi_dev,
3086 						    "Copy out failed\n");
3087 						goto out;
3088 					}
3089 					temp = &temp[len];
3090 				}
3091 			}
3092 		}
3093 
3094 		if (ioc->mfi_sense_len) {
3095 			/* get user-space sense ptr then copy out sense */
3096 			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
3097 			    &sense_ptr.sense_ptr_data[0],
3098 			    sizeof(sense_ptr.sense_ptr_data));
3099 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3100 			    ioc->mfi_sense_len);
3101 			if (error != 0) {
3102 				device_printf(sc->mfi_dev,
3103 				    "Copy out failed\n");
3104 				goto out;
3105 			}
3106 		}
3107 
3108 		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
3109 out:
3110 		mfi_config_unlock(sc, locked);
3111 		if (data)
3112 			kfree(data, M_MFIBUF);
3113 		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
3114 			for (i = 0; i < 2; i++) {
3115 				if (sc->kbuff_arr[i]) {
3116 					if (sc->mfi_kbuff_arr_busaddr[i] != 0)
3117 						bus_dmamap_unload(
3118 						    sc->mfi_kbuff_arr_dmat[i],
3119 						    sc->mfi_kbuff_arr_dmamap[i]
3120 						    );
3121 					if (sc->kbuff_arr[i] != NULL)
3122 						bus_dmamem_free(
3123 						    sc->mfi_kbuff_arr_dmat[i],
3124 						    sc->kbuff_arr[i],
3125 						    sc->mfi_kbuff_arr_dmamap[i]
3126 						    );
3127 					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
3128 						bus_dma_tag_destroy(
3129 						    sc->mfi_kbuff_arr_dmat[i]);
3130 				}
3131 			}
3132 		}
3133 		if (cm) {
3134 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3135 			mfi_release_command(cm);
3136 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3137 		}
3138 
3139 		break;
3140 		}
3141 	case MFI_SET_AEN:
3142 		aen = (struct mfi_ioc_aen *)arg;
3143 		error = mfi_aen_register(sc, aen->aen_seq_num,
3144 		    aen->aen_class_locale);
3145 
3146 		break;
3147 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3148 		{
3149 			devclass_t devclass;
3150 			struct mfi_linux_ioc_packet l_ioc;
3151 			int adapter;
3152 
3153 			devclass = devclass_find("mfi");
3154 			if (devclass == NULL)
3155 				return (ENOENT);
3156 
3157 			error = copyin(arg, &l_ioc, sizeof(l_ioc));
3158 			if (error)
3159 				return (error);
3160 			adapter = l_ioc.lioc_adapter_no;
3161 			sc = devclass_get_softc(devclass, adapter);
3162 			if (sc == NULL)
3163 				return (ENOENT);
3164 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3165 			    cmd, arg, flag));
3166 			break;
3167 		}
3168 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3169 		{
3170 			devclass_t devclass;
3171 			struct mfi_linux_ioc_aen l_aen;
3172 			int adapter;
3173 
3174 			devclass = devclass_find("mfi");
3175 			if (devclass == NULL)
3176 				return (ENOENT);
3177 
3178 			error = copyin(arg, &l_aen, sizeof(l_aen));
3179 			if (error)
3180 				return (error);
3181 			adapter = l_aen.laen_adapter_no;
3182 			sc = devclass_get_softc(devclass, adapter);
3183 			if (sc == NULL)
3184 				return (ENOENT);
3185 			return (mfi_linux_ioctl_int(sc->mfi_cdev,
3186 			    cmd, arg, flag));
3187 			break;
3188 		}
3189 	case MFIIO_PASSTHRU:
3190 		error = mfi_user_command(sc, iop);
3191 		break;
3192 	default:
3193 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3194 		error = ENOENT;
3195 		break;
3196 	}
3197 
3198 	return (error);
3199 }
3200 
3201 static int
3202 mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
3203 {
3204 	struct mfi_softc *sc;
3205 	struct mfi_linux_ioc_packet l_ioc;
3206 	struct mfi_linux_ioc_aen l_aen;
3207 	struct mfi_command *cm = NULL;
3208 	struct mfi_aen *mfi_aen_entry;
3209 	union mfi_sense_ptr sense_ptr;
3210 	uint32_t context;
3211 	uint8_t *data = NULL, *temp;
3212 	int i;
3213 	int error, locked;
3214 
3215 	sc = dev->si_drv1;
3216 	error = 0;
3217 	switch (cmd) {
3218 	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
3219 		error = copyin(arg, &l_ioc, sizeof(l_ioc));
3220 		if (error != 0)
3221 			return (error);
3222 
3223 		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
3224 			return (EINVAL);
3225 		}
3226 
3227 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3228 		if ((cm = mfi_dequeue_free(sc)) == NULL) {
3229 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3230 			return (EBUSY);
3231 		}
3232 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3233 		locked = 0;
3234 
3235 		/*
3236 		 * save off original context since copying from user
3237 		 * will clobber some data
3238 		 */
3239 		context = cm->cm_frame->header.context;
3240 
3241 		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
3242 		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
3243 		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
3244 		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
3245 		cm->cm_frame->header.scsi_status = 0;
3246 		cm->cm_frame->header.pad0 = 0;
3247 		if (l_ioc.lioc_sge_count)
3248 			cm->cm_sg =
3249 			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
3250 		cm->cm_flags = 0;
3251 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
3252 			cm->cm_flags |= MFI_CMD_DATAIN;
3253 		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
3254 			cm->cm_flags |= MFI_CMD_DATAOUT;
3255 		cm->cm_len = cm->cm_frame->header.data_len;
3256 		if (cm->cm_len &&
3257 		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
3258 			cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
3259 			    M_WAITOK | M_ZERO);
3260 		} else {
3261 			cm->cm_data = 0;
3262 		}
3263 
3264 		/* restore header context */
3265 		cm->cm_frame->header.context = context;
3266 
3267 		temp = data;
3268 		if (cm->cm_flags & MFI_CMD_DATAOUT) {
3269 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3270 				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
3271 				       temp,
3272 				       l_ioc.lioc_sgl[i].iov_len);
3273 				if (error != 0) {
3274 					device_printf(sc->mfi_dev,
3275 					    "Copy in failed\n");
3276 					goto out;
3277 				}
3278 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3279 			}
3280 		}
3281 
3282 		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
3283 			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);
3284 
3285 		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
3286 			cm->cm_frame->pass.sense_addr_lo =
3287 			    (uint32_t)cm->cm_sense_busaddr;
3288 			cm->cm_frame->pass.sense_addr_hi =
3289 			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
3290 		}
3291 
3292 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3293 		error = mfi_check_command_pre(sc, cm);
3294 		if (error) {
3295 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3296 			goto out;
3297 		}
3298 
3299 		if ((error = mfi_wait_command(sc, cm)) != 0) {
3300 			device_printf(sc->mfi_dev,
3301 			    "Controller command failed\n");
3302 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3303 			goto out;
3304 		}
3305 
3306 		mfi_check_command_post(sc, cm);
3307 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3308 
3309 		temp = data;
3310 		if (cm->cm_flags & MFI_CMD_DATAIN) {
3311 			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
3312 				error = copyout(temp,
3313 					PTRIN(l_ioc.lioc_sgl[i].iov_base),
3314 					l_ioc.lioc_sgl[i].iov_len);
3315 				if (error != 0) {
3316 					device_printf(sc->mfi_dev,
3317 					    "Copy out failed\n");
3318 					goto out;
3319 				}
3320 				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
3321 			}
3322 		}
3323 
3324 		if (l_ioc.lioc_sense_len) {
3325 			/* get user-space sense ptr then copy out sense */
3326 			bcopy(&((struct mfi_linux_ioc_packet *)arg)
3327 			    ->lioc_frame.raw[l_ioc.lioc_sense_off],
3328 			    &sense_ptr.sense_ptr_data[0],
3329 			    sizeof(sense_ptr.sense_ptr_data));
3330 #ifdef __x86_64__
3331 			/*
3332 			 * Only 32-bit Linux is supported, so zero out
3333 			 * any address above 32 bits.
3334 			 */
3335 			sense_ptr.addr.high = 0;
3336 #endif
3337 			error = copyout(cm->cm_sense, sense_ptr.user_space,
3338 			    l_ioc.lioc_sense_len);
3339 			if (error != 0) {
3340 				device_printf(sc->mfi_dev,
3341 				    "Copy out failed\n");
3342 				goto out;
3343 			}
3344 		}
3345 
3346 		error = copyout(&cm->cm_frame->header.cmd_status,
3347 		    &((struct mfi_linux_ioc_packet *)arg)
3348 		    ->lioc_frame.hdr.cmd_status,
3349 		    1);
3350 		if (error != 0) {
3351 			device_printf(sc->mfi_dev,
3352 				      "Copy out failed\n");
3353 			goto out;
3354 		}
3355 
3356 out:
3357 		mfi_config_unlock(sc, locked);
3358 		if (data)
3359 			kfree(data, M_MFIBUF);
3360 		if (cm) {
3361 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3362 			mfi_release_command(cm);
3363 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3364 		}
3365 
3366 		return (error);
3367 	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
3368 		error = copyin(arg, &l_aen, sizeof(l_aen));
3369 		if (error != 0)
3370 			return (error);
3371 		kprintf("AEN registered for pid %d\n", curproc->p_pid);
3372 		mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
3373 		    M_WAITOK);
3374 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3375 		if (mfi_aen_entry != NULL) {
3376 			mfi_aen_entry->p = curproc;
3377 			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
3378 			    aen_link);
3379 		}
3380 		error = mfi_aen_register(sc, l_aen.laen_seq_num,
3381 		    l_aen.laen_class_locale);
3382 
3383 		if (error != 0) {
3384 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
3385 			    aen_link);
3386 			kfree(mfi_aen_entry, M_MFIBUF);
3387 		}
3388 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3389 
3390 		return (error);
3391 	default:
3392 		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
3393 		error = ENOENT;
3394 		break;
3395 	}
3396 
3397 	return (error);
3398 }
3399 
3400 static int
3401 mfi_kqfilter(struct dev_kqfilter_args *ap)
3402 {
3403 	cdev_t dev = ap->a_head.a_dev;
3404 	struct knote *kn = ap->a_kn;
3405 	struct mfi_softc *sc;
3406 	struct klist *klist;
3407 
3408 	ap->a_result = 0;
3409 	sc = dev->si_drv1;
3410 
3411 	switch (kn->kn_filter) {
3412 	case EVFILT_READ:
3413 		kn->kn_fop = &mfi_read_filterops;
3414 		kn->kn_hook = (caddr_t)sc;
3415 		break;
3416 	case EVFILT_WRITE:
3417 		kn->kn_fop = &mfi_write_filterops;
3418 		kn->kn_hook = (caddr_t)sc;
3419 		break;
3420 	default:
3421 		ap->a_result = EOPNOTSUPP;
3422 		return (0);
3423 	}
3424 
3425 	klist = &sc->mfi_kq.ki_note;
3426 	knote_insert(klist, kn);
3427 
3428 	return(0);
3429 }
3430 
3431 static void
3432 mfi_filter_detach(struct knote *kn)
3433 {
3434 	struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3435 	struct klist *klist = &sc->mfi_kq.ki_note;
3436 
3437 	knote_remove(klist, kn);
3438 }
3439 
3440 static int
3441 mfi_filter_read(struct knote *kn, long hint)
3442 {
3443 	struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3444 	int ready = 0;
3445 
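	/*
	 * Report readable once an AEN has triggered; if no AEN command
	 * is outstanding at all, flag an error so the caller does not
	 * wait forever.
	 */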
3446 	if (sc->mfi_aen_triggered != 0) {
3447 		ready = 1;
3448 		sc->mfi_aen_triggered = 0;
3449 	}
3450 	if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3451 		kn->kn_flags |= EV_ERROR;
3452 
3453 	if (ready == 0)
3454 		sc->mfi_poll_waiting = 1;
3455 
3456 	return (ready);
3457 }
3458 
3459 static int
3460 mfi_filter_write(struct knote *kn, long hint)
3461 {
3462 	return (0);
3463 }
3464 
3465 static void
3466 mfi_dump_all(void)
3467 {
3468 	struct mfi_softc *sc;
3469 	struct mfi_command *cm;
3470 	devclass_t dc;
3471 	time_t deadline;
3472 	int timedout;
3473 	int i;
3474 
3475 	dc = devclass_find("mfi");
3476 	if (dc == NULL) {
3477 		kprintf("No mfi dev class\n");
3478 		return;
3479 	}
3480 
3481 	for (i = 0; ; i++) {
3482 		sc = devclass_get_softc(dc, i);
3483 		if (sc == NULL)
3484 			break;
3485 		device_printf(sc->mfi_dev, "Dumping\n\n");
3486 		timedout = 0;
3487 		deadline = time_second - mfi_cmd_timeout;
3488 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3489 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3490 			if (cm->cm_timestamp < deadline) {
3491 				device_printf(sc->mfi_dev,
3492 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3493 				    cm, (int)(time_second - cm->cm_timestamp));
3494 				MFI_PRINT_CMD(cm);
3495 				timedout++;
3496 			}
3497 		}
3498 
3499 #if 0
3500 		if (timedout)
3501 			MFI_DUMP_CMDS(SC);
3502 #endif
3503 
3504 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3505 	}
3506 
3507 	return;
3508 }
3509 
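/*
 * Watchdog: periodically scan the busy queue and report commands that
 * have been outstanding for more than mfi_cmd_timeout seconds.  The
 * Thunderbolt reset check runs first and may simply reschedule the
 * callout.
 */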
3510 static void
3511 mfi_timeout(void *data)
3512 {
3513 	struct mfi_softc *sc = (struct mfi_softc *)data;
3514 	struct mfi_command *cm;
3515 	time_t deadline;
3516 	int timedout = 0;
3517 
3518 	deadline = time_second - mfi_cmd_timeout;
3519 	if (sc->adpreset == 0) {
3520 		if (!mfi_tbolt_reset(sc)) {
3521 			callout_reset(&sc->mfi_watchdog_callout,
3522 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3523 			return;
3524 		}
3525 	}
3526 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3527 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3528 		if (sc->mfi_aen_cm == cm)
3529 			continue;
3530 		if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3531 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3532 				cm->cm_timestamp = time_second;
3533 			} else {
3534 				device_printf(sc->mfi_dev,
3535 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3536 				     cm, (int)(time_second - cm->cm_timestamp));
3537 				MFI_PRINT_CMD(cm);
3538 				MFI_VALIDATE_CMD(sc, cm);
3539 				timedout++;
3540 			}
3541 		}
3542 	}
3543 
3544 #if 0
3545 	if (timedout)
3546 		MFI_DUMP_CMDS(SC);
3547 #endif
3548 
3549 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3550 
3551 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3552 	    mfi_timeout, sc);
3553 
3554 	if (0)
3555 		mfi_dump_all();
3556 	return;
3557 }
3558