xref: /dragonfly/sys/dev/raid/mfi/mfi.c (revision fa71f50a)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2007 LSI Corp.
28  * Copyright (c) 2007 Rajesh Prabhakaran.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  *
52  * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
53  * FreeBSD projects/head_mfi/ r233016
54  */
55 
56 #include "opt_mfi.h"
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
63 #include <sys/bus.h>
64 #include <sys/eventhandler.h>
65 #include <sys/rman.h>
66 #include <sys/bus_dma.h>
67 #include <sys/buf2.h>
68 #include <sys/uio.h>
69 #include <sys/proc.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
73 #include <sys/taskqueue.h>
74 
75 #include <bus/cam/scsi/scsi_all.h>
76 
77 #include <bus/pci/pcivar.h>
78 
79 #include <dev/raid/mfi/mfireg.h>
80 #include <dev/raid/mfi/mfi_ioctl.h>
81 #include <dev/raid/mfi/mfivar.h>
82 
83 static int	mfi_alloc_commands(struct mfi_softc *);
84 static int	mfi_comms_init(struct mfi_softc *);
85 static int	mfi_get_controller_info(struct mfi_softc *);
86 static int	mfi_get_log_state(struct mfi_softc *,
87 		    struct mfi_evt_log_state **);
88 static int	mfi_parse_entries(struct mfi_softc *, int, int);
89 static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
90 		    uint32_t, void **, size_t);
91 static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
92 static void	mfi_startup(void *arg);
93 static void	mfi_intr(void *arg);
94 static void	mfi_ldprobe(struct mfi_softc *sc);
95 static void	mfi_syspdprobe(struct mfi_softc *sc);
96 static void	mfi_handle_evt(void *context, int pending);
97 static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
98 static void	mfi_aen_complete(struct mfi_command *);
99 static int	mfi_add_ld(struct mfi_softc *sc, int);
100 static void	mfi_add_ld_complete(struct mfi_command *);
101 static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
102 static void	mfi_add_sys_pd_complete(struct mfi_command *);
103 static struct mfi_command *mfi_bio_command(struct mfi_softc *);
104 static void	mfi_bio_complete(struct mfi_command *);
105 static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
106 static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
107 static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
108 static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
109 static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
110 static void	mfi_timeout(void *);
111 static int	mfi_user_command(struct mfi_softc *,
112 		    struct mfi_ioc_passthru *);
113 static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
114 static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
115 static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
116 static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
117 static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
118 static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
119 static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
120 		    uint32_t frame_cnt);
121 static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
122 		    uint32_t frame_cnt);
123 static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
124 static void mfi_config_unlock(struct mfi_softc *sc, int locked);
125 static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
126 static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
127 static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);
128 
129 static void	mfi_filter_detach(struct knote *);
130 static int	mfi_filter_read(struct knote *, long);
131 static int	mfi_filter_write(struct knote *, long);
132 
133 SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
134 static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
135 TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
136 SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
137             0, "event message locale");
138 
139 static int	mfi_event_class = MFI_EVT_CLASS_INFO;
140 TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
141 SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
142           0, "event message class");
143 
144 static int	mfi_max_cmds = 128;
145 TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
146 SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
147 	   0, "Max commands");
148 
149 static int	mfi_detect_jbod_change = 1;
150 TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
151 SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
152 	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");
153 
154 static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
155 TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
156 SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RW, &mfi_cmd_timeout,
157 	   0, "Command timeout (in seconds)");
158 
159 /* Management interface */
160 static d_open_t		mfi_open;
161 static d_close_t	mfi_close;
162 static d_ioctl_t	mfi_ioctl;
163 static d_kqfilter_t	mfi_kqfilter;
164 
165 static struct dev_ops mfi_ops = {
166 	{ "mfi", 0, 0 },
167 	.d_open =	mfi_open,
168 	.d_close =	mfi_close,
169 	.d_ioctl =	mfi_ioctl,
170 	.d_kqfilter =	mfi_kqfilter,
171 };
172 
173 static struct filterops mfi_read_filterops =
174 	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
175 static struct filterops mfi_write_filterops =
176 	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };
177 
178 MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");
179 
180 #define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
181 struct mfi_skinny_dma_info mfi_skinny;
182 
/*
 * Enable firmware->host interrupt delivery on xscale (1064R-class)
 * controllers by programming the outbound interrupt mask register.
 */
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
188 
/*
 * Enable firmware->host interrupt delivery on the PPC-class controller
 * families (1078, GEN2, Skinny).  The 1078/GEN2 paths first write all
 * ones to ODCR0 (presumably clearing any stale outbound doorbell state
 * -- TODO confirm against the register spec) before programming the
 * family-specific mask into OMSK.  An unrecognized family is a driver
 * bug, hence the panic.
 */
static void
mfi_enable_intr_ppc(struct mfi_softc *sc)
{
	if (sc->mfi_flags & MFI_FLAGS_1078) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
	} else {
		panic("unknown adapter type");
	}
}
204 
/*
 * Read the raw firmware status word; on xscale controllers it lives in
 * the outbound message 0 register.
 */
static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}
210 
/*
 * Read the raw firmware status word; on PPC-class controllers it lives
 * in the outbound scratch pad 0 register.
 */
static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}
216 
217 static int
218 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
219 {
220 	int32_t status;
221 
222 	status = MFI_READ4(sc, MFI_OSTS);
223 	if ((status & MFI_OSTS_INTR_VALID) == 0)
224 		return 1;
225 
226 	MFI_WRITE4(sc, MFI_OSTS, status);
227 	return 0;
228 }
229 
/*
 * Interrupt filter for the PPC-class families (1078, GEN2, Skinny).
 * Returns 1 when the pending status lacks this family's reply-message
 * bit, i.e. the interrupt is not ours (shared interrupt line).
 * Otherwise the interrupt is acknowledged and 0 is returned.
 */
static int
mfi_check_clear_intr_ppc(struct mfi_softc *sc)
{
	int32_t status;

	status = MFI_READ4(sc, MFI_OSTS);
	if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
	    ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
	    ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
		return 1;

	/* Skinny acks through the status register itself; 1078/GEN2 ack
	 * through the outbound doorbell clear register. */
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		MFI_WRITE4(sc, MFI_OSTS, status);
	else
		MFI_WRITE4(sc, MFI_ODCR0, status);
	return 0;
}
247 
/*
 * Post a command frame to the firmware's inbound queue port (xscale).
 * The frame's bus address is shifted down by 3 bits and the total frame
 * count is OR'd into the freed-up low bits of the written word.
 */
static void
mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
}
253 
/*
 * Post a command frame to the firmware's inbound queue port (PPC-class).
 * The frame count is shifted left by one and OR'd into the low bits of
 * the frame bus address, with bit 0 always set.  Skinny controllers use
 * a 64-bit queue port written as two 32-bit halves; the high half is
 * always zero here.
 */
static void
mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
{
	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
	} else {
		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
	}
}
264 
/*
 * Step the firmware through its initialization states until it reports
 * READY.  For each non-ready state we may kick the firmware through the
 * inbound doorbell register and then poll the status register in 100ms
 * ticks for up to max_wait seconds.
 *
 * Returns 0 once the firmware is READY, or ENXIO if the firmware
 * faults, reports an unknown state, or remains stuck in one state for
 * the entire wait period.
 */
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			"become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			/* Unrecoverable: the firmware itself has faulted. */
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			/* Skinny/ThunderBolt use a different doorbell register. */
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			/* Ask an already-running firmware to go back to READY. */
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			/* Remember the full status word so scan progress can
			 * be detected below even though the state is unchanged. */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		/* Poll every 100ms for up to max_wait seconds. */
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
346 
347 static void
348 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
349 {
350 	bus_addr_t *addr;
351 
352 	addr = arg;
353 	*addr = segs[0].ds_addr;
354 }
355 
356 int
357 mfi_attach(struct mfi_softc *sc)
358 {
359 	uint32_t status;
360 	int error, commsz, framessz, sensesz;
361 	int frames, unit, max_fw_sge;
362 	uint32_t tb_mem_size = 0;
363 
364 	if (sc == NULL)
365 		return EINVAL;
366 
367 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
368 	    MEGASAS_VERSION);
369 
370 	lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
371 	lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
372 	TAILQ_INIT(&sc->mfi_ld_tqh);
373 	TAILQ_INIT(&sc->mfi_syspd_tqh);
374 	TAILQ_INIT(&sc->mfi_evt_queue);
375 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
376 	TAILQ_INIT(&sc->mfi_aen_pids);
377 	TAILQ_INIT(&sc->mfi_cam_ccbq);
378 
379 	mfi_initq_free(sc);
380 	mfi_initq_ready(sc);
381 	mfi_initq_busy(sc);
382 	mfi_initq_bio(sc);
383 
384 	sc->adpreset = 0;
385 	sc->last_seq_num = 0;
386 	sc->disableOnlineCtrlReset = 1;
387 	sc->issuepend_done = 1;
388 	sc->hw_crit_error = 0;
389 
390 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
391 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
392 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
393 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
394 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
395 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
396 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
397 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
398 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
399 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
400 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
401 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
402 		sc->mfi_tbolt = 1;
403 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
404 	} else {
405 		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
406 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
407 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
408 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
409 	}
410 
411 
412 	/* Before we get too far, see if the firmware is working */
413 	if ((error = mfi_transition_firmware(sc)) != 0) {
414 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
415 		    "error %d\n", error);
416 		return (ENXIO);
417 	}
418 
419 	/* Start: LSIP200113393 */
420 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
421 				1, 0,			/* algnmnt, boundary */
422 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 				BUS_SPACE_MAXADDR,	/* highaddr */
424 				NULL, NULL,		/* filter, filterarg */
425 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
426 				1,			/* msegments */
427 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
428 				0,			/* flags */
429 				&sc->verbuf_h_dmat)) {
430 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
431 		return (ENOMEM);
432 	}
433 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
434 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
435 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
436 		return (ENOMEM);
437 	}
438 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
439 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
440 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
441 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
442 	/* End: LSIP200113393 */
443 
444 	/*
445 	 * Get information needed for sizing the contiguous memory for the
446 	 * frame pool.  Size down the sgl parameter since we know that
447 	 * we will never need more than what's required for MAXPHYS.
448 	 * It would be nice if these constants were available at runtime
449 	 * instead of compile time.
450 	 */
451 	status = sc->mfi_read_fw_status(sc);
452 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
453 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
454 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
455 
456 	/* ThunderBolt Support get the contiguous memory */
457 
458 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
459 		mfi_tbolt_init_globals(sc);
460 		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
461 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
462 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
463 
464 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
465 				1, 0,			/* algnmnt, boundary */
466 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
467 				BUS_SPACE_MAXADDR,	/* highaddr */
468 				NULL, NULL,		/* filter, filterarg */
469 				tb_mem_size,		/* maxsize */
470 				1,			/* msegments */
471 				tb_mem_size,		/* maxsegsize */
472 				0,			/* flags */
473 				&sc->mfi_tb_dmat)) {
474 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
475 			return (ENOMEM);
476 		}
477 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
478 		BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
479 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
480 			return (ENOMEM);
481 		}
482 		bzero(sc->request_message_pool, tb_mem_size);
483 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
484 		sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
485 
486 		/* For ThunderBolt memory init */
487 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
488 				0x100, 0,		/* alignmnt, boundary */
489 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 				BUS_SPACE_MAXADDR,	/* highaddr */
491 				NULL, NULL,		/* filter, filterarg */
492 				MFI_FRAME_SIZE,		/* maxsize */
493 				1,			/* msegments */
494 				MFI_FRAME_SIZE,		/* maxsegsize */
495 				0,			/* flags */
496 				&sc->mfi_tb_init_dmat)) {
497 		device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
498 		return (ENOMEM);
499 		}
500 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
501 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
502 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
503 			return (ENOMEM);
504 		}
505 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
506 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
507 		sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
508 		    &sc->mfi_tb_init_busaddr, 0);
509 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
510 		    tb_mem_size)) {
511 			device_printf(sc->mfi_dev,
512 			    "Thunderbolt pool preparation error\n");
513 			return 0;
514 		}
515 
516 		/*
517 		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
518 		  we are taking it diffrent from what we have allocated for Request
519 		  and reply descriptors to avoid confusion later
520 		*/
521 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
522 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
523 				1, 0,			/* algnmnt, boundary */
524 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
525 				BUS_SPACE_MAXADDR,	/* highaddr */
526 				NULL, NULL,		/* filter, filterarg */
527 				tb_mem_size,		/* maxsize */
528 				1,			/* msegments */
529 				tb_mem_size,		/* maxsegsize */
530 				0,			/* flags */
531 				&sc->mfi_tb_ioc_init_dmat)) {
532 			device_printf(sc->mfi_dev,
533 			    "Cannot allocate comms DMA tag\n");
534 			return (ENOMEM);
535 		}
536 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
537 		    (void **)&sc->mfi_tb_ioc_init_desc,
538 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
539 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
540 			return (ENOMEM);
541 		}
542 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
543 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
544 		sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
545 		    &sc->mfi_tb_ioc_init_busaddr, 0);
546 	}
547 	/*
548 	 * Create the dma tag for data buffers.  Used both for block I/O
549 	 * and for various internal data queries.
550 	 */
551 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
552 				1, 0,			/* algnmnt, boundary */
553 				BUS_SPACE_MAXADDR,	/* lowaddr */
554 				BUS_SPACE_MAXADDR,	/* highaddr */
555 				NULL, NULL,		/* filter, filterarg */
556 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
557 				sc->mfi_max_sge,	/* nsegments */
558 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
559 				BUS_DMA_ALLOCNOW,	/* flags */
560 				&sc->mfi_buffer_dmat)) {
561 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
562 		return (ENOMEM);
563 	}
564 
565 	/*
566 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
567 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
568 	 * entry, so the calculated size here will be will be 1 more than
569 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
570 	 */
571 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
572 	    sizeof(struct mfi_hwcomms);
573 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
574 				1, 0,			/* algnmnt, boundary */
575 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
576 				BUS_SPACE_MAXADDR,	/* highaddr */
577 				NULL, NULL,		/* filter, filterarg */
578 				commsz,			/* maxsize */
579 				1,			/* msegments */
580 				commsz,			/* maxsegsize */
581 				0,			/* flags */
582 				&sc->mfi_comms_dmat)) {
583 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
584 		return (ENOMEM);
585 	}
586 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
587 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
588 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
589 		return (ENOMEM);
590 	}
591 	bzero(sc->mfi_comms, commsz);
592 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
593 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
594 	/*
595 	 * Allocate DMA memory for the command frames.  Keep them in the
596 	 * lower 4GB for efficiency.  Calculate the size of the commands at
597 	 * the same time; each command is one 64 byte frame plus a set of
598          * additional frames for holding sg lists or other data.
599 	 * The assumption here is that the SG list will start at the second
600 	 * frame and not use the unused bytes in the first frame.  While this
601 	 * isn't technically correct, it simplifies the calculation and allows
602 	 * for command frames that might be larger than an mfi_io_frame.
603 	 */
604 	if (sizeof(bus_addr_t) == 8) {
605 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
606 		sc->mfi_flags |= MFI_FLAGS_SG64;
607 	} else {
608 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
609 	}
610 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
611 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
612 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
613 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
614 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
615 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
616 				64, 0,			/* algnmnt, boundary */
617 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
618 				BUS_SPACE_MAXADDR,	/* highaddr */
619 				NULL, NULL,		/* filter, filterarg */
620 				framessz,		/* maxsize */
621 				1,			/* nsegments */
622 				framessz,		/* maxsegsize */
623 				0,			/* flags */
624 				&sc->mfi_frames_dmat)) {
625 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
626 		return (ENOMEM);
627 	}
628 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
629 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
630 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
631 		return (ENOMEM);
632 	}
633 	bzero(sc->mfi_frames, framessz);
634 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
635 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
636 	/*
637 	 * Allocate DMA memory for the frame sense data.  Keep them in the
638 	 * lower 4GB for efficiency
639 	 */
640 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
641 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
642 				4, 0,			/* algnmnt, boundary */
643 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
644 				BUS_SPACE_MAXADDR,	/* highaddr */
645 				NULL, NULL,		/* filter, filterarg */
646 				sensesz,		/* maxsize */
647 				1,			/* nsegments */
648 				sensesz,		/* maxsegsize */
649 				0,			/* flags */
650 				&sc->mfi_sense_dmat)) {
651 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
652 		return (ENOMEM);
653 	}
654 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
655 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
656 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
657 		return (ENOMEM);
658 	}
659 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
660 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
661 	if ((error = mfi_alloc_commands(sc)) != 0)
662 		return (error);
663 
664 	/*
665 	 * Before moving the FW to operational state, check whether
666 	 * hostmemory is required by the FW or not
667 	 */
668 
669 	/* ThunderBolt MFI_IOC2 INIT */
670 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
671 		sc->mfi_disable_intr(sc);
672 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
673 			device_printf(sc->mfi_dev,
674 			    "TB Init has failed with error %d\n",error);
675 			return error;
676 		}
677 
678 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
679 			return error;
680 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
681 			mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
682 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
683 			return (EINVAL);
684 		}
685 		sc->mfi_enable_intr(sc);
686 		sc->map_id = 0;
687 	} else {
688 		if ((error = mfi_comms_init(sc)) != 0)
689 			return (error);
690 
691 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
692 			mfi_intr, sc, &sc->mfi_intr, NULL)) {
693 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
694 			return (EINVAL);
695 		}
696 		sc->mfi_enable_intr(sc);
697 	}
698 	if ((error = mfi_get_controller_info(sc)) != 0)
699 		return (error);
700 	sc->disableOnlineCtrlReset = 0;
701 
702 	/* Register a config hook to probe the bus for arrays */
703 	sc->mfi_ich.ich_func = mfi_startup;
704 	sc->mfi_ich.ich_arg = sc;
705 	sc->mfi_ich.ich_desc = "mfi";
706 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
707 		device_printf(sc->mfi_dev, "Cannot establish configuration "
708 		    "hook\n");
709 		return (EINVAL);
710 	}
711 	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
712 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
713 		return (error);
714 	}
715 
716 	/*
717 	 * Register a shutdown handler.
718 	 */
719 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
720 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
721 		device_printf(sc->mfi_dev, "Warning: shutdown event "
722 		    "registration failed\n");
723 	}
724 
725 	/*
726 	 * Create the control device for doing management
727 	 */
728 	unit = device_get_unit(sc->mfi_dev);
729 	sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
730 	    0640, "mfi%d", unit);
731 	if (unit == 0)
732 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
733 	if (sc->mfi_cdev != NULL)
734 		sc->mfi_cdev->si_drv1 = sc;
735 	sysctl_ctx_init(&sc->mfi_sysctl_ctx);
736 	sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
737 	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
738 	    device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
739 	if (sc->mfi_sysctl_tree == NULL) {
740 		device_printf(sc->mfi_dev, "can't add sysctl node\n");
741 		return (EINVAL);
742 	}
743 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
744 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
745 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
746 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
747 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
748 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
749 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
750 	    &sc->mfi_keep_deleted_volumes, 0,
751 	    "Don't detach the mfid device for a busy volume that is deleted");
752 
753 	device_add_child(sc->mfi_dev, "mfip", -1);
754 	bus_generic_attach(sc->mfi_dev);
755 
756 	/* Start the timeout watchdog */
757 	callout_init_mp(&sc->mfi_watchdog_callout);
758 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
759 	    mfi_timeout, sc);
760 
761 	return (0);
762 }
763 
764 static int
765 mfi_alloc_commands(struct mfi_softc *sc)
766 {
767 	struct mfi_command *cm;
768 	int i, ncmds;
769 
770 	/*
771 	 * XXX Should we allocate all the commands up front, or allocate on
772 	 * demand later like 'aac' does?
773 	 */
774 	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
775 	if (bootverbose)
776 		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
777 		   "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
778 
779 	sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
780 	    M_WAITOK | M_ZERO);
781 
782 	for (i = 0; i < ncmds; i++) {
783 		cm = &sc->mfi_commands[i];
784 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
785 		    sc->mfi_cmd_size * i);
786 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
787 		    sc->mfi_cmd_size * i;
788 		cm->cm_frame->header.context = i;
789 		cm->cm_sense = &sc->mfi_sense[i];
790 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
791 		cm->cm_sc = sc;
792 		cm->cm_index = i;
793 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
794 		    &cm->cm_dmamap) == 0) {
795 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
796 			mfi_release_command(cm);
797 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
798 		}
799 		else
800 			break;
801 		sc->mfi_total_cmds++;
802 	}
803 
804 	return (0);
805 }
806 
/*
 * Return a command to the free pool for reuse.  The interesting header
 * words are scrubbed, but header.context is preserved since it is the
 * per-slot identifier set at allocation time.  The caller must hold the
 * io lock (asserted below).
 */
void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mfi_lockassert(&cm->cm_sc->mfi_io_lock);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/* Words 2 and 3 (the context) are deliberately not cleared. */
	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	/* Reset the driver-side bookkeeping for the next user. */
	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
843 
844 static int
845 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
846     uint32_t opcode, void **bufp, size_t bufsize)
847 {
848 	struct mfi_command *cm;
849 	struct mfi_dcmd_frame *dcmd;
850 	void *buf = NULL;
851 	uint32_t context = 0;
852 
853 	mfi_lockassert(&sc->mfi_io_lock);
854 
855 	cm = mfi_dequeue_free(sc);
856 	if (cm == NULL)
857 		return (EBUSY);
858 
859 	/* Zero out the MFI frame */
860 	context = cm->cm_frame->header.context;
861 	bzero(cm->cm_frame, sizeof(union mfi_frame));
862 	cm->cm_frame->header.context = context;
863 
864 	if ((bufsize > 0) && (bufp != NULL)) {
865 		if (*bufp == NULL) {
866 			buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
867 			if (buf == NULL) {
868 				mfi_release_command(cm);
869 				return (ENOMEM);
870 			}
871 			*bufp = buf;
872 		} else {
873 			buf = *bufp;
874 		}
875 	}
876 
877 	dcmd =  &cm->cm_frame->dcmd;
878 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
879 	dcmd->header.cmd = MFI_CMD_DCMD;
880 	dcmd->header.timeout = 0;
881 	dcmd->header.flags = 0;
882 	dcmd->header.data_len = bufsize;
883 	dcmd->header.scsi_status = 0;
884 	dcmd->opcode = opcode;
885 	cm->cm_sg = &dcmd->sgl;
886 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
887 	cm->cm_flags = 0;
888 	cm->cm_data = buf;
889 	cm->cm_private = buf;
890 	cm->cm_len = bufsize;
891 
892 	*cmp = cm;
893 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
894 		*bufp = buf;
895 	return (0);
896 }
897 
898 static int
899 mfi_comms_init(struct mfi_softc *sc)
900 {
901 	struct mfi_command *cm;
902 	struct mfi_init_frame *init;
903 	struct mfi_init_qinfo *qinfo;
904 	int error;
905 	uint32_t context = 0;
906 
907 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
908 	if ((cm = mfi_dequeue_free(sc)) == NULL)
909 		return (EBUSY);
910 
911 	/* Zero out the MFI frame */
912 	context = cm->cm_frame->header.context;
913 	bzero(cm->cm_frame, sizeof(union mfi_frame));
914 	cm->cm_frame->header.context = context;
915 
916 	/*
917 	 * Abuse the SG list area of the frame to hold the init_qinfo
918 	 * object;
919 	 */
920 	init = &cm->cm_frame->init;
921 	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);
922 
923 	bzero(qinfo, sizeof(struct mfi_init_qinfo));
924 	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
925 	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
926 	    offsetof(struct mfi_hwcomms, hw_reply_q);
927 	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
928 	    offsetof(struct mfi_hwcomms, hw_pi);
929 	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
930 	    offsetof(struct mfi_hwcomms, hw_ci);
931 
932 	init->header.cmd = MFI_CMD_INIT;
933 	init->header.data_len = sizeof(struct mfi_init_qinfo);
934 	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
935 	cm->cm_data = NULL;
936 	cm->cm_flags = MFI_CMD_POLLED;
937 
938 	if ((error = mfi_mapcmd(sc, cm)) != 0) {
939 		device_printf(sc->mfi_dev, "failed to send init command\n");
940 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
941 		return (error);
942 	}
943 	mfi_release_command(cm);
944 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
945 
946 	return (0);
947 }
948 
/*
 * Query controller capabilities via MFI_DCMD_CTRL_GETINFO (polled) and
 * derive sc->mfi_max_io (maximum I/O size in sectors) and the
 * disableOnlineCtrlReset property from the reply.  If the query itself
 * fails, a conservative max_io is computed from the S/G limit instead and
 * the failure is not propagated (error is reset to 0).
 */
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		/* Fall back to an estimate based on the S/G segment limit */
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;	/* deliberate: the fallback is good enough */
		goto out;
	}

	/* DMA completed; sync/unload before reading the reply buffer */
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	/* Max I/O is the smaller of the stripe limit and the request limit */
	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		kfree(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	return (error);
}
990 
/*
 * Fetch the firmware event-log state (sequence numbers) via a polled
 * MFI_DCMD_CTRL_EVENT_GETINFO.  On success *log_state points at a buffer
 * allocated by mfi_dcmd_command() which the caller must kfree().  On
 * failure *log_state may be left NULL; callers must check before use.
 */
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	/* DMA completed; sync/unload before the caller reads *log_state */
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	return (error);
}
1020 
1021 int
1022 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1023 {
1024 	struct mfi_evt_log_state *log_state = NULL;
1025 	union mfi_evt class_locale;
1026 	int error = 0;
1027 	uint32_t seq;
1028 
1029 	class_locale.members.reserved = 0;
1030 	class_locale.members.locale = mfi_event_locale;
1031 	class_locale.members.evt_class  = mfi_event_class;
1032 
1033 	if (seq_start == 0) {
1034 		error = mfi_get_log_state(sc, &log_state);
1035 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1036 		if (error) {
1037 			if (log_state)
1038 				kfree(log_state, M_MFIBUF);
1039 			return (error);
1040 		}
1041 
1042 		/*
1043 		 * Walk through any events that fired since the last
1044 		 * shutdown.
1045 		 */
1046 		mfi_parse_entries(sc, log_state->shutdown_seq_num,
1047 		    log_state->newest_seq_num);
1048 		seq = log_state->newest_seq_num;
1049 	} else
1050 		seq = seq_start;
1051 	mfi_aen_register(sc, seq, class_locale.word);
1052 	if (log_state != NULL)
1053 		kfree(log_state, M_MFIBUF);
1054 
1055 	return 0;
1056 }
1057 
/*
 * Queue a command and sleep until it completes.  Must be called with
 * mfi_io_lock held; lksleep() atomically drops and reacquires the lock
 * while waiting.  Returns the command's cm_error.
 */
int
mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
{

	mfi_lockassert(&sc->mfi_io_lock);
	cm->cm_complete = NULL;


	/*
	 * MegaCli can issue a DCMD of 0.  In this case do nothing
	 * and return 0 to it as status
	 */
	if (cm->cm_frame->dcmd.opcode == 0) {
		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
		cm->cm_error = 0;
		return (cm->cm_error);
	}
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	/*
	 * The command may already have completed inside mfi_startio();
	 * only sleep if it hasn't.  NOTE(review): this presumes the
	 * completion path does a wakeup(cm) -- confirm in mfi_complete().
	 */
	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
		lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
	return (cm->cm_error);
}
1081 
/*
 * Release all driver resources: the watchdog callout, the control device
 * node, the command array and its DMA maps, the interrupt, the sense/
 * frame/comms DMA regions, the ThunderBolt-specific pools (if present),
 * the DMA tags, the sysctl tree, and finally the locks.  Each resource is
 * checked individually so this is safe to call from a partially-failed
 * attach as well as from detach.
 */
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_stop(&sc->mfi_watchdog_callout); /* XXX callout_drain() */

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);
	dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));

	/* Destroy the per-command DMA maps, then the command array itself */
	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		kfree(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* For each DMA region: unload the map, free the memory, kill the tag */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
			if (sc->mfi_cmd_pool_tbolt != NULL) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					kfree(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
		}
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			kfree(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (sc->mfi_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->mfi_sysctl_ctx);

#if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
	if (mtx_initialized(&sc->mfi_io_lock))
#endif
	{
	lockuninit(&sc->mfi_io_lock);
	lockuninit(&sc->mfi_config_lock);
	}

	return;
}
1208 
/*
 * Deferred startup, presumably run via the config_intrhook mechanism once
 * interrupts are available: enable controller interrupts, then probe for
 * logical disks (and system PDs on "Skinny" controllers).  Lock order is
 * config_lock before io_lock, matching the probe functions' assertions.
 */
static void
mfi_startup(void *arg)
{
	struct mfi_softc *sc;

	sc = (struct mfi_softc *)arg;

	/* One-shot: unhook ourselves before doing any work */
	config_intrhook_disestablish(&sc->mfi_ich);

	sc->mfi_enable_intr(sc);
	lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	mfi_ldprobe(sc);
	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
		mfi_syspdprobe(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	lockmgr(&sc->mfi_config_lock, LK_RELEASE);
}
1227 
/*
 * Interrupt handler.  Drains the hardware reply queue between the
 * consumer index (hw_ci) and the firmware's producer index (hw_pi),
 * completing each referenced command, then restarts deferred I/O.
 * Loops back to 'restart' if the firmware produced more replies while
 * we were processing.
 */
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	/* Shared interrupt line: bail if this device didn't interrupt */
	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		/* Guard against a bogus context index from the firmware */
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		/* Reply queue is circular with mfi_max_fw_cmds + 1 slots */
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give defered I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come it.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}
1275 
/*
 * Tell the controller to shut down cleanly via a polled
 * MFI_DCMD_CTRL_SHUTDOWN.  Any outstanding AEN or map-update commands are
 * aborted first so they don't linger across the shutdown.  Returns 0 or
 * an error from command allocation / mfi_mapcmd().
 */
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (error);
	}

	/* Abort the long-running listeners before shutting down */
	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	if (sc->map_update_cmd != NULL)
		mfi_abort(sc, sc->map_update_cmd);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;	/* no data transfer */
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	return (error);
}
1309 
/*
 * Synchronize the set of attached system PD (JBOD) child devices with the
 * firmware's view: query the exposed-PD list via a polled DCMD, attach
 * children for new PDs, and detach children whose PD is gone.  Called
 * with both mfi_config_lock and mfi_io_lock held.
 */
static void
mfi_syspdprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_pd_list *pdlist = NULL;
	struct mfi_system_pd *syspd, *tmp;
	int error, i, found;

	mfi_lockassert(&sc->mfi_config_lock);
	mfi_lockassert(&sc->mfi_io_lock);
	/* Add SYSTEM PD's */
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
	    (void **)&pdlist, sizeof(*pdlist));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Error while forming SYSTEM PD list\n");
		goto out;
	}

	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	cm->cm_frame->dcmd.mbox[1] = 0;
	if (mfi_mapcmd(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get syspd device listing\n");
		goto out;
	}
	/* DMA completed; sync/unload before reading pdlist */
	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev,
		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
		goto out;
	}
	/* Get each PD and add it to the system */
	for (i = 0; i < pdlist->count; i++) {
		/* Skip enclosure entries (device id equals enclosure id) */
		if (pdlist->addr[i].device_id ==
		    pdlist->addr[i].encl_device_id)
			continue;
		found = 0;
		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0)
			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
	}
	/* Delete SYSPD's whose state has been changed */
	TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
		found = 0;
		for (i = 0; i < pdlist->count; i++) {
			if (syspd->pd_id == pdlist->addr[i].device_id)
				found = 1;
		}
		if (found == 0) {
			kprintf("DELETE\n");
			/*
			 * io_lock is dropped around device_delete_child().
			 * NOTE(review): the list could in principle change
			 * while unlocked even with TAILQ_FOREACH_MUTABLE;
			 * config_lock is still held, which presumably
			 * serializes probes -- confirm.
			 */
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			get_mplock();
			device_delete_child(sc->mfi_dev, syspd->pd_dev);
			rel_mplock();
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		}
	}
out:
	if (pdlist)
		kfree(pdlist, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
}
1382 
/*
 * Probe the firmware for logical drives via MFI_DCMD_LD_GET_LIST and
 * attach a child device (mfi_add_ld) for every target not already in
 * sc->mfi_ld_tqh.  Called with both mfi_config_lock and mfi_io_lock held.
 */
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	int error, i;

	mfi_lockassert(&sc->mfi_config_lock);
	mfi_lockassert(&sc->mfi_io_lock);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	/* Issued asynchronously; mfi_wait_command() sleeps for completion */
	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	/* Attach only the targets we don't already know about */
	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		kfree(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
1429 
1430 /*
1431  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1432  * the bits in 24-31 are all set, then it is the number of seconds since
1433  * boot.
1434  */
1435 static const char *
1436 format_timestamp(uint32_t timestamp)
1437 {
1438 	static char buffer[32];
1439 
1440 	if ((timestamp & 0xff000000) == 0xff000000)
1441 		ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1442 		    0x00ffffff);
1443 	else
1444 		ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
1445 	return (buffer);
1446 }
1447 
1448 static const char *
1449 format_class(int8_t class)
1450 {
1451 	static char buffer[6];
1452 
1453 	switch (class) {
1454 	case MFI_EVT_CLASS_DEBUG:
1455 		return ("debug");
1456 	case MFI_EVT_CLASS_PROGRESS:
1457 		return ("progress");
1458 	case MFI_EVT_CLASS_INFO:
1459 		return ("info");
1460 	case MFI_EVT_CLASS_WARNING:
1461 		return ("WARN");
1462 	case MFI_EVT_CLASS_CRITICAL:
1463 		return ("CRIT");
1464 	case MFI_EVT_CLASS_FATAL:
1465 		return ("FATAL");
1466 	case MFI_EVT_CLASS_DEAD:
1467 		return ("DEAD");
1468 	default:
1469 		ksnprintf(buffer, sizeof(buffer), "%d", class);
1470 		return (buffer);
1471 	}
1472 }
1473 
/*
 * Log an AEN event and react to the ones that change device topology:
 * host bus scan requests and PD insert/remove trigger a syspd re-probe
 * (gated by the mfi_detect_jbod_change tunable), and an LD going OFFLINE
 * detaches the corresponding child disk.
 */
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
		    device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and Delete
				 * invalid SYSPD's
				 */
				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
				mfi_syspdprobe(sc);
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * During load time driver reads all the events starting
		 * from the one that has been logged after shutdown. Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
			/* Remove the LD */
			struct mfi_disk *ld;
			/* ld is NULL after the loop if no target matched */
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			Fix: for kernel panics when SSCD is removed
			KASSERT(ld != NULL, ("volume dissappeared"));
			*/
			if (ld != NULL) {
				get_mplock();
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				rel_mplock();
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						get_mplock();
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						rel_mplock();
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
				mfi_syspdprobe(sc);
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
			}
		}
		break;
	}
}
1564 
/*
 * Copy an event detail onto the softc's event queue and kick the SWI
 * taskqueue to process it in mfi_handle_evt().  Allocation is M_NOWAIT;
 * if memory is unavailable the event is silently dropped (best effort).
 * Must be called with mfi_io_lock held.
 */
static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mfi_lockassert(&sc->mfi_io_lock);
	elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}
1578 
/*
 * Taskqueue handler for queued AEN events.  Atomically steals the whole
 * pending list under mfi_io_lock, then decodes each event and frees its
 * element with the lock dropped, so mfi_decode_evt() can take locks of
 * its own.
 */
static void
mfi_handle_evt(void *context, int pending)
{
	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
	struct mfi_softc *sc;
	struct mfi_evt_queue_elm *elm;

	sc = context;
	TAILQ_INIT(&queue);
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, elm, link);
		mfi_decode_evt(sc, &elm->detail);
		kfree(elm, M_MFIBUF);
	}
}
1597 
/*
 * Register an asynchronous-event-notification listener starting at
 * sequence 'seq' for the given class/locale word.  If a listener is
 * already pending: when it already covers the requested class and locale
 * nothing is done; otherwise its class/locale is merged with the request
 * and the old command is aborted so a wider one can be issued.  The
 * command completes via mfi_aen_complete().
 */
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		/* mbox word 1 of the pending command holds its evt word */
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		/* Already listening at least this broadly?  Nothing to do. */
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^current_aen.members.locale)) {
			return (0);
		} else {
			/* Merge locales; keep the more inclusive class */
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_abort(sc, sc->mfi_aen_cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}
	}

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	if (error) {
		goto out;
	}

	/* mbox[0] = starting sequence number, mbox[1] = class/locale word */
	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

out:
	return (error);
}
1652 
/*
 * Completion handler for the AEN listener command.  Unless the command
 * was aborted, queue the received event, wake kqueue/poll waiters, signal
 * registered processes with SIGIO, and re-arm the listener at the next
 * sequence number.  Called with mfi_io_lock held (asserted below).
 */
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	struct proc *p;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mfi_lockassert(&sc->mfi_io_lock);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	if (sc->mfi_aen_cm->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->mfi_aen_cm->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		/* Wake anyone kevent/poll-waiting for AEN activity */
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			KNOTE(&sc->mfi_kq.ki_note, 0);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		/* Notify and drop every registered listener process */
		TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
		    aen_link, tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
				     aen_link);
			p = mfi_aen_entry->p;
			PHOLD(p);
			ksignal(p, SIGIO);
			PRELE(p);
			kfree(mfi_aen_entry, M_MFIBUF);
		}
	}

	kfree(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		/* mfi_aen_setup() takes the lock itself, so drop it here */
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		mfi_aen_setup(sc, seq);
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	}
}
1708 
1709 #define MAX_EVENTS 15
1710 
/*
 * Replay firmware event-log entries from start_seq up to (but not
 * including) stop_seq, queueing each via mfi_queue_evt().  Events are
 * fetched MAX_EVENTS at a time with polled MFI_DCMD_CTRL_EVENT_GET
 * commands until the firmware reports MFI_STAT_NOT_FOUND or an error.
 * The log is a circular buffer, so the stop point may be numerically
 * earlier than the start point.  Returns 0, or EBUSY/ENOMEM on resource
 * exhaustion.  Called without mfi_io_lock held; the lock is taken and
 * dropped around each command operation.
 */
static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class  = mfi_event_class;

	/* One mfi_evt_list already holds one detail; add MAX_EVENTS - 1 */
	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
		* (MAX_EVENTS - 1);
	el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			kfree(el, M_MFIBUF);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (EBUSY);
		}

		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		/* Build the EVENT_GET frame by hand (no mfi_dcmd_command) */
		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}

		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		/* NOT_FOUND means we've drained the log: normal exit */
		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		mfi_release_command(cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_queue_evt(sc, &el->event[i]);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}
		/*
		 * NOTE(review): assumes el->count >= 1 on MFI_STAT_OK;
		 * an empty list would index event[-1] here -- confirm the
		 * firmware never returns OK with count == 0.
		 */
		seq = el->event[el->count - 1].seq + 1;
	}

	kfree(el, M_MFIBUF);
	return (0);
}
1811 
1812 static int
1813 mfi_add_ld(struct mfi_softc *sc, int id)
1814 {
1815 	struct mfi_command *cm;
1816 	struct mfi_dcmd_frame *dcmd = NULL;
1817 	struct mfi_ld_info *ld_info = NULL;
1818 	int error;
1819 
1820 	mfi_lockassert(&sc->mfi_io_lock);
1821 
1822 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
1823 	    (void **)&ld_info, sizeof(*ld_info));
1824 	if (error) {
1825 		device_printf(sc->mfi_dev,
1826 		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
1827 		if (ld_info)
1828 			kfree(ld_info, M_MFIBUF);
1829 		return (error);
1830 	}
1831 	cm->cm_flags = MFI_CMD_DATAIN;
1832 	dcmd = &cm->cm_frame->dcmd;
1833 	dcmd->mbox[0] = id;
1834 	if (mfi_wait_command(sc, cm) != 0) {
1835 		device_printf(sc->mfi_dev,
1836 		    "Failed to get logical drive: %d\n", id);
1837 		kfree(ld_info, M_MFIBUF);
1838 		return (0);
1839 	}
1840 	if (ld_info->ld_config.params.isSSCD != 1) {
1841 		mfi_add_ld_complete(cm);
1842 	} else {
1843 		mfi_release_command(cm);
1844 		if (ld_info)		/* SSCD drives ld_info free here */
1845 			kfree(ld_info, M_MFIBUF);
1846 	}
1847 	return (0);
1848 }
1849 
/*
 * Second half of mfi_add_ld(): if the LD_GET_INFO command succeeded,
 * attach an "mfid" child device carrying ld_info as its ivars.  Consumes
 * the command in all cases and ld_info on failure.  Entered with
 * mfi_io_lock held; the lock is dropped around the Giant-protected
 * newbus operations and reacquired before returning.
 */
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		kfree(ld_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	get_mplock();
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		kfree(ld_info, M_MFIBUF);
		rel_mplock();
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		return;
	}

	/* ld_info ownership passes to the child via its ivars */
	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	rel_mplock();
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}
1885 
1886 static int
1887 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1888 {
1889 	struct mfi_command *cm;
1890 	struct mfi_dcmd_frame *dcmd = NULL;
1891 	struct mfi_pd_info *pd_info = NULL;
1892 	int error;
1893 
1894 	mfi_lockassert(&sc->mfi_io_lock);
1895 
1896 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1897 	    (void **)&pd_info, sizeof(*pd_info));
1898 	if (error) {
1899 		device_printf(sc->mfi_dev,
1900 		    "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1901 		    error);
1902 		if (pd_info)
1903 			kfree(pd_info, M_MFIBUF);
1904 		return (error);
1905 	}
1906 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1907 	dcmd = &cm->cm_frame->dcmd;
1908 	dcmd->mbox[0] = id;
1909 	dcmd->header.scsi_status = 0;
1910 	dcmd->header.pad0 = 0;
1911 	if (mfi_mapcmd(sc, cm) != 0) {
1912 		device_printf(sc->mfi_dev,
1913 		    "Failed to get physical drive info %d\n", id);
1914 		kfree(pd_info, M_MFIBUF);
1915 		return (0);
1916 	}
1917 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1918 	    BUS_DMASYNC_POSTREAD);
1919 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1920 	mfi_add_sys_pd_complete(cm);
1921 	return (0);
1922 }
1923 
/*
 * Second half of mfi_add_sys_pd(): examine the returned pd_info and, if
 * the drive's firmware state is MFI_PD_STATE_SYSTEM, attach an
 * "mfisyspd" child device that takes ownership of pd_info via its
 * ivars.  On every failure path pd_info is freed here; the command is
 * always released.  Called with the io lock held; dropped (with the MP
 * lock taken) around the device-tree work.
 */
static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		/* Firmware rejected the DCMD; nothing to attach. */
		kfree(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		/* Only SYSTEM PDs get their own device node. */
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		kfree(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	get_mplock();
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		kfree(pd_info, M_MFIBUF);
		rel_mplock();
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		return;
	}

	/* Hand pd_info to the child device via ivars. */
	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	rel_mplock();
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}
1966 
1967 static struct mfi_command *
1968 mfi_bio_command(struct mfi_softc *sc)
1969 {
1970 	struct bio *bio;
1971 	struct mfi_command *cm = NULL;
1972 	struct mfi_disk *mfid;
1973 
1974 	/* reserving two commands to avoid starvation for IOCTL */
1975 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1976 		return (NULL);
1977 	if ((bio = mfi_dequeue_bio(sc)) == NULL)
1978 		return (NULL);
1979 	mfid = bio->bio_driver_info;
1980 	if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1981 		cm = mfi_build_syspdio(sc, bio);
1982 	else
1983 		cm = mfi_build_ldio(sc, bio);
1984 	if (!cm)
1985 		mfi_enqueue_bio(sc, bio);
1986 	return cm;
1987 }
1988 
/*
 * Build a SCSI pass-through (READ_10/WRITE_10) frame for a bio aimed at
 * a system physical drive.  Returns the prepared command, or NULL when
 * no free command is available (caller requeues the bio).
 */
static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_command *cm;
	struct buf *bp;
	struct mfi_system_pd *disk;
	struct mfi_pass_frame *pass;
	int flags = 0, blkcount = 0;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame, preserving the firmware context tag. */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	bp = bio->bio_buf;
	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	/* Only the read/write command bits are considered here. */
	switch (bp->b_cmd & 0x03) {
	case BUF_CMD_READ:
		pass->cdb[0] = READ_10;
		flags = MFI_CMD_DATAIN;
		break;
	case BUF_CMD_WRITE:
		pass->cdb[0] = WRITE_10;
		flags = MFI_CMD_DATAOUT;
		break;
	default:
		panic("Invalid bio command");
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	disk = bio->bio_driver_info;
	/* Fill the LBA and Transfer length in CDB */
	/*
	 * NOTE(review): a 10-byte CDB carries only a 32-bit LBA; offsets
	 * whose sector number exceeds 32 bits are silently truncated
	 * here -- confirm syspd drives stay below that limit.
	 */
	pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
	pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
	pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
	pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
	pass->cdb[7] = (blkcount & 0xff00) >> 8;
	pass->cdb[8] = (blkcount & 0x00ff);
	pass->header.target_id = disk->pd_id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	/* Pass frames express data_len in bytes (cf. mfi_build_ldio). */
	pass->header.data_len = bp->b_bcount;
	pass->header.cdb_len = 10;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bp->b_data;
	cm->cm_len = bp->b_bcount;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = flags;
	return (cm);
}
2051 
/*
 * Build an LD read/write frame for a bio aimed at a logical drive.
 * Returns the prepared command, or NULL when no free command is
 * available (caller requeues the bio).
 */
static struct mfi_command *
mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_io_frame *io;
	struct buf *bp;
	struct mfi_disk *disk;
	struct mfi_command *cm;
	int flags, blkcount;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
	    return (NULL);

	/* Zero out the MFI frame, preserving the firmware context tag. */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	bp = bio->bio_buf;
	io = &cm->cm_frame->io;
	/* Only the read/write command bits are considered here. */
	switch (bp->b_cmd & 0x03) {
	case BUF_CMD_READ:
		io->header.cmd = MFI_CMD_LD_READ;
		flags = MFI_CMD_DATAIN;
		break;
	case BUF_CMD_WRITE:
		io->header.cmd = MFI_CMD_LD_WRITE;
		flags = MFI_CMD_DATAOUT;
		break;
	default:
		panic("Invalid bio command");
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	disk = bio->bio_driver_info;
	io->header.target_id = disk->ld_id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	/* IO frames express data_len in sectors, not bytes. */
	io->header.data_len = blkcount;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	/* Full 64-bit LBA, split across lba_hi/lba_lo. */
	io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
	io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bp->b_data;
	cm->cm_len = bp->b_bcount;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = flags;
	return (cm);
}
2106 
2107 static void
2108 mfi_bio_complete(struct mfi_command *cm)
2109 {
2110 	struct bio *bio;
2111 	struct buf *bp;
2112 	struct mfi_frame_header *hdr;
2113 	struct mfi_softc *sc;
2114 
2115 	bio = cm->cm_private;
2116 	bp = bio->bio_buf;
2117 	hdr = &cm->cm_frame->header;
2118 	sc = cm->cm_sc;
2119 
2120 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2121 		bp->b_flags |= B_ERROR;
2122 		bp->b_error = EIO;
2123 		device_printf(sc->mfi_dev, "I/O error, status= %d "
2124 		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2125 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2126 	} else if (cm->cm_error != 0) {
2127 		bp->b_flags |= B_ERROR;
2128 	}
2129 
2130 	mfi_release_command(cm);
2131 	mfi_disk_complete(bio);
2132 }
2133 
/*
 * Pump the start queues: issue already-prepared (ready) commands first,
 * then CAM pass-through requests, then new bio work, until either the
 * controller has frozen the queue (MFI_FLAGS_QFRZN) or no work remains.
 * A command that fails to map is put back on the ready queue and the
 * loop stops.
 */
void
mfi_startio(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct ccb_hdr *ccbh;

	for (;;) {
		/* Don't bother if we're short on resources */
		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
			break;

		/* Try a command that has already been prepared */
		cm = mfi_dequeue_ready(sc);

		if (cm == NULL) {
			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
				cm = sc->mfi_cam_start(ccbh);
		}

		/* Nope, so look for work on the bioq */
		if (cm == NULL)
			cm = mfi_bio_command(sc);

		/* No work available, so exit */
		if (cm == NULL)
			break;

		/* Send the command to the controller */
		if (mfi_mapcmd(sc, cm) != 0) {
			mfi_requeue_ready(cm);
			break;
		}
	}
}
2168 
/*
 * Map a command's data buffer for DMA and issue the frame.  STP frames
 * and data-less commands are sent directly.  For mapped commands the
 * frame is actually issued from mfi_data_cb(); if the load completes
 * asynchronously (EINPROGRESS) the queue is frozen and 0 is returned --
 * the callback will send the frame later.  Must be called with the io
 * lock held.
 */
int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	mfi_lockassert(&sc->mfi_io_lock);

	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
		/* Polled commands may not sleep waiting for DMA resources. */
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		if (sc->MFA_enabled)
			error = mfi_tbolt_send_frame(sc, cm);
		else
			error = mfi_send_frame(sc, cm);
	}

	return (error);
}
2193 
/*
 * bus_dmamap_load() callback: translate the DMA segment list into the
 * frame's scatter/gather list (IEEE "skinny", sg32 or sg64 variant),
 * set the DMA direction flags, account for the extra frames occupied by
 * the SGL, and hand the frame to the hardware.  On a load error the
 * command is completed immediately with cm_error set.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, j, first, dir;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	if (error) {
		kprintf("error %d in callback\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		return;
	}

	/* Use IEEE sgl only for IO's on a SKINNY controller
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on the sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
	     (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
	     (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg_skinny[i].addr = segs[i].ds_addr;
			sgl->sg_skinny[i].len = segs[i].ds_len;
			sgl->sg_skinny[i].flag = 0;
		}
		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		hdr->sg_count = nsegs;
	} else {
		/*
		 * STP frames carry a cm_stp_len-byte header region at the
		 * start of the mapping: SG entry 0 covers just that region
		 * and the data entries are offset past it via `first`
		 * (applied to the first data segment only).
		 */
		j = 0;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			first = cm->cm_stp_len;
			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
				sgl->sg32[j].addr = segs[0].ds_addr;
				sgl->sg32[j++].len = first;
			} else {
				sgl->sg64[j].addr = segs[0].ds_addr;
				sgl->sg64[j++].len = first;
			}
		} else
			first = 0;
		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
			for (i = 0; i < nsegs; i++) {
				sgl->sg32[j].addr = segs[i].ds_addr + first;
				sgl->sg32[j++].len = segs[i].ds_len - first;
				first = 0;
			}
		} else {
			for (i = 0; i < nsegs; i++) {
				sgl->sg64[j].addr = segs[i].ds_addr + first;
				sgl->sg64[j++].len = segs[i].ds_len - first;
				first = 0;
			}
			hdr->flags |= MFI_FRAME_SGL64;
		}
		hdr->sg_count = j;
	}

	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	if (sc->MFA_enabled)
		mfi_tbolt_send_frame(sc, cm);
	else
		mfi_send_frame(sc, cm);
}
2288 
/*
 * Hand a frame to the controller.  Asynchronous commands are
 * timestamped and put on the busy queue before issue; polled commands
 * are marked so the firmware won't post them to the reply queue, and
 * are busy-waited here for up to MFI_POLL_TIMEOUT_SECS.  Returns
 * ETIMEDOUT if a polled command's status never changes, 0 otherwise.
 */
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = MFI_POLL_TIMEOUT_SECS * 1000;

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;
		mfi_enqueue_busy(cm);
	} else {
		/* Sentinel status: polled loop below watches it change. */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;

	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}
2341 
2342 void
2343 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2344 {
2345 	int dir;
2346 
2347 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2348 		dir = 0;
2349 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2350 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2351 			dir |= BUS_DMASYNC_POSTREAD;
2352 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2353 			dir |= BUS_DMASYNC_POSTWRITE;
2354 
2355 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2356 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2357 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2358 	}
2359 
2360 	cm->cm_flags |= MFI_CMD_COMPLETED;
2361 
2362 	if (cm->cm_complete != NULL)
2363 		cm->cm_complete(cm);
2364 	else
2365 		wakeup(cm);
2366 }
2367 
/*
 * Issue a polled MFI_CMD_ABORT targeting cm_abort.  If cm_abort is the
 * outstanding AEN command its abort flag is set first, and after the
 * abort frame has been sent we wait (up to 5 tries of 5 seconds) for
 * the AEN machinery to clear sc->mfi_aen_cm and wake us.  Must be
 * called with the io lock held.  Returns EBUSY if no free command is
 * available, 0 otherwise.
 */
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0;
	uint32_t context = 0;

	mfi_lockassert(&sc->mfi_io_lock);

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		return (EBUSY);
	}

	/* Zero out the MFI frame, preserving the firmware context tag. */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->header.scsi_status = 0;
	/* Identify the victim by context and 64-bit frame bus address. */
	abort->abort_context = cm_abort->cm_frame->header.context;
	abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
	abort->abort_mfi_addr_hi =
	    (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if (sc->mfi_aen_cm)
		sc->mfi_aen_cm->cm_aen_abort = 1;
	mfi_mapcmd(sc, cm);
	mfi_release_command(cm);

	while (i < 5 && sc->mfi_aen_cm != NULL) {
		lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
		    5 * hz);
		i++;
	}

	return (0);
}
2411 
/*
 * Synchronously (polled) write `len` bytes from `virt` to logical disk
 * `id` starting at sector `lba`.  data_len is rounded up to whole
 * sectors.  Returns EBUSY when no free command is available, otherwise
 * the result of mfi_mapcmd().  The DMA map is synced and unloaded and
 * the command released regardless of the outcome.
 */
int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_io_frame *io;
	int error;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/* Zero out the MFI frame, preserving the firmware context tag. */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	io = &cm->cm_frame->io;
	io->header.cmd = MFI_CMD_LD_WRITE;
	io->header.target_id = id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	/* data_len is in sectors for IO frames. */
	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
	io->lba_lo = lba & 0xffffffff;
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	error = mfi_mapcmd(sc, cm);
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
2455 
/*
 * Synchronously (polled) write `len` bytes from `virt` to system PD
 * `id` starting at sector `lba`, via a WRITE_10 pass-through.  Returns
 * EBUSY when no free command is available, otherwise the result of
 * mfi_mapcmd().  The DMA map is synced/unloaded and the command
 * released regardless of the outcome.
 *
 * NOTE(review): unlike mfi_dump_blocks() this does not zero the MFI
 * frame (only the CDB), and the 10-byte CDB carries just the low 32
 * bits of `lba` -- verify both are acceptable for this path.
 */
int
mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	int error;
	int blkcount = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	pass->cdb[0] = WRITE_10;
	pass->cdb[2] = (lba & 0xff000000) >> 24;
	pass->cdb[3] = (lba & 0x00ff0000) >> 16;
	pass->cdb[4] = (lba & 0x0000ff00) >> 8;
	pass->cdb[5] = (lba & 0x000000ff);
	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	pass->cdb[7] = (blkcount & 0xff00) >> 8;
	pass->cdb[8] = (blkcount & 0x00ff);
	pass->header.target_id = id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	/* Pass frames express data_len in bytes. */
	pass->header.data_len = len;
	pass->header.cdb_len = 10;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	error = mfi_mapcmd(sc, cm);
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
2502 
2503 static int
2504 mfi_open(struct dev_open_args *ap)
2505 {
2506 	cdev_t dev = ap->a_head.a_dev;
2507 	struct mfi_softc *sc;
2508 	int error;
2509 
2510 	sc = dev->si_drv1;
2511 
2512 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2513 	if (sc->mfi_detaching)
2514 		error = ENXIO;
2515 	else {
2516 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2517 		error = 0;
2518 	}
2519 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2520 
2521 	return (error);
2522 }
2523 
2524 static int
2525 mfi_close(struct dev_close_args *ap)
2526 {
2527 	cdev_t dev = ap->a_head.a_dev;
2528 	struct mfi_softc *sc;
2529 	struct mfi_aen *mfi_aen_entry, *tmp;
2530 
2531 	sc = dev->si_drv1;
2532 
2533 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2534 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2535 
2536 	TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2537 		if (mfi_aen_entry->p == curproc) {
2538 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2539 			    aen_link);
2540 			kfree(mfi_aen_entry, M_MFIBUF);
2541 		}
2542 	}
2543 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2544 	return (0);
2545 }
2546 
2547 static int
2548 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2549 {
2550 
2551 	switch (opcode) {
2552 	case MFI_DCMD_LD_DELETE:
2553 	case MFI_DCMD_CFG_ADD:
2554 	case MFI_DCMD_CFG_CLEAR:
2555 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2556 		lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
2557 		return (1);
2558 	default:
2559 		return (0);
2560 	}
2561 }
2562 
2563 static void
2564 mfi_config_unlock(struct mfi_softc *sc, int locked)
2565 {
2566 
2567 	if (locked)
2568 		lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2569 }
2570 
2571 /*
2572  * Perform pre-issue checks on commands from userland and possibly veto
2573  * them.
2574  */
static int
mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ld2;
	int error;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	mfi_lockassert(&sc->mfi_io_lock);
	error = 0;
	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		/* mbox[0] names the LD to delete; find its disk device. */
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		if (ld == NULL)
			error = ENOENT;
		else
			/* Disabled here; re-enabled in the post check if
			 * the delete fails. */
			error = mfi_disk_disable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		/* Disable every LD; on failure roll back the ones that
		 * were already disabled. */
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			error = mfi_disk_disable(ld);
			if (error)
				break;
		}
		if (error) {
			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
				if (ld2 == ld)
					break;
				mfi_disk_enable(ld2);
			}
		}
		break;
	case MFI_DCMD_PD_STATE_SET:
		/* mbox[0] holds the PD device id, mbox[2] the new state. */
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else {
			break;
		}
		if (syspd)
			error = mfi_syspd_disable(syspd);
		break;
	default:
		break;
	}
	return (error);
}
2630 
2631 /* Perform post-issue checks on commands from userland. */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume dissappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			/* Delete succeeded: drop the io lock and take the
			 * MP lock for the device-tree removal. */
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			get_mplock();
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			rel_mplock();
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		} else
			/* Delete failed: undo the pre-check disable. */
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			get_mplock();
			TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			rel_mplock();
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		} else {
			/* Clear failed: undo the pre-check disables. */
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		/* New LDs may exist now; rescan for them. */
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK)
			mfi_ldprobe(sc);
		break;
	case MFI_DCMD_PD_STATE_SET:
		/* mbox[0] holds the PD device id, mbox[2] the new state. */
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else {
			break;
		}
		/* If the transition fails then enable the syspd again */
		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
			mfi_syspd_enable(syspd);
		break;
	}
}
2692 
2693 static int
2694 mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
2695 {
2696 	struct mfi_config_data *conf_data = cm->cm_data;
2697 	struct mfi_command *ld_cm = NULL;
2698 	struct mfi_ld_info *ld_info = NULL;
2699 	int error = 0;
2700 
2701 	if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
2702 	    (conf_data->ld[0].params.isSSCD == 1)) {
2703 		error = 1;
2704 	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
2705 		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
2706 		    (void **)&ld_info, sizeof(*ld_info));
2707 		if (error) {
2708 			device_printf(sc->mfi_dev, "Failed to allocate"
2709 			    "MFI_DCMD_LD_GET_INFO %d", error);
2710 			if (ld_info)
2711 				kfree(ld_info, M_MFIBUF);
2712 			return 0;
2713 		}
2714 		ld_cm->cm_flags = MFI_CMD_DATAIN;
2715 		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
2716 		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
2717 		if (mfi_wait_command(sc, ld_cm) != 0) {
2718 			device_printf(sc->mfi_dev, "failed to get log drv\n");
2719 			mfi_release_command(ld_cm);
2720 			kfree(ld_info, M_MFIBUF);
2721 			return 0;
2722 		}
2723 
2724 		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
2725 			kfree(ld_info, M_MFIBUF);
2726 			mfi_release_command(ld_cm);
2727 			return 0;
2728 		} else {
2729 			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
2730 		}
2731 
2732 		if (ld_info->ld_config.params.isSSCD == 1)
2733 			error = 1;
2734 
2735 		mfi_release_command(ld_cm);
2736 		kfree(ld_info, M_MFIBUF);
2737 	}
2738 	return error;
2739 }
2740 
/*
 * Prepare an STP (pass-through with kernel bounce buffers) command from
 * a user mfi_ioc_packet: allocate one DMA tag/buffer pair per user SG
 * entry, copy the user data in, and point both the megasas-style SG
 * array inside the frame and the frame's own sgl at the kernel copies.
 * Returns 0 on success or an errno.
 *
 * NOTE(review): the DMA tags/memory allocated in the loop are not torn
 * down on the early error returns here -- presumably cleaned up
 * elsewhere via sc->mfi_kbuff_arr_dmat[]; verify.
 */
static int
mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
{
	uint8_t i;
	struct mfi_ioc_packet *ioc;
	ioc = (struct mfi_ioc_packet *)arg;
	int sge_size, error;
	struct megasas_sge *kern_sge;

	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
	/* The megasas SG array lives inside the frame at the user-given
	 * offset. */
	kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;

	if (sizeof(bus_addr_t) == 8) {
		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
		cm->cm_extra_frames = 2;
		sge_size = sizeof(struct mfi_sg64);
	} else {
		cm->cm_extra_frames =  (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
		sge_size = sizeof(struct mfi_sg32);
	}

	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
	for (i = 0; i < ioc->mfi_sge_count; i++) {
			if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
			1, 0,			/* algnmnt, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			ioc->mfi_sgl[i].iov_len,/* maxsize */
			2,			/* nsegments */
			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			&sc->mfi_kbuff_arr_dmat[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
			return (ENOMEM);
		}

		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
		    &sc->mfi_kbuff_arr_dmamap[i])) {
			device_printf(sc->mfi_dev,
			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
			return (ENOMEM);
		}

		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
		    &sc->mfi_kbuff_arr_busaddr[i], 0);

		if (!sc->kbuff_arr[i]) {
			device_printf(sc->mfi_dev,
			    "Could not allocate memory for kbuff_arr info\n");
			return -1;
		}
		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;

		/* Mirror the kernel buffer into the frame's own sgl. */
		if (sizeof(bus_addr_t) == 8) {
			cm->cm_frame->stp.sgl.sg64[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg64[i].len =
			    ioc->mfi_sgl[i].iov_len;
		} else {
			cm->cm_frame->stp.sgl.sg32[i].addr =
			    kern_sge[i].phys_addr;
			cm->cm_frame->stp.sgl.sg32[i].len =
			    ioc->mfi_sgl[i].iov_len;
		}

		/* Copy the user data into the kernel bounce buffer. */
		error = copyin(ioc->mfi_sgl[i].iov_base,
		    sc->kbuff_arr[i],
		    ioc->mfi_sgl[i].iov_len);
		if (error != 0) {
			device_printf(sc->mfi_dev, "Copy in failed\n");
			return error;
		}
	}

	cm->cm_flags |=MFI_CMD_MAPPED;
	return 0;
}
2825 
/*
 * Execute a user-supplied DCMD (MFIIO_PASSTHRU path): copy in the user
 * buffer if present, run the pre-issue checks, execute the command, run
 * the post-issue checks, then copy the frame and any data back out.
 * Takes the config lock for config-changing opcodes and the io lock
 * around the command itself.
 */
static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *ioc_buf = NULL;
	uint32_t context;
	int error = 0, locked;


	if (ioc->buf_size > 0) {
		ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
		if (error) {
			device_printf(sc->mfi_dev, "failed to copyin\n");
			kfree(ioc_buf, M_MFIBUF);
			return (error);
		}
	}

	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	/*
	 * Wait for a free command; the function address serves as the
	 * wait channel, and the hz timeout means we re-poll every second
	 * even without an explicit wakeup on that channel.
	 */
	while ((cm = mfi_dequeue_free(sc)) == NULL)
		lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

	/* Save context for later */
	context = cm->cm_frame->header.context;

	dcmd = &cm->cm_frame->dcmd;
	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_data = ioc_buf;
	cm->cm_len = ioc->buf_size;

	/* restore context */
	cm->cm_frame->header.context = context;

	/* Cheat since we don't know if we're writing or reading */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;

	error = mfi_check_command_pre(sc, cm);
	if (error)
		goto out;

	error = mfi_wait_command(sc, cm);
	if (error) {
		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
		goto out;
	}
	/* Return the completed frame (status etc.) to the caller. */
	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
	mfi_check_command_post(sc, cm);
out:
	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	mfi_config_unlock(sc, locked);
	if (ioc->buf_size > 0)
		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
	if (ioc_buf)
		kfree(ioc_buf, M_MFIBUF);
	return (error);
}
2890 
2891 #define	PTRIN(p)		((void *)(uintptr_t)(p))
2892 
/*
 * Management ioctl entry point for the mfi control device.
 *
 * Dispatches queue statistics (MFIIO_STATS), logical-disk lookup
 * (MFIIO_QUERY_DISK), raw firmware frame pass-through (MFI_CMD), AEN
 * registration (MFI_SET_AEN), user DCMD pass-through (MFIIO_PASSTHRU),
 * and the Linux megaraid_sas ioctl shims.  Returns 0 or an errno.
 */
static int
mfi_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	int flag = ap->a_fflag;
	caddr_t arg = ap->a_data;
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context;
	union mfi_sense_ptr sense_ptr;
	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
	size_t len;
	int i, res;
	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;

	/*
	 * Refuse all management traffic while the adapter is resetting,
	 * has hit a critical hardware error, or still has pending
	 * commands to reissue after a reset.
	 */
	if (sc->adpreset)
		return EBUSY;

	if (sc->hw_crit_error)
		return EBUSY;

	if (sc->issuepend_done == 0)
		return EBUSY;

	switch (cmd) {
	case MFIIO_STATS:
		/* Copy out one of the driver's per-queue statistics blocks. */
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFIIO_QUERY_DISK:
	{
		/*
		 * Map a firmware array id to the mfid(4) disk attached to
		 * it, reporting presence, open state, and device name.
		 */
		struct mfi_query_disk *qd;
		struct mfi_disk *ld;

		qd = (struct mfi_query_disk *)arg;
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == qd->array_id)
				break;
		}
		if (ld == NULL) {
			qd->present = 0;
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (0);
		}
		qd->present = 1;
		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
			qd->open = 1;
		bzero(qd->devname, SPECNAMELEN + 1);
		ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		break;
	}
	case MFI_CMD:
		{
		/*
		 * Raw frame pass-through from userland management tools.
		 * The user supplies a complete MFI frame plus an iovec list
		 * describing the data buffers; the driver bounces the data
		 * through a contiguous kernel buffer.
		 */
		devclass_t devclass;
		ioc = (struct mfi_ioc_packet *)arg;
		int adapter;

		/*
		 * The ioctl may target a different adapter than the one
		 * whose device node was opened; re-resolve the softc.
		 */
		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
		}
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (EBUSY);
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;
		cm->cm_frame->header.context = cm->cm_index;

		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MEGAMFI_FRAME_SIZE);
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		/* Derive DMA direction flags from the user-supplied frame. */
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* STP carries extra payload in the first iovec. */
			cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
			cm->cm_len += cm->cm_stp_len;
		}
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/*
			 * NOTE(review): a non-zero 'res' from mfi_stp_cmd()
			 * is never folded into 'error', so this failure path
			 * appears to return 0 to userland — confirm against
			 * upstream, which assigns the result to 'error'.
			 */
			res = mfi_stp_cmd(sc, cm, arg);
			if (res != 0)
				goto out;
		} else {
			/* Gather the user's scatter list into the bounce buffer. */
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
					addr = ioc->mfi_sgl[i].iov_base;
					len = ioc->mfi_sgl[i].iov_len;
					error = copyin(addr, temp, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy in failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		/*
		 * DCMDs may need the configuration lock (held across the
		 * command); mfi_config_unlock() at 'out' undoes this.
		 */
		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc,
			     cm->cm_frame->dcmd.opcode);

		/* Point pass-through frames at the command's sense buffer. */
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		/* SSCD-related commands bypass the pre/post checks. */
		skip_pre_post = mfi_check_for_sscd(sc, cm);
		if (!skip_pre_post) {
			error = mfi_check_command_pre(sc, cm);
			if (error) {
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				goto out;
			}
		}

		/* Issue the frame and sleep until the controller completes it. */
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			goto out;
		}

		if (!skip_pre_post)
			mfi_check_command_post(sc, cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		/* Scatter the bounce buffer back out to the user's iovecs. */
		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
					addr = ioc->mfi_sgl[i].iov_base;
					len = ioc->mfi_sgl[i].iov_len;
					error = copyout(temp, addr, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy out failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Reflect the firmware completion status back to the caller. */
		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		/* Common cleanup: config lock, bounce buffer, STP DMA, command. */
		mfi_config_unlock(sc, locked);
		if (data)
			kfree(data, M_MFIBUF);
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* Tear down the kernel buffers set up by mfi_stp_cmd(). */
			for (i = 0; i < 2; i++) {
				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr != 0)
						bus_dmamap_unload(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->kbuff_arr[i] != NULL)
						bus_dmamem_free(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->kbuff_arr[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
						bus_dma_tag_destroy(
						    sc->mfi_kbuff_arr_dmat[i]);
				}
			}
		}
		if (cm) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}

		break;
		}
	case MFI_SET_AEN:
		/* Register for asynchronous event notifications. */
		aen = (struct mfi_ioc_aen *)arg;
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);

		break;
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_packet l_ioc;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			/* Peek at the packet only to find the target adapter. */
			error = copyin(arg, &l_ioc, sizeof(l_ioc));
			if (error)
				return (error);
			adapter = l_ioc.lioc_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag));
			break;
		}
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_aen l_aen;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			/* Peek at the AEN request only to find the adapter. */
			error = copyin(arg, &l_aen, sizeof(l_aen));
			if (error)
				return (error);
			adapter = l_aen.laen_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag));
			break;
		}
	case MFIIO_PASSTHRU:
		error = mfi_user_command(sc, iop);
		break;
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
3202 
/*
 * Back end for the Linux megaraid_sas ioctl shim (MFI_LINUX_CMD_2 and
 * MFI_LINUX_SET_AEN_2).  Mirrors the native MFI_CMD pass-through path
 * but uses the 32-bit Linux ioctl structures, so user pointers are
 * widened with PTRIN().  Returns 0 or an errno.
 */
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	union mfi_sense_ptr sense_ptr;
	uint32_t context;
	uint8_t *data = NULL, *temp;
	int i;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		/* Bound the user-controlled SGE count before using it. */
		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (EBUSY);
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		/* Derive DMA direction flags from the user-supplied frame. */
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			/* Contiguous kernel bounce buffer for the whole transfer. */
			cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Gather the user's 32-bit scatter list into the bounce buffer. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				       temp,
				       l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		/* DCMDs may need the configuration lock across the command. */
		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		/* Point pass-through frames at the command's sense buffer. */
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			goto out;
		}

		/* Issue the frame and sleep until the controller completes it. */
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		/* Scatter the bounce buffer back out to the user's iovecs. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
					PTRIN(l_ioc.lioc_sgl[i].iov_base),
					l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet*)arg)
                            ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __x86_64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Reflect the firmware completion status back to the caller. */
		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed\n");
			goto out;
		}

out:
		/* Common cleanup: config lock, bounce buffer, command slot. */
		mfi_config_unlock(sc, locked);
		if (data)
			kfree(data, M_MFIBUF);
		if (cm) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		/*
		 * NOTE(review): kmalloc(M_WAITOK) sleeps rather than fail,
		 * so the NULL check below is redundant — the entry is
		 * always inserted before mfi_aen_register() runs.
		 */
		mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		/* Registration failed: unwind the tracking entry. */
		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			kfree(mfi_aen_entry, M_MFIBUF);
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
3401 
3402 static int
3403 mfi_kqfilter(struct dev_kqfilter_args *ap)
3404 {
3405 	cdev_t dev = ap->a_head.a_dev;
3406 	struct knote *kn = ap->a_kn;
3407 	struct mfi_softc *sc;
3408 	struct klist *klist;
3409 
3410 	ap->a_result = 0;
3411 	sc = dev->si_drv1;
3412 
3413 	switch (kn->kn_filter) {
3414 	case EVFILT_READ:
3415 		kn->kn_fop = &mfi_read_filterops;
3416 		kn->kn_hook = (caddr_t)sc;
3417 		break;
3418 	case EVFILT_WRITE:
3419 		kn->kn_fop = &mfi_write_filterops;
3420 		kn->kn_hook = (caddr_t)sc;
3421 		break;
3422 	default:
3423 		ap->a_result = EOPNOTSUPP;
3424 		return (0);
3425 	}
3426 
3427 	klist = &sc->mfi_kq.ki_note;
3428 	knote_insert(klist, kn);
3429 
3430 	return(0);
3431 }
3432 
3433 static void
3434 mfi_filter_detach(struct knote *kn)
3435 {
3436 	struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3437 	struct klist *klist = &sc->mfi_kq.ki_note;
3438 
3439 	knote_remove(klist, kn);
3440 }
3441 
3442 static int
3443 mfi_filter_read(struct knote *kn, long hint)
3444 {
3445 	struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3446 	int ready = 0;
3447 
3448 	if (sc->mfi_aen_triggered != 0) {
3449 		ready = 1;
3450 		sc->mfi_aen_triggered = 0;
3451 	}
3452 	if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
3453 		kn->kn_flags |= EV_ERROR;
3454 
3455 	if (ready == 0)
3456 		sc->mfi_poll_waiting = 1;
3457 
3458 	return (ready);
3459 }
3460 
/*
 * EVFILT_WRITE event routine: the mfi control device is never
 * writable-ready, so always report no events.
 */
static int
mfi_filter_write(struct knote *kn, long hint)
{
	return (0);
}
3466 
3467 static void
3468 mfi_dump_all(void)
3469 {
3470 	struct mfi_softc *sc;
3471 	struct mfi_command *cm;
3472 	devclass_t dc;
3473 	time_t deadline;
3474 	int timedout;
3475 	int i;
3476 
3477 	dc = devclass_find("mfi");
3478 	if (dc == NULL) {
3479 		kprintf("No mfi dev class\n");
3480 		return;
3481 	}
3482 
3483 	for (i = 0; ; i++) {
3484 		sc = devclass_get_softc(dc, i);
3485 		if (sc == NULL)
3486 			break;
3487 		device_printf(sc->mfi_dev, "Dumping\n\n");
3488 		timedout = 0;
3489 		deadline = time_uptime - mfi_cmd_timeout;
3490 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3491 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3492 			if (cm->cm_timestamp < deadline) {
3493 				device_printf(sc->mfi_dev,
3494 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3495 				    cm, (int)(time_uptime - cm->cm_timestamp));
3496 				MFI_PRINT_CMD(cm);
3497 				timedout++;
3498 			}
3499 		}
3500 
3501 #if 0
3502 		if (timedout)
3503 			MFI_DUMP_CMDS(SC);
3504 #endif
3505 
3506 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3507 	}
3508 
3509 	return;
3510 }
3511 
3512 static void
3513 mfi_timeout(void *data)
3514 {
3515 	struct mfi_softc *sc = (struct mfi_softc *)data;
3516 	struct mfi_command *cm;
3517 	time_t deadline;
3518 	int timedout = 0;
3519 
3520 	deadline = time_uptime - mfi_cmd_timeout;
3521 	if (sc->adpreset == 0) {
3522 		if (!mfi_tbolt_reset(sc)) {
3523 			callout_reset(&sc->mfi_watchdog_callout,
3524 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3525 			return;
3526 		}
3527 	}
3528 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3529 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3530 		if (sc->mfi_aen_cm == cm)
3531 			continue;
3532 		if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3533 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3534 				cm->cm_timestamp = time_uptime;
3535 			} else {
3536 				device_printf(sc->mfi_dev,
3537 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3538 				     cm, (int)(time_uptime - cm->cm_timestamp));
3539 				MFI_PRINT_CMD(cm);
3540 				MFI_VALIDATE_CMD(sc, cm);
3541 				timedout++;
3542 			}
3543 		}
3544 	}
3545 
3546 #if 0
3547 	if (timedout)
3548 		MFI_DUMP_CMDS(SC);
3549 #endif
3550 
3551 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3552 
3553 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3554 	    mfi_timeout, sc);
3555 
3556 	if (0)
3557 		mfi_dump_all();
3558 	return;
3559 }
3560