xref: /dragonfly/sys/dev/raid/mfi/mfi.c (revision 4ff4d99f)
1 /*-
2  * Copyright (c) 2006 IronPort Systems
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*-
27  * Copyright (c) 2007 LSI Corp.
28  * Copyright (c) 2007 Rajesh Prabhakaran.
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  *
40  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
41  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
44  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50  * SUCH DAMAGE.
51  *
52  * $FreeBSD: src/sys/dev/mfi/mfi.c,v 1.62 2011/11/09 21:53:49 delphij Exp $
53  * FreeBSD projects/head_mfi/ r233016
54  */
55 
56 #include "opt_mfi.h"
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sysctl.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
63 #include <sys/bus.h>
64 #include <sys/eventhandler.h>
65 #include <sys/rman.h>
66 #include <sys/bus_dma.h>
67 #include <sys/buf2.h>
68 #include <sys/uio.h>
69 #include <sys/proc.h>
70 #include <sys/signalvar.h>
71 #include <sys/device.h>
72 #include <sys/mplock2.h>
73 #include <sys/taskqueue.h>
74 
75 #include <bus/cam/scsi/scsi_all.h>
76 
77 #include <bus/pci/pcivar.h>
78 
79 #include <dev/raid/mfi/mfireg.h>
80 #include <dev/raid/mfi/mfi_ioctl.h>
81 #include <dev/raid/mfi/mfivar.h>
82 
/* Forward declarations for the driver-internal helpers defined below. */
static int	mfi_alloc_commands(struct mfi_softc *);
static int	mfi_comms_init(struct mfi_softc *);
static int	mfi_get_controller_info(struct mfi_softc *);
static int	mfi_get_log_state(struct mfi_softc *,
		    struct mfi_evt_log_state **);
static int	mfi_parse_entries(struct mfi_softc *, int, int);
static int	mfi_dcmd_command(struct mfi_softc *, struct mfi_command **,
		    uint32_t, void **, size_t);
static void	mfi_data_cb(void *, bus_dma_segment_t *, int, int);
static void	mfi_startup(void *arg);
static void	mfi_intr(void *arg);
static void	mfi_ldprobe(struct mfi_softc *sc);
static void	mfi_syspdprobe(struct mfi_softc *sc);
static void	mfi_handle_evt(void *context, int pending);
static int	mfi_aen_register(struct mfi_softc *sc, int seq, int locale);
static void	mfi_aen_complete(struct mfi_command *);
static int	mfi_add_ld(struct mfi_softc *sc, int);
static void	mfi_add_ld_complete(struct mfi_command *);
static int	mfi_add_sys_pd(struct mfi_softc *sc, int);
static void	mfi_add_sys_pd_complete(struct mfi_command *);
static struct mfi_command *mfi_bio_command(struct mfi_softc *);
static void	mfi_bio_complete(struct mfi_command *);
static struct mfi_command *mfi_build_ldio(struct mfi_softc *,struct bio*);
static struct mfi_command *mfi_build_syspdio(struct mfi_softc *,struct bio*);
static int	mfi_send_frame(struct mfi_softc *, struct mfi_command *);
static int	mfi_abort(struct mfi_softc *, struct mfi_command *);
static int	mfi_linux_ioctl_int(struct cdev *, u_long, caddr_t, int);
static void	mfi_timeout(void *);
static int	mfi_user_command(struct mfi_softc *,
		    struct mfi_ioc_passthru *);
/* Per-adapter-generation register access methods (hooked into the softc). */
static void	mfi_enable_intr_xscale(struct mfi_softc *sc);
static void	mfi_enable_intr_ppc(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_xscale(struct mfi_softc *sc);
static int32_t	mfi_read_fw_status_ppc(struct mfi_softc *sc);
static int	mfi_check_clear_intr_xscale(struct mfi_softc *sc);
static int	mfi_check_clear_intr_ppc(struct mfi_softc *sc);
static void	mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static void	mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add,
		    uint32_t frame_cnt);
static int mfi_config_lock(struct mfi_softc *sc, uint32_t opcode);
static void mfi_config_unlock(struct mfi_softc *sc, int locked);
static int mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm);
static void mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm);
static int mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm);

/* kqueue filter support for the management device node. */
static void	mfi_filter_detach(struct knote *);
static int	mfi_filter_read(struct knote *, long);
static int	mfi_filter_write(struct knote *, long);

/* Tunables / sysctls under hw.mfi.* controlling event reporting and limits. */
SYSCTL_NODE(_hw, OID_AUTO, mfi, CTLFLAG_RD, 0, "MFI driver parameters");
static int	mfi_event_locale = MFI_EVT_LOCALE_ALL;
TUNABLE_INT("hw.mfi.event_locale", &mfi_event_locale);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_locale, CTLFLAG_RW, &mfi_event_locale,
            0, "event message locale");

static int	mfi_event_class = MFI_EVT_CLASS_INFO;
TUNABLE_INT("hw.mfi.event_class", &mfi_event_class);
SYSCTL_INT(_hw_mfi, OID_AUTO, event_class, CTLFLAG_RW, &mfi_event_class,
          0, "event message class");

static int	mfi_max_cmds = 128;
TUNABLE_INT("hw.mfi.max_cmds", &mfi_max_cmds);
SYSCTL_INT(_hw_mfi, OID_AUTO, max_cmds, CTLFLAG_RD, &mfi_max_cmds,
	   0, "Max commands");

static int	mfi_detect_jbod_change = 1;
TUNABLE_INT("hw.mfi.detect_jbod_change", &mfi_detect_jbod_change);
SYSCTL_INT(_hw_mfi, OID_AUTO, detect_jbod_change, CTLFLAG_RW,
	   &mfi_detect_jbod_change, 0, "Detect a change to a JBOD");

static int	mfi_cmd_timeout = MFI_CMD_TIMEOUT;
TUNABLE_INT("hw.mfi.cmd_timeout", &mfi_cmd_timeout);
SYSCTL_INT(_hw_mfi, OID_AUTO, cmd_timeout, CTLFLAG_RW, &mfi_cmd_timeout,
	   0, "Command timeout (in seconds)");

/* Management interface */
static d_open_t		mfi_open;
static d_close_t	mfi_close;
static d_ioctl_t	mfi_ioctl;
static d_kqfilter_t	mfi_kqfilter;

static struct dev_ops mfi_ops = {
	{ "mfi", 0, 0 },
	.d_open =	mfi_open,
	.d_close =	mfi_close,
	.d_ioctl =	mfi_ioctl,
	.d_kqfilter =	mfi_kqfilter,
};

static struct filterops mfi_read_filterops =
	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_read };
static struct filterops mfi_write_filterops =
	{ FILTEROP_ISFD, NULL, mfi_filter_detach, mfi_filter_write };

MALLOC_DEFINE(M_MFIBUF, "mfibuf", "Buffers for the MFI driver");

#define MFI_INQ_LENGTH SHORT_INQUIRY_LENGTH
struct mfi_skinny_dma_info mfi_skinny;
182 
/*
 * Enable firmware interrupts on xscale-based (1064R) adapters by writing
 * the outbound interrupt mask register.
 */
static void
mfi_enable_intr_xscale(struct mfi_softc *sc)
{
	MFI_WRITE4(sc, MFI_OMSK, 0x01);
}
188 
189 static void
190 mfi_enable_intr_ppc(struct mfi_softc *sc)
191 {
192 	if (sc->mfi_flags & MFI_FLAGS_1078) {
193 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
194 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_1078_EIM);
195 	} else if (sc->mfi_flags & MFI_FLAGS_GEN2) {
196 		MFI_WRITE4(sc, MFI_ODCR0, 0xFFFFFFFF);
197 		MFI_WRITE4(sc, MFI_OMSK, ~MFI_GEN2_EIM);
198 	} else if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
199 		MFI_WRITE4(sc, MFI_OMSK, ~0x00000001);
200 	} else {
201 		panic("unknown adapter type");
202 	}
203 }
204 
/* Read the raw firmware status word from an xscale adapter (OMSG0). */
static int32_t
mfi_read_fw_status_xscale(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OMSG0);
}
210 
/* Read the raw firmware status word from a ppc-style adapter (OSP0). */
static int32_t
mfi_read_fw_status_ppc(struct mfi_softc *sc)
{
	return MFI_READ4(sc, MFI_OSP0);
}
216 
217 static int
218 mfi_check_clear_intr_xscale(struct mfi_softc *sc)
219 {
220 	int32_t status;
221 
222 	status = MFI_READ4(sc, MFI_OSTS);
223 	if ((status & MFI_OSTS_INTR_VALID) == 0)
224 		return 1;
225 
226 	MFI_WRITE4(sc, MFI_OSTS, status);
227 	return 0;
228 }
229 
230 static int
231 mfi_check_clear_intr_ppc(struct mfi_softc *sc)
232 {
233 	int32_t status;
234 
235 	status = MFI_READ4(sc, MFI_OSTS);
236 	if (((sc->mfi_flags & MFI_FLAGS_1078) && !(status & MFI_1078_RM)) ||
237 	    ((sc->mfi_flags & MFI_FLAGS_GEN2) && !(status & MFI_GEN2_RM)) ||
238 	    ((sc->mfi_flags & MFI_FLAGS_SKINNY) && !(status & MFI_SKINNY_RM)))
239 		return 1;
240 
241 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
242 		MFI_WRITE4(sc, MFI_OSTS, status);
243 	else
244 		MFI_WRITE4(sc, MFI_ODCR0, status);
245 	return 0;
246 }
247 
248 static void
249 mfi_issue_cmd_xscale(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
250 {
251 	MFI_WRITE4(sc, MFI_IQP,(bus_add >>3) | frame_cnt);
252 }
253 
254 static void
255 mfi_issue_cmd_ppc(struct mfi_softc *sc, bus_addr_t bus_add, uint32_t frame_cnt)
256 {
257 	if (sc->mfi_flags & MFI_FLAGS_SKINNY) {
258 		MFI_WRITE4(sc, MFI_IQPL, (bus_add | frame_cnt << 1) | 1);
259 		MFI_WRITE4(sc, MFI_IQPH, 0x00000000);
260 	} else {
261 		MFI_WRITE4(sc, MFI_IQP, (bus_add | frame_cnt << 1) | 1);
262 	}
263 }
264 
/*
 * Drive the firmware state machine toward MFI_FWSTATE_READY, issuing the
 * doorbell write each intermediate state requires and polling (100 ms
 * granularity) for the state to advance.  Returns 0 once the firmware is
 * READY, or ENXIO on a firmware fault, an unknown state, or a state that
 * fails to advance within its allotted wait time.
 */
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t fw_state, cur_state;
	int max_wait, i;
	uint32_t cur_abs_reg_val = 0;
	uint32_t prev_abs_reg_val = 0;

	cur_abs_reg_val = sc->mfi_read_fw_status(sc);
	fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
	while (fw_state != MFI_FWSTATE_READY) {
		if (bootverbose)
			device_printf(sc->mfi_dev, "Waiting for firmware to "
			"become ready\n");
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_FWSTATE_FAULT:
			device_printf(sc->mfi_dev, "Firmware fault\n");
			return (ENXIO);
		case MFI_FWSTATE_WAIT_HANDSHAKE:
			/* Skinny/Thunderbolt adapters use a separate doorbell. */
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_CLEAR_HANDSHAKE);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_OPERATIONAL:
			/* Ask an already-operational firmware to go back to READY. */
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, 7);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_READY);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_UNDEFINED:
		case MFI_FWSTATE_BB_INIT:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT_2:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_FW_INIT:
		case MFI_FWSTATE_FLUSH_CACHE:
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		case MFI_FWSTATE_DEVICE_SCAN:
			max_wait = MFI_RESET_WAIT_TIME; /* wait for 180 seconds */
			/* Remember the full status word to detect scan progress. */
			prev_abs_reg_val = cur_abs_reg_val;
			break;
		case MFI_FWSTATE_BOOT_MESSAGE_PENDING:
			if (sc->mfi_flags & MFI_FLAGS_SKINNY || sc->mfi_flags & MFI_FLAGS_TBOLT)
			    MFI_WRITE4(sc, MFI_SKINNY_IDB, MFI_FWINIT_HOTPLUG);
			else
			    MFI_WRITE4(sc, MFI_IDB, MFI_FWINIT_HOTPLUG);
			max_wait = MFI_RESET_WAIT_TIME;
			break;
		default:
			device_printf(sc->mfi_dev, "Unknown firmware state %#x\n",
			    fw_state);
			return (ENXIO);
		}
		/* Poll every 100 ms for up to max_wait seconds. */
		for (i = 0; i < (max_wait * 10); i++) {
			cur_abs_reg_val = sc->mfi_read_fw_status(sc);
			fw_state = cur_abs_reg_val & MFI_FWSTATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == MFI_FWSTATE_DEVICE_SCAN) {
			/* Check the device scanning progress */
			if (prev_abs_reg_val != cur_abs_reg_val)
				continue;
		}
		if (fw_state == cur_state) {
			device_printf(sc->mfi_dev, "Firmware stuck in state "
			    "%#x\n", fw_state);
			return (ENXIO);
		}
	}
	return (0);
}
346 
347 static void
348 mfi_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
349 {
350 	bus_addr_t *addr;
351 
352 	addr = arg;
353 	*addr = segs[0].ds_addr;
354 }
355 
356 int
357 mfi_attach(struct mfi_softc *sc)
358 {
359 	uint32_t status;
360 	int error, commsz, framessz, sensesz;
361 	int frames, unit, max_fw_sge;
362 	uint32_t tb_mem_size = 0;
363 
364 	if (sc == NULL)
365 		return EINVAL;
366 
367 	device_printf(sc->mfi_dev, "Megaraid SAS driver Ver %s \n",
368 	    MEGASAS_VERSION);
369 
370 	lockinit(&sc->mfi_io_lock, "MFI I/O lock", 0, LK_CANRECURSE);
371 	lockinit(&sc->mfi_config_lock, "MFI config", 0, LK_CANRECURSE);
372 	TAILQ_INIT(&sc->mfi_ld_tqh);
373 	TAILQ_INIT(&sc->mfi_syspd_tqh);
374 	TAILQ_INIT(&sc->mfi_evt_queue);
375 	TASK_INIT(&sc->mfi_evt_task, 0, mfi_handle_evt, sc);
376 	TAILQ_INIT(&sc->mfi_aen_pids);
377 	TAILQ_INIT(&sc->mfi_cam_ccbq);
378 
379 	mfi_initq_free(sc);
380 	mfi_initq_ready(sc);
381 	mfi_initq_busy(sc);
382 	mfi_initq_bio(sc);
383 
384 	sc->adpreset = 0;
385 	sc->last_seq_num = 0;
386 	sc->disableOnlineCtrlReset = 1;
387 	sc->issuepend_done = 1;
388 	sc->hw_crit_error = 0;
389 
390 	if (sc->mfi_flags & MFI_FLAGS_1064R) {
391 		sc->mfi_enable_intr = mfi_enable_intr_xscale;
392 		sc->mfi_read_fw_status = mfi_read_fw_status_xscale;
393 		sc->mfi_check_clear_intr = mfi_check_clear_intr_xscale;
394 		sc->mfi_issue_cmd = mfi_issue_cmd_xscale;
395 	} else if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
396 		sc->mfi_enable_intr = mfi_tbolt_enable_intr_ppc;
397 		sc->mfi_disable_intr = mfi_tbolt_disable_intr_ppc;
398 		sc->mfi_read_fw_status = mfi_tbolt_read_fw_status_ppc;
399 		sc->mfi_check_clear_intr = mfi_tbolt_check_clear_intr_ppc;
400 		sc->mfi_issue_cmd = mfi_tbolt_issue_cmd_ppc;
401 		sc->mfi_adp_reset = mfi_tbolt_adp_reset;
402 		sc->mfi_tbolt = 1;
403 		TAILQ_INIT(&sc->mfi_cmd_tbolt_tqh);
404 	} else {
405 		sc->mfi_enable_intr =  mfi_enable_intr_ppc;
406 		sc->mfi_read_fw_status = mfi_read_fw_status_ppc;
407 		sc->mfi_check_clear_intr = mfi_check_clear_intr_ppc;
408 		sc->mfi_issue_cmd = mfi_issue_cmd_ppc;
409 	}
410 
411 
412 	/* Before we get too far, see if the firmware is working */
413 	if ((error = mfi_transition_firmware(sc)) != 0) {
414 		device_printf(sc->mfi_dev, "Firmware not in READY state, "
415 		    "error %d\n", error);
416 		return (ENXIO);
417 	}
418 
419 	/* Start: LSIP200113393 */
420 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
421 				1, 0,			/* algnmnt, boundary */
422 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
423 				BUS_SPACE_MAXADDR,	/* highaddr */
424 				NULL, NULL,		/* filter, filterarg */
425 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsize */
426 				1,			/* msegments */
427 				MEGASAS_MAX_NAME*sizeof(bus_addr_t),			/* maxsegsize */
428 				0,			/* flags */
429 				&sc->verbuf_h_dmat)) {
430 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmat DMA tag\n");
431 		return (ENOMEM);
432 	}
433 	if (bus_dmamem_alloc(sc->verbuf_h_dmat, (void **)&sc->verbuf,
434 	    BUS_DMA_NOWAIT, &sc->verbuf_h_dmamap)) {
435 		device_printf(sc->mfi_dev, "Cannot allocate verbuf_h_dmamap memory\n");
436 		return (ENOMEM);
437 	}
438 	bzero(sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t));
439 	bus_dmamap_load(sc->verbuf_h_dmat, sc->verbuf_h_dmamap,
440 	    sc->verbuf, MEGASAS_MAX_NAME*sizeof(bus_addr_t),
441 	    mfi_addr_cb, &sc->verbuf_h_busaddr, 0);
442 	/* End: LSIP200113393 */
443 
444 	/*
445 	 * Get information needed for sizing the contiguous memory for the
446 	 * frame pool.  Size down the sgl parameter since we know that
447 	 * we will never need more than what's required for MAXPHYS.
448 	 * It would be nice if these constants were available at runtime
449 	 * instead of compile time.
450 	 */
451 	status = sc->mfi_read_fw_status(sc);
452 	sc->mfi_max_fw_cmds = status & MFI_FWSTATE_MAXCMD_MASK;
453 	max_fw_sge = (status & MFI_FWSTATE_MAXSGL_MASK) >> 16;
454 	sc->mfi_max_sge = min(max_fw_sge, ((MFI_MAXPHYS / PAGE_SIZE) + 1));
455 
456 	/* ThunderBolt Support get the contiguous memory */
457 
458 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
459 		mfi_tbolt_init_globals(sc);
460 		device_printf(sc->mfi_dev, "MaxCmd = %x MaxSgl = %x state = %x \n",
461 		    sc->mfi_max_fw_cmds, sc->mfi_max_sge, status);
462 		tb_mem_size = mfi_tbolt_get_memory_requirement(sc);
463 
464 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
465 				1, 0,			/* algnmnt, boundary */
466 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
467 				BUS_SPACE_MAXADDR,	/* highaddr */
468 				NULL, NULL,		/* filter, filterarg */
469 				tb_mem_size,		/* maxsize */
470 				1,			/* msegments */
471 				tb_mem_size,		/* maxsegsize */
472 				0,			/* flags */
473 				&sc->mfi_tb_dmat)) {
474 			device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
475 			return (ENOMEM);
476 		}
477 		if (bus_dmamem_alloc(sc->mfi_tb_dmat, (void **)&sc->request_message_pool,
478 		BUS_DMA_NOWAIT, &sc->mfi_tb_dmamap)) {
479 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
480 			return (ENOMEM);
481 		}
482 		bzero(sc->request_message_pool, tb_mem_size);
483 		bus_dmamap_load(sc->mfi_tb_dmat, sc->mfi_tb_dmamap,
484 		sc->request_message_pool, tb_mem_size, mfi_addr_cb, &sc->mfi_tb_busaddr, 0);
485 
486 		/* For ThunderBolt memory init */
487 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
488 				0x100, 0,		/* alignmnt, boundary */
489 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
490 				BUS_SPACE_MAXADDR,	/* highaddr */
491 				NULL, NULL,		/* filter, filterarg */
492 				MFI_FRAME_SIZE,		/* maxsize */
493 				1,			/* msegments */
494 				MFI_FRAME_SIZE,		/* maxsegsize */
495 				0,			/* flags */
496 				&sc->mfi_tb_init_dmat)) {
497 		device_printf(sc->mfi_dev, "Cannot allocate init DMA tag\n");
498 		return (ENOMEM);
499 		}
500 		if (bus_dmamem_alloc(sc->mfi_tb_init_dmat, (void **)&sc->mfi_tb_init,
501 		    BUS_DMA_NOWAIT, &sc->mfi_tb_init_dmamap)) {
502 			device_printf(sc->mfi_dev, "Cannot allocate init memory\n");
503 			return (ENOMEM);
504 		}
505 		bzero(sc->mfi_tb_init, MFI_FRAME_SIZE);
506 		bus_dmamap_load(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap,
507 		sc->mfi_tb_init, MFI_FRAME_SIZE, mfi_addr_cb,
508 		    &sc->mfi_tb_init_busaddr, 0);
509 		if (mfi_tbolt_init_desc_pool(sc, sc->request_message_pool,
510 		    tb_mem_size)) {
511 			device_printf(sc->mfi_dev,
512 			    "Thunderbolt pool preparation error\n");
513 			return 0;
514 		}
515 
516 		/*
517 		  Allocate DMA memory mapping for MPI2 IOC Init descriptor,
518 		  we are taking it diffrent from what we have allocated for Request
519 		  and reply descriptors to avoid confusion later
520 		*/
521 		tb_mem_size = sizeof(struct MPI2_IOC_INIT_REQUEST);
522 		if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
523 				1, 0,			/* algnmnt, boundary */
524 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
525 				BUS_SPACE_MAXADDR,	/* highaddr */
526 				NULL, NULL,		/* filter, filterarg */
527 				tb_mem_size,		/* maxsize */
528 				1,			/* msegments */
529 				tb_mem_size,		/* maxsegsize */
530 				0,			/* flags */
531 				&sc->mfi_tb_ioc_init_dmat)) {
532 			device_printf(sc->mfi_dev,
533 			    "Cannot allocate comms DMA tag\n");
534 			return (ENOMEM);
535 		}
536 		if (bus_dmamem_alloc(sc->mfi_tb_ioc_init_dmat,
537 		    (void **)&sc->mfi_tb_ioc_init_desc,
538 		    BUS_DMA_NOWAIT, &sc->mfi_tb_ioc_init_dmamap)) {
539 			device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
540 			return (ENOMEM);
541 		}
542 		bzero(sc->mfi_tb_ioc_init_desc, tb_mem_size);
543 		bus_dmamap_load(sc->mfi_tb_ioc_init_dmat, sc->mfi_tb_ioc_init_dmamap,
544 		sc->mfi_tb_ioc_init_desc, tb_mem_size, mfi_addr_cb,
545 		    &sc->mfi_tb_ioc_init_busaddr, 0);
546 	}
547 	/*
548 	 * Create the dma tag for data buffers.  Used both for block I/O
549 	 * and for various internal data queries.
550 	 */
551 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
552 				1, 0,			/* algnmnt, boundary */
553 				BUS_SPACE_MAXADDR,	/* lowaddr */
554 				BUS_SPACE_MAXADDR,	/* highaddr */
555 				NULL, NULL,		/* filter, filterarg */
556 				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
557 				sc->mfi_max_sge,	/* nsegments */
558 				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
559 				BUS_DMA_ALLOCNOW,	/* flags */
560 				&sc->mfi_buffer_dmat)) {
561 		device_printf(sc->mfi_dev, "Cannot allocate buffer DMA tag\n");
562 		return (ENOMEM);
563 	}
564 
565 	/*
566 	 * Allocate DMA memory for the comms queues.  Keep it under 4GB for
567 	 * efficiency.  The mfi_hwcomms struct includes space for 1 reply queue
568 	 * entry, so the calculated size here will be will be 1 more than
569 	 * mfi_max_fw_cmds.  This is apparently a requirement of the hardware.
570 	 */
571 	commsz = (sizeof(uint32_t) * sc->mfi_max_fw_cmds) +
572 	    sizeof(struct mfi_hwcomms);
573 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
574 				1, 0,			/* algnmnt, boundary */
575 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
576 				BUS_SPACE_MAXADDR,	/* highaddr */
577 				NULL, NULL,		/* filter, filterarg */
578 				commsz,			/* maxsize */
579 				1,			/* msegments */
580 				commsz,			/* maxsegsize */
581 				0,			/* flags */
582 				&sc->mfi_comms_dmat)) {
583 		device_printf(sc->mfi_dev, "Cannot allocate comms DMA tag\n");
584 		return (ENOMEM);
585 	}
586 	if (bus_dmamem_alloc(sc->mfi_comms_dmat, (void **)&sc->mfi_comms,
587 	    BUS_DMA_NOWAIT, &sc->mfi_comms_dmamap)) {
588 		device_printf(sc->mfi_dev, "Cannot allocate comms memory\n");
589 		return (ENOMEM);
590 	}
591 	bzero(sc->mfi_comms, commsz);
592 	bus_dmamap_load(sc->mfi_comms_dmat, sc->mfi_comms_dmamap,
593 	    sc->mfi_comms, commsz, mfi_addr_cb, &sc->mfi_comms_busaddr, 0);
594 	/*
595 	 * Allocate DMA memory for the command frames.  Keep them in the
596 	 * lower 4GB for efficiency.  Calculate the size of the commands at
597 	 * the same time; each command is one 64 byte frame plus a set of
598          * additional frames for holding sg lists or other data.
599 	 * The assumption here is that the SG list will start at the second
600 	 * frame and not use the unused bytes in the first frame.  While this
601 	 * isn't technically correct, it simplifies the calculation and allows
602 	 * for command frames that might be larger than an mfi_io_frame.
603 	 */
604 	if (sizeof(bus_addr_t) == 8) {
605 		sc->mfi_sge_size = sizeof(struct mfi_sg64);
606 		sc->mfi_flags |= MFI_FLAGS_SG64;
607 	} else {
608 		sc->mfi_sge_size = sizeof(struct mfi_sg32);
609 	}
610 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
611 		sc->mfi_sge_size = sizeof(struct mfi_sg_skinny);
612 	frames = (sc->mfi_sge_size * sc->mfi_max_sge - 1) / MFI_FRAME_SIZE + 2;
613 	sc->mfi_cmd_size = frames * MFI_FRAME_SIZE;
614 	framessz = sc->mfi_cmd_size * sc->mfi_max_fw_cmds;
615 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
616 				64, 0,			/* algnmnt, boundary */
617 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
618 				BUS_SPACE_MAXADDR,	/* highaddr */
619 				NULL, NULL,		/* filter, filterarg */
620 				framessz,		/* maxsize */
621 				1,			/* nsegments */
622 				framessz,		/* maxsegsize */
623 				0,			/* flags */
624 				&sc->mfi_frames_dmat)) {
625 		device_printf(sc->mfi_dev, "Cannot allocate frame DMA tag\n");
626 		return (ENOMEM);
627 	}
628 	if (bus_dmamem_alloc(sc->mfi_frames_dmat, (void **)&sc->mfi_frames,
629 	    BUS_DMA_NOWAIT, &sc->mfi_frames_dmamap)) {
630 		device_printf(sc->mfi_dev, "Cannot allocate frames memory\n");
631 		return (ENOMEM);
632 	}
633 	bzero(sc->mfi_frames, framessz);
634 	bus_dmamap_load(sc->mfi_frames_dmat, sc->mfi_frames_dmamap,
635 	    sc->mfi_frames, framessz, mfi_addr_cb, &sc->mfi_frames_busaddr,0);
636 	/*
637 	 * Allocate DMA memory for the frame sense data.  Keep them in the
638 	 * lower 4GB for efficiency
639 	 */
640 	sensesz = sc->mfi_max_fw_cmds * MFI_SENSE_LEN;
641 	if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
642 				4, 0,			/* algnmnt, boundary */
643 				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
644 				BUS_SPACE_MAXADDR,	/* highaddr */
645 				NULL, NULL,		/* filter, filterarg */
646 				sensesz,		/* maxsize */
647 				1,			/* nsegments */
648 				sensesz,		/* maxsegsize */
649 				0,			/* flags */
650 				&sc->mfi_sense_dmat)) {
651 		device_printf(sc->mfi_dev, "Cannot allocate sense DMA tag\n");
652 		return (ENOMEM);
653 	}
654 	if (bus_dmamem_alloc(sc->mfi_sense_dmat, (void **)&sc->mfi_sense,
655 	    BUS_DMA_NOWAIT, &sc->mfi_sense_dmamap)) {
656 		device_printf(sc->mfi_dev, "Cannot allocate sense memory\n");
657 		return (ENOMEM);
658 	}
659 	bus_dmamap_load(sc->mfi_sense_dmat, sc->mfi_sense_dmamap,
660 	    sc->mfi_sense, sensesz, mfi_addr_cb, &sc->mfi_sense_busaddr, 0);
661 	if ((error = mfi_alloc_commands(sc)) != 0)
662 		return (error);
663 
664 	/*
665 	 * Before moving the FW to operational state, check whether
666 	 * hostmemory is required by the FW or not
667 	 */
668 
669 	/* ThunderBolt MFI_IOC2 INIT */
670 	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
671 		sc->mfi_disable_intr(sc);
672 		if ((error = mfi_tbolt_init_MFI_queue(sc)) != 0) {
673 			device_printf(sc->mfi_dev,
674 			    "TB Init has failed with error %d\n",error);
675 			return error;
676 		}
677 
678 		if ((error = mfi_tbolt_alloc_cmd(sc)) != 0)
679 			return error;
680 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
681 			mfi_intr_tbolt, sc, &sc->mfi_intr, NULL)) {
682 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
683 			return (EINVAL);
684 		}
685 		sc->mfi_enable_intr(sc);
686 		sc->map_id = 0;
687 	} else {
688 		if ((error = mfi_comms_init(sc)) != 0)
689 			return (error);
690 
691 		if (bus_setup_intr(sc->mfi_dev, sc->mfi_irq, INTR_MPSAFE,
692 			mfi_intr, sc, &sc->mfi_intr, NULL)) {
693 			device_printf(sc->mfi_dev, "Cannot set up interrupt\n");
694 			return (EINVAL);
695 		}
696 		sc->mfi_enable_intr(sc);
697 	}
698 	if ((error = mfi_get_controller_info(sc)) != 0)
699 		return (error);
700 	sc->disableOnlineCtrlReset = 0;
701 
702 	/* Register a config hook to probe the bus for arrays */
703 	sc->mfi_ich.ich_func = mfi_startup;
704 	sc->mfi_ich.ich_arg = sc;
705 	sc->mfi_ich.ich_desc = "mfi";
706 	if (config_intrhook_establish(&sc->mfi_ich) != 0) {
707 		device_printf(sc->mfi_dev, "Cannot establish configuration "
708 		    "hook\n");
709 		return (EINVAL);
710 	}
711 	if ((error = mfi_aen_setup(sc, 0), 0) != 0) {
712 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
713 		return (error);
714 	}
715 
716 	/*
717 	 * Register a shutdown handler.
718 	 */
719 	if ((sc->mfi_eh = EVENTHANDLER_REGISTER(shutdown_final, mfi_shutdown,
720 	    sc, SHUTDOWN_PRI_DEFAULT)) == NULL) {
721 		device_printf(sc->mfi_dev, "Warning: shutdown event "
722 		    "registration failed\n");
723 	}
724 
725 	/*
726 	 * Create the control device for doing management
727 	 */
728 	unit = device_get_unit(sc->mfi_dev);
729 	sc->mfi_cdev = make_dev(&mfi_ops, unit, UID_ROOT, GID_OPERATOR,
730 	    0640, "mfi%d", unit);
731 	if (unit == 0)
732 		make_dev_alias(sc->mfi_cdev, "megaraid_sas_ioctl_node");
733 	if (sc->mfi_cdev != NULL)
734 		sc->mfi_cdev->si_drv1 = sc;
735 	sysctl_ctx_init(&sc->mfi_sysctl_ctx);
736 	sc->mfi_sysctl_tree = SYSCTL_ADD_NODE(&sc->mfi_sysctl_ctx,
737 	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
738 	    device_get_nameunit(sc->mfi_dev), CTLFLAG_RD, 0, "");
739 	if (sc->mfi_sysctl_tree == NULL) {
740 		device_printf(sc->mfi_dev, "can't add sysctl node\n");
741 		return (EINVAL);
742 	}
743 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
744 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
745 	    OID_AUTO, "delete_busy_volumes", CTLFLAG_RW,
746 	    &sc->mfi_delete_busy_volumes, 0, "Allow removal of busy volumes");
747 	SYSCTL_ADD_INT(&sc->mfi_sysctl_ctx,
748 	    SYSCTL_CHILDREN(sc->mfi_sysctl_tree),
749 	    OID_AUTO, "keep_deleted_volumes", CTLFLAG_RW,
750 	    &sc->mfi_keep_deleted_volumes, 0,
751 	    "Don't detach the mfid device for a busy volume that is deleted");
752 
753 	device_add_child(sc->mfi_dev, "mfip", -1);
754 	bus_generic_attach(sc->mfi_dev);
755 
756 	/* Start the timeout watchdog */
757 	callout_init_mp(&sc->mfi_watchdog_callout);
758 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
759 	    mfi_timeout, sc);
760 
761 	return (0);
762 }
763 
764 static int
765 mfi_alloc_commands(struct mfi_softc *sc)
766 {
767 	struct mfi_command *cm;
768 	int i, ncmds;
769 
770 	/*
771 	 * XXX Should we allocate all the commands up front, or allocate on
772 	 * demand later like 'aac' does?
773 	 */
774 	ncmds = MIN(mfi_max_cmds, sc->mfi_max_fw_cmds);
775 	if (bootverbose)
776 		device_printf(sc->mfi_dev, "Max fw cmds= %d, sizing driver "
777 		   "pool to %d\n", sc->mfi_max_fw_cmds, ncmds);
778 
779 	sc->mfi_commands = kmalloc(sizeof(struct mfi_command) * ncmds, M_MFIBUF,
780 	    M_WAITOK | M_ZERO);
781 
782 	for (i = 0; i < ncmds; i++) {
783 		cm = &sc->mfi_commands[i];
784 		cm->cm_frame = (union mfi_frame *)((uintptr_t)sc->mfi_frames +
785 		    sc->mfi_cmd_size * i);
786 		cm->cm_frame_busaddr = sc->mfi_frames_busaddr +
787 		    sc->mfi_cmd_size * i;
788 		cm->cm_frame->header.context = i;
789 		cm->cm_sense = &sc->mfi_sense[i];
790 		cm->cm_sense_busaddr= sc->mfi_sense_busaddr + MFI_SENSE_LEN * i;
791 		cm->cm_sc = sc;
792 		cm->cm_index = i;
793 		if (bus_dmamap_create(sc->mfi_buffer_dmat, 0,
794 		    &cm->cm_dmamap) == 0) {
795 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
796 			mfi_release_command(cm);
797 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
798 		}
799 		else
800 			break;
801 		sc->mfi_total_cmds++;
802 	}
803 
804 	return (0);
805 }
806 
/*
 * Return a command to the controller's free pool.
 *
 * Scrubs the hardware frame and the driver-side bookkeeping in 'cm' so
 * the command can be reused, then places it on the softc's free queue.
 * The per-controller io lock must be held by the caller.
 */
void
mfi_release_command(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	uint32_t *hdr_data;

	mfi_lockassert(&cm->cm_sc->mfi_io_lock);

	/*
	 * Zero out the important fields of the frame, but make sure the
	 * context field is preserved.  For efficiency, handle the fields
	 * as 32 bit words.  Clear out the first S/G entry too for safety.
	 */
	hdr = &cm->cm_frame->header;
	if (cm->cm_data != NULL && hdr->sg_count) {
		cm->cm_sg->sg32[0].len = 0;
		cm->cm_sg->sg32[0].addr = 0;
	}

	/*
	 * Words 2 and 3 hold the context tag the firmware echoes back in
	 * completions; they are deliberately left untouched here.
	 */
	hdr_data = (uint32_t *)cm->cm_frame;
	hdr_data[0] = 0;	/* cmd, sense_len, cmd_status, scsi_status */
	hdr_data[1] = 0;	/* target_id, lun_id, cdb_len, sg_count */
	hdr_data[4] = 0;	/* flags, timeout */
	hdr_data[5] = 0;	/* data_len */

	/* Reset driver-side state so stale pointers cannot leak into reuse */
	cm->cm_extra_frames = 0;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_data = NULL;
	cm->cm_sg = 0;
	cm->cm_total_frame_size = 0;
	cm->retry_for_fw_reset = 0;

	mfi_enqueue_free(cm);
}
843 
844 static int
845 mfi_dcmd_command(struct mfi_softc *sc, struct mfi_command **cmp,
846     uint32_t opcode, void **bufp, size_t bufsize)
847 {
848 	struct mfi_command *cm;
849 	struct mfi_dcmd_frame *dcmd;
850 	void *buf = NULL;
851 	uint32_t context = 0;
852 
853 	mfi_lockassert(&sc->mfi_io_lock);
854 
855 	cm = mfi_dequeue_free(sc);
856 	if (cm == NULL)
857 		return (EBUSY);
858 
859 	/* Zero out the MFI frame */
860 	context = cm->cm_frame->header.context;
861 	bzero(cm->cm_frame, sizeof(union mfi_frame));
862 	cm->cm_frame->header.context = context;
863 
864 	if ((bufsize > 0) && (bufp != NULL)) {
865 		if (*bufp == NULL) {
866 			buf = kmalloc(bufsize, M_MFIBUF, M_NOWAIT|M_ZERO);
867 			if (buf == NULL) {
868 				mfi_release_command(cm);
869 				return (ENOMEM);
870 			}
871 			*bufp = buf;
872 		} else {
873 			buf = *bufp;
874 		}
875 	}
876 
877 	dcmd =  &cm->cm_frame->dcmd;
878 	bzero(dcmd->mbox, MFI_MBOX_SIZE);
879 	dcmd->header.cmd = MFI_CMD_DCMD;
880 	dcmd->header.timeout = 0;
881 	dcmd->header.flags = 0;
882 	dcmd->header.data_len = bufsize;
883 	dcmd->header.scsi_status = 0;
884 	dcmd->opcode = opcode;
885 	cm->cm_sg = &dcmd->sgl;
886 	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
887 	cm->cm_flags = 0;
888 	cm->cm_data = buf;
889 	cm->cm_private = buf;
890 	cm->cm_len = bufsize;
891 
892 	*cmp = cm;
893 	if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL))
894 		*bufp = buf;
895 	return (0);
896 }
897 
/*
 * Send the MFI_CMD_INIT frame that hands the firmware the addresses of
 * the shared reply queue and its producer/consumer indices.  Must run
 * before normal command traffic can flow.  Issued polled, so this is
 * safe during early attach before interrupts are enabled.
 *
 * Returns 0 on success, EBUSY if no free command is available, or the
 * error from mfi_mapcmd().
 */
static int
mfi_comms_init(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	struct mfi_init_frame *init;
	struct mfi_init_qinfo *qinfo;
	int error;
	uint32_t context = 0;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (EBUSY);
	}

	/* Zero out the MFI frame */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	/*
	 * Abuse the SG list area of the frame to hold the init_qinfo
	 * object;
	 */
	init = &cm->cm_frame->init;
	qinfo = (struct mfi_init_qinfo *)((uintptr_t)init + MFI_FRAME_SIZE);

	/* Describe the shared-memory reply queue to the firmware */
	bzero(qinfo, sizeof(struct mfi_init_qinfo));
	qinfo->rq_entries = sc->mfi_max_fw_cmds + 1;
	qinfo->rq_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_reply_q);
	qinfo->pi_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_pi);
	qinfo->ci_addr_lo = sc->mfi_comms_busaddr +
	    offsetof(struct mfi_hwcomms, hw_ci);

	/* qinfo lives at frame + MFI_FRAME_SIZE, hence the busaddr offset */
	init->header.cmd = MFI_CMD_INIT;
	init->header.data_len = sizeof(struct mfi_init_qinfo);
	init->qinfo_new_addr_lo = cm->cm_frame_busaddr + MFI_FRAME_SIZE;
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	/*
	 * NOTE(review): on mfi_mapcmd() failure 'cm' is not returned to
	 * the free pool -- presumably the firmware may still own the
	 * frame at that point; confirm before "fixing".
	 */
	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "failed to send init command\n");
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (error);
	}
	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	return (0);
}
950 
/*
 * Query the controller with MFI_DCMD_CTRL_GETINFO and derive the
 * maximum I/O size (sectors) plus the online-controller-reset
 * capability flag.  If the query fails, fall back to a conservative
 * maximum computed from the S/G limit and report success anyway so
 * attach can continue.
 */
static int
mfi_get_controller_info(struct mfi_softc *sc)
{
	struct mfi_command *cm = NULL;
	struct mfi_ctrl_info *ci = NULL;
	uint32_t max_sectors_1, max_sectors_2;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_GETINFO,
	    (void **)&ci, sizeof(*ci));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get controller info\n");
		/* Fall back to a safe estimate based on the S/G limit */
		sc->mfi_max_io = (sc->mfi_max_sge - 1) * PAGE_SIZE /
		    MFI_SECTOR_LEN;
		error = 0;
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

	/* Max I/O is the lesser of the stripe limit and the request limit */
	max_sectors_1 = (1 << ci->stripe_sz_ops.max) * ci->max_strips_per_io;
	max_sectors_2 = ci->max_request_size;
	sc->mfi_max_io = min(max_sectors_1, max_sectors_2);
	sc->disableOnlineCtrlReset =
	    ci->properties.OnOffProperties.disableOnlineCtrlReset;

out:
	if (ci)
		kfree(ci, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	return (error);
}
992 
/*
 * Fetch the controller's event-log state (boot/shutdown/newest sequence
 * numbers) via MFI_DCMD_CTRL_EVENT_GETINFO.  On success *log_state
 * points to a kmalloc'd buffer the caller must kfree (M_MFIBUF).
 * NOTE(review): the buffer may also have been allocated on some failure
 * paths (command mapped but failed) -- callers should free *log_state
 * whenever it is non-NULL.
 */
static int
mfi_get_log_state(struct mfi_softc *sc, struct mfi_evt_log_state **log_state)
{
	struct mfi_command *cm = NULL;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_GETINFO,
	    (void **)log_state, sizeof(**log_state));
	if (error)
		goto out;
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to get log state\n");
		goto out;
	}

	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

out:
	if (cm)
		mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	return (error);
}
1022 
1023 int
1024 mfi_aen_setup(struct mfi_softc *sc, uint32_t seq_start)
1025 {
1026 	struct mfi_evt_log_state *log_state = NULL;
1027 	union mfi_evt class_locale;
1028 	int error = 0;
1029 	uint32_t seq;
1030 
1031 	class_locale.members.reserved = 0;
1032 	class_locale.members.locale = mfi_event_locale;
1033 	class_locale.members.evt_class  = mfi_event_class;
1034 
1035 	if (seq_start == 0) {
1036 		error = mfi_get_log_state(sc, &log_state);
1037 		sc->mfi_boot_seq_num = log_state->boot_seq_num;
1038 		if (error) {
1039 			if (log_state)
1040 				kfree(log_state, M_MFIBUF);
1041 			return (error);
1042 		}
1043 
1044 		/*
1045 		 * Walk through any events that fired since the last
1046 		 * shutdown.
1047 		 */
1048 		mfi_parse_entries(sc, log_state->shutdown_seq_num,
1049 		    log_state->newest_seq_num);
1050 		seq = log_state->newest_seq_num;
1051 	} else
1052 		seq = seq_start;
1053 	mfi_aen_register(sc, seq, class_locale.word);
1054 	if (log_state != NULL)
1055 		kfree(log_state, M_MFIBUF);
1056 
1057 	return 0;
1058 }
1059 
1060 int
1061 mfi_wait_command(struct mfi_softc *sc, struct mfi_command *cm)
1062 {
1063 
1064 	mfi_lockassert(&sc->mfi_io_lock);
1065 	cm->cm_complete = NULL;
1066 
1067 
1068 	/*
1069 	 * MegaCli can issue a DCMD of 0.  In this case do nothing
1070 	 * and return 0 to it as status
1071 	 */
1072 	if (cm->cm_frame->dcmd.opcode == 0) {
1073 		cm->cm_frame->header.cmd_status = MFI_STAT_OK;
1074 		cm->cm_error = 0;
1075 		return (cm->cm_error);
1076 	}
1077 	mfi_enqueue_ready(cm);
1078 	mfi_startio(sc);
1079 	if ((cm->cm_flags & MFI_CMD_COMPLETED) == 0)
1080 		lksleep(cm, &sc->mfi_io_lock, 0, "mfiwait", 0);
1081 	return (cm->cm_error);
1082 }
1083 
/*
 * Tear down all driver resources on detach: watchdog, device node,
 * command pool, interrupt, DMA maps/memory/tags (sense, frames, comms,
 * and -- on ThunderBolt controllers -- the TB pools), sysctl tree, and
 * finally the locks.  Teardown order mirrors allocation order in
 * reverse; each resource is guarded by a NULL/0 check so this is safe
 * to call from partially-failed attach paths.
 */
void
mfi_free(struct mfi_softc *sc)
{
	struct mfi_command *cm;
	int i;

	callout_stop_sync(&sc->mfi_watchdog_callout);

	if (sc->mfi_cdev != NULL)
		destroy_dev(sc->mfi_cdev);
	dev_ops_remove_minor(&mfi_ops, device_get_unit(sc->mfi_dev));

	/* Destroy the per-command DMA maps, then the pool itself */
	if (sc->mfi_total_cmds != 0) {
		for (i = 0; i < sc->mfi_total_cmds; i++) {
			cm = &sc->mfi_commands[i];
			bus_dmamap_destroy(sc->mfi_buffer_dmat, cm->cm_dmamap);
		}
		kfree(sc->mfi_commands, M_MFIBUF);
	}

	if (sc->mfi_intr)
		bus_teardown_intr(sc->mfi_dev, sc->mfi_irq, sc->mfi_intr);
	if (sc->mfi_irq != NULL)
		bus_release_resource(sc->mfi_dev, SYS_RES_IRQ, sc->mfi_irq_rid,
		    sc->mfi_irq);

	/* Sense buffers: unload map, free memory, destroy tag -- in order */
	if (sc->mfi_sense_busaddr != 0)
		bus_dmamap_unload(sc->mfi_sense_dmat, sc->mfi_sense_dmamap);
	if (sc->mfi_sense != NULL)
		bus_dmamem_free(sc->mfi_sense_dmat, sc->mfi_sense,
		    sc->mfi_sense_dmamap);
	if (sc->mfi_sense_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_sense_dmat);

	if (sc->mfi_frames_busaddr != 0)
		bus_dmamap_unload(sc->mfi_frames_dmat, sc->mfi_frames_dmamap);
	if (sc->mfi_frames != NULL)
		bus_dmamem_free(sc->mfi_frames_dmat, sc->mfi_frames,
		    sc->mfi_frames_dmamap);
	if (sc->mfi_frames_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_frames_dmat);

	if (sc->mfi_comms_busaddr != 0)
		bus_dmamap_unload(sc->mfi_comms_dmat, sc->mfi_comms_dmamap);
	if (sc->mfi_comms != NULL)
		bus_dmamem_free(sc->mfi_comms_dmat, sc->mfi_comms,
		    sc->mfi_comms_dmamap);
	if (sc->mfi_comms_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_comms_dmat);

	/* ThunderBolt contiguous memory free here */
	if (sc->mfi_flags & MFI_FLAGS_TBOLT) {
		if (sc->mfi_tb_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_dmat, sc->mfi_tb_dmamap);
		if (sc->request_message_pool != NULL)
			bus_dmamem_free(sc->mfi_tb_dmat, sc->request_message_pool,
			    sc->mfi_tb_dmamap);
		if (sc->mfi_tb_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_dmat);

		/* Version buffer memory free */
		/* Start LSIP200113393 */
		if (sc->verbuf_h_busaddr != 0)
			bus_dmamap_unload(sc->verbuf_h_dmat, sc->verbuf_h_dmamap);
		if (sc->verbuf != NULL)
			bus_dmamem_free(sc->verbuf_h_dmat, sc->verbuf,
			    sc->verbuf_h_dmamap);
		if (sc->verbuf_h_dmat != NULL)
			bus_dma_tag_destroy(sc->verbuf_h_dmat);

		/* End LSIP200113393 */
		/* ThunderBolt INIT packet memory Free */
		if (sc->mfi_tb_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_init_dmat, sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init != NULL)
			bus_dmamem_free(sc->mfi_tb_init_dmat, sc->mfi_tb_init,
			    sc->mfi_tb_init_dmamap);
		if (sc->mfi_tb_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_init_dmat);

		/* ThunderBolt IOC Init Desc memory free here */
		if (sc->mfi_tb_ioc_init_busaddr != 0)
			bus_dmamap_unload(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_desc != NULL)
			bus_dmamem_free(sc->mfi_tb_ioc_init_dmat,
			    sc->mfi_tb_ioc_init_desc,
			    sc->mfi_tb_ioc_init_dmamap);
		if (sc->mfi_tb_ioc_init_dmat != NULL)
			bus_dma_tag_destroy(sc->mfi_tb_ioc_init_dmat);
		/* Free each ThunderBolt per-command buffer, then the array */
		for (int i = 0; i < sc->mfi_max_fw_cmds; i++) {
			if (sc->mfi_cmd_pool_tbolt != NULL) {
				if (sc->mfi_cmd_pool_tbolt[i] != NULL) {
					kfree(sc->mfi_cmd_pool_tbolt[i],
					    M_MFIBUF);
					sc->mfi_cmd_pool_tbolt[i] = NULL;
				}
			}
		}
		if (sc->mfi_cmd_pool_tbolt != NULL) {
			kfree(sc->mfi_cmd_pool_tbolt, M_MFIBUF);
			sc->mfi_cmd_pool_tbolt = NULL;
		}
		if (sc->request_desc_pool != NULL) {
			kfree(sc->request_desc_pool, M_MFIBUF);
			sc->request_desc_pool = NULL;
		}
	}
	if (sc->mfi_buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_buffer_dmat);
	if (sc->mfi_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->mfi_parent_dmat);

	if (sc->mfi_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->mfi_sysctl_ctx);

#if 0 /* XXX swildner: not sure if we need something like mtx_initialized() */
	if (mtx_initialized(&sc->mfi_io_lock))
#endif
	{
	lockuninit(&sc->mfi_io_lock);
	lockuninit(&sc->mfi_config_lock);
	}

	return;
}
1210 
1211 static void
1212 mfi_startup(void *arg)
1213 {
1214 	struct mfi_softc *sc;
1215 
1216 	sc = (struct mfi_softc *)arg;
1217 
1218 	config_intrhook_disestablish(&sc->mfi_ich);
1219 
1220 	sc->mfi_enable_intr(sc);
1221 	lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
1222 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1223 	mfi_ldprobe(sc);
1224 	if (sc->mfi_flags & MFI_FLAGS_SKINNY)
1225 		mfi_syspdprobe(sc);
1226 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1227 	lockmgr(&sc->mfi_config_lock, LK_RELEASE);
1228 }
1229 
/*
 * Interrupt handler: drain the shared-memory reply queue between the
 * firmware's producer index (hw_pi) and our consumer index (hw_ci),
 * completing each command whose context is echoed back.  Afterwards
 * unfreeze the I/O queue if needed, restart I/O, and re-check hw_pi in
 * case more completions arrived while we were processing.
 */
static void
mfi_intr(void *arg)
{
	struct mfi_softc *sc;
	struct mfi_command *cm;
	uint32_t pi, ci, context;

	sc = (struct mfi_softc *)arg;

	/* Returns non-zero if the interrupt was not ours / nothing to do */
	if (sc->mfi_check_clear_intr(sc))
		return;

restart:
	pi = sc->mfi_comms->hw_pi;
	ci = sc->mfi_comms->hw_ci;
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	while (ci != pi) {
		context = sc->mfi_comms->hw_reply_q[ci];
		/* Guard against bogus contexts from the firmware */
		if (context < sc->mfi_max_fw_cmds) {
			cm = &sc->mfi_commands[context];
			mfi_remove_busy(cm);
			cm->cm_error = 0;
			mfi_complete(sc, cm);
		}
		/* The reply queue has mfi_max_fw_cmds + 1 slots; wrap */
		if (++ci == (sc->mfi_max_fw_cmds + 1)) {
			ci = 0;
		}
	}

	sc->mfi_comms->hw_ci = ci;

	/* Give deferred I/O a chance to run */
	if (sc->mfi_flags & MFI_FLAGS_QFRZN)
		sc->mfi_flags &= ~MFI_FLAGS_QFRZN;
	mfi_startio(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

	/*
	 * Dummy read to flush the bus; this ensures that the indexes are up
	 * to date.  Restart processing if more commands have come in.
	 */
	(void)sc->mfi_read_fw_status(sc);
	if (pi != sc->mfi_comms->hw_pi)
		goto restart;

	return;
}
1277 
/*
 * Send a polled MFI_DCMD_CTRL_SHUTDOWN to the controller, first
 * aborting any outstanding AEN and map-update commands so they do not
 * dangle across the shutdown.  Returns 0 on success or the error from
 * command allocation / mapping.
 */
int
mfi_shutdown(struct mfi_softc *sc)
{
	struct mfi_dcmd_frame *dcmd;
	struct mfi_command *cm;
	int error;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_SHUTDOWN, NULL, 0);
	if (error) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		return (error);
	}

	/* Cancel the long-running async commands before shutting down */
	if (sc->mfi_aen_cm != NULL)
		mfi_abort(sc, sc->mfi_aen_cm);

	if (sc->map_update_cmd != NULL)
		mfi_abort(sc, sc->map_update_cmd);

	dcmd = &cm->cm_frame->dcmd;
	dcmd->header.flags = MFI_FRAME_DIR_NONE;
	cm->cm_flags = MFI_CMD_POLLED;
	cm->cm_data = NULL;

	if ((error = mfi_mapcmd(sc, cm)) != 0) {
		device_printf(sc->mfi_dev, "Failed to shutdown controller\n");
	}

	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	return (error);
}
1311 
1312 static void
1313 mfi_syspdprobe(struct mfi_softc *sc)
1314 {
1315 	struct mfi_frame_header *hdr;
1316 	struct mfi_command *cm = NULL;
1317 	struct mfi_pd_list *pdlist = NULL;
1318 	struct mfi_system_pd *syspd, *tmp;
1319 	int error, i, found;
1320 
1321 	mfi_lockassert(&sc->mfi_config_lock);
1322 	mfi_lockassert(&sc->mfi_io_lock);
1323 	/* Add SYSTEM PD's */
1324 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_LIST_QUERY,
1325 	    (void **)&pdlist, sizeof(*pdlist));
1326 	if (error) {
1327 		device_printf(sc->mfi_dev,
1328 		    "Error while forming SYSTEM PD list\n");
1329 		goto out;
1330 	}
1331 
1332 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1333 	cm->cm_frame->dcmd.mbox[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
1334 	cm->cm_frame->dcmd.mbox[1] = 0;
1335 	if (mfi_mapcmd(sc, cm) != 0) {
1336 		device_printf(sc->mfi_dev,
1337 		    "Failed to get syspd device listing\n");
1338 		goto out;
1339 	}
1340 	bus_dmamap_sync(sc->mfi_buffer_dmat,cm->cm_dmamap,
1341 	    BUS_DMASYNC_POSTREAD);
1342 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1343 	hdr = &cm->cm_frame->header;
1344 	if (hdr->cmd_status != MFI_STAT_OK) {
1345 		device_printf(sc->mfi_dev,
1346 		    "MFI_DCMD_PD_LIST_QUERY failed %x\n", hdr->cmd_status);
1347 		goto out;
1348 	}
1349 	/* Get each PD and add it to the system */
1350 	for (i = 0; i < pdlist->count; i++) {
1351 		if (pdlist->addr[i].device_id ==
1352 		    pdlist->addr[i].encl_device_id)
1353 			continue;
1354 		found = 0;
1355 		TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
1356 			if (syspd->pd_id == pdlist->addr[i].device_id)
1357 				found = 1;
1358 		}
1359 		if (found == 0)
1360 			mfi_add_sys_pd(sc, pdlist->addr[i].device_id);
1361 	}
1362 	/* Delete SYSPD's whose state has been changed */
1363 	TAILQ_FOREACH_MUTABLE(syspd, &sc->mfi_syspd_tqh, pd_link, tmp) {
1364 		found = 0;
1365 		for (i = 0; i < pdlist->count; i++) {
1366 			if (syspd->pd_id == pdlist->addr[i].device_id)
1367 				found = 1;
1368 		}
1369 		if (found == 0) {
1370 			kprintf("DELETE\n");
1371 			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1372 			get_mplock();
1373 			device_delete_child(sc->mfi_dev, syspd->pd_dev);
1374 			rel_mplock();
1375 			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1376 		}
1377 	}
1378 out:
1379 	if (pdlist)
1380 		kfree(pdlist, M_MFIBUF);
1381 	if (cm)
1382 		mfi_release_command(cm);
1383 }
1384 
/*
 * Enumerate logical drives with MFI_DCMD_LD_GET_LIST and attach a
 * child device for any target id not already on mfi_ld_tqh.  Called
 * with both the config lock and the io lock held; unlike syspdprobe
 * this does not detach vanished drives.
 */
static void
mfi_ldprobe(struct mfi_softc *sc)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm = NULL;
	struct mfi_ld_list *list = NULL;
	struct mfi_disk *ld;
	int error, i;

	mfi_lockassert(&sc->mfi_config_lock);
	mfi_lockassert(&sc->mfi_io_lock);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_LIST,
	    (void **)&list, sizeof(*list));
	if (error)
		goto out;

	/* Interrupt-driven here (unlike the polled syspd probe) */
	cm->cm_flags = MFI_CMD_DATAIN;
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev, "Failed to get device listing\n");
		goto out;
	}

	hdr = &cm->cm_frame->header;
	if (hdr->cmd_status != MFI_STAT_OK) {
		device_printf(sc->mfi_dev, "MFI_DCMD_LD_GET_LIST failed %x\n",
		    hdr->cmd_status);
		goto out;
	}

	/* Attach only targets we do not already track */
	for (i = 0; i < list->ld_count; i++) {
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == list->ld_list[i].ld.v.target_id)
				goto skip_add;
		}
		mfi_add_ld(sc, list->ld_list[i].ld.v.target_id);
	skip_add:;
	}
out:
	if (list)
		kfree(list, M_MFIBUF);
	if (cm)
		mfi_release_command(cm);

	return;
}
1431 
1432 /*
1433  * The timestamp is the number of seconds since 00:00 Jan 1, 2000.  If
1434  * the bits in 24-31 are all set, then it is the number of seconds since
1435  * boot.
1436  */
1437 static const char *
1438 format_timestamp(uint32_t timestamp)
1439 {
1440 	static char buffer[32];
1441 
1442 	if ((timestamp & 0xff000000) == 0xff000000)
1443 		ksnprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
1444 		    0x00ffffff);
1445 	else
1446 		ksnprintf(buffer, sizeof(buffer), "%us", timestamp);
1447 	return (buffer);
1448 }
1449 
1450 static const char *
1451 format_class(int8_t class)
1452 {
1453 	static char buffer[6];
1454 
1455 	switch (class) {
1456 	case MFI_EVT_CLASS_DEBUG:
1457 		return ("debug");
1458 	case MFI_EVT_CLASS_PROGRESS:
1459 		return ("progress");
1460 	case MFI_EVT_CLASS_INFO:
1461 		return ("info");
1462 	case MFI_EVT_CLASS_WARNING:
1463 		return ("WARN");
1464 	case MFI_EVT_CLASS_CRITICAL:
1465 		return ("CRIT");
1466 	case MFI_EVT_CLASS_FATAL:
1467 		return ("FATAL");
1468 	case MFI_EVT_CLASS_DEAD:
1469 		return ("DEAD");
1470 	default:
1471 		ksnprintf(buffer, sizeof(buffer), "%d", class);
1472 		return (buffer);
1473 	}
1474 }
1475 
/*
 * Log an AEN event and react to the interesting ones: host-bus scan
 * requests and PD insert/remove trigger a syspd re-probe (when JBOD
 * detection is enabled), and a logical drive going offline detaches
 * its child device.  Events older than the boot sequence number, or
 * arriving during detach, are ignored.
 */
static void
mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_system_pd *syspd = NULL;

	device_printf(sc->mfi_dev, "%d (%s/0x%04x/%s) - %s\n", detail->seq,
	    format_timestamp(detail->time), detail->evt_class.members.locale,
	    format_class(detail->evt_class.members.evt_class),
	    detail->description);

	/* Don't act on old AEN's or while shutting down */
	if (detail->seq < sc->mfi_boot_seq_num || sc->mfi_detaching)
		return;

	switch (detail->arg_type) {
	case MR_EVT_ARGS_NONE:
		if (detail->code == MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED) {
		    device_printf(sc->mfi_dev, "HostBus scan raised\n");
			if (mfi_detect_jbod_change) {
				/*
				 * Probe for new SYSPD's and Delete
				 * invalid SYSPD's
				 */
				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
				mfi_syspdprobe(sc);
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
			}
		}
		break;
	case MR_EVT_ARGS_LD_STATE:
		/*
		 * During load time driver reads all the events starting
		 * from the one that has been logged after shutdown. Avoid
		 * these old events.
		 */
		if (detail->args.ld_state.new_state == MFI_LD_STATE_OFFLINE ) {
			/* Remove the LD */
			struct mfi_disk *ld;
			/* 'ld' is NULL if the loop finds no match */
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
				if (ld->ld_id ==
				    detail->args.ld_state.ld.target_id)
					break;
			}
			/*
			Fix: for kernel panics when SSCD is removed
			KASSERT(ld != NULL, ("volume dissappeared"));
			*/
			if (ld != NULL) {
				get_mplock();
				device_delete_child(sc->mfi_dev, ld->ld_dev);
				rel_mplock();
			}
		}
		break;
	case MR_EVT_ARGS_PD:
		if (detail->code == MR_EVT_PD_REMOVED) {
			if (mfi_detect_jbod_change) {
				/*
				 * If the removed device is a SYSPD then
				 * delete it
				 */
				TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh,
				    pd_link) {
					if (syspd->pd_id ==
					    detail->args.pd.device_id) {
						get_mplock();
						device_delete_child(
						    sc->mfi_dev,
						    syspd->pd_dev);
						rel_mplock();
						break;
					}
				}
			}
		}
		if (detail->code == MR_EVT_PD_INSERTED) {
			if (mfi_detect_jbod_change) {
				/* Probe for new SYSPD's */
				lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
				lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
				mfi_syspdprobe(sc);
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				lockmgr(&sc->mfi_config_lock, LK_RELEASE);
			}
		}
		break;
	}
}
1566 
/*
 * Copy an event detail onto the softc's event queue and kick the
 * taskqueue that decodes it in mfi_handle_evt().  Allocation is
 * M_NOWAIT (may run from interrupt context); on failure the event is
 * deliberately dropped rather than blocking -- best-effort logging.
 * The io lock must be held.
 */
static void
mfi_queue_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
{
	struct mfi_evt_queue_elm *elm;

	mfi_lockassert(&sc->mfi_io_lock);
	elm = kmalloc(sizeof(*elm), M_MFIBUF, M_NOWAIT | M_ZERO);
	if (elm == NULL)
		return;
	memcpy(&elm->detail, detail, sizeof(*detail));
	TAILQ_INSERT_TAIL(&sc->mfi_evt_queue, elm, link);
	taskqueue_enqueue(taskqueue_swi, &sc->mfi_evt_task);
}
1580 
1581 static void
1582 mfi_handle_evt(void *context, int pending)
1583 {
1584 	TAILQ_HEAD(,mfi_evt_queue_elm) queue;
1585 	struct mfi_softc *sc;
1586 	struct mfi_evt_queue_elm *elm;
1587 
1588 	sc = context;
1589 	TAILQ_INIT(&queue);
1590 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
1591 	TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
1592 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
1593 	while ((elm = TAILQ_FIRST(&queue)) != NULL) {
1594 		TAILQ_REMOVE(&queue, elm, link);
1595 		mfi_decode_evt(sc, &elm->detail);
1596 		kfree(elm, M_MFIBUF);
1597 	}
1598 }
1599 
/*
 * Register an MFI_DCMD_CTRL_EVENT_WAIT command that the firmware
 * completes when an event at or after 'seq' matching 'locale' fires.
 * If an AEN is already outstanding and covers the requested class and
 * locale, do nothing; otherwise merge the requests and abort the old
 * command before issuing a new one.  Called without the io lock held
 * (it is taken and dropped internally around each step).
 */
static int
mfi_aen_register(struct mfi_softc *sc, int seq, int locale)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	union mfi_evt current_aen, prior_aen;
	struct mfi_evt_detail *ed = NULL;
	int error = 0;

	current_aen.word = locale;
	if (sc->mfi_aen_cm != NULL) {
		/* The old command's mbox[1] holds its class/locale word */
		prior_aen.word =
		    ((uint32_t *)&sc->mfi_aen_cm->cm_frame->dcmd.mbox)[1];
		/* Old registration already a superset of the new request? */
		if (prior_aen.members.evt_class <= current_aen.members.evt_class &&
		    !((prior_aen.members.locale & current_aen.members.locale)
		    ^current_aen.members.locale)) {
			return (0);
		} else {
			/* Merge locales and take the broader (lower) class */
			prior_aen.members.locale |= current_aen.members.locale;
			if (prior_aen.members.evt_class
			    < current_aen.members.evt_class)
				current_aen.members.evt_class =
				    prior_aen.members.evt_class;
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_abort(sc, sc->mfi_aen_cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}
	}

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_CTRL_EVENT_WAIT,
	    (void **)&ed, sizeof(*ed));
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	if (error) {
		goto out;
	}

	/* mbox[0] = starting sequence number, mbox[1] = class/locale */
	dcmd = &cm->cm_frame->dcmd;
	((uint32_t *)&dcmd->mbox)[0] = seq;
	((uint32_t *)&dcmd->mbox)[1] = locale;
	cm->cm_flags = MFI_CMD_DATAIN;
	cm->cm_complete = mfi_aen_complete;

	sc->last_seq_num = seq;
	sc->mfi_aen_cm = cm;

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	mfi_enqueue_ready(cm);
	mfi_startio(sc);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);

out:
	return (error);
}
1654 
/*
 * Completion callback for the outstanding AEN command.  Unless the
 * command was aborted, queue the event for decoding, wake any kqueue
 * pollers, signal registered processes with SIGIO, and re-arm the AEN
 * at the next sequence number.  Runs with the io lock held (dropped
 * briefly around the mfi_aen_setup() re-arm).
 */
static void
mfi_aen_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_softc *sc;
	struct mfi_evt_detail *detail;
	struct mfi_aen *mfi_aen_entry, *tmp;
	struct proc *p;
	int seq = 0, aborted = 0;

	sc = cm->cm_sc;
	mfi_lockassert(&sc->mfi_io_lock);

	hdr = &cm->cm_frame->header;

	if (sc->mfi_aen_cm == NULL)
		return;

	if (sc->mfi_aen_cm->cm_aen_abort ||
	    hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		sc->mfi_aen_cm->cm_aen_abort = 0;
		aborted = 1;
	} else {
		sc->mfi_aen_triggered = 1;
		/* Wake up any kevent() waiters */
		if (sc->mfi_poll_waiting) {
			sc->mfi_poll_waiting = 0;
			KNOTE(&sc->mfi_kq.ki_note, 0);
		}
		detail = cm->cm_data;
		mfi_queue_evt(sc, detail);
		seq = detail->seq + 1;
		/* Notify and drop every process registered for AEN SIGIO */
		TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids,
		    aen_link, tmp) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
				     aen_link);
			p = mfi_aen_entry->p;
			PHOLD(p);
			ksignal(p, SIGIO);
			PRELE(p);
			kfree(mfi_aen_entry, M_MFIBUF);
		}
	}

	/* Free the event buffer allocated by mfi_aen_register() */
	kfree(cm->cm_data, M_MFIBUF);
	sc->mfi_aen_cm = NULL;
	wakeup(&sc->mfi_aen_cm);
	mfi_release_command(cm);

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		mfi_aen_setup(sc, seq);
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	}
}
1710 
1711 #define MAX_EVENTS 15
1712 
/*
 * Replay controller event-log entries in [start_seq, stop_seq),
 * fetching up to MAX_EVENTS at a time with MFI_DCMD_CTRL_EVENT_GET and
 * queueing each for decoding.  The log is a circular buffer, so the
 * stop point may numerically precede the start point.
 *
 * NOTE(review): fetch errors terminate the loop but the function still
 * returns 0; also 'el->event[el->count - 1]' indexes before the array
 * if the firmware ever returns MFI_STAT_OK with count == 0 -- confirm
 * the firmware contract before relying on either.
 */
static int
mfi_parse_entries(struct mfi_softc *sc, int start_seq, int stop_seq)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	struct mfi_evt_list *el;
	union mfi_evt class_locale;
	int error, i, seq, size;

	class_locale.members.reserved = 0;
	class_locale.members.locale = mfi_event_locale;
	class_locale.members.evt_class  = mfi_event_class;

	/* One header plus MAX_EVENTS details (struct embeds the first) */
	size = sizeof(struct mfi_evt_list) + sizeof(struct mfi_evt_detail)
		* (MAX_EVENTS - 1);
	el = kmalloc(size, M_MFIBUF, M_NOWAIT | M_ZERO);
	if (el == NULL)
		return (ENOMEM);

	for (seq = start_seq;;) {
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			kfree(el, M_MFIBUF);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (EBUSY);
		}

		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		/* Build the EVENT_GET frame by hand (polled DATAIN) */
		dcmd = &cm->cm_frame->dcmd;
		bzero(dcmd->mbox, MFI_MBOX_SIZE);
		dcmd->header.cmd = MFI_CMD_DCMD;
		dcmd->header.timeout = 0;
		dcmd->header.data_len = size;
		dcmd->opcode = MFI_DCMD_CTRL_EVENT_GET;
		((uint32_t *)&dcmd->mbox)[0] = seq;
		((uint32_t *)&dcmd->mbox)[1] = class_locale.word;
		cm->cm_sg = &dcmd->sgl;
		cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
		cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
		cm->cm_data = el;
		cm->cm_len = size;

		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((error = mfi_mapcmd(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Failed to get controller entries\n");
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}

		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);

		/* NOT_FOUND means we have drained the log: normal exit */
		if (dcmd->header.cmd_status == MFI_STAT_NOT_FOUND) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}
		if (dcmd->header.cmd_status != MFI_STAT_OK) {
			device_printf(sc->mfi_dev,
			    "Error %d fetching controller entries\n",
			    dcmd->header.cmd_status);
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			break;
		}
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		mfi_release_command(cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		for (i = 0; i < el->count; i++) {
			/*
			 * If this event is newer than 'stop_seq' then
			 * break out of the loop.  Note that the log
			 * is a circular buffer so we have to handle
			 * the case that our stop point is earlier in
			 * the buffer than our start point.
			 */
			if (el->event[i].seq >= stop_seq) {
				if (start_seq <= stop_seq)
					break;
				else if (el->event[i].seq < start_seq)
					break;
			}
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_queue_evt(sc, &el->event[i]);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}
		/* Continue the next fetch after the last event we saw */
		seq = el->event[el->count - 1].seq + 1;
	}

	kfree(el, M_MFIBUF);
	return (0);
}
1813 
/*
 * Query MFI_DCMD_LD_GET_INFO for logical drive 'id' and, unless the
 * drive is an SSCD (CacheCade) volume, attach a child disk device via
 * mfi_add_ld_complete().  Ownership of 'ld_info' passes to the
 * completion path (stored as device ivars) on success.  The io lock
 * must be held.
 */
static int
mfi_add_ld(struct mfi_softc *sc, int id)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd = NULL;
	struct mfi_ld_info *ld_info = NULL;
	int error;

	mfi_lockassert(&sc->mfi_io_lock);

	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_LD_GET_INFO,
	    (void **)&ld_info, sizeof(*ld_info));
	if (error) {
		device_printf(sc->mfi_dev,
		    "Failed to allocate for MFI_DCMD_LD_GET_INFO %d\n", error);
		if (ld_info)
			kfree(ld_info, M_MFIBUF);
		return (error);
	}
	cm->cm_flags = MFI_CMD_DATAIN;
	dcmd = &cm->cm_frame->dcmd;
	dcmd->mbox[0] = id;
	/*
	 * NOTE(review): on wait failure 'cm' is not released -- presumably
	 * because the firmware may still own the frame; confirm before
	 * treating this as a leak.
	 */
	if (mfi_wait_command(sc, cm) != 0) {
		device_printf(sc->mfi_dev,
		    "Failed to get logical drive: %d\n", id);
		kfree(ld_info, M_MFIBUF);
		return (0);
	}
	if (ld_info->ld_config.params.isSSCD != 1) {
		/* mfi_add_ld_complete() releases cm and consumes ld_info */
		mfi_add_ld_complete(cm);
	} else {
		mfi_release_command(cm);
		if (ld_info)		/* SSCD drives ld_info free here */
			kfree(ld_info, M_MFIBUF);
	}
	return (0);
}
1851 
/*
 * Second half of logical-drive attach: validate the LD_GET_INFO
 * status, then add an "mfid" child carrying 'ld_info' as its ivars.
 * The io lock is dropped (and the MP lock taken) around the newbus
 * calls, which may sleep.  Frees 'ld_info' on any failure path.
 */
static void
mfi_add_ld_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_ld_info *ld_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	ld_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		kfree(ld_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	get_mplock();
	if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add logical disk\n");
		kfree(ld_info, M_MFIBUF);
		rel_mplock();
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		return;
	}

	/* The mfid child owns ld_info from here on (via ivars) */
	device_set_ivars(child, ld_info);
	device_set_desc(child, "MFI Logical Disk");
	bus_generic_attach(sc->mfi_dev);
	rel_mplock();
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}
1887 
1888 static int
1889 mfi_add_sys_pd(struct mfi_softc *sc, int id)
1890 {
1891 	struct mfi_command *cm;
1892 	struct mfi_dcmd_frame *dcmd = NULL;
1893 	struct mfi_pd_info *pd_info = NULL;
1894 	int error;
1895 
1896 	mfi_lockassert(&sc->mfi_io_lock);
1897 
1898 	error = mfi_dcmd_command(sc, &cm, MFI_DCMD_PD_GET_INFO,
1899 	    (void **)&pd_info, sizeof(*pd_info));
1900 	if (error) {
1901 		device_printf(sc->mfi_dev,
1902 		    "Failed to allocated for MFI_DCMD_PD_GET_INFO %d\n",
1903 		    error);
1904 		if (pd_info)
1905 			kfree(pd_info, M_MFIBUF);
1906 		return (error);
1907 	}
1908 	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_POLLED;
1909 	dcmd = &cm->cm_frame->dcmd;
1910 	dcmd->mbox[0] = id;
1911 	dcmd->header.scsi_status = 0;
1912 	dcmd->header.pad0 = 0;
1913 	if (mfi_mapcmd(sc, cm) != 0) {
1914 		device_printf(sc->mfi_dev,
1915 		    "Failed to get physical drive info %d\n", id);
1916 		kfree(pd_info, M_MFIBUF);
1917 		return (0);
1918 	}
1919 	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
1920 	    BUS_DMASYNC_POSTREAD);
1921 	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
1922 	mfi_add_sys_pd_complete(cm);
1923 	return (0);
1924 }
1925 
/*
 * Completion handler for the polled MFI_DCMD_PD_GET_INFO issued by
 * mfi_add_sys_pd().  Attaches an "mfisyspd" child only for drives whose
 * firmware state is MFI_PD_STATE_SYSTEM; ownership of pd_info passes to
 * the child via its ivars on success, otherwise it is freed here.
 *
 * Called with mfi_io_lock held; the lock is dropped (and the MP lock
 * taken) around the newbus calls, then re-acquired before returning.
 */
static void
mfi_add_sys_pd_complete(struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	struct mfi_pd_info *pd_info;
	struct mfi_softc *sc;
	device_t child;

	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	pd_info = cm->cm_private;

	if (hdr->cmd_status != MFI_STAT_OK) {
		/* Firmware rejected the query; nothing to attach. */
		kfree(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	if (pd_info->fw_state != MFI_PD_STATE_SYSTEM) {
		device_printf(sc->mfi_dev, "PD=%x is not SYSTEM PD\n",
		    pd_info->ref.v.device_id);
		kfree(pd_info, M_MFIBUF);
		mfi_release_command(cm);
		return;
	}
	mfi_release_command(cm);

	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	get_mplock();
	if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
		device_printf(sc->mfi_dev, "Failed to add system pd\n");
		kfree(pd_info, M_MFIBUF);
		rel_mplock();
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		return;
	}

	/* The child reads pd_info from its ivars and owns it from here. */
	device_set_ivars(child, pd_info);
	device_set_desc(child, "MFI System PD");
	bus_generic_attach(sc->mfi_dev);
	rel_mplock();
	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
}
1968 
1969 static struct mfi_command *
1970 mfi_bio_command(struct mfi_softc *sc)
1971 {
1972 	struct bio *bio;
1973 	struct mfi_command *cm = NULL;
1974 	struct mfi_disk *mfid;
1975 
1976 	/* reserving two commands to avoid starvation for IOCTL */
1977 	if (sc->mfi_qstat[MFIQ_FREE].q_length < 2)
1978 		return (NULL);
1979 	if ((bio = mfi_dequeue_bio(sc)) == NULL)
1980 		return (NULL);
1981 	mfid = bio->bio_driver_info;
1982 	if (mfid->ld_flags & MFI_DISK_FLAGS_SYSPD)
1983 		cm = mfi_build_syspdio(sc, bio);
1984 	else
1985 		cm = mfi_build_ldio(sc, bio);
1986 	if (!cm)
1987 		mfi_enqueue_bio(sc, bio);
1988 	return cm;
1989 }
1990 
/*
 * Build an MFI_CMD_PD_SCSI_IO pass-through frame (READ_10/WRITE_10) for
 * a bio destined to a system physical drive.  Returns NULL when no free
 * command is available; the caller requeues the bio in that case.
 *
 * NOTE(review): only CDB bytes 2-5 carry the LBA, so an offset beyond
 * the 32-bit sector range of a 10-byte CDB would be truncated — confirm
 * upper layers bound syspd transfers accordingly.
 */
static struct mfi_command *
mfi_build_syspdio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_command *cm;
	struct buf *bp;
	struct mfi_system_pd *disk;
	struct mfi_pass_frame *pass;
	int flags = 0, blkcount = 0;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (NULL);

	/* Zero out the MFI frame, preserving the firmware context tag */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	bp = bio->bio_buf;
	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	/* NOTE(review): mask assumes READ/WRITE occupy the low 2 bits of b_cmd — confirm */
	switch (bp->b_cmd & 0x03) {
	case BUF_CMD_READ:
		pass->cdb[0] = READ_10;
		flags = MFI_CMD_DATAIN;
		break;
	case BUF_CMD_WRITE:
		pass->cdb[0] = WRITE_10;
		flags = MFI_CMD_DATAOUT;
		break;
	default:
		panic("Invalid bio command");
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	disk = bio->bio_driver_info;
	/* Fill the LBA and Transfer length in CDB */
	pass->cdb[2] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xff000000) >> 24;
	pass->cdb[3] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x00ff0000) >> 16;
	pass->cdb[4] = ((bio->bio_offset / MFI_SECTOR_LEN) & 0x0000ff00) >> 8;
	pass->cdb[5] = (bio->bio_offset / MFI_SECTOR_LEN) & 0x000000ff;
	pass->cdb[7] = (blkcount & 0xff00) >> 8;
	pass->cdb[8] = (blkcount & 0x00ff);
	pass->header.target_id = disk->pd_id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = bp->b_bcount;	/* pass-through length is in bytes */
	pass->header.cdb_len = 10;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bp->b_data;
	cm->cm_len = bp->b_bcount;
	cm->cm_sg = &pass->sgl;		/* SGL is filled later in mfi_data_cb() */
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = flags;
	return (cm);
}
2053 
/*
 * Build an MFI_CMD_LD_READ/LD_WRITE frame for a bio on a logical disk.
 * Unlike the pass-through path, this frame carries a full 64-bit LBA
 * (lba_hi/lba_lo) and its data_len field counts sectors, not bytes.
 *
 * Returns NULL when no free command is available; the caller requeues
 * the bio in that case.
 */
static struct mfi_command *
mfi_build_ldio(struct mfi_softc *sc, struct bio *bio)
{
	struct mfi_io_frame *io;
	struct buf *bp;
	struct mfi_disk *disk;
	struct mfi_command *cm;
	int flags, blkcount;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
	    return (NULL);

	/* Zero out the MFI frame, preserving the firmware context tag */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;
	bp = bio->bio_buf;
	io = &cm->cm_frame->io;
	/* NOTE(review): mask assumes READ/WRITE occupy the low 2 bits of b_cmd — confirm */
	switch (bp->b_cmd & 0x03) {
	case BUF_CMD_READ:
		io->header.cmd = MFI_CMD_LD_READ;
		flags = MFI_CMD_DATAIN;
		break;
	case BUF_CMD_WRITE:
		io->header.cmd = MFI_CMD_LD_WRITE;
		flags = MFI_CMD_DATAOUT;
		break;
	default:
		panic("Invalid bio command");
	}

	/* Cheat with the sector length to avoid a non-constant division */
	blkcount = (bp->b_bcount + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	disk = bio->bio_driver_info;
	io->header.target_id = disk->ld_id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = blkcount;		/* LD I/O length is in sectors */
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = ((bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff00000000) >> 32;
	io->lba_lo = (bio->bio_offset / MFI_SECTOR_LEN) & 0xffffffff;
	cm->cm_complete = mfi_bio_complete;
	cm->cm_private = bio;
	cm->cm_data = bp->b_data;
	cm->cm_len = bp->b_bcount;
	cm->cm_sg = &io->sgl;		/* SGL is filled later in mfi_data_cb() */
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = flags;
	return (cm);
}
2108 
2109 static void
2110 mfi_bio_complete(struct mfi_command *cm)
2111 {
2112 	struct bio *bio;
2113 	struct buf *bp;
2114 	struct mfi_frame_header *hdr;
2115 	struct mfi_softc *sc;
2116 
2117 	bio = cm->cm_private;
2118 	bp = bio->bio_buf;
2119 	hdr = &cm->cm_frame->header;
2120 	sc = cm->cm_sc;
2121 
2122 	if ((hdr->cmd_status != MFI_STAT_OK) || (hdr->scsi_status != 0)) {
2123 		bp->b_flags |= B_ERROR;
2124 		bp->b_error = EIO;
2125 		device_printf(sc->mfi_dev, "I/O error, status= %d "
2126 		    "scsi_status= %d\n", hdr->cmd_status, hdr->scsi_status);
2127 		mfi_print_sense(cm->cm_sc, cm->cm_sense);
2128 	} else if (cm->cm_error != 0) {
2129 		bp->b_flags |= B_ERROR;
2130 	}
2131 
2132 	mfi_release_command(cm);
2133 	mfi_disk_complete(bio);
2134 }
2135 
2136 void
2137 mfi_startio(struct mfi_softc *sc)
2138 {
2139 	struct mfi_command *cm;
2140 	struct ccb_hdr *ccbh;
2141 
2142 	for (;;) {
2143 		/* Don't bother if we're short on resources */
2144 		if (sc->mfi_flags & MFI_FLAGS_QFRZN)
2145 			break;
2146 
2147 		/* Try a command that has already been prepared */
2148 		cm = mfi_dequeue_ready(sc);
2149 
2150 		if (cm == NULL) {
2151 			if ((ccbh = TAILQ_FIRST(&sc->mfi_cam_ccbq)) != NULL)
2152 				cm = sc->mfi_cam_start(ccbh);
2153 		}
2154 
2155 		/* Nope, so look for work on the bioq */
2156 		if (cm == NULL)
2157 			cm = mfi_bio_command(sc);
2158 
2159 		/* No work available, so exit */
2160 		if (cm == NULL)
2161 			break;
2162 
2163 		/* Send the command to the controller */
2164 		if (mfi_mapcmd(sc, cm) != 0) {
2165 			mfi_requeue_ready(cm);
2166 			break;
2167 		}
2168 	}
2169 }
2170 
/*
 * Map a command's data buffer for DMA (if any) and send the frame to
 * the controller.  Must be called with mfi_io_lock held.
 *
 * For commands with a data buffer — except MFI_CMD_STP, whose SGL is
 * built separately — the frame is actually sent from the
 * bus_dmamap_load() callback (mfi_data_cb).  If the load is deferred
 * (EINPROGRESS), the queue is frozen and 0 is returned; the callback
 * fires later when DMA resources become available.
 *
 * Returns 0 on success/deferral, or an errno from the load/send path.
 */
int
mfi_mapcmd(struct mfi_softc *sc, struct mfi_command *cm)
{
	int error, polled;

	mfi_lockassert(&sc->mfi_io_lock);

	if ((cm->cm_data != NULL) && (cm->cm_frame->header.cmd != MFI_CMD_STP)) {
		/* Polled commands must not sleep waiting for DMA resources. */
		polled = (cm->cm_flags & MFI_CMD_POLLED) ? BUS_DMA_NOWAIT : 0;
		error = bus_dmamap_load(sc->mfi_buffer_dmat, cm->cm_dmamap,
		    cm->cm_data, cm->cm_len, mfi_data_cb, cm, polled);
		if (error == EINPROGRESS) {
			/* Callback pending; stop issuing until it runs. */
			sc->mfi_flags |= MFI_FLAGS_QFRZN;
			return (0);
		}
	} else {
		if (sc->MFA_enabled)
			error = mfi_tbolt_send_frame(sc, cm);
		else
			error = mfi_send_frame(sc, cm);
	}

	return (error);
}
2195 
/*
 * bus_dmamap_load() callback: translate the DMA segment list into the
 * frame's scatter/gather list, set the transfer-direction flags, sync
 * the buffer, account for the extra frames the SGL occupies, and send
 * the frame to the controller.
 */
static void
mfi_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct mfi_frame_header *hdr;
	struct mfi_command *cm;
	union mfi_sgl *sgl;
	struct mfi_softc *sc;
	int i, j, first, dir;

	cm = (struct mfi_command *)arg;
	sc = cm->cm_sc;
	hdr = &cm->cm_frame->header;
	sgl = cm->cm_sg;

	if (error) {
		/* Report the load failure through the normal completion path. */
		kprintf("error %d in callback\n", error);
		cm->cm_error = error;
		mfi_complete(sc, cm);
		return;
	}

	/* Use IEEE sgl only for IO's on a SKINNY controller
	 * For other commands on a SKINNY controller use either
	 * sg32 or sg64 based on the sizeof(bus_addr_t).
	 * Also calculate the total frame size based on the type
	 * of SGL used.
	 */
	if (((cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) ||
	     (cm->cm_frame->header.cmd == MFI_CMD_LD_READ) ||
	     (cm->cm_frame->header.cmd == MFI_CMD_LD_WRITE)) &&
	    (sc->mfi_flags & MFI_FLAGS_SKINNY)) {
		for (i = 0; i < nsegs; i++) {
			sgl->sg_skinny[i].addr = segs[i].ds_addr;
			sgl->sg_skinny[i].len = segs[i].ds_len;
			sgl->sg_skinny[i].flag = 0;
		}
		hdr->flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64;
		hdr->sg_count = nsegs;
	} else {
		j = 0;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* The first cm_stp_len bytes get their own SG entry. */
			first = cm->cm_stp_len;
			if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
				sgl->sg32[j].addr = segs[0].ds_addr;
				sgl->sg32[j++].len = first;
			} else {
				sgl->sg64[j].addr = segs[0].ds_addr;
				sgl->sg64[j++].len = first;
			}
		} else
			first = 0;
		if ((sc->mfi_flags & MFI_FLAGS_SG64) == 0) {
			for (i = 0; i < nsegs; i++) {
				/* 'first' skips the STP prefix within segment 0 only. */
				sgl->sg32[j].addr = segs[i].ds_addr + first;
				sgl->sg32[j++].len = segs[i].ds_len - first;
				first = 0;
			}
		} else {
			for (i = 0; i < nsegs; i++) {
				sgl->sg64[j].addr = segs[i].ds_addr + first;
				sgl->sg64[j++].len = segs[i].ds_len - first;
				first = 0;
			}
			hdr->flags |= MFI_FRAME_SGL64;
		}
		hdr->sg_count = j;
	}

	/* Sync direction and frame direction flags follow the data flags. */
	dir = 0;
	if (cm->cm_flags & MFI_CMD_DATAIN) {
		dir |= BUS_DMASYNC_PREREAD;
		hdr->flags |= MFI_FRAME_DIR_READ;
	}
	if (cm->cm_flags & MFI_CMD_DATAOUT) {
		dir |= BUS_DMASYNC_PREWRITE;
		hdr->flags |= MFI_FRAME_DIR_WRITE;
	}
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
	cm->cm_flags |= MFI_CMD_MAPPED;

	/*
	 * Instead of calculating the total number of frames in the
	 * compound frame, it's already assumed that there will be at
	 * least 1 frame, so don't compensate for the modulo of the
	 * following division.
	 */
	cm->cm_total_frame_size += (sc->mfi_sge_size * nsegs);
	cm->cm_extra_frames = (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;

	if (sc->MFA_enabled)
		mfi_tbolt_send_frame(sc, cm);
	else
		mfi_send_frame(sc, cm);
}
2290 
/*
 * Hand a frame to the controller.  Non-polled commands are timestamped
 * and placed on the busy queue; their completion arrives via the reply
 * queue.  Polled commands are marked so the firmware won't post them to
 * the reply queue, then busy-waited here until the firmware overwrites
 * MFI_STAT_INVALID_STATUS or the timeout expires.
 *
 * Returns 0 on success (or successful enqueue), ETIMEDOUT if a polled
 * command never completed.
 */
static int
mfi_send_frame(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_frame_header *hdr;
	int tm = MFI_POLL_TIMEOUT_SECS * 1000;	/* poll budget, 1 ms steps */

	hdr = &cm->cm_frame->header;

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0) {
		cm->cm_timestamp = time_uptime;	/* for timeout detection */
		mfi_enqueue_busy(cm);
	} else {
		/* Sentinel status: firmware overwrites it on completion. */
		hdr->cmd_status = MFI_STAT_INVALID_STATUS;
		hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
	}

	/*
	 * The bus address of the command is aligned on a 64 byte boundary,
	 * leaving the least 6 bits as zero.  For whatever reason, the
	 * hardware wants the address shifted right by three, leaving just
	 * 3 zero bits.  These three bits are then used as a prefetching
	 * hint for the hardware to predict how many frames need to be
	 * fetched across the bus.  If a command has more than 8 frames
	 * then the 3 bits are set to 0x7 and the firmware uses other
	 * information in the command to determine the total amount to fetch.
	 * However, FreeBSD doesn't support I/O larger than 128K, so 8 frames
	 * is enough for both 32bit and 64bit systems.
	 */
	if (cm->cm_extra_frames > 7)
		cm->cm_extra_frames = 7;

	sc->mfi_issue_cmd(sc, cm->cm_frame_busaddr, cm->cm_extra_frames);

	if ((cm->cm_flags & MFI_CMD_POLLED) == 0)
		return (0);

	/* This is a polled command, so busy-wait for it to complete. */
	while (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		DELAY(1000);
		tm -= 1;
		if (tm <= 0)
			break;
	}

	if (hdr->cmd_status == MFI_STAT_INVALID_STATUS) {
		device_printf(sc->mfi_dev, "Frame %p timed out "
		    "command 0x%X\n", hdr, cm->cm_frame->dcmd.opcode);
		return (ETIMEDOUT);
	}

	return (0);
}
2343 
2344 void
2345 mfi_complete(struct mfi_softc *sc, struct mfi_command *cm)
2346 {
2347 	int dir;
2348 
2349 	if ((cm->cm_flags & MFI_CMD_MAPPED) != 0) {
2350 		dir = 0;
2351 		if ((cm->cm_flags & MFI_CMD_DATAIN) ||
2352 		    (cm->cm_frame->header.cmd == MFI_CMD_STP))
2353 			dir |= BUS_DMASYNC_POSTREAD;
2354 		if (cm->cm_flags & MFI_CMD_DATAOUT)
2355 			dir |= BUS_DMASYNC_POSTWRITE;
2356 
2357 		bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap, dir);
2358 		bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
2359 		cm->cm_flags &= ~MFI_CMD_MAPPED;
2360 	}
2361 
2362 	cm->cm_flags |= MFI_CMD_COMPLETED;
2363 
2364 	if (cm->cm_complete != NULL)
2365 		cm->cm_complete(cm);
2366 	else
2367 		wakeup(cm);
2368 }
2369 
/*
 * Issue a polled MFI_CMD_ABORT frame targeting 'cm_abort'.  If an AEN
 * command is outstanding, its cm_aen_abort flag is set first, and after
 * the abort is sent we wait (up to 5 x 5 s) for the AEN completion path
 * to clear sc->mfi_aen_cm.
 *
 * Called with mfi_io_lock held.  Returns EBUSY if no free command is
 * available, otherwise 0.
 */
static int
mfi_abort(struct mfi_softc *sc, struct mfi_command *cm_abort)
{
	struct mfi_command *cm;
	struct mfi_abort_frame *abort;
	int i = 0;
	uint32_t context = 0;

	mfi_lockassert(&sc->mfi_io_lock);

	if ((cm = mfi_dequeue_free(sc)) == NULL) {
		return (EBUSY);
	}

	/* Zero out the MFI frame, preserving the firmware context tag */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	abort = &cm->cm_frame->abort;
	abort->header.cmd = MFI_CMD_ABORT;
	abort->header.flags = 0;
	abort->header.scsi_status = 0;
	/* Identify the victim by both its context and its frame address. */
	abort->abort_context = cm_abort->cm_frame->header.context;
	abort->abort_mfi_addr_lo = (uint32_t)cm_abort->cm_frame_busaddr;
	abort->abort_mfi_addr_hi =
	    (uint32_t)((uint64_t)cm_abort->cm_frame_busaddr >> 32);
	cm->cm_data = NULL;
	cm->cm_flags = MFI_CMD_POLLED;

	if (sc->mfi_aen_cm)
		sc->mfi_aen_cm->cm_aen_abort = 1;
	mfi_mapcmd(sc, cm);
	mfi_release_command(cm);

	/* Give the AEN completion path time to observe the abort. */
	while (i < 5 && sc->mfi_aen_cm != NULL) {
		lksleep(&sc->mfi_aen_cm, &sc->mfi_io_lock, 0, "mfiabort",
		    5 * hz);
		i++;
	}

	return (0);
}
2413 
/*
 * Synchronously write 'len' bytes at sector 'lba' to logical volume
 * 'id' using a single polled MFI_CMD_LD_WRITE frame, bypassing the
 * normal bio queues (presumably for the kernel dump path — confirm
 * against callers).
 *
 * Returns EBUSY if no free command is available, otherwise the result
 * of the polled mfi_mapcmd().
 */
int
mfi_dump_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_io_frame *io;
	int error;
	uint32_t context = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	/* Zero out the MFI frame, preserving the firmware context tag */
	context = cm->cm_frame->header.context;
	bzero(cm->cm_frame, sizeof(union mfi_frame));
	cm->cm_frame->header.context = context;

	io = &cm->cm_frame->io;
	io->header.cmd = MFI_CMD_LD_WRITE;
	io->header.target_id = id;
	io->header.timeout = 0;
	io->header.flags = 0;
	io->header.scsi_status = 0;
	io->header.sense_len = MFI_SENSE_LEN;
	io->header.data_len = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;	/* sectors */
	io->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	io->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	io->lba_hi = (lba & 0xffffffff00000000) >> 32;
	io->lba_lo = lba & 0xffffffff;
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &io->sgl;
	cm->cm_total_frame_size = MFI_IO_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	error = mfi_mapcmd(sc, cm);
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
2457 
/*
 * Synchronously write 'len' bytes at sector 'lba' to system physical
 * drive 'id' using a single polled WRITE_10 pass-through frame,
 * bypassing the normal bio queues (presumably for the kernel dump path
 * — confirm against callers).
 *
 * NOTE(review): unlike mfi_dump_blocks(), the frame is not bzero'ed
 * before being filled in — confirm this is intentional.
 *
 * Returns EBUSY if no free command is available, otherwise the result
 * of the polled mfi_mapcmd().
 */
int
mfi_dump_syspd_blocks(struct mfi_softc *sc, int id, uint64_t lba, void *virt,
    int len)
{
	struct mfi_command *cm;
	struct mfi_pass_frame *pass;
	int error;
	int blkcount = 0;

	if ((cm = mfi_dequeue_free(sc)) == NULL)
		return (EBUSY);

	pass = &cm->cm_frame->pass;
	bzero(pass->cdb, 16);
	pass->header.cmd = MFI_CMD_PD_SCSI_IO;
	pass->cdb[0] = WRITE_10;
	/* 10-byte CDB: only the low 32 bits of the LBA are expressible. */
	pass->cdb[2] = (lba & 0xff000000) >> 24;
	pass->cdb[3] = (lba & 0x00ff0000) >> 16;
	pass->cdb[4] = (lba & 0x0000ff00) >> 8;
	pass->cdb[5] = (lba & 0x000000ff);
	blkcount = (len + MFI_SECTOR_LEN - 1) / MFI_SECTOR_LEN;
	pass->cdb[7] = (blkcount & 0xff00) >> 8;
	pass->cdb[8] = (blkcount & 0x00ff);
	pass->header.target_id = id;
	pass->header.timeout = 0;
	pass->header.flags = 0;
	pass->header.scsi_status = 0;
	pass->header.sense_len = MFI_SENSE_LEN;
	pass->header.data_len = len;	/* pass-through length is in bytes */
	pass->header.cdb_len = 10;
	pass->sense_addr_lo = (uint32_t)cm->cm_sense_busaddr;
	pass->sense_addr_hi = (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
	cm->cm_data = virt;
	cm->cm_len = len;
	cm->cm_sg = &pass->sgl;
	cm->cm_total_frame_size = MFI_PASS_FRAME_SIZE;
	cm->cm_flags = MFI_CMD_POLLED | MFI_CMD_DATAOUT;

	error = mfi_mapcmd(sc, cm);
	bus_dmamap_sync(sc->mfi_buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mfi_buffer_dmat, cm->cm_dmamap);
	mfi_release_command(cm);

	return (error);
}
2504 
2505 static int
2506 mfi_open(struct dev_open_args *ap)
2507 {
2508 	cdev_t dev = ap->a_head.a_dev;
2509 	struct mfi_softc *sc;
2510 	int error;
2511 
2512 	sc = dev->si_drv1;
2513 
2514 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2515 	if (sc->mfi_detaching)
2516 		error = ENXIO;
2517 	else {
2518 		sc->mfi_flags |= MFI_FLAGS_OPEN;
2519 		error = 0;
2520 	}
2521 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2522 
2523 	return (error);
2524 }
2525 
2526 static int
2527 mfi_close(struct dev_close_args *ap)
2528 {
2529 	cdev_t dev = ap->a_head.a_dev;
2530 	struct mfi_softc *sc;
2531 	struct mfi_aen *mfi_aen_entry, *tmp;
2532 
2533 	sc = dev->si_drv1;
2534 
2535 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
2536 	sc->mfi_flags &= ~MFI_FLAGS_OPEN;
2537 
2538 	TAILQ_FOREACH_MUTABLE(mfi_aen_entry, &sc->mfi_aen_pids, aen_link, tmp) {
2539 		if (mfi_aen_entry->p == curproc) {
2540 			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
2541 			    aen_link);
2542 			kfree(mfi_aen_entry, M_MFIBUF);
2543 		}
2544 	}
2545 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
2546 	return (0);
2547 }
2548 
2549 static int
2550 mfi_config_lock(struct mfi_softc *sc, uint32_t opcode)
2551 {
2552 
2553 	switch (opcode) {
2554 	case MFI_DCMD_LD_DELETE:
2555 	case MFI_DCMD_CFG_ADD:
2556 	case MFI_DCMD_CFG_CLEAR:
2557 	case MFI_DCMD_CFG_FOREIGN_IMPORT:
2558 		lockmgr(&sc->mfi_config_lock, LK_EXCLUSIVE);
2559 		return (1);
2560 	default:
2561 		return (0);
2562 	}
2563 }
2564 
2565 static void
2566 mfi_config_unlock(struct mfi_softc *sc, int locked)
2567 {
2568 
2569 	if (locked)
2570 		lockmgr(&sc->mfi_config_lock, LK_RELEASE);
2571 }
2572 
2573 /*
2574  * Perform pre-issue checks on commands from userland and possibly veto
2575  * them.
2576  */
2577 static int
2578 mfi_check_command_pre(struct mfi_softc *sc, struct mfi_command *cm)
2579 {
2580 	struct mfi_disk *ld, *ld2;
2581 	int error;
2582 	struct mfi_system_pd *syspd = NULL;
2583 	uint16_t syspd_id;
2584 	uint16_t *mbox;
2585 
2586 	mfi_lockassert(&sc->mfi_io_lock);
2587 	error = 0;
2588 	switch (cm->cm_frame->dcmd.opcode) {
2589 	case MFI_DCMD_LD_DELETE:
2590 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2591 			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
2592 				break;
2593 		}
2594 		if (ld == NULL)
2595 			error = ENOENT;
2596 		else
2597 			error = mfi_disk_disable(ld);
2598 		break;
2599 	case MFI_DCMD_CFG_CLEAR:
2600 		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
2601 			error = mfi_disk_disable(ld);
2602 			if (error)
2603 				break;
2604 		}
2605 		if (error) {
2606 			TAILQ_FOREACH(ld2, &sc->mfi_ld_tqh, ld_link) {
2607 				if (ld2 == ld)
2608 					break;
2609 				mfi_disk_enable(ld2);
2610 			}
2611 		}
2612 		break;
2613 	case MFI_DCMD_PD_STATE_SET:
2614 		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
2615 		syspd_id = mbox[0];
2616 		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
2617 			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
2618 				if (syspd->pd_id == syspd_id)
2619 					break;
2620 			}
2621 		} else {
2622 			break;
2623 		}
2624 		if (syspd)
2625 			error = mfi_syspd_disable(syspd);
2626 		break;
2627 	default:
2628 		break;
2629 	}
2630 	return (error);
2631 }
2632 
/*
 * Perform post-issue checks on commands from userland: on success,
 * finish tearing down the child devices affected by destructive DCMDs
 * (or probe for newly created LDs); on failure, re-enable whatever
 * mfi_check_command_pre() disabled.  Called with mfi_io_lock held;
 * the lock is dropped around newbus child deletion.
 */
static void
mfi_check_command_post(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_disk *ld, *ldn;
	struct mfi_system_pd *syspd = NULL;
	uint16_t syspd_id;
	uint16_t *mbox;

	switch (cm->cm_frame->dcmd.opcode) {
	case MFI_DCMD_LD_DELETE:
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == cm->cm_frame->dcmd.mbox[0])
				break;
		}
		KASSERT(ld != NULL, ("volume dissappeared"));
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			/* Firmware deleted the LD; remove its child device. */
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			get_mplock();
			device_delete_child(sc->mfi_dev, ld->ld_dev);
			rel_mplock();
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		} else
			mfi_disk_enable(ld);
		break;
	case MFI_DCMD_CFG_CLEAR:
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK) {
			/* All LDs are gone; remove every child device. */
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			get_mplock();
			TAILQ_FOREACH_MUTABLE(ld, &sc->mfi_ld_tqh, ld_link, ldn) {
				device_delete_child(sc->mfi_dev, ld->ld_dev);
			}
			rel_mplock();
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		} else {
			TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link)
				mfi_disk_enable(ld);
		}
		break;
	case MFI_DCMD_CFG_ADD:
	case MFI_DCMD_CFG_FOREIGN_IMPORT:
		/* New volumes may exist now; rescan. */
		if (cm->cm_frame->header.cmd_status == MFI_STAT_OK)
			mfi_ldprobe(sc);
		break;
	case MFI_DCMD_PD_STATE_SET:
		mbox = (uint16_t *)cm->cm_frame->dcmd.mbox;
		syspd_id = mbox[0];
		if (mbox[2] == MFI_PD_STATE_UNCONFIGURED_GOOD) {
			TAILQ_FOREACH(syspd, &sc->mfi_syspd_tqh, pd_link) {
				if (syspd->pd_id == syspd_id)
					break;
			}
		} else {
			break;
		}
		/* If the transition fails then enable the syspd again */
		if (syspd && cm->cm_frame->header.cmd_status != MFI_STAT_OK)
			mfi_syspd_enable(syspd);
		break;
	}
}
2694 
/*
 * Decide whether a userland configuration command targets an SSCD
 * volume (isSSCD in the LD parameters; presumably a CacheCade/SSD-cache
 * LD — confirm).  For CFG_ADD the supplied config data is inspected
 * directly; for LD_DELETE an MFI_DCMD_LD_GET_INFO query is issued for
 * the target LD.
 *
 * Returns 1 when the target is an SSCD, 0 otherwise (including on any
 * error while querying).
 */
static int
mfi_check_for_sscd(struct mfi_softc *sc, struct mfi_command *cm)
{
	struct mfi_config_data *conf_data = cm->cm_data;
	struct mfi_command *ld_cm = NULL;
	struct mfi_ld_info *ld_info = NULL;
	int error = 0;

	if ((cm->cm_frame->dcmd.opcode == MFI_DCMD_CFG_ADD) &&
	    (conf_data->ld[0].params.isSSCD == 1)) {
		error = 1;
	} else if (cm->cm_frame->dcmd.opcode == MFI_DCMD_LD_DELETE) {
		error = mfi_dcmd_command(sc, &ld_cm, MFI_DCMD_LD_GET_INFO,
		    (void **)&ld_info, sizeof(*ld_info));
		if (error) {
			device_printf(sc->mfi_dev, "Failed to allocate"
			    "MFI_DCMD_LD_GET_INFO %d", error);
			if (ld_info)
				kfree(ld_info, M_MFIBUF);
			return 0;
		}
		ld_cm->cm_flags = MFI_CMD_DATAIN;
		/* mbox[0] of the delete command holds the target LD id. */
		ld_cm->cm_frame->dcmd.mbox[0]= cm->cm_frame->dcmd.mbox[0];
		ld_cm->cm_frame->header.target_id = cm->cm_frame->dcmd.mbox[0];
		if (mfi_wait_command(sc, ld_cm) != 0) {
			device_printf(sc->mfi_dev, "failed to get log drv\n");
			mfi_release_command(ld_cm);
			kfree(ld_info, M_MFIBUF);
			return 0;
		}

		if (ld_cm->cm_frame->header.cmd_status != MFI_STAT_OK) {
			kfree(ld_info, M_MFIBUF);
			mfi_release_command(ld_cm);
			return 0;
		} else {
			ld_info = (struct mfi_ld_info *)ld_cm->cm_private;
		}

		if (ld_info->ld_config.params.isSSCD == 1)
			error = 1;

		mfi_release_command(ld_cm);
		kfree(ld_info, M_MFIBUF);
	}
	return error;
}
2742 
2743 static int
2744 mfi_stp_cmd(struct mfi_softc *sc, struct mfi_command *cm,caddr_t arg)
2745 {
2746 	uint8_t i;
2747 	struct mfi_ioc_packet *ioc;
2748 	ioc = (struct mfi_ioc_packet *)arg;
2749 	int sge_size, error;
2750 	struct megasas_sge *kern_sge;
2751 
2752 	memset(sc->kbuff_arr, 0, sizeof(sc->kbuff_arr));
2753 	kern_sge =(struct megasas_sge *) ((uintptr_t)cm->cm_frame + ioc->mfi_sgl_off);
2754 	cm->cm_frame->header.sg_count = ioc->mfi_sge_count;
2755 
2756 	if (sizeof(bus_addr_t) == 8) {
2757 		cm->cm_frame->header.flags |= MFI_FRAME_SGL64;
2758 		cm->cm_extra_frames = 2;
2759 		sge_size = sizeof(struct mfi_sg64);
2760 	} else {
2761 		cm->cm_extra_frames =  (cm->cm_total_frame_size - 1) / MFI_FRAME_SIZE;
2762 		sge_size = sizeof(struct mfi_sg32);
2763 	}
2764 
2765 	cm->cm_total_frame_size += (sge_size * ioc->mfi_sge_count);
2766 	for (i = 0; i < ioc->mfi_sge_count; i++) {
2767 			if (bus_dma_tag_create( sc->mfi_parent_dmat,	/* parent */
2768 			1, 0,			/* algnmnt, boundary */
2769 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
2770 			BUS_SPACE_MAXADDR,	/* highaddr */
2771 			NULL, NULL,		/* filter, filterarg */
2772 			ioc->mfi_sgl[i].iov_len,/* maxsize */
2773 			2,			/* nsegments */
2774 			ioc->mfi_sgl[i].iov_len,/* maxsegsize */
2775 			BUS_DMA_ALLOCNOW,	/* flags */
2776 			&sc->mfi_kbuff_arr_dmat[i])) {
2777 			device_printf(sc->mfi_dev,
2778 			    "Cannot allocate mfi_kbuff_arr_dmat tag\n");
2779 			return (ENOMEM);
2780 		}
2781 
2782 		if (bus_dmamem_alloc(sc->mfi_kbuff_arr_dmat[i],
2783 		    (void **)&sc->kbuff_arr[i], BUS_DMA_NOWAIT,
2784 		    &sc->mfi_kbuff_arr_dmamap[i])) {
2785 			device_printf(sc->mfi_dev,
2786 			    "Cannot allocate mfi_kbuff_arr_dmamap memory\n");
2787 			return (ENOMEM);
2788 		}
2789 
2790 		bus_dmamap_load(sc->mfi_kbuff_arr_dmat[i],
2791 		    sc->mfi_kbuff_arr_dmamap[i], sc->kbuff_arr[i],
2792 		    ioc->mfi_sgl[i].iov_len, mfi_addr_cb,
2793 		    &sc->mfi_kbuff_arr_busaddr[i], 0);
2794 
2795 		if (!sc->kbuff_arr[i]) {
2796 			device_printf(sc->mfi_dev,
2797 			    "Could not allocate memory for kbuff_arr info\n");
2798 			return -1;
2799 		}
2800 		kern_sge[i].phys_addr = sc->mfi_kbuff_arr_busaddr[i];
2801 		kern_sge[i].length = ioc->mfi_sgl[i].iov_len;
2802 
2803 		if (sizeof(bus_addr_t) == 8) {
2804 			cm->cm_frame->stp.sgl.sg64[i].addr =
2805 			    kern_sge[i].phys_addr;
2806 			cm->cm_frame->stp.sgl.sg64[i].len =
2807 			    ioc->mfi_sgl[i].iov_len;
2808 		} else {
2809 			cm->cm_frame->stp.sgl.sg32[i].addr =
2810 			    kern_sge[i].phys_addr;
2811 			cm->cm_frame->stp.sgl.sg32[i].len =
2812 			    ioc->mfi_sgl[i].iov_len;
2813 		}
2814 
2815 		error = copyin(ioc->mfi_sgl[i].iov_base,
2816 		    sc->kbuff_arr[i],
2817 		    ioc->mfi_sgl[i].iov_len);
2818 		if (error != 0) {
2819 			device_printf(sc->mfi_dev, "Copy in failed\n");
2820 			return error;
2821 		}
2822 	}
2823 
2824 	cm->cm_flags |=MFI_CMD_MAPPED;
2825 	return 0;
2826 }
2827 
/*
 * Execute a DCMD passed in from userland (passthru ioctl).  The user
 * data buffer (if any) is bounced through a kernel allocation, the DCMD
 * frame is copied in, the command is run synchronously, and frame and
 * buffer are copied back out.
 *
 * Returns 0 on success, or an errno from copyin/copyout, the pre-issue
 * veto in mfi_check_command_pre(), or command execution.
 */
static int
mfi_user_command(struct mfi_softc *sc, struct mfi_ioc_passthru *ioc)
{
	struct mfi_command *cm;
	struct mfi_dcmd_frame *dcmd;
	void *ioc_buf = NULL;
	uint32_t context;
	int error = 0, locked;


	if (ioc->buf_size > 0) {
		ioc_buf = kmalloc(ioc->buf_size, M_MFIBUF, M_WAITOK);
		error = copyin(ioc->buf, ioc_buf, ioc->buf_size);
		if (error) {
			device_printf(sc->mfi_dev, "failed to copyin\n");
			kfree(ioc_buf, M_MFIBUF);
			return (error);
		}
	}

	/* Serialize configuration-changing opcodes. */
	locked = mfi_config_lock(sc, ioc->ioc_frame.opcode);

	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
	/*
	 * The function address serves as the sleep channel; presumably a
	 * wakeup is issued when commands are freed — confirm.
	 */
	while ((cm = mfi_dequeue_free(sc)) == NULL)
		lksleep(mfi_user_command, &sc->mfi_io_lock, 0, "mfiioc", hz);

	/* Save context for later */
	context = cm->cm_frame->header.context;

	dcmd = &cm->cm_frame->dcmd;
	bcopy(&ioc->ioc_frame, dcmd, sizeof(struct mfi_dcmd_frame));

	cm->cm_sg = &dcmd->sgl;
	cm->cm_total_frame_size = MFI_DCMD_FRAME_SIZE;
	cm->cm_data = ioc_buf;
	cm->cm_len = ioc->buf_size;

	/* restore context (the bcopy above clobbered it) */
	cm->cm_frame->header.context = context;

	/* Cheat since we don't know if we're writing or reading */
	cm->cm_flags = MFI_CMD_DATAIN | MFI_CMD_DATAOUT;

	error = mfi_check_command_pre(sc, cm);
	if (error)
		goto out;

	error = mfi_wait_command(sc, cm);
	if (error) {
		device_printf(sc->mfi_dev, "ioctl failed %d\n", error);
		goto out;
	}
	/* Return the completed frame (status etc.) to userland. */
	bcopy(dcmd, &ioc->ioc_frame, sizeof(struct mfi_dcmd_frame));
	mfi_check_command_post(sc, cm);
out:
	mfi_release_command(cm);
	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
	mfi_config_unlock(sc, locked);
	if (ioc->buf_size > 0)
		error = copyout(ioc_buf, ioc->buf, ioc->buf_size);
	if (ioc_buf)
		kfree(ioc_buf, M_MFIBUF);
	return (error);
}
2892 
/* Convert a user-supplied pointer-sized integer back into a kernel void *. */
#define	PTRIN(p)		((void *)(uintptr_t)(p))
2894 
/*
 * Character-device ioctl entry point for the mfi(4) control node.
 *
 * Handles queue statistics (MFIIO_STATS), logical-disk queries
 * (MFIIO_QUERY_DISK), raw firmware command passthrough (MFI_CMD),
 * AEN registration (MFI_SET_AEN), the Linux management-tool shims
 * (MFI_LINUX_CMD_2 / MFI_LINUX_SET_AEN_2, forwarded to
 * mfi_linux_ioctl_int()), and MFIIO_PASSTHRU (mfi_user_command()).
 *
 * Returns 0 on success or an errno value.  All firmware-visible work is
 * serialized under sc->mfi_io_lock; user buffers are staged through a
 * kernel bounce buffer ("data") before/after command execution.
 */
static int
mfi_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	int flag = ap->a_fflag;
	caddr_t arg = ap->a_data;
	struct mfi_softc *sc;
	union mfi_statrequest *ms;
	struct mfi_ioc_packet *ioc;
	struct mfi_ioc_aen *aen;
	struct mfi_command *cm = NULL;
	uint32_t context;
	union mfi_sense_ptr sense_ptr;
	uint8_t *data = NULL, *temp, *addr, skip_pre_post = 0;
	size_t len;
	int i, res;
	struct mfi_ioc_passthru *iop = (struct mfi_ioc_passthru *)arg;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;

	/* Refuse all ioctls while the adapter is resetting or wedged. */
	if (sc->adpreset)
		return EBUSY;

	if (sc->hw_crit_error)
		return EBUSY;

	if (sc->issuepend_done == 0)
		return EBUSY;

	switch (cmd) {
	case MFIIO_STATS:
		/* Copy out one of the per-queue statistics structures. */
		ms = (union mfi_statrequest *)arg;
		switch (ms->ms_item) {
		case MFIQ_FREE:
		case MFIQ_BIO:
		case MFIQ_READY:
		case MFIQ_BUSY:
			bcopy(&sc->mfi_qstat[ms->ms_item], &ms->ms_qstat,
			    sizeof(struct mfi_qstat));
			break;
		default:
			error = ENOIOCTL;
			break;
		}
		break;
	case MFIIO_QUERY_DISK:
	{
		/* Report presence/open-state/devname of a logical disk. */
		struct mfi_query_disk *qd;
		struct mfi_disk *ld;

		qd = (struct mfi_query_disk *)arg;
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		TAILQ_FOREACH(ld, &sc->mfi_ld_tqh, ld_link) {
			if (ld->ld_id == qd->array_id)
				break;
		}
		if (ld == NULL) {
			qd->present = 0;
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (0);
		}
		qd->present = 1;
		if (ld->ld_flags & MFI_DISK_FLAGS_OPEN)
			qd->open = 1;
		bzero(qd->devname, SPECNAMELEN + 1);
		ksnprintf(qd->devname, SPECNAMELEN, "mfid%d", ld->ld_unit);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		break;
	}
	case MFI_CMD:
		{
		/* Raw firmware frame passthrough from a management tool. */
		devclass_t devclass;
		ioc = (struct mfi_ioc_packet *)arg;
		int adapter;

		/*
		 * The ioctl may target a different adapter than the node it
		 * was issued on; re-resolve the softc in that case.
		 * NOTE(review): devclass_get_softc() can return NULL for a
		 * bogus adapter number and is not checked here — confirm.
		 */
		adapter = ioc->mfi_adapter_no;
		if (device_get_unit(sc->mfi_dev) == 0 && adapter != 0) {
			devclass = devclass_find("mfi");
			sc = devclass_get_softc(devclass, adapter);
		}
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (EBUSY);
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;
		cm->cm_frame->header.context = cm->cm_index;

		/* Pull the user's frame (plus possible extra frame) in. */
		bcopy(ioc->mfi_frame.raw, cm->cm_frame,
		    2 * MEGAMFI_FRAME_SIZE);
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		    * ioc->mfi_sge_count) + ioc->mfi_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (ioc->mfi_sge_count) {
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[ioc->mfi_sgl_off];
		}
		/* Derive DMA direction from the frame's flag bits. */
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		/* Legacy app shim */
		if (cm->cm_flags == 0)
			cm->cm_flags |= MFI_CMD_DATAIN | MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* STP carries extra payload described by the first SGE. */
			cm->cm_stp_len = ioc->mfi_sgl[0].iov_len;
			cm->cm_len += cm->cm_stp_len;
		}
		/* Kernel bounce buffer for the user data, if any. */
		if (cm->cm_len &&
		    (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* STP commands use dedicated DMA buffers (kbuff_arr). */
			res = mfi_stp_cmd(sc, cm, arg);
			if (res != 0)
				goto out;
		} else {
			/* Stage user SGL segments into the bounce buffer. */
			temp = data;
			if ((cm->cm_flags & MFI_CMD_DATAOUT) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
					addr = ioc->mfi_sgl[i].iov_base;
					len = ioc->mfi_sgl[i].iov_len;
					error = copyin(addr, temp, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy in failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		/* Some DCMD opcodes require the configuration lock. */
		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc,
			     cm->cm_frame->dcmd.opcode);

		/* Point the firmware at our per-command sense buffer. */
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		/* SSCD commands bypass the usual pre/post fixups. */
		skip_pre_post = mfi_check_for_sscd(sc, cm);
		if (!skip_pre_post) {
			error = mfi_check_command_pre(sc, cm);
			if (error) {
				lockmgr(&sc->mfi_io_lock, LK_RELEASE);
				goto out;
			}
		}

		/* Issue the command and sleep until it completes. */
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			goto out;
		}

		if (!skip_pre_post)
			mfi_check_command_post(sc, cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		/* Copy results back out through the user SGL. */
		if (cm->cm_frame->header.cmd != MFI_CMD_STP) {
			temp = data;
			/*
			 * NOTE(review): the STP comparison below is dead code
			 * — it can never be true inside this != MFI_CMD_STP
			 * branch; only the DATAIN clause matters.
			 */
			if ((cm->cm_flags & MFI_CMD_DATAIN) ||
			    (cm->cm_frame->header.cmd == MFI_CMD_STP)) {
				for (i = 0; i < ioc->mfi_sge_count; i++) {
					addr = ioc->mfi_sgl[i].iov_base;
					len = ioc->mfi_sgl[i].iov_len;
					error = copyout(temp, addr, len);
					if (error != 0) {
						device_printf(sc->mfi_dev,
						    "Copy out failed\n");
						goto out;
					}
					temp = &temp[len];
				}
			}
		}

		if (ioc->mfi_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    ioc->mfi_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Reflect the firmware completion status to the caller. */
		ioc->mfi_frame.hdr.cmd_status = cm->cm_frame->header.cmd_status;
out:
		/* Common unwind: config lock, bounce buffer, STP DMA, command. */
		mfi_config_unlock(sc, locked);
		if (data)
			kfree(data, M_MFIBUF);
		if (cm->cm_frame->header.cmd == MFI_CMD_STP) {
			/* Tear down the DMA resources mfi_stp_cmd() set up. */
			for (i = 0; i < 2; i++) {
				if (sc->kbuff_arr[i]) {
					if (sc->mfi_kbuff_arr_busaddr != 0)
						bus_dmamap_unload(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->kbuff_arr[i] != NULL)
						bus_dmamem_free(
						    sc->mfi_kbuff_arr_dmat[i],
						    sc->kbuff_arr[i],
						    sc->mfi_kbuff_arr_dmamap[i]
						    );
					if (sc->mfi_kbuff_arr_dmat[i] != NULL)
						bus_dma_tag_destroy(
						    sc->mfi_kbuff_arr_dmat[i]);
				}
			}
		}
		if (cm) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}

		break;
		}
	case MFI_SET_AEN:
		aen = (struct mfi_ioc_aen *)arg;
		error = mfi_aen_register(sc, aen->aen_seq_num,
		    aen->aen_class_locale);

		break;
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_packet l_ioc;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			/* Peek at the packet only to pick the right adapter. */
			error = copyin(arg, &l_ioc, sizeof(l_ioc));
			if (error)
				return (error);
			adapter = l_ioc.lioc_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag));
			break;
		}
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		{
			devclass_t devclass;
			struct mfi_linux_ioc_aen l_aen;
			int adapter;

			devclass = devclass_find("mfi");
			if (devclass == NULL)
				return (ENOENT);

			/* Peek at the AEN request only to pick the adapter. */
			error = copyin(arg, &l_aen, sizeof(l_aen));
			if (error)
				return (error);
			adapter = l_aen.laen_adapter_no;
			sc = devclass_get_softc(devclass, adapter);
			if (sc == NULL)
				return (ENOENT);
			return (mfi_linux_ioctl_int(sc->mfi_cdev,
			    cmd, arg, flag));
			break;
		}
	case MFIIO_PASSTHRU:
		error = mfi_user_command(sc, iop);
		break;
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
3204 
/*
 * Back end for the Linux-compat management ioctls (megaraid_sas tools).
 *
 * MFI_LINUX_CMD_2 runs one firmware frame supplied by a 32-bit Linux
 * application: the frame and its SGL are copied in, data is staged
 * through a kernel bounce buffer, the command is executed under
 * sc->mfi_io_lock, and data/sense/status are copied back out.
 * MFI_LINUX_SET_AEN_2 registers the calling process for AEN delivery.
 *
 * Returns 0 on success or an errno value.
 */
static int
mfi_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t arg, int flag)
{
	struct mfi_softc *sc;
	struct mfi_linux_ioc_packet l_ioc;
	struct mfi_linux_ioc_aen l_aen;
	struct mfi_command *cm = NULL;
	struct mfi_aen *mfi_aen_entry;
	union mfi_sense_ptr sense_ptr;
	uint32_t context;
	uint8_t *data = NULL, *temp;
	int i;
	int error, locked;

	sc = dev->si_drv1;
	error = 0;
	switch (cmd) {
	case MFI_LINUX_CMD_2: /* Firmware Linux ioctl shim */
		error = copyin(arg, &l_ioc, sizeof(l_ioc));
		if (error != 0)
			return (error);

		/* Bound the SGE count before trusting it below. */
		if (l_ioc.lioc_sge_count > MAX_LINUX_IOCTL_SGE) {
			return (EINVAL);
		}

		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		if ((cm = mfi_dequeue_free(sc)) == NULL) {
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			return (EBUSY);
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		locked = 0;

		/*
		 * save off original context since copying from user
		 * will clobber some data
		 */
		context = cm->cm_frame->header.context;

		bcopy(l_ioc.lioc_frame.raw, cm->cm_frame,
		      2 * MFI_DCMD_FRAME_SIZE);	/* this isn't quite right */
		cm->cm_total_frame_size = (sizeof(union mfi_sgl)
		      * l_ioc.lioc_sge_count) + l_ioc.lioc_sgl_off;
		cm->cm_frame->header.scsi_status = 0;
		cm->cm_frame->header.pad0 = 0;
		if (l_ioc.lioc_sge_count)
			cm->cm_sg =
			    (union mfi_sgl *)&cm->cm_frame->bytes[l_ioc.lioc_sgl_off];
		/* Derive DMA direction from the frame's flag bits. */
		cm->cm_flags = 0;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAIN)
			cm->cm_flags |= MFI_CMD_DATAIN;
		if (cm->cm_frame->header.flags & MFI_FRAME_DATAOUT)
			cm->cm_flags |= MFI_CMD_DATAOUT;
		cm->cm_len = cm->cm_frame->header.data_len;
		/* Kernel bounce buffer for the user data, if any. */
		if (cm->cm_len &&
		      (cm->cm_flags & (MFI_CMD_DATAIN | MFI_CMD_DATAOUT))) {
			cm->cm_data = data = kmalloc(cm->cm_len, M_MFIBUF,
			    M_WAITOK | M_ZERO);
		} else {
			cm->cm_data = 0;
		}

		/* restore header context */
		cm->cm_frame->header.context = context;

		/* Stage user SGL segments into the bounce buffer. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAOUT) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyin(PTRIN(l_ioc.lioc_sgl[i].iov_base),
				       temp,
				       l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy in failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		/* Some DCMD opcodes require the configuration lock. */
		if (cm->cm_frame->header.cmd == MFI_CMD_DCMD)
			locked = mfi_config_lock(sc, cm->cm_frame->dcmd.opcode);

		/* Point the firmware at our per-command sense buffer. */
		if (cm->cm_frame->header.cmd == MFI_CMD_PD_SCSI_IO) {
			cm->cm_frame->pass.sense_addr_lo =
			    (uint32_t)cm->cm_sense_busaddr;
			cm->cm_frame->pass.sense_addr_hi =
			    (uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32);
		}

		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		error = mfi_check_command_pre(sc, cm);
		if (error) {
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			goto out;
		}

		/* Issue the command and sleep until it completes. */
		if ((error = mfi_wait_command(sc, cm)) != 0) {
			device_printf(sc->mfi_dev,
			    "Controller polled failed\n");
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
			goto out;
		}

		mfi_check_command_post(sc, cm);
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		/* Copy results back out through the user SGL. */
		temp = data;
		if (cm->cm_flags & MFI_CMD_DATAIN) {
			for (i = 0; i < l_ioc.lioc_sge_count; i++) {
				error = copyout(temp,
					PTRIN(l_ioc.lioc_sgl[i].iov_base),
					l_ioc.lioc_sgl[i].iov_len);
				if (error != 0) {
					device_printf(sc->mfi_dev,
					    "Copy out failed\n");
					goto out;
				}
				temp = &temp[l_ioc.lioc_sgl[i].iov_len];
			}
		}

		if (l_ioc.lioc_sense_len) {
			/* get user-space sense ptr then copy out sense */
			bcopy(&((struct mfi_linux_ioc_packet*)arg)
                            ->lioc_frame.raw[l_ioc.lioc_sense_off],
			    &sense_ptr.sense_ptr_data[0],
			    sizeof(sense_ptr.sense_ptr_data));
#ifdef __x86_64__
			/*
			 * only 32bit Linux support so zero out any
			 * address over 32bit
			 */
			sense_ptr.addr.high = 0;
#endif
			error = copyout(cm->cm_sense, sense_ptr.user_space,
			    l_ioc.lioc_sense_len);
			if (error != 0) {
				device_printf(sc->mfi_dev,
				    "Copy out failed\n");
				goto out;
			}
		}

		/* Reflect the firmware completion status to the caller. */
		error = copyout(&cm->cm_frame->header.cmd_status,
			&((struct mfi_linux_ioc_packet*)arg)
			->lioc_frame.hdr.cmd_status,
			1);
		if (error != 0) {
			device_printf(sc->mfi_dev,
				      "Copy out failed\n");
			goto out;
		}

out:
		/* Common unwind: config lock, bounce buffer, command slot. */
		mfi_config_unlock(sc, locked);
		if (data)
			kfree(data, M_MFIBUF);
		if (cm) {
			lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
			mfi_release_command(cm);
			lockmgr(&sc->mfi_io_lock, LK_RELEASE);
		}

		return (error);
	case MFI_LINUX_SET_AEN_2: /* AEN Linux ioctl shim */
		error = copyin(arg, &l_aen, sizeof(l_aen));
		if (error != 0)
			return (error);
		kprintf("AEN IMPLEMENTED for pid %d\n", curproc->p_pid);
		mfi_aen_entry = kmalloc(sizeof(struct mfi_aen), M_MFIBUF,
		    M_WAITOK);
		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
		/*
		 * NOTE(review): kmalloc(M_WAITOK) does not return NULL, so
		 * this check is always true; kept for byte-identity.
		 */
		if (mfi_aen_entry != NULL) {
			mfi_aen_entry->p = curproc;
			TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
		}
		error = mfi_aen_register(sc, l_aen.laen_seq_num,
		    l_aen.laen_class_locale);

		/* Registration failed: undo the pid-list insertion. */
		if (error != 0) {
			TAILQ_REMOVE(&sc->mfi_aen_pids, mfi_aen_entry,
			    aen_link);
			kfree(mfi_aen_entry, M_MFIBUF);
		}
		lockmgr(&sc->mfi_io_lock, LK_RELEASE);

		return (error);
	default:
		device_printf(sc->mfi_dev, "IOCTL 0x%lx not handled\n", cmd);
		error = ENOENT;
		break;
	}

	return (error);
}
3403 
3404 static int
3405 mfi_kqfilter(struct dev_kqfilter_args *ap)
3406 {
3407 	cdev_t dev = ap->a_head.a_dev;
3408 	struct knote *kn = ap->a_kn;
3409 	struct mfi_softc *sc;
3410 	struct klist *klist;
3411 
3412 	ap->a_result = 0;
3413 	sc = dev->si_drv1;
3414 
3415 	switch (kn->kn_filter) {
3416 	case EVFILT_READ:
3417 		kn->kn_fop = &mfi_read_filterops;
3418 		kn->kn_hook = (caddr_t)sc;
3419 		break;
3420 	case EVFILT_WRITE:
3421 		kn->kn_fop = &mfi_write_filterops;
3422 		kn->kn_hook = (caddr_t)sc;
3423 		break;
3424 	default:
3425 		ap->a_result = EOPNOTSUPP;
3426 		return (0);
3427 	}
3428 
3429 	klist = &sc->mfi_kq.ki_note;
3430 	knote_insert(klist, kn);
3431 
3432 	return(0);
3433 }
3434 
3435 static void
3436 mfi_filter_detach(struct knote *kn)
3437 {
3438 	struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
3439 	struct klist *klist = &sc->mfi_kq.ki_note;
3440 
3441 	knote_remove(klist, kn);
3442 }
3443 
/*
 * kqueue EVFILT_READ filter: report whether an asynchronous event
 * notification (AEN) has fired since the last poll, consuming the
 * trigger in the process.
 */
static int
mfi_filter_read(struct knote *kn, long hint)
{
	struct mfi_softc *sc = (struct mfi_softc *)kn->kn_hook;
	int ready = 0;

	/* Consume a pending AEN trigger, if any. */
	if (sc->mfi_aen_triggered != 0) {
		ready = 1;
		sc->mfi_aen_triggered = 0;
	}
	/*
	 * NOTE(review): mfi_aen_triggered was just cleared above whenever
	 * ready was set, so unless another context re-arms it concurrently
	 * this condition reduces to "no AEN command outstanding".
	 */
	if (sc->mfi_aen_triggered == 0 && sc->mfi_aen_cm == NULL)
		kn->kn_flags |= EV_ERROR;

	/* Nothing ready: record that a poller is waiting for the next AEN. */
	if (ready == 0)
		sc->mfi_poll_waiting = 1;

	return (ready);
}
3462 
/*
 * kqueue EVFILT_WRITE filter: the mfi control device never reports
 * write readiness.
 */
static int
mfi_filter_write(struct knote *kn, long hint)
{
	return (0);
}
3468 
3469 static void
3470 mfi_dump_all(void)
3471 {
3472 	struct mfi_softc *sc;
3473 	struct mfi_command *cm;
3474 	devclass_t dc;
3475 	time_t deadline;
3476 	int timedout;
3477 	int i;
3478 
3479 	dc = devclass_find("mfi");
3480 	if (dc == NULL) {
3481 		kprintf("No mfi dev class\n");
3482 		return;
3483 	}
3484 
3485 	for (i = 0; ; i++) {
3486 		sc = devclass_get_softc(dc, i);
3487 		if (sc == NULL)
3488 			break;
3489 		device_printf(sc->mfi_dev, "Dumping\n\n");
3490 		timedout = 0;
3491 		deadline = time_uptime - mfi_cmd_timeout;
3492 		lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3493 		TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3494 			if (cm->cm_timestamp < deadline) {
3495 				device_printf(sc->mfi_dev,
3496 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3497 				    cm, (int)(time_uptime - cm->cm_timestamp));
3498 				MFI_PRINT_CMD(cm);
3499 				timedout++;
3500 			}
3501 		}
3502 
3503 #if 0
3504 		if (timedout)
3505 			MFI_DUMP_CMDS(SC);
3506 #endif
3507 
3508 		lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3509 	}
3510 
3511 	return;
3512 }
3513 
3514 static void
3515 mfi_timeout(void *data)
3516 {
3517 	struct mfi_softc *sc = (struct mfi_softc *)data;
3518 	struct mfi_command *cm;
3519 	time_t deadline;
3520 	int timedout = 0;
3521 
3522 	deadline = time_uptime - mfi_cmd_timeout;
3523 	if (sc->adpreset == 0) {
3524 		if (!mfi_tbolt_reset(sc)) {
3525 			callout_reset(&sc->mfi_watchdog_callout,
3526 			    mfi_cmd_timeout * hz, mfi_timeout, sc);
3527 			return;
3528 		}
3529 	}
3530 	lockmgr(&sc->mfi_io_lock, LK_EXCLUSIVE);
3531 	TAILQ_FOREACH(cm, &sc->mfi_busy, cm_link) {
3532 		if (sc->mfi_aen_cm == cm)
3533 			continue;
3534 		if ((sc->mfi_aen_cm != cm) && (cm->cm_timestamp < deadline)) {
3535 			if (sc->adpreset != 0 && sc->issuepend_done == 0) {
3536 				cm->cm_timestamp = time_uptime;
3537 			} else {
3538 				device_printf(sc->mfi_dev,
3539 				    "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
3540 				     cm, (int)(time_uptime - cm->cm_timestamp));
3541 				MFI_PRINT_CMD(cm);
3542 				MFI_VALIDATE_CMD(sc, cm);
3543 				timedout++;
3544 			}
3545 		}
3546 	}
3547 
3548 #if 0
3549 	if (timedout)
3550 		MFI_DUMP_CMDS(SC);
3551 #endif
3552 
3553 	lockmgr(&sc->mfi_io_lock, LK_RELEASE);
3554 
3555 	callout_reset(&sc->mfi_watchdog_callout, mfi_cmd_timeout * hz,
3556 	    mfi_timeout, sc);
3557 
3558 	if (0)
3559 		mfi_dump_all();
3560 	return;
3561 }
3562