xref: /freebsd/sys/dev/aacraid/aacraid.c (revision c697fb7f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6, 7, 8, ... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 #include <dev/aacraid/aacraid_endian.h>
73 
74 #ifndef FILTER_HANDLED
75 #define FILTER_HANDLED	0x02
76 #endif
77 
78 static void	aac_add_container(struct aac_softc *sc,
79 				  struct aac_mntinforesp *mir, int f,
80 				  u_int32_t uid);
81 static void	aac_get_bus_info(struct aac_softc *sc);
82 static void	aac_container_bus(struct aac_softc *sc);
83 static void	aac_daemon(void *arg);
84 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
85 							  int pages, int nseg, int nseg_new);
86 
87 /* Command Processing */
88 static void	aac_timeout(struct aac_softc *sc);
89 static void	aac_command_thread(struct aac_softc *sc);
90 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
91 				     u_int32_t xferstate, struct aac_fib *fib,
92 				     u_int16_t datasize);
93 /* Command Buffer Management */
94 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
95 				       int nseg, int error);
96 static int	aac_alloc_commands(struct aac_softc *sc);
97 static void	aac_free_commands(struct aac_softc *sc);
98 static void	aac_unmap_command(struct aac_command *cm);
99 
100 /* Hardware Interface */
101 static int	aac_alloc(struct aac_softc *sc);
102 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
103 			       int error);
104 static int	aac_check_firmware(struct aac_softc *sc);
105 static void	aac_define_int_mode(struct aac_softc *sc);
106 static int	aac_init(struct aac_softc *sc);
107 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
108 static int	aac_setup_intr(struct aac_softc *sc);
109 static int	aac_check_config(struct aac_softc *sc);
110 
111 /* PMC SRC interface */
112 static int	aac_src_get_fwstatus(struct aac_softc *sc);
113 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
114 static int	aac_src_get_istatus(struct aac_softc *sc);
115 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
116 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
117 				    u_int32_t arg0, u_int32_t arg1,
118 				    u_int32_t arg2, u_int32_t arg3);
119 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
120 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
121 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
122 static int aac_src_get_outb_queue(struct aac_softc *sc);
123 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
124 
125 struct aac_interface aacraid_src_interface = {
126 	aac_src_get_fwstatus,
127 	aac_src_qnotify,
128 	aac_src_get_istatus,
129 	aac_src_clear_istatus,
130 	aac_src_set_mailbox,
131 	aac_src_get_mailbox,
132 	aac_src_access_devreg,
133 	aac_src_send_command,
134 	aac_src_get_outb_queue,
135 	aac_src_set_outb_queue
136 };
137 
138 /* PMC SRCv interface */
139 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
140 				    u_int32_t arg0, u_int32_t arg1,
141 				    u_int32_t arg2, u_int32_t arg3);
142 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
143 
144 struct aac_interface aacraid_srcv_interface = {
145 	aac_src_get_fwstatus,
146 	aac_src_qnotify,
147 	aac_src_get_istatus,
148 	aac_src_clear_istatus,
149 	aac_srcv_set_mailbox,
150 	aac_srcv_get_mailbox,
151 	aac_src_access_devreg,
152 	aac_src_send_command,
153 	aac_src_get_outb_queue,
154 	aac_src_set_outb_queue
155 };
156 
157 /* Debugging and Diagnostics */
158 static struct aac_code_lookup aac_cpu_variant[] = {
159 	{"i960JX",		CPUI960_JX},
160 	{"i960CX",		CPUI960_CX},
161 	{"i960HX",		CPUI960_HX},
162 	{"i960RX",		CPUI960_RX},
163 	{"i960 80303",		CPUI960_80303},
164 	{"StrongARM SA110",	CPUARM_SA110},
165 	{"PPC603e",		CPUPPC_603e},
166 	{"XScale 80321",	CPU_XSCALE_80321},
167 	{"MIPS 4KC",		CPU_MIPS_4KC},
168 	{"MIPS 5KC",		CPU_MIPS_5KC},
169 	{"Unknown StrongARM",	CPUARM_xxx},
170 	{"Unknown PowerPC",	CPUPPC_xxx},
171 	{NULL, 0},
172 	{"Unknown processor",	0}
173 };
174 
175 static struct aac_code_lookup aac_battery_platform[] = {
176 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
177 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
178 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
179 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
180 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
181 	{NULL, 0},
182 	{"unknown battery platform",		0}
183 };
184 static void	aac_describe_controller(struct aac_softc *sc);
185 static char	*aac_describe_code(struct aac_code_lookup *table,
186 				   u_int32_t code);
187 
188 /* Management Interface */
189 static d_open_t		aac_open;
190 static d_ioctl_t	aac_ioctl;
191 static d_poll_t		aac_poll;
192 static void		aac_cdevpriv_dtor(void *arg);
193 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
194 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
195 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
196 static void	aac_request_aif(struct aac_softc *sc);
197 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
198 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
199 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
200 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
201 static int	aac_return_aif(struct aac_softc *sc,
202 			       struct aac_fib_context *ctx, caddr_t uptr);
203 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
204 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
205 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
206 static void	aac_ioctl_event(struct aac_softc *sc,
207 				struct aac_event *event, void *arg);
208 static int	aac_reset_adapter(struct aac_softc *sc);
209 static int	aac_get_container_info(struct aac_softc *sc,
210 				       struct aac_fib *fib, int cid,
211 				       struct aac_mntinforesp *mir,
212 				       u_int32_t *uid);
213 static u_int32_t
214 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
215 
216 static struct cdevsw aacraid_cdevsw = {
217 	.d_version =	D_VERSION,
218 	.d_flags =	0,
219 	.d_open =	aac_open,
220 	.d_ioctl =	aac_ioctl,
221 	.d_poll =	aac_poll,
222 	.d_name =	"aacraid",
223 };
224 
225 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
226 
227 /* sysctl node */
228 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
229     "AACRAID driver parameters");
230 
231 /*
232  * Device Interface
233  */
234 
235 /*
236  * Initialize the controller and softc
237  */
238 int
239 aacraid_attach(struct aac_softc *sc)
240 {
241 	int error, unit;
242 	struct aac_fib *fib;
243 	struct aac_mntinforesp mir;
244 	int count = 0, i = 0;
245 	u_int32_t uid;
246 
247 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
248 	sc->hint_flags = device_get_flags(sc->aac_dev);
249 	/*
250 	 * Initialize per-controller queues.
251 	 */
252 	aac_initq_free(sc);
253 	aac_initq_ready(sc);
254 	aac_initq_busy(sc);
255 
256 	/* mark controller as suspended until we get ourselves organised */
257 	sc->aac_state |= AAC_STATE_SUSPEND;
258 
259 	/*
260 	 * Check that the firmware on the card is supported.
261 	 */
262 	sc->msi_enabled = sc->msi_tupelo = FALSE;
263 	if ((error = aac_check_firmware(sc)) != 0)
264 		return(error);
265 
266 	/*
267 	 * Initialize locks
268 	 */
269 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
270 	TAILQ_INIT(&sc->aac_container_tqh);
271 	TAILQ_INIT(&sc->aac_ev_cmfree);
272 
273 	/* Initialize the clock daemon callout. */
274 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
275 
276 	/*
277 	 * Initialize the adapter.
278 	 */
279 	if ((error = aac_alloc(sc)) != 0)
280 		return(error);
281 	aac_define_int_mode(sc);
282 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
283 		if ((error = aac_init(sc)) != 0)
284 			return(error);
285 	}
286 
287 	/*
288 	 * Allocate and connect our interrupt.
289 	 */
290 	if ((error = aac_setup_intr(sc)) != 0)
291 		return(error);
292 
293 	/*
294 	 * Print a little information about the controller.
295 	 */
296 	aac_describe_controller(sc);
297 
298 	/*
299 	 * Make the control device.
300 	 */
301 	unit = device_get_unit(sc->aac_dev);
302 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
303 				 0640, "aacraid%d", unit);
304 	sc->aac_dev_t->si_drv1 = sc;
305 
306 	/* Create the AIF thread */
307 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
308 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
309 		panic("Could not create AIF thread");
310 
311 	/* Register the shutdown method to only be called post-dump */
312 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
313 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
314 		device_printf(sc->aac_dev,
315 			      "shutdown event registration failed\n");
316 
317 	/* Find containers */
318 	mtx_lock(&sc->aac_io_lock);
319 	aac_alloc_sync_fib(sc, &fib);
320 	/* loop over possible containers */
321 	do {
322 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
323 			continue;
324 		if (i == 0)
325 			count = mir.MntRespCount;
326 		aac_add_container(sc, &mir, 0, uid);
327 		i++;
328 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
329 	aac_release_sync_fib(sc);
330 	mtx_unlock(&sc->aac_io_lock);
331 
332 	/* Register with CAM for the containers */
333 	TAILQ_INIT(&sc->aac_sim_tqh);
334 	aac_container_bus(sc);
335 	/* Register with CAM for the non-DASD devices */
336 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
337 		aac_get_bus_info(sc);
338 
339 	/* poke the bus to actually attach the child devices */
340 	bus_generic_attach(sc->aac_dev);
341 
342 	/* mark the controller up */
343 	sc->aac_state &= ~AAC_STATE_SUSPEND;
344 
345 	/* enable interrupts now */
346 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
347 
348 	mtx_lock(&sc->aac_io_lock);
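	/* start the clock daemon; the first run fires in 60 seconds */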
349 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
350 	mtx_unlock(&sc->aac_io_lock);
351 
352 	return(0);
353 }
354 
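/*
 * Periodic clock daemon: push the current host time to the controller via a
 * SendHostTime FIB, then re-arm the callout to fire again in 30 minutes.
 */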
355 static void
356 aac_daemon(void *arg)
357 {
358 	struct aac_softc *sc;
359 	struct timeval tv;
360 	struct aac_command *cm;
361 	struct aac_fib *fib;
362 
363 	sc = arg;
364 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
365 
366 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
367 	if (callout_pending(&sc->aac_daemontime) ||
368 	    callout_active(&sc->aac_daemontime) == 0)
369 		return;
370 	getmicrotime(&tv);
371 
372 	if (!aacraid_alloc_command(sc, &cm)) {
373 		fib = cm->cm_fib;
374 		cm->cm_timestamp = time_uptime;
375 		cm->cm_datalen = 0;
376 		cm->cm_flags |= AAC_CMD_WAIT;
377 
378 		fib->Header.Size =
379 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
380 		fib->Header.XferState =
381 			AAC_FIBSTATE_HOSTOWNED   |
382 			AAC_FIBSTATE_INITIALISED |
383 			AAC_FIBSTATE_EMPTY	 |
384 			AAC_FIBSTATE_FROMHOST	 |
385 			AAC_FIBSTATE_REXPECTED   |
386 			AAC_FIBSTATE_NORM	 |
387 			AAC_FIBSTATE_ASYNC	 |
388 			AAC_FIBSTATE_FAST_RESPONSE;
389 		fib->Header.Command = SendHostTime;
390 		*(uint32_t *)fib->data = htole32(tv.tv_sec);
391 
392 		aacraid_map_command_sg(cm, NULL, 0, 0);
393 		aacraid_release_command(cm);
394 	}
395 
396 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
397 }
398 
399 void
400 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
401 {
402 
403 	switch (event->ev_type & AAC_EVENT_MASK) {
404 	case AAC_EVENT_CMFREE:
405 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
406 		break;
407 	default:
408 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
409 		    event->ev_type);
410 		break;
411 	}
412 
413 	return;
414 }
415 
416 /*
417  * Request information about container #cid
418  */
419 static int
420 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
421 		       struct aac_mntinforesp *mir, u_int32_t *uid)
422 {
423 	struct aac_command *cm;
424 	struct aac_fib *fib;
425 	struct aac_mntinfo *mi;
426 	struct aac_cnt_config *ccfg;
427 	int rval;
428 
429 	if (sync_fib == NULL) {
430 		if (aacraid_alloc_command(sc, &cm)) {
431 			device_printf(sc->aac_dev,
432 				"Warning, no free command available\n");
433 			return (-1);
434 		}
435 		fib = cm->cm_fib;
436 	} else {
437 		fib = sync_fib;
438 	}
439 
440 	mi = (struct aac_mntinfo *)&fib->data[0];
441 	/* 4KB sector support? 64-bit LBA? */
442 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
443 		mi->Command = VM_NameServeAllBlk;
444 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
445 		mi->Command = VM_NameServe64;
446 	else
447 		mi->Command = VM_NameServe;
448 	mi->MntType = FT_FILESYS;
449 	mi->MntCount = cid;
450 	aac_mntinfo_tole(mi);
451 
452 	if (sync_fib) {
453 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
454 			 sizeof(struct aac_mntinfo))) {
455 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
456 			return (-1);
457 		}
458 	} else {
459 		cm->cm_timestamp = time_uptime;
460 		cm->cm_datalen = 0;
461 
462 		fib->Header.Size =
463 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
464 		fib->Header.XferState =
465 			AAC_FIBSTATE_HOSTOWNED   |
466 			AAC_FIBSTATE_INITIALISED |
467 			AAC_FIBSTATE_EMPTY	 |
468 			AAC_FIBSTATE_FROMHOST	 |
469 			AAC_FIBSTATE_REXPECTED   |
470 			AAC_FIBSTATE_NORM	 |
471 			AAC_FIBSTATE_ASYNC	 |
472 			AAC_FIBSTATE_FAST_RESPONSE;
473 		fib->Header.Command = ContainerCommand;
474 		if (aacraid_wait_command(cm) != 0) {
475 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
476 			aacraid_release_command(cm);
477 			return (-1);
478 		}
479 	}
480 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
481 	aac_mntinforesp_toh(mir);
482 
483 	/* UID */
484 	*uid = cid;
485 	if (mir->MntTable[0].VolType != CT_NONE &&
486 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
487 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
488 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
489 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
490 		}
491 		ccfg = (struct aac_cnt_config *)&fib->data[0];
492 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
493 		ccfg->Command = VM_ContainerConfig;
494 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
495 		ccfg->CTCommand.param[0] = cid;
496 		aac_cnt_config_tole(ccfg);
497 
498 		if (sync_fib) {
499 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
500 				sizeof(struct aac_cnt_config));
501 			aac_cnt_config_toh(ccfg);
502 			if (rval == 0 && ccfg->Command == ST_OK &&
503 				ccfg->CTCommand.param[0] == CT_OK &&
504 				mir->MntTable[0].VolType != CT_PASSTHRU)
505 				*uid = ccfg->CTCommand.param[1];
506 		} else {
507 			fib->Header.Size =
508 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
509 			fib->Header.XferState =
510 				AAC_FIBSTATE_HOSTOWNED   |
511 				AAC_FIBSTATE_INITIALISED |
512 				AAC_FIBSTATE_EMPTY	 |
513 				AAC_FIBSTATE_FROMHOST	 |
514 				AAC_FIBSTATE_REXPECTED   |
515 				AAC_FIBSTATE_NORM	 |
516 				AAC_FIBSTATE_ASYNC	 |
517 				AAC_FIBSTATE_FAST_RESPONSE;
518 			fib->Header.Command = ContainerCommand;
519 			rval = aacraid_wait_command(cm);
520 			aac_cnt_config_toh(ccfg);
521 			if (rval == 0 && ccfg->Command == ST_OK &&
522 				ccfg->CTCommand.param[0] == CT_OK &&
523 				mir->MntTable[0].VolType != CT_PASSTHRU)
524 				*uid = ccfg->CTCommand.param[1];
525 			aacraid_release_command(cm);
526 		}
527 	}
528 
529 	return (0);
530 }
531 
532 /*
533  * Create a device to represent a new container
534  */
535 static void
536 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
537 		  u_int32_t uid)
538 {
539 	struct aac_container *co;
540 
541 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
542 
543 	/*
544 	 * Check container volume type for validity.  Note that many of
545 	 * the possible types may never show up.
546 	 */
547 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
548 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
549 		       M_NOWAIT | M_ZERO);
550 		if (co == NULL) {
551 			panic("Out of memory?!");
552 		}
553 
554 		co->co_found = f;
555 		bcopy(&mir->MntTable[0], &co->co_mntobj,
556 		      sizeof(struct aac_mntobj));
557 		co->co_uid = uid;
558 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
559 	}
560 }
561 
562 /*
563  * Allocate resources associated with (sc)
564  */
565 static int
566 aac_alloc(struct aac_softc *sc)
567 {
568 	bus_size_t maxsize;
569 
570 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
571 
572 	/*
573 	 * Create DMA tag for mapping buffers into controller-addressable space.
574 	 */
575 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
576 			       1, 0, 			/* algnmnt, boundary */
577 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
578 			       BUS_SPACE_MAXADDR :
579 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
580 			       BUS_SPACE_MAXADDR, 	/* highaddr */
581 			       NULL, NULL, 		/* filter, filterarg */
582 			       sc->aac_max_sectors << 9, /* maxsize */
583 			       sc->aac_sg_tablesize,	/* nsegments */
584 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
585 			       BUS_DMA_ALLOCNOW,	/* flags */
586 			       busdma_lock_mutex,	/* lockfunc */
587 			       &sc->aac_io_lock,	/* lockfuncarg */
588 			       &sc->aac_buffer_dmat)) {
589 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
590 		return (ENOMEM);
591 	}
592 
593 	/*
594  * Create DMA tag for mapping FIBs into controller-addressable space.
595 	 */
596 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
597 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
598 			sizeof(struct aac_fib_xporthdr) + 31);
599 	else
600 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
601 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
602 			       1, 0, 			/* algnmnt, boundary */
603 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
604 			       BUS_SPACE_MAXADDR_32BIT :
605 			       0x7fffffff,		/* lowaddr */
606 			       BUS_SPACE_MAXADDR, 	/* highaddr */
607 			       NULL, NULL, 		/* filter, filterarg */
608 			       maxsize,  		/* maxsize */
609 			       1,			/* nsegments */
610 			       maxsize,			/* maxsegsize */
611 			       0,			/* flags */
612 			       NULL, NULL,		/* No locking needed */
613 			       &sc->aac_fib_dmat)) {
614 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
615 		return (ENOMEM);
616 	}
617 
618 	/*
619 	 * Create DMA tag for the common structure and allocate it.
620 	 */
621 	maxsize = sizeof(struct aac_common);
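	/* leave room for the host RRQ: one 32-bit response entry per FIB */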
622 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
623 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
624 			       1, 0,			/* algnmnt, boundary */
625 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
626 			       BUS_SPACE_MAXADDR_32BIT :
627 			       0x7fffffff,		/* lowaddr */
628 			       BUS_SPACE_MAXADDR, 	/* highaddr */
629 			       NULL, NULL, 		/* filter, filterarg */
630 			       maxsize, 		/* maxsize */
631 			       1,			/* nsegments */
632 			       maxsize,			/* maxsegsize */
633 			       0,			/* flags */
634 			       NULL, NULL,		/* No locking needed */
635 			       &sc->aac_common_dmat)) {
636 		device_printf(sc->aac_dev,
637 			      "can't allocate common structure DMA tag\n");
638 		return (ENOMEM);
639 	}
640 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
641 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
642 		device_printf(sc->aac_dev, "can't allocate common structure\n");
643 		return (ENOMEM);
644 	}
645 
646 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
647 			sc->aac_common, maxsize,
648 			aac_common_map, sc, 0);
649 	bzero(sc->aac_common, maxsize);
650 
651 	/* Allocate some FIBs and associated command structs */
652 	TAILQ_INIT(&sc->aac_fibmap_tqh);
653 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
654 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
655 	mtx_lock(&sc->aac_io_lock);
656 	while (sc->total_fibs < sc->aac_max_fibs) {
657 		if (aac_alloc_commands(sc) != 0)
658 			break;
659 	}
660 	mtx_unlock(&sc->aac_io_lock);
661 	if (sc->total_fibs == 0)
662 		return (ENOMEM);
663 
664 	return (0);
665 }
666 
667 /*
668  * Free all of the resources associated with (sc)
669  *
670  * Should not be called if the controller is active.
671  */
672 void
673 aacraid_free(struct aac_softc *sc)
674 {
675 	int i;
676 
677 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
678 
679 	/* remove the control device */
680 	if (sc->aac_dev_t != NULL)
681 		destroy_dev(sc->aac_dev_t);
682 
683 	/* throw away any FIB buffers, discard the FIB DMA tag */
684 	aac_free_commands(sc);
685 	if (sc->aac_fib_dmat)
686 		bus_dma_tag_destroy(sc->aac_fib_dmat);
687 
688 	free(sc->aac_commands, M_AACRAIDBUF);
689 
690 	/* destroy the common area */
691 	if (sc->aac_common) {
692 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
693 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
694 				sc->aac_common_dmamap);
695 	}
696 	if (sc->aac_common_dmat)
697 		bus_dma_tag_destroy(sc->aac_common_dmat);
698 
699 	/* disconnect the interrupt handler */
700 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
701 		if (sc->aac_intr[i])
702 			bus_teardown_intr(sc->aac_dev,
703 				sc->aac_irq[i], sc->aac_intr[i]);
704 		if (sc->aac_irq[i])
705 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
706 				sc->aac_irq_rid[i], sc->aac_irq[i]);
707 		else
708 			break;
709 	}
710 	if (sc->msi_enabled || sc->msi_tupelo)
711 		pci_release_msi(sc->aac_dev);
712 
713 	/* destroy data-transfer DMA tag */
714 	if (sc->aac_buffer_dmat)
715 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
716 
717 	/* destroy the parent DMA tag */
718 	if (sc->aac_parent_dmat)
719 		bus_dma_tag_destroy(sc->aac_parent_dmat);
720 
721 	/* release the register window mapping */
722 	if (sc->aac_regs_res0 != NULL)
723 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
724 				     sc->aac_regs_rid0, sc->aac_regs_res0);
725 	if (sc->aac_regs_res1 != NULL)
726 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
727 				     sc->aac_regs_rid1, sc->aac_regs_res1);
728 }
729 
730 /*
731  * Disconnect from the controller completely, in preparation for unload.
732  */
733 int
734 aacraid_detach(device_t dev)
735 {
736 	struct aac_softc *sc;
737 	struct aac_container *co;
738 	struct aac_sim	*sim;
739 	int error;
740 
741 	sc = device_get_softc(dev);
742 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
743 
744 	callout_drain(&sc->aac_daemontime);
745 	/* Remove the child containers */
746 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
747 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
748 		free(co, M_AACRAIDBUF);
749 	}
750 
751 	/* Remove the CAM SIMs */
752 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
753 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
754 		error = device_delete_child(dev, sim->sim_dev);
755 		if (error)
756 			return (error);
757 		free(sim, M_AACRAIDBUF);
758 	}
759 
760 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
761 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
762 		wakeup(sc->aifthread);
763 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
764 	}
765 
766 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
767 		panic("Cannot shutdown AIF thread");
768 
769 	if ((error = aacraid_shutdown(dev)))
770 		return(error);
771 
772 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
773 
774 	aacraid_free(sc);
775 
776 	mtx_destroy(&sc->aac_io_lock);
777 
778 	return(0);
779 }
780 
781 /*
782  * Bring the controller down to a dormant state and detach all child devices.
783  *
784  * This function is called before detach or system shutdown.
785  *
786  * Note that we can assume that the bioq on the controller is empty, as we won't
787  * allow shutdown if any device is open.
788  */
789 int
790 aacraid_shutdown(device_t dev)
791 {
792 	struct aac_softc *sc;
793 	struct aac_fib *fib;
794 	struct aac_close_command *cc;
795 
796 	sc = device_get_softc(dev);
797 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
798 
799 	sc->aac_state |= AAC_STATE_SUSPEND;
800 
801 	/*
802 	 * Send a Container shutdown followed by a HostShutdown FIB to the
803 	 * controller to convince it that we don't want to talk to it anymore.
804 	 * We've been closed and all I/O has already completed.
805 	 */
806 	device_printf(sc->aac_dev, "shutting down controller...");
807 
808 	mtx_lock(&sc->aac_io_lock);
809 	aac_alloc_sync_fib(sc, &fib);
810 	cc = (struct aac_close_command *)&fib->data[0];
811 
812 	bzero(cc, sizeof(struct aac_close_command));
813 	cc->Command = htole32(VM_CloseAll);
814 	cc->ContainerId = htole32(0xfffffffe);
815 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
816 	    sizeof(struct aac_close_command)))
817 		printf("FAILED.\n");
818 	else
819 		printf("done\n");
820 
821 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
822 	aac_release_sync_fib(sc);
823 	mtx_unlock(&sc->aac_io_lock);
824 
825 	return(0);
826 }
827 
828 /*
829  * Bring the controller to a quiescent state, ready for system suspend.
830  */
831 int
832 aacraid_suspend(device_t dev)
833 {
834 	struct aac_softc *sc;
835 
836 	sc = device_get_softc(dev);
837 
838 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
839 	sc->aac_state |= AAC_STATE_SUSPEND;
840 
841 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
842 	return(0);
843 }
844 
845 /*
846  * Bring the controller back to a state ready for operation.
847  */
848 int
849 aacraid_resume(device_t dev)
850 {
851 	struct aac_softc *sc;
852 
853 	sc = device_get_softc(dev);
854 
855 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
856 	sc->aac_state &= ~AAC_STATE_SUSPEND;
857 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
858 	return(0);
859 }
860 
861 /*
862  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
863  */
864 void
865 aacraid_new_intr_type1(void *arg)
866 {
867 	struct aac_msix_ctx *ctx;
868 	struct aac_softc *sc;
869 	int vector_no;
870 	struct aac_command *cm;
871 	struct aac_fib *fib;
872 	u_int32_t bellbits, bellbits_shifted, index, handle;
873 	int isFastResponse, isAif, noMoreAif, mode;
874 
875 	ctx = (struct aac_msix_ctx *)arg;
876 	sc = ctx->sc;
877 	vector_no = ctx->vector_no;
878 
879 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
880 	mtx_lock(&sc->aac_io_lock);
881 
882 	if (sc->msi_enabled) {
883 		mode = AAC_INT_MODE_MSI;
884 		if (vector_no == 0) {
885 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
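			/* doorbell bit 18 flags a pending AIF, bit 12 a sync command completion */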
886 			if (bellbits & 0x40000)
887 				mode |= AAC_INT_MODE_AIF;
888 			else if (bellbits & 0x1000)
889 				mode |= AAC_INT_MODE_SYNC;
890 		}
891 	} else {
892 		mode = AAC_INT_MODE_INTX;
893 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
894 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
895 			bellbits = AAC_DB_RESPONSE_SENT_NS;
896 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
897 		} else {
898 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
899 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
900 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
901 				mode |= AAC_INT_MODE_AIF;
902 			if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
903 				mode |= AAC_INT_MODE_SYNC;
904 		}
905 		/* ODR readback, Prep #238630 */
906 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
907 	}
908 
909 	if (mode & AAC_INT_MODE_SYNC) {
910 		if (sc->aac_sync_cm) {
911 			cm = sc->aac_sync_cm;
912 			aac_unmap_command(cm);
913 			cm->cm_flags |= AAC_CMD_COMPLETED;
914 			aac_fib_header_toh(&cm->cm_fib->Header);
915 
916 			/* is there a completion handler? */
917 			if (cm->cm_complete != NULL) {
918 				cm->cm_complete(cm);
919 			} else {
920 				/* assume that someone is sleeping on this command */
921 				wakeup(cm);
922 			}
923 			sc->flags &= ~AAC_QUEUE_FRZN;
924 			sc->aac_sync_cm = NULL;
925 		}
926 		if (mode & AAC_INT_MODE_INTX)
927 			mode &= ~AAC_INT_MODE_SYNC;
928 		else
929 			mode = 0;
930 	}
931 
932 	if (mode & AAC_INT_MODE_AIF) {
933 		if (mode & AAC_INT_MODE_INTX) {
934 			aac_request_aif(sc);
935 			mode = 0;
936 		}
937 	}
938 
939 	if (sc->flags & AAC_FLAGS_SYNC_MODE)
940 		mode = 0;
941 
942 	if (mode) {
943 		/* handle async. status */
944 		index = sc->aac_host_rrq_idx[vector_no];
945 		for (;;) {
946 			isFastResponse = isAif = noMoreAif = 0;
947 			/* remove toggle bit (31) */
948 			handle = (le32toh(sc->aac_common->ac_host_rrq[index]) &
949 			    0x7fffffff);
950 			/* check fast response bit (30) */
951 			if (handle & 0x40000000)
952 				isFastResponse = 1;
953 			/* check AIF bit (23) */
954 			else if (handle & 0x00800000)
955 				isAif = TRUE;
956 			handle &= 0x0000ffff;
957 			if (handle == 0)
958 				break;
959 
960 			cm = sc->aac_commands + (handle - 1);
961 			fib = cm->cm_fib;
962 			aac_fib_header_toh(&fib->Header);
963 			sc->aac_rrq_outstanding[vector_no]--;
964 			if (isAif) {
965 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
966 				if (!noMoreAif)
967 					aac_handle_aif(sc, fib);
968 				aac_remove_busy(cm);
969 				aacraid_release_command(cm);
970 			} else {
971 				if (isFastResponse) {
972 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
973 					*((u_int32_t *)(fib->data)) = htole32(ST_OK);
974 					cm->cm_flags |= AAC_CMD_FASTRESP;
975 				}
976 				aac_remove_busy(cm);
977 				aac_unmap_command(cm);
978 				cm->cm_flags |= AAC_CMD_COMPLETED;
979 
980 				/* is there a completion handler? */
981 				if (cm->cm_complete != NULL) {
982 					cm->cm_complete(cm);
983 				} else {
984 					/* assume that someone is sleeping on this command */
985 					wakeup(cm);
986 				}
987 				sc->flags &= ~AAC_QUEUE_FRZN;
988 			}
989 
990 			sc->aac_common->ac_host_rrq[index++] = 0;
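			/* wrap the index within this vector's slice of the host RRQ */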
991 			if (index == (vector_no + 1) * sc->aac_vector_cap)
992 				index = vector_no * sc->aac_vector_cap;
993 			sc->aac_host_rrq_idx[vector_no] = index;
994 
995 			if ((isAif && !noMoreAif) || sc->aif_pending)
996 				aac_request_aif(sc);
997 		}
998 	}
999 
1000 	if (mode & AAC_INT_MODE_AIF) {
1001 		aac_request_aif(sc);
1002 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1003 		mode = 0;
1004 	}
1005 
1006 	/* see if we can start some more I/O */
1007 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1008 		aacraid_startio(sc);
1009 	mtx_unlock(&sc->aac_io_lock);
1010 }
1011 
1012 /*
1013  * Handle notification of one or more FIBs coming from the controller.
1014  */
1015 static void
1016 aac_command_thread(struct aac_softc *sc)
1017 {
1018 	int retval;
1019 
1020 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1021 
1022 	mtx_lock(&sc->aac_io_lock);
1023 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1024 
1025 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1026 
1027 		retval = 0;
1028 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1029 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1030 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1031 
1032 		/*
1033 		 * First see if any FIBs need to be allocated.
1034 		 */
1035 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1036 			aac_alloc_commands(sc);
1037 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1038 			aacraid_startio(sc);
1039 		}
1040 
1041 		/*
1042 		 * While we're here, check to see if any commands are stuck.
1043 		 * This is pretty low-priority, so it's ok if it doesn't
1044 		 * always fire.
1045 		 */
1046 		if (retval == EWOULDBLOCK)
1047 			aac_timeout(sc);
1048 
1049 		/* Check the hardware printf message buffer */
1050 		if (sc->aac_common->ac_printf[0] != 0)
1051 			aac_print_printf(sc);
1052 	}
1053 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1054 	mtx_unlock(&sc->aac_io_lock);
1055 	wakeup(sc->aac_dev);
1056 
1057 	aac_kthread_exit(0);
1058 }
1059 
1060 /*
1061  * Submit a command to the controller, return when it completes.
1062  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1063  *     be stuck here forever.  At the same time, signals are not caught
1064  *     because there is a risk that a signal could wakeup the sleep before
1065  *     the card has a chance to complete the command.  Since there is no way
1066  *     to cancel a command that is in progress, we can't protect against the
1067  *     card completing a command late and spamming the command and data
1068  *     memory.  So, we are held hostage until the command completes.
1069  */
1070 int
1071 aacraid_wait_command(struct aac_command *cm)
1072 {
1073 	struct aac_softc *sc;
1074 	int error;
1075 
1076 	sc = cm->cm_sc;
1077 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1078 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1079 
1080 	/* Put the command on the ready queue and get things going */
1081 	aac_enqueue_ready(cm);
1082 	aacraid_startio(sc);
1083 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1084 	return(error);
1085 }
1086 
1087 /*
1088  * Command Buffer Management
1089  */
1090 
1091 /*
1092  * Allocate a command.
1093  */
1094 int
1095 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1096 {
1097 	struct aac_command *cm;
1098 
1099 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1100 
1101 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1102 		if (sc->total_fibs < sc->aac_max_fibs) {
1103 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1104 			wakeup(sc->aifthread);
1105 		}
1106 		return (EBUSY);
1107 	}
1108 
1109 	*cmp = cm;
1110 	return(0);
1111 }
1112 
1113 /*
1114  * Release a command back to the freelist.
1115  */
1116 void
1117 aacraid_release_command(struct aac_command *cm)
1118 {
1119 	struct aac_event *event;
1120 	struct aac_softc *sc;
1121 
1122 	sc = cm->cm_sc;
1123 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1124 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1125 
1126 	/* (re)initialize the command/FIB */
1127 	cm->cm_sgtable = NULL;
1128 	cm->cm_flags = 0;
1129 	cm->cm_complete = NULL;
1130 	cm->cm_ccb = NULL;
1131 	cm->cm_passthr_dmat = 0;
1132 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1133 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1134 	cm->cm_fib->Header.Unused = 0;
1135 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1136 
1137 	/*
1138 	 * These are duplicated in aac_start to cover the case where an
1139 	 * intermediate stage may have destroyed them.  They're left
1140 	 * initialized here for debugging purposes only.
1141 	 */
1142 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1143 	cm->cm_fib->Header.Handle = 0;
1144 
1145 	aac_enqueue_free(cm);
1146 
1147 	/*
1148 	 * Dequeue all events so that there's no risk of events getting
1149 	 * stranded.
1150 	 */
1151 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1152 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1153 		event->ev_callback(sc, event, event->ev_arg);
1154 	}
1155 }
1156 
1157 /*
1158  * Map helper for command/FIB allocation.
1159  */
1160 static void
1161 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1162 {
1163 	uint64_t	*fibphys;
1164 
1165 	fibphys = (uint64_t *)arg;
1166 
1167 	*fibphys = segs[0].ds_addr;
1168 }
1169 
1170 /*
1171  * Allocate and initialize commands/FIBs for this adapter.
1172  */
1173 static int
1174 aac_alloc_commands(struct aac_softc *sc)
1175 {
1176 	struct aac_command *cm;
1177 	struct aac_fibmap *fm;
1178 	uint64_t fibphys;
1179 	int i, error;
1180 	u_int32_t maxsize;
1181 
1182 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1183 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1184 
1185 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1186 		return (ENOMEM);
1187 
1188 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1189 	if (fm == NULL)
1190 		return (ENOMEM);
1191 
1192 	mtx_unlock(&sc->aac_io_lock);
1193 	/* allocate the FIBs in DMAable memory and load them */
1194 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1195 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1196 		device_printf(sc->aac_dev,
1197 			      "Not enough contiguous memory available.\n");
1198 		free(fm, M_AACRAIDBUF);
1199 		mtx_lock(&sc->aac_io_lock);
1200 		return (ENOMEM);
1201 	}
1202 
1203 	maxsize = sc->aac_max_fib_size + 31;
1204 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1205 		maxsize += sizeof(struct aac_fib_xporthdr);
1206 	/* Ignore errors since this doesn't bounce */
1207 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1208 			      sc->aac_max_fibs_alloc * maxsize,
1209 			      aac_map_command_helper, &fibphys, 0);
1210 	mtx_lock(&sc->aac_io_lock);
1211 
1212 	/* initialize constant fields in the command structure */
1213 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1214 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1215 		cm = sc->aac_commands + sc->total_fibs;
1216 		fm->aac_commands = cm;
1217 		cm->cm_sc = sc;
1218 		cm->cm_fib = (struct aac_fib *)
1219 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1220 		cm->cm_fibphys = fibphys + i * maxsize;
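		/*
		 * Align each FIB on a 32-byte boundary; with the type-1 comm.
		 * interface, also leave room for the transport header that
		 * precedes the FIB.
		 */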
1221 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1222 			u_int64_t fibphys_aligned;
1223 			fibphys_aligned =
1224 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1225 			cm->cm_fib = (struct aac_fib *)
1226 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1227 			cm->cm_fibphys = fibphys_aligned;
1228 		} else {
1229 			u_int64_t fibphys_aligned;
1230 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1231 			cm->cm_fib = (struct aac_fib *)
1232 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1233 			cm->cm_fibphys = fibphys_aligned;
1234 		}
1235 		cm->cm_index = sc->total_fibs;
1236 
1237 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1238 					       &cm->cm_datamap)) != 0)
1239 			break;
1240 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1241 			aacraid_release_command(cm);
1242 		sc->total_fibs++;
1243 	}
1244 
1245 	if (i > 0) {
1246 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1247 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1248 		return (0);
1249 	}
1250 
1251 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1252 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1253 	free(fm, M_AACRAIDBUF);
1254 	return (ENOMEM);
1255 }
1256 
1257 /*
1258  * Free FIBs owned by this adapter.
1259  */
1260 static void
1261 aac_free_commands(struct aac_softc *sc)
1262 {
1263 	struct aac_fibmap *fm;
1264 	struct aac_command *cm;
1265 	int i;
1266 
1267 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1268 
1269 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1270 
1271 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1272 		/*
1273 		 * We check against total_fibs to handle partially
1274 		 * allocated blocks.
1275 		 */
1276 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1277 			cm = fm->aac_commands + i;
1278 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1279 		}
1280 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1281 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1282 		free(fm, M_AACRAIDBUF);
1283 	}
1284 }
1285 
1286 /*
1287  * Command-mapping helper function - populate this command's s/g table.
1288  */
1289 void
1290 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1291 {
1292 	struct aac_softc *sc;
1293 	struct aac_command *cm;
1294 	struct aac_fib *fib;
1295 	int i;
1296 
1297 	cm = (struct aac_command *)arg;
1298 	sc = cm->cm_sc;
1299 	fib = cm->cm_fib;
1300 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1301 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1302 
1303 	if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm)
1304 		return;
1305 
1306 	/* copy into the FIB */
1307 	if (cm->cm_sgtable != NULL) {
1308 		if (fib->Header.Command == RawIo2) {
1309 			struct aac_raw_io2 *raw;
1310 			struct aac_sge_ieee1212 *sg;
1311 			u_int32_t min_size = PAGE_SIZE, cur_size;
1312 			int conformable = TRUE;
1313 
1314 			raw = (struct aac_raw_io2 *)&fib->data[0];
1315 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1316 			raw->sgeCnt = nseg;
1317 
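			/*
			 * A RawIo2 SGL is "conformant" when all entries except the
			 * first and the last have the same length; note the nominal
			 * and minimum sizes while filling in the table.
			 */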
1318 			for (i = 0; i < nseg; i++) {
1319 				cur_size = segs[i].ds_len;
1320 				sg[i].addrHigh = 0;
1321 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1322 				sg[i].length = cur_size;
1323 				sg[i].flags = 0;
1324 				if (i == 0) {
1325 					raw->sgeFirstSize = cur_size;
1326 				} else if (i == 1) {
1327 					raw->sgeNominalSize = cur_size;
1328 					min_size = cur_size;
1329 				} else if ((i+1) < nseg &&
1330 					cur_size != raw->sgeNominalSize) {
1331 					conformable = FALSE;
1332 					if (cur_size < min_size)
1333 						min_size = cur_size;
1334 				}
1335 			}
1336 
1337 			/* not conformable: evaluate required sg elements */
1338 			if (!conformable) {
1339 				int j, err_found, nseg_new = nseg;
1340 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1341 					err_found = FALSE;
1342 					nseg_new = 2;
1343 					for (j = 1; j < nseg - 1; ++j) {
1344 						if (sg[j].length % (i*PAGE_SIZE)) {
1345 							err_found = TRUE;
1346 							break;
1347 						}
1348 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1349 					}
1350 					if (!err_found)
1351 						break;
1352 				}
1353 				if (i > 0 && nseg_new <= sc->aac_sg_tablesize &&
1354 					!(sc->hint_flags & 4))
1355 					nseg = aac_convert_sgraw2(sc,
1356 						raw, i, nseg, nseg_new);
1357 			} else {
1358 				raw->flags |= RIO2_SGL_CONFORMANT;
1359 			}
1360 
1361 			for (i = 0; i < nseg; i++)
1362 				aac_sge_ieee1212_tole(sg + i);
1363 			aac_raw_io2_tole(raw);
1364 
1365 			/* update the FIB size for the s/g count */
1366 			fib->Header.Size += nseg *
1367 				sizeof(struct aac_sge_ieee1212);
1368 
1369 		} else if (fib->Header.Command == RawIo) {
1370 			struct aac_sg_tableraw *sg;
1371 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1372 			sg->SgCount = htole32(nseg);
1373 			for (i = 0; i < nseg; i++) {
1374 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1375 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1376 				sg->SgEntryRaw[i].Next = 0;
1377 				sg->SgEntryRaw[i].Prev = 0;
1378 				sg->SgEntryRaw[i].Flags = 0;
1379 				aac_sg_entryraw_tole(&sg->SgEntryRaw[i]);
1380 			}
1381 			aac_raw_io_tole((struct aac_raw_io *)&fib->data[0]);
1382 			/* update the FIB size for the s/g count */
1383 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1384 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1385 			struct aac_sg_table *sg;
1386 			sg = cm->cm_sgtable;
1387 			sg->SgCount = htole32(nseg);
1388 			for (i = 0; i < nseg; i++) {
1389 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1390 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1391 				aac_sg_entry_tole(&sg->SgEntry[i]);
1392 			}
1393 			/* update the FIB size for the s/g count */
1394 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1395 		} else {
1396 			struct aac_sg_table64 *sg;
1397 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1398 			sg->SgCount = htole32(nseg);
1399 			for (i = 0; i < nseg; i++) {
1400 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1401 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1402 				aac_sg_entry64_tole(&sg->SgEntry64[i]);
1403 			}
1404 			/* update the FIB size for the s/g count */
1405 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1406 		}
1407 	}
1408 
1409 	/* Fix up the address values in the FIB.  Use the command array index
1410 	 * instead of a pointer since these fields are only 32 bits.  Shift
1411 	 * the SenderFibAddress over to make room for the fast response bit
1412 	 * and for the AIF bit
1413 	 */
1414 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1415 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1416 
1417 	/* save the command index (+1) in the handle for speedy reverse-lookup */
1418 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1419 
1420 	if (cm->cm_passthr_dmat == 0) {
1421 		if (cm->cm_flags & AAC_CMD_DATAIN)
1422 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1423 							BUS_DMASYNC_PREREAD);
1424 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1425 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1426 							BUS_DMASYNC_PREWRITE);
1427 	}
1428 
1429 	cm->cm_flags |= AAC_CMD_MAPPED;
1430 
1431 	if (cm->cm_flags & AAC_CMD_WAIT) {
1432 		aac_fib_header_tole(&fib->Header);
1433 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1434 			cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1435 	} else if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1436 		u_int32_t wait = 0;
1437 		sc->aac_sync_cm = cm;
1438 		aac_fib_header_tole(&fib->Header);
1439 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
1440 			cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1441 	} else {
1442 		int count = 10000000L;
1443 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1444 			if (--count == 0) {
1445 				aac_unmap_command(cm);
1446 				sc->flags |= AAC_QUEUE_FRZN;
1447 				aac_requeue_ready(cm);
1448 			}
1449 			DELAY(5);			/* wait 5 usec. */
1450 		}
1451 	}
1452 }
1453 
1454 
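/*
 * Rebuild a non-conformant RawIo2 scatter/gather list so that all
 * intermediate entries have a uniform length of (pages * PAGE_SIZE),
 * which allows RIO2_SGL_CONFORMANT to be set.
 */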
1455 static int
1456 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1457 				   int pages, int nseg, int nseg_new)
1458 {
1459 	struct aac_sge_ieee1212 *sge;
1460 	int i, j, pos;
1461 	u_int32_t addr_low;
1462 
1463 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1464 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1465 	if (sge == NULL)
1466 		return nseg;
1467 
1468 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1469 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1470 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1471 			sge[pos].addrLow = addr_low;
1472 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1473 			if (addr_low < raw->sge[i].addrLow)
1474 				sge[pos].addrHigh++;
1475 			sge[pos].length = pages * PAGE_SIZE;
1476 			sge[pos].flags = 0;
1477 			pos++;
1478 		}
1479 	}
1480 	sge[pos] = raw->sge[nseg-1];
1481 	for (i = 1; i < nseg_new; ++i)
1482 		raw->sge[i] = sge[i];
1483 
1484 	free(sge, M_AACRAIDBUF);
1485 	raw->sgeCnt = nseg_new;
1486 	raw->flags |= RIO2_SGL_CONFORMANT;
1487 	raw->sgeNominalSize = pages * PAGE_SIZE;
1488 	return nseg_new;
1489 }
1490 
1491 
1492 /*
1493  * Unmap a command from controller-visible space.
1494  */
1495 static void
1496 aac_unmap_command(struct aac_command *cm)
1497 {
1498 	struct aac_softc *sc;
1499 
1500 	sc = cm->cm_sc;
1501 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1502 
1503 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1504 		return;
1505 
1506 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1507 		if (cm->cm_flags & AAC_CMD_DATAIN)
1508 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1509 					BUS_DMASYNC_POSTREAD);
1510 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1511 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1512 					BUS_DMASYNC_POSTWRITE);
1513 
1514 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1515 	}
1516 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1517 }
1518 
1519 /*
1520  * Hardware Interface
1521  */
1522 
1523 /*
1524  * Initialize the adapter.
1525  */
1526 static void
1527 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1528 {
1529 	struct aac_softc *sc;
1530 
1531 	sc = (struct aac_softc *)arg;
1532 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1533 
1534 	sc->aac_common_busaddr = segs[0].ds_addr;
1535 }
1536 
1537 static int
1538 aac_check_firmware(struct aac_softc *sc)
1539 {
1540 	u_int32_t code, major, minor, maxsize;
1541 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1542 	time_t then;
1543 
1544 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1545 
1546 	/* check if flash update is running */
1547 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1548 		then = time_uptime;
1549 		do {
1550 			code = AAC_GET_FWSTATUS(sc);
1551 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1552 				device_printf(sc->aac_dev,
1553 						  "FATAL: controller not coming ready, "
1554 						   "status %x\n", code);
1555 				return(ENXIO);
1556 			}
1557 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1558 		/*
1559 		 * Delay 10 seconds. The firmware is performing a soft reset right
1560 		 * now, so do not read the scratch pad register during this time.
1561 		 */
1562 		waitCount = 10 * 10000;
1563 		while (waitCount) {
1564 			DELAY(100);		/* delay 100 microseconds */
1565 			waitCount--;
1566 		}
1567 	}
1568 
1569 	/*
1570 	 * Wait for the adapter to come ready.
1571 	 */
1572 	then = time_uptime;
1573 	do {
1574 		code = AAC_GET_FWSTATUS(sc);
1575 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1576 			device_printf(sc->aac_dev,
1577 				      "FATAL: controller not coming ready, "
1578 					   "status %x\n", code);
1579 			return(ENXIO);
1580 		}
1581 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1582 
1583 	/*
1584 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1585 	 * firmware version 1.x are not compatible with this driver.
1586 	 */
1587 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1588 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1589 				     NULL, NULL)) {
1590 			device_printf(sc->aac_dev,
1591 				      "Error reading firmware version\n");
1592 			return (EIO);
1593 		}
1594 
1595 		/* These numbers are stored as ASCII! */
1596 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1597 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1598 		if (major == 1) {
1599 			device_printf(sc->aac_dev,
1600 			    "Firmware version %d.%d is not supported.\n",
1601 			    major, minor);
1602 			return (EINVAL);
1603 		}
1604 	}
1605 	/*
1606 	 * Retrieve the capabilities/supported options word so we know what
1607 	 * work-arounds to enable.  Some firmware revs don't support this
1608 	 * command.
1609 	 */
1610 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1611 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1612 			device_printf(sc->aac_dev,
1613 			     "RequestAdapterInfo failed\n");
1614 			return (EIO);
1615 		}
1616 	} else {
1617 		options = AAC_GET_MAILBOX(sc, 1);
1618 		atu_size = AAC_GET_MAILBOX(sc, 2);
1619 		sc->supported_options = options;
1620 		sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3);
1621 
1622 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1623 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1624 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1625 		if (options & AAC_SUPPORTED_NONDASD)
1626 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1627 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1628 			&& (sizeof(bus_addr_t) > 4)
1629 			&& (sc->hint_flags & 0x1)) {
1630 			device_printf(sc->aac_dev,
1631 			    "Enabling 64-bit address support\n");
1632 			sc->flags |= AAC_FLAGS_SG_64BIT;
1633 		}
1634 		if (sc->aac_if.aif_send_command) {
1635 			if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1636 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1637 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1638 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1639 			else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1640 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1641 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1642 		}
1643 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1644 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1645 	}
1646 
1647 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1648 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1649 		return (ENXIO);
1650 	}
1651 
1652 	if (sc->hint_flags & 2) {
1653 		device_printf(sc->aac_dev,
1654 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1655 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1656 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1657 		device_printf(sc->aac_dev,
1658 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1659 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1660 	}
1661 
1662 	/* Check for broken hardware that supports a lower number of commands */
1663 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1664 
1665 	/* Remap mem. resource, if required */
1666 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1667 		bus_release_resource(
1668 			sc->aac_dev, SYS_RES_MEMORY,
1669 			sc->aac_regs_rid0, sc->aac_regs_res0);
1670 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1671 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1672 			atu_size, RF_ACTIVE);
1673 		if (sc->aac_regs_res0 == NULL) {
1674 			sc->aac_regs_res0 = bus_alloc_resource_any(
1675 				sc->aac_dev, SYS_RES_MEMORY,
1676 				&sc->aac_regs_rid0, RF_ACTIVE);
1677 			if (sc->aac_regs_res0 == NULL) {
1678 				device_printf(sc->aac_dev,
1679 					"couldn't allocate register window\n");
1680 				return (ENXIO);
1681 			}
1682 		}
1683 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1684 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1685 	}
1686 
1687 	/* Read preferred settings */
1688 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1689 	sc->aac_max_sectors = 128;				/* 64KB */
1690 	sc->aac_max_aif = 1;
1691 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1692 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1693 		 - sizeof(struct aac_blockwrite64))
1694 		 / sizeof(struct aac_sg_entry64);
1695 	else
1696 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1697 		 - sizeof(struct aac_blockwrite))
1698 		 / sizeof(struct aac_sg_entry);
1699 
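	/*
	 * Ask the firmware for its preferred settings; the reply mailboxes
	 * carry the maximum FIB size, maximum sectors, s/g table size,
	 * maximum FIB count, maximum AIF count and, for the type-2 comm.
	 * interface, the number of MSI-X vectors.
	 */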
1700 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1701 		options = AAC_GET_MAILBOX(sc, 1);
1702 		sc->aac_max_fib_size = (options & 0xFFFF);
1703 		sc->aac_max_sectors = (options >> 16) << 1;
1704 		options = AAC_GET_MAILBOX(sc, 2);
1705 		sc->aac_sg_tablesize = (options >> 16);
1706 		options = AAC_GET_MAILBOX(sc, 3);
1707 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1708 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1709 			sc->aac_max_fibs = (options & 0xFFFF);
1710 		options = AAC_GET_MAILBOX(sc, 4);
1711 		sc->aac_max_aif = (options & 0xFFFF);
1712 		options = AAC_GET_MAILBOX(sc, 5);
1713 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1714 	}
1715 
1716 	maxsize = sc->aac_max_fib_size + 31;
1717 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1718 		maxsize += sizeof(struct aac_fib_xporthdr);
1719 	if (maxsize > PAGE_SIZE) {
1720 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1721 		maxsize = PAGE_SIZE;
1722 	}
1723 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1724 
1725 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1726 		sc->flags |= AAC_FLAGS_RAW_IO;
1727 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1728 	}
1729 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1730 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1731 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1732 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1733 	}
1734 
1735 #ifdef AACRAID_DEBUG
1736 	aacraid_get_fw_debug_buffer(sc);
1737 #endif
1738 	return (0);
1739 }
1740 
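/*
 * Initialise the adapter: fill in the init structure describing the shared
 * data areas, hand it to the firmware and check the resulting configuration.
 */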
1741 static int
1742 aac_init(struct aac_softc *sc)
1743 {
1744 	struct aac_adapter_init	*ip;
1745 	int i, error;
1746 
1747 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1748 
1749 	/* reset rrq index */
1750 	sc->aac_fibs_pushed_no = 0;
1751 	for (i = 0; i < sc->aac_max_msix; i++)
1752 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1753 
1754 	/*
1755 	 * Fill in the init structure.  This tells the adapter about the
1756 	 * physical location of various important shared data structures.
1757 	 */
1758 	ip = &sc->aac_common->ac_init;
1759 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1760 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1761 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1762 		sc->flags |= AAC_FLAGS_RAW_IO;
1763 	}
1764 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1765 
1766 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1767 					 offsetof(struct aac_common, ac_fibs);
1768 	ip->AdapterFibsVirtualAddress = 0;
1769 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1770 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1771 
1772 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1773 				  offsetof(struct aac_common, ac_printf);
1774 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1775 
1776 	/*
1777 	 * The adapter assumes that pages are 4K in size, except on some
1778 	 * broken firmware versions that do the page->byte conversion twice,
1779 	 * so the value is effectively interpreted in 16MB (2^24) units.
1780 	 * Round up, since the granularity is so coarse.
1781 	 */
1782 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1783 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1784 		ip->HostPhysMemPages =
1785 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1786 	}
1787 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1788 
1789 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1790 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1791 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1792 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1793 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1794 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1795 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1796 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1797 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1798 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1799 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1800 	}
1801 	ip->MaxNumAif = sc->aac_max_aif;
1802 	ip->HostRRQ_AddrLow =
1803 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1804 	/* always 32-bit address */
1805 	ip->HostRRQ_AddrHigh = 0;
1806 
1807 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1808 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1809 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1810 		device_printf(sc->aac_dev, "Power Management enabled\n");
1811 	}
1812 
1813 	ip->MaxIoCommands = sc->aac_max_fibs;
1814 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1815 	ip->MaxFibSize = sc->aac_max_fib_size;
1816 
1817 	aac_adapter_init_tole(ip);
1818 
1819 	/*
1820 	 * Do controller-type-specific initialisation
1821 	 */
1822 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1823 
1824 	/*
1825 	 * Give the init structure to the controller.
1826 	 */
1827 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1828 			     sc->aac_common_busaddr +
1829 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1830 			     NULL, NULL)) {
1831 		device_printf(sc->aac_dev,
1832 			      "error establishing init structure\n");
1833 		error = EIO;
1834 		goto out;
1835 	}
1836 
1837 	/*
1838 	 * Check configuration issues
1839 	 */
1840 	if ((error = aac_check_config(sc)) != 0)
1841 		goto out;
1842 
1843 	error = 0;
1844 out:
1845 	return(error);
1846 }
1847 
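/*
 * Select the interrupt mode (MSI-X, MSI or legacy INTx) based on the
 * firmware-reported vector count and what the PCI layer can provide, and
 * derive the per-vector outstanding-FIB budget.
 */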
1848 static void
1849 aac_define_int_mode(struct aac_softc *sc)
1850 {
1851 	device_t dev;
1852 	int cap, msi_count, error = 0;
1853 	uint32_t val;
1854 
1855 	dev = sc->aac_dev;
1856 
1857 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1858 		device_printf(dev, "using line interrupts\n");
1859 		sc->aac_max_msix = 1;
1860 		sc->aac_vector_cap = sc->aac_max_fibs;
1861 		return;
1862 	}
1863 
1864 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1865 	if (sc->aac_max_msix == 0) {
1866 		if (sc->aac_hwif == AAC_HWIF_SRC) {
1867 			msi_count = 1;
1868 			if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1869 				device_printf(dev, "alloc msi failed - err=%d; "
1870 				    "will use INTx\n", error);
1871 				pci_release_msi(dev);
1872 			} else {
1873 				sc->msi_tupelo = TRUE;
1874 			}
1875 		}
1876 		if (sc->msi_tupelo)
1877 			device_printf(dev, "using MSI interrupts\n");
1878 		else
1879 			device_printf(dev, "using line interrupts\n");
1880 
1881 		sc->aac_max_msix = 1;
1882 		sc->aac_vector_cap = sc->aac_max_fibs;
1883 		return;
1884 	}
1885 
1886 	/* OS capability */
1887 	msi_count = pci_msix_count(dev);
1888 	if (msi_count > AAC_MAX_MSIX)
1889 		msi_count = AAC_MAX_MSIX;
1890 	if (msi_count > sc->aac_max_msix)
1891 		msi_count = sc->aac_max_msix;
1892 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1893 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1894 				   "will try MSI\n", msi_count, error);
1895 		pci_release_msi(dev);
1896 	} else {
1897 		sc->msi_enabled = TRUE;
1898 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1899 			msi_count);
1900 	}
1901 
1902 	if (!sc->msi_enabled) {
1903 		msi_count = 1;
1904 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1905 			device_printf(dev, "alloc msi failed - err=%d; "
1906 				           "will use INTx\n", error);
1907 			pci_release_msi(dev);
1908 		} else {
1909 			sc->msi_enabled = TRUE;
1910 			device_printf(dev, "using MSI interrupts\n");
1911 		}
1912 	}
1913 
1914 	if (sc->msi_enabled) {
1915 		/* now read controller capability from PCI config. space */
1916 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1917 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1918 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1919 			pci_release_msi(dev);
1920 			sc->msi_enabled = FALSE;
1921 		}
1922 	}
1923 
1924 	if (!sc->msi_enabled) {
1925 		device_printf(dev, "using legacy interrupts\n");
1926 		sc->aac_max_msix = 1;
1927 	} else {
1928 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1929 		if (sc->aac_max_msix > msi_count)
1930 			sc->aac_max_msix = msi_count;
1931 	}
1932 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1933 
1934 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1935 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1936 }
1937 
1938 static int
1939 aac_find_pci_capability(struct aac_softc *sc, int cap)
1940 {
1941 	device_t dev;
1942 	uint32_t status;
1943 	uint8_t ptr;
1944 
1945 	dev = sc->aac_dev;
1946 
1947 	status = pci_read_config(dev, PCIR_STATUS, 2);
1948 	if (!(status & PCIM_STATUS_CAPPRESENT))
1949 		return (0);
1950 
1951 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1952 	switch (status & PCIM_HDRTYPE) {
1953 	case 0:
1954 	case 1:
1955 		ptr = PCIR_CAP_PTR;
1956 		break;
1957 	case 2:
1958 		ptr = PCIR_CAP_PTR_2;
1959 		break;
1960 	default:
1961 		return (0);
1963 	}
1964 	ptr = pci_read_config(dev, ptr, 1);
1965 
1966 	while (ptr != 0) {
1967 		int next, val;
1968 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1969 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1970 		if (val == cap)
1971 			return (ptr);
1972 		ptr = next;
1973 	}
1974 
1975 	return (0);
1976 }
1977 
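/*
 * Allocate and hook up one IRQ resource per configured vector; resource IDs
 * start at 1 for MSI/MSI-X and at 0 for INTx.
 */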
1978 static int
1979 aac_setup_intr(struct aac_softc *sc)
1980 {
1981 	int i, msi_count, rid;
1982 	struct resource *res;
1983 	void *tag;
1984 
1985 	msi_count = sc->aac_max_msix;
1986 	rid = ((sc->msi_enabled || sc->msi_tupelo)? 1:0);
1987 	rid = ((sc->msi_enabled || sc->msi_tupelo) ? 1 : 0);
1988 	for (i = 0; i < msi_count; i++, rid++) {
1989 		if ((res = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, &rid,
1990 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1991 			device_printf(sc->aac_dev, "can't allocate interrupt\n");
1992 			return (EINVAL);
1993 		}
1994 		sc->aac_irq_rid[i] = rid;
1995 		sc->aac_irq[i] = res;
1996 		if (aac_bus_setup_intr(sc->aac_dev, res,
1997 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1998 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1999 			device_printf(sc->aac_dev, "can't set up interrupt\n");
2000 			return (EINVAL);
2001 		}
2002 		sc->aac_msix[i].vector_no = i;
2003 		sc->aac_msix[i].sc = sc;
2004 		sc->aac_intr[i] = tag;
2005 	}
2006 
2007 	return (0);
2008 }
2009 
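/*
 * Query the controller's configuration status and auto-commit the
 * configuration if the firmware reports that it is safe to do so.
 */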
2010 static int
2011 aac_check_config(struct aac_softc *sc)
2012 {
2013 	struct aac_fib *fib;
2014 	struct aac_cnt_config *ccfg;
2015 	struct aac_cf_status_hdr *cf_shdr;
2016 	int rval;
2017 
2018 	mtx_lock(&sc->aac_io_lock);
2019 	aac_alloc_sync_fib(sc, &fib);
2020 
2021 	ccfg = (struct aac_cnt_config *)&fib->data[0];
2022 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2023 	ccfg->Command = VM_ContainerConfig;
2024 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2025 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2026 
2027 	aac_cnt_config_tole(ccfg);
2028 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2029 		sizeof (struct aac_cnt_config));
2030 	aac_cnt_config_toh(ccfg);
2031 
2032 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2033 	if (rval == 0 && ccfg->Command == ST_OK &&
2034 		ccfg->CTCommand.param[0] == CT_OK) {
2035 		if (le32toh(cf_shdr->action) <= CFACT_PAUSE) {
2036 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2037 			ccfg->Command = VM_ContainerConfig;
2038 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2039 
2040 			aac_cnt_config_tole(ccfg);
2041 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2042 				sizeof (struct aac_cnt_config));
2043 			aac_cnt_config_toh(ccfg);
2044 
2045 			if (rval == 0 && ccfg->Command == ST_OK &&
2046 				ccfg->CTCommand.param[0] == CT_OK) {
2047 				/* successful completion */
2048 				rval = 0;
2049 			} else {
2050 				/* auto commit aborted due to error(s) */
2051 				rval = -2;
2052 			}
2053 		} else {
2054 			/* auto commit aborted due to adapter indicating
2055 			   config. issues too dangerous to auto commit  */
2056 			rval = -3;
2057 		}
2058 	} else {
2059 		/* error */
2060 		rval = -1;
2061 	}
2062 
2063 	aac_release_sync_fib(sc);
2064 	mtx_unlock(&sc->aac_io_lock);
2065 	return(rval);
2066 }
2067 
2068 /*
2069  * Send a synchronous command to the controller and wait for a result.
2070  * Indicate if the controller completed the command with an error status.
2071  */
2072 int
2073 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2074 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2075 		 u_int32_t *sp, u_int32_t *r1)
2076 {
2077 	time_t then;
2078 	u_int32_t status;
2079 
2080 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2081 
2082 	/* populate the mailbox */
2083 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2084 
2085 	/* ensure the sync command doorbell flag is cleared */
2086 	if (!sc->msi_enabled)
2087 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2088 
2089 	/* then set it to signal the adapter */
2090 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2091 
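	/* wait for completion, unless this is a SYNCFIB issued with a zeroed status word */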
2092 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2093 		/* spin waiting for the command to complete */
2094 		then = time_uptime;
2095 		do {
2096 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2097 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2098 				return(EIO);
2099 			}
2100 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2101 
2102 		/* clear the completion flag */
2103 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2104 
2105 		/* get the command status */
2106 		status = AAC_GET_MAILBOX(sc, 0);
2107 		if (sp != NULL)
2108 			*sp = status;
2109 
2110 		/* return parameter */
2111 		if (r1 != NULL)
2112 			*r1 = AAC_GET_MAILBOX(sc, 1);
2113 
2114 		if (status != AAC_SRB_STS_SUCCESS)
2115 			return (-1);
2116 	}
2117 	return(0);
2118 }
2119 
2120 static int
2121 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2122 		 struct aac_fib *fib, u_int16_t datasize)
2123 {
2124 	uint32_t ReceiverFibAddress;
2125 
2126 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2127 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2128 
2129 	if (datasize > AAC_FIB_DATASIZE)
2130 		return(EINVAL);
2131 
2132 	/*
2133 	 * Set up the sync FIB
2134 	 */
2135 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2136 				AAC_FIBSTATE_INITIALISED |
2137 				AAC_FIBSTATE_EMPTY;
2138 	fib->Header.XferState |= xferstate;
2139 	fib->Header.Command = command;
2140 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2141 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2142 	fib->Header.SenderSize = sizeof(struct aac_fib);
2143 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2144 	ReceiverFibAddress = sc->aac_common_busaddr +
2145 		offsetof(struct aac_common, ac_sync_fib);
2146 	fib->Header.u.ReceiverFibAddress = ReceiverFibAddress;
2147 	aac_fib_header_tole(&fib->Header);
2148 
2149 	/*
2150 	 * Give the FIB to the controller, wait for a response.
2151 	 */
2152 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2153 		ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2154 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2155 		aac_fib_header_toh(&fib->Header);
2156 		return(EIO);
2157 	}
2158 
2159 	aac_fib_header_toh(&fib->Header);
2160 	return (0);
2161 }
2162 
2163 /*
2164  * Check for commands that have been outstanding for a suspiciously long time,
2165  * and complain about them.
2166  */
2167 static void
2168 aac_timeout(struct aac_softc *sc)
2169 {
2170 	struct aac_command *cm;
2171 	time_t deadline;
2172 	int timedout;
2173 
2174 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2175 	/*
2176 	 * Traverse the busy command list and complain about late commands
2177 	 * once only.
2178 	 */
2179 	timedout = 0;
2180 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2181 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2182 		if (cm->cm_timestamp < deadline) {
2183 			device_printf(sc->aac_dev,
2184 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2185 				      cm, (int)(time_uptime-cm->cm_timestamp));
2186 			AAC_PRINT_FIB(sc, cm->cm_fib);
2187 			timedout++;
2188 		}
2189 	}
2190 
2191 	if (timedout)
2192 		aac_reset_adapter(sc);
2193 	aacraid_print_queues(sc);
2194 }
2195 
2196 /*
2197  * Interface Function Vectors
2198  */
2199 
2200 /*
2201  * Read the current firmware status word.
2202  */
2203 static int
2204 aac_src_get_fwstatus(struct aac_softc *sc)
2205 {
2206 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2207 
2208 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2209 }
2210 
2211 /*
2212  * Notify the controller of a change in a given queue
2213  */
2214 static void
2215 aac_src_qnotify(struct aac_softc *sc, int qbit)
2216 {
2217 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2218 
2219 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2220 }
2221 
2222 /*
2223  * Get the interrupt reason bits
2224  */
2225 static int
2226 aac_src_get_istatus(struct aac_softc *sc)
2227 {
2228 	int val;
2229 
2230 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2231 
2232 	if (sc->msi_enabled) {
2233 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2234 		if (val & AAC_MSI_SYNC_STATUS)
2235 			val = AAC_DB_SYNC_COMMAND;
2236 		else
2237 			val = 0;
2238 	} else {
2239 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2240 	}
2241 	return(val);
2242 }
2243 
2244 /*
2245  * Clear some interrupt reason bits
2246  */
2247 static void
2248 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2249 {
2250 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2251 
2252 	if (sc->msi_enabled) {
2253 		if (mask == AAC_DB_SYNC_COMMAND)
2254 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2255 	} else {
2256 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2257 	}
2258 }
2259 
2260 /*
2261  * Populate the mailbox and set the command word
2262  */
2263 static void
2264 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2265 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2266 {
2267 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2268 
2269 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2270 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2271 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2272 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2273 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2274 }
2275 
2276 static void
2277 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2278 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2279 {
2280 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2281 
2282 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2283 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2284 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2285 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2286 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2287 }
2288 
2289 /*
2290  * Fetch the immediate command status word
2291  */
2292 static int
2293 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2294 {
2295 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2296 
2297 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2298 }
2299 
2300 static int
2301 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2302 {
2303 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2304 
2305 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2306 }
2307 
2308 /*
2309  * Set/clear interrupt masks
2310  */
2311 static void
2312 aac_src_access_devreg(struct aac_softc *sc, int mode)
2313 {
2314 	u_int32_t val;
2315 
2316 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2317 
2318 	switch (mode) {
2319 	case AAC_ENABLE_INTERRUPT:
2320 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2321 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2322 				           AAC_INT_ENABLE_TYPE1_INTX));
2323 		break;
2324 
2325 	case AAC_DISABLE_INTERRUPT:
2326 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2327 		break;
2328 
2329 	case AAC_ENABLE_MSIX:
2330 		/* set bit 6 */
2331 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2332 		val |= 0x40;
2333 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2334 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2335 		/* unmask int. */
2336 		val = PMC_ALL_INTERRUPT_BITS;
2337 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2338 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2339 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2340 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2341 		break;
2342 
2343 	case AAC_DISABLE_MSIX:
2344 		/* reset bit 6 */
2345 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2346 		val &= ~0x40;
2347 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2348 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2349 		break;
2350 
2351 	case AAC_CLEAR_AIF_BIT:
2352 		/* set bit 5 */
2353 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2354 		val |= 0x20;
2355 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2356 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2357 		break;
2358 
2359 	case AAC_CLEAR_SYNC_BIT:
2360 		/* set bit 4 */
2361 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2362 		val |= 0x10;
2363 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2364 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2365 		break;
2366 
2367 	case AAC_ENABLE_INTX:
2368 		/* set bit 7 */
2369 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2370 		val |= 0x80;
2371 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2372 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2373 		/* unmask int. */
2374 		val = PMC_ALL_INTERRUPT_BITS;
2375 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2376 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2377 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2378 			val & (~(PMC_GLOBAL_INT_BIT2)));
2379 		break;
2380 
2381 	default:
2382 		break;
2383 	}
2384 }
2385 
2386 /*
2387  * New comm. interface: Send command functions
2388  */
2389 static int
2390 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2391 {
2392 	struct aac_fib_xporthdr *pFibX;
2393 	u_int32_t fibsize, high_addr;
2394 	u_int64_t address;
2395 
2396 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2397 
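	/*
	 * With MSI-X enabled, spread non-AIF FIBs over the available vectors,
	 * skipping vectors that have used up their outstanding-FIB budget;
	 * the chosen vector number is encoded in the upper 16 bits of the
	 * FIB handle.
	 */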
2398 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2399 		sc->aac_max_msix > 1) {
2400 		u_int16_t vector_no, first_choice = 0xffff;
2401 
2402 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2403 		do {
2404 			vector_no += 1;
2405 			if (vector_no == sc->aac_max_msix)
2406 				vector_no = 1;
2407 			if (sc->aac_rrq_outstanding[vector_no] <
2408 				sc->aac_vector_cap)
2409 				break;
2410 			if (0xffff == first_choice)
2411 				first_choice = vector_no;
2412 			else if (vector_no == first_choice)
2413 				break;
2414 		} while (1);
2415 		if (vector_no == first_choice)
2416 			vector_no = 0;
2417 		sc->aac_rrq_outstanding[vector_no]++;
2418 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2419 			sc->aac_fibs_pushed_no = 0;
2420 		else
2421 			sc->aac_fibs_pushed_no++;
2422 
2423 		cm->cm_fib->Header.Handle += (vector_no << 16);
2424 	}
2425 
2426 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2427 		/* Encode the FIB size in 128-byte units, minus one, for the low address bits */
2428 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2429 		/* Fill new FIB header */
2430 		address = cm->cm_fibphys;
2431 		high_addr = (u_int32_t)(address >> 32);
2432 		if (high_addr == 0L) {
2433 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2434 			cm->cm_fib->Header.u.TimeStamp = 0L;
2435 		} else {
2436 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2437 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2438 		}
2439 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2440 	} else {
2441 		/* Calculate the amount to the fibsize bits */
2442 		/* Encode the FIB size plus transport header in 128-byte units, minus one */
2443 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2444 		/* Fill XPORT header */
2445 		pFibX = (struct aac_fib_xporthdr *)
2446 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2447 		pFibX->Handle = cm->cm_fib->Header.Handle;
2448 		pFibX->HostAddress = cm->cm_fibphys;
2449 		pFibX->Size = cm->cm_fib->Header.Size;
2450 		aac_fib_xporthdr_tole(pFibX);
2451 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2452 		high_addr = (u_int32_t)(address >> 32);
2453 	}
2454 
2455 	aac_fib_header_tole(&cm->cm_fib->Header);
2456 
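	/*
	 * Post the FIB to the controller: the encoded size is carried in the
	 * low bits of the address written to the 64-bit or 32-bit inbound
	 * queue register.
	 */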
2457 	if (fibsize > 31)
2458 		fibsize = 31;
2459 	aac_enqueue_busy(cm);
2460 	if (high_addr) {
2461 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2462 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2463 	} else {
2464 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2465 	}
2466 	return 0;
2467 }
2468 
2469 /*
2470  * New comm. interface: get, set outbound queue index
2471  */
2472 static int
2473 aac_src_get_outb_queue(struct aac_softc *sc)
2474 {
2475 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2476 
2477 	return(-1);
2478 }
2479 
2480 static void
2481 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2482 {
2483 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2484 }
2485 
2486 /*
2487  * Debugging and Diagnostics
2488  */
2489 
2490 /*
2491  * Print some information about the controller.
2492  */
2493 static void
2494 aac_describe_controller(struct aac_softc *sc)
2495 {
2496 	struct aac_fib *fib;
2497 	struct aac_adapter_info	*info;
2498 	char *adapter_type = "Adaptec RAID controller";
2499 
2500 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2501 
2502 	mtx_lock(&sc->aac_io_lock);
2503 	aac_alloc_sync_fib(sc, &fib);
2504 
2505 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2506 		fib->data[0] = 0;
2507 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2508 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2509 		else {
2510 			struct aac_supplement_adapter_info *supp_info;
2511 
2512 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2513 			adapter_type = (char *)supp_info->AdapterTypeText;
2514 			sc->aac_feature_bits = le32toh(supp_info->FeatureBits);
2515 			sc->aac_support_opt2 = le32toh(supp_info->SupportedOptions2);
2516 		}
2517 	}
2518 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2519 		adapter_type,
2520 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2521 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2522 
2523 	fib->data[0] = 0;
2524 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2525 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2526 		aac_release_sync_fib(sc);
2527 		mtx_unlock(&sc->aac_io_lock);
2528 		return;
2529 	}
2530 
2531 	/* save the kernel revision structure for later use */
2532 	info = (struct aac_adapter_info *)&fib->data[0];
2533 	aac_adapter_info_toh(info);
2534 	sc->aac_revision = info->KernelRevision;
2535 
2536 	if (bootverbose) {
2537 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2538 		    "(%dMB cache, %dMB execution), %s\n",
2539 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2540 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2541 		    info->BufferMem / (1024 * 1024),
2542 		    info->ExecutionMem / (1024 * 1024),
2543 		    aac_describe_code(aac_battery_platform,
2544 		    info->batteryPlatform));
2545 
2546 		device_printf(sc->aac_dev,
2547 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2548 		    info->KernelRevision.external.comp.major,
2549 		    info->KernelRevision.external.comp.minor,
2550 		    info->KernelRevision.external.comp.dash,
2551 		    info->KernelRevision.buildNumber,
2552 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2553 
2554 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2555 			      sc->supported_options,
2556 			      "\20"
2557 			      "\1SNAPSHOT"
2558 			      "\2CLUSTERS"
2559 			      "\3WCACHE"
2560 			      "\4DATA64"
2561 			      "\5HOSTTIME"
2562 			      "\6RAID50"
2563 			      "\7WINDOW4GB"
2564 			      "\10SCSIUPGD"
2565 			      "\11SOFTERR"
2566 			      "\12NORECOND"
2567 			      "\13SGMAP64"
2568 			      "\14ALARM"
2569 			      "\15NONDASD"
2570 			      "\16SCSIMGT"
2571 			      "\17RAIDSCSI"
2572 			      "\21ADPTINFO"
2573 			      "\22NEWCOMM"
2574 			      "\23ARRAY64BIT"
2575 			      "\24HEATSENSOR");
2576 	}
2577 
2578 	aac_release_sync_fib(sc);
2579 	mtx_unlock(&sc->aac_io_lock);
2580 }
2581 
2582 /*
2583  * Look up a text description of a numeric error code and return a pointer to
2584  * same.
2585  */
2586 static char *
2587 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2588 {
2589 	int i;
2590 
2591 	for (i = 0; table[i].string != NULL; i++)
2592 		if (table[i].code == code)
2593 			return(table[i].string);
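	/* not found; the entry just past the NULL terminator holds the default description */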
2594 	return(table[i + 1].string);
2595 }
2596 
2597 /*
2598  * Management Interface
2599  */
2600 
2601 static int
2602 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2603 {
2604 	struct aac_softc *sc;
2605 
2606 	sc = dev->si_drv1;
2607 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2608 	device_busy(sc->aac_dev);
2609 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2610 	return 0;
2611 }
2612 
2613 static int
2614 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2615 {
2616 	union aac_statrequest *as;
2617 	struct aac_softc *sc;
2618 	int error = 0;
2619 
2620 	as = (union aac_statrequest *)arg;
2621 	sc = dev->si_drv1;
2622 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2623 
2624 	switch (cmd) {
2625 	case AACIO_STATS:
2626 		switch (as->as_item) {
2627 		case AACQ_FREE:
2628 		case AACQ_READY:
2629 		case AACQ_BUSY:
2630 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2631 			      sizeof(struct aac_qstat));
2632 			break;
2633 		default:
2634 			error = ENOENT;
2635 			break;
2636 		}
2637 		break;
2638 
2639 	case FSACTL_SENDFIB:
2640 	case FSACTL_SEND_LARGE_FIB:
2641 		arg = *(caddr_t*)arg;
2642 	case FSACTL_LNX_SENDFIB:
2643 	case FSACTL_LNX_SEND_LARGE_FIB:
2644 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2645 		error = aac_ioctl_sendfib(sc, arg);
2646 		break;
2647 	case FSACTL_SEND_RAW_SRB:
2648 		arg = *(caddr_t*)arg;
2649 	case FSACTL_LNX_SEND_RAW_SRB:
2650 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2651 		error = aac_ioctl_send_raw_srb(sc, arg);
2652 		break;
2653 	case FSACTL_AIF_THREAD:
2654 	case FSACTL_LNX_AIF_THREAD:
2655 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2656 		error = EINVAL;
2657 		break;
2658 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2659 		arg = *(caddr_t*)arg;
2660 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2661 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2662 		error = aac_open_aif(sc, arg);
2663 		break;
2664 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2665 		arg = *(caddr_t*)arg;
2666 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2667 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2668 		error = aac_getnext_aif(sc, arg);
2669 		break;
2670 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2671 		arg = *(caddr_t*)arg;
2672 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2673 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2674 		error = aac_close_aif(sc, arg);
2675 		break;
2676 	case FSACTL_MINIPORT_REV_CHECK:
2677 		arg = *(caddr_t*)arg;
2678 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2679 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2680 		error = aac_rev_check(sc, arg);
2681 		break;
2682 	case FSACTL_QUERY_DISK:
2683 		arg = *(caddr_t*)arg;
2684 	case FSACTL_LNX_QUERY_DISK:
2685 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2686 		error = aac_query_disk(sc, arg);
2687 		break;
2688 	case FSACTL_DELETE_DISK:
2689 	case FSACTL_LNX_DELETE_DISK:
2690 		/*
2691 		 * We don't trust userland to tell us when to delete a
2692 		 * container; instead we rely on an AIF coming from the
2693 		 * controller.
2694 		 */
2695 		error = 0;
2696 		break;
2697 	case FSACTL_GET_PCI_INFO:
2698 		arg = *(caddr_t*)arg;
2699 	case FSACTL_LNX_GET_PCI_INFO:
2700 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2701 		error = aac_get_pci_info(sc, arg);
2702 		break;
2703 	case FSACTL_GET_FEATURES:
2704 		arg = *(caddr_t*)arg;
2705 	case FSACTL_LNX_GET_FEATURES:
2706 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2707 		error = aac_supported_features(sc, arg);
2708 		break;
2709 	default:
2710 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2711 		error = EINVAL;
2712 		break;
2713 	}
2714 	return(error);
2715 }
2716 
2717 static int
2718 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2719 {
2720 	struct aac_softc *sc;
2721 	struct aac_fib_context *ctx;
2722 	int revents;
2723 
2724 	sc = dev->si_drv1;
2725 	revents = 0;
2726 
2727 	mtx_lock(&sc->aac_io_lock);
2728 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2729 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2730 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2731 				revents |= poll_events & (POLLIN | POLLRDNORM);
2732 				break;
2733 			}
2734 		}
2735 	}
2736 	mtx_unlock(&sc->aac_io_lock);
2737 
2738 	if (revents == 0) {
2739 		if (poll_events & (POLLIN | POLLRDNORM))
2740 			selrecord(td, &sc->rcv_select);
2741 	}
2742 
2743 	return (revents);
2744 }
2745 
2746 static void
2747 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2748 {
2749 
2750 	switch (event->ev_type) {
2751 	case AAC_EVENT_CMFREE:
2752 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2753 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2754 			aacraid_add_event(sc, event);
2755 			return;
2756 		}
2757 		free(event, M_AACRAIDBUF);
2758 		wakeup(arg);
2759 		break;
2760 	default:
2761 		break;
2762 	}
2763 }
2764 
2765 /*
2766  * Send a FIB supplied from userspace
2767  *
2768  * Currently, sending a FIB from userspace in BE hosts is not supported.
2769  * There are several things that need to be considered in order to
2770  * support this, such as:
2771  * - At least the FIB data part from userspace should already be in LE,
2772  *   or else the kernel would need to know all FIB types to be able to
2773  *   correctly convert it to BE.
2774  * - SG tables are converted to BE by aacraid_map_command_sg(). This
2775  *   conversion should be suppressed if the FIB comes from userspace.
2776  * - aacraid_wait_command() calls functions that convert the FIB header
2777  *   to LE. But if the header is already in LE, the conversion should not
2778  *   be performed.
2779  */
2780 static int
2781 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2782 {
2783 	struct aac_command *cm;
2784 	int size, error;
2785 
2786 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2787 
2788 	cm = NULL;
2789 
2790 	/*
2791 	 * Get a command
2792 	 */
2793 	mtx_lock(&sc->aac_io_lock);
2794 	if (aacraid_alloc_command(sc, &cm)) {
2795 		struct aac_event *event;
2796 
2797 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2798 		    M_NOWAIT | M_ZERO);
2799 		if (event == NULL) {
2800 			error = EBUSY;
2801 			mtx_unlock(&sc->aac_io_lock);
2802 			goto out;
2803 		}
2804 		event->ev_type = AAC_EVENT_CMFREE;
2805 		event->ev_callback = aac_ioctl_event;
2806 		event->ev_arg = &cm;
2807 		aacraid_add_event(sc, event);
2808 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2809 	}
2810 	mtx_unlock(&sc->aac_io_lock);
2811 
2812 	/*
2813 	 * Fetch the FIB header, then re-copy to get data as well.
2814 	 */
2815 	if ((error = copyin(ufib, cm->cm_fib,
2816 			    sizeof(struct aac_fib_header))) != 0)
2817 		goto out;
2818 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2819 	if (size > sc->aac_max_fib_size) {
2820 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2821 			      size, sc->aac_max_fib_size);
2822 		size = sc->aac_max_fib_size;
2823 	}
2824 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2825 		goto out;
2826 	cm->cm_fib->Header.Size = size;
2827 	cm->cm_timestamp = time_uptime;
2828 	cm->cm_datalen = 0;
2829 
2830 	/*
2831 	 * Pass the FIB to the controller, wait for it to complete.
2832 	 */
2833 	mtx_lock(&sc->aac_io_lock);
2834 	error = aacraid_wait_command(cm);
2835 	mtx_unlock(&sc->aac_io_lock);
2836 	if (error != 0) {
2837 		device_printf(sc->aac_dev,
2838 			      "aacraid_wait_command return %d\n", error);
2839 		goto out;
2840 	}
2841 
2842 	/*
2843 	 * Copy the FIB and data back out to the caller.
2844 	 */
2845 	size = cm->cm_fib->Header.Size;
2846 	if (size > sc->aac_max_fib_size) {
2847 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2848 			      size, sc->aac_max_fib_size);
2849 		size = sc->aac_max_fib_size;
2850 	}
2851 	error = copyout(cm->cm_fib, ufib, size);
2852 
2853 out:
2854 	if (cm != NULL) {
2855 		mtx_lock(&sc->aac_io_lock);
2856 		aacraid_release_command(cm);
2857 		mtx_unlock(&sc->aac_io_lock);
2858 	}
2859 	return(error);
2860 }
2861 
2862 /*
2863  * Send a passthrough FIB supplied from userspace
2864  */
2865 static int
2866 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2867 {
2868 	struct aac_command *cm;
2869 	struct aac_fib *fib;
2870 	struct aac_srb *srbcmd;
2871 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2872 	void *user_reply;
2873 	int error, transfer_data = 0;
2874 	bus_dmamap_t orig_map = 0;
2875 	u_int32_t fibsize = 0;
2876 	u_int64_t srb_sg_address;
2877 	u_int32_t srb_sg_bytecount;
2878 
2879 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2880 
2881 	cm = NULL;
2882 
2883 	mtx_lock(&sc->aac_io_lock);
2884 	if (aacraid_alloc_command(sc, &cm)) {
2885 		struct aac_event *event;
2886 
2887 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2888 		    M_NOWAIT | M_ZERO);
2889 		if (event == NULL) {
2890 			error = EBUSY;
2891 			mtx_unlock(&sc->aac_io_lock);
2892 			goto out;
2893 		}
2894 		event->ev_type = AAC_EVENT_CMFREE;
2895 		event->ev_callback = aac_ioctl_event;
2896 		event->ev_arg = &cm;
2897 		aacraid_add_event(sc, event);
2898 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2899 	}
2900 	mtx_unlock(&sc->aac_io_lock);
2901 
2902 	cm->cm_data = NULL;
2903 	/* save original dma map */
2904 	orig_map = cm->cm_datamap;
2905 
2906 	fib = cm->cm_fib;
2907 	srbcmd = (struct aac_srb *)fib->data;
2908 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2909 	    sizeof (u_int32_t))) != 0)
2910 		goto out;
2911 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2912 		error = EINVAL;
2913 		goto out;
2914 	}
2915 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2916 		goto out;
2917 
2918 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2919 	srbcmd->retry_limit = 0;	/* obsolete */
2920 
2921 	/* only one sg element from userspace supported */
2922 	if (srbcmd->sg_map.SgCount > 1) {
2923 		error = EINVAL;
2924 		goto out;
2925 	}
2926 	/* check fibsize */
2927 	if (fibsize == (sizeof(struct aac_srb) +
2928 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2929 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2930 		struct aac_sg_entry sg;
2931 
2932 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2933 			goto out;
2934 
2935 		srb_sg_bytecount = sg.SgByteCount;
2936 		srb_sg_address = (u_int64_t)sg.SgAddress;
2937 	} else if (fibsize == (sizeof(struct aac_srb) +
2938 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2939 #ifdef __LP64__
2940 		struct aac_sg_entry64 *sgp =
2941 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2942 		struct aac_sg_entry64 sg;
2943 
2944 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2945 			goto out;
2946 
2947 		srb_sg_bytecount = sg.SgByteCount;
2948 		srb_sg_address = sg.SgAddress;
2949 #else
2950 		error = EINVAL;
2951 		goto out;
2952 #endif
2953 	} else {
2954 		error = EINVAL;
2955 		goto out;
2956 	}
2957 	user_reply = (char *)arg + fibsize;
2958 	srbcmd->data_len = srb_sg_bytecount;
2959 	if (srbcmd->sg_map.SgCount == 1)
2960 		transfer_data = 1;
2961 
2962 	if (transfer_data) {
2963 		/*
2964 		 * Create DMA tag for the passthr. data buffer and allocate it.
2965 		 */
2966 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2967 			1, 0,			/* algnmnt, boundary */
2968 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2969 			BUS_SPACE_MAXADDR_32BIT :
2970 			0x7fffffff,		/* lowaddr */
2971 			BUS_SPACE_MAXADDR, 	/* highaddr */
2972 			NULL, NULL, 		/* filter, filterarg */
2973 			srb_sg_bytecount, 	/* size */
2974 			sc->aac_sg_tablesize,	/* nsegments */
2975 			srb_sg_bytecount, 	/* maxsegsize */
2976 			0,			/* flags */
2977 			NULL, NULL,		/* No locking needed */
2978 			&cm->cm_passthr_dmat)) {
2979 			error = ENOMEM;
2980 			goto out;
2981 		}
2982 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2983 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2984 			error = ENOMEM;
2985 			goto out;
2986 		}
2987 		/* fill some cm variables */
2988 		cm->cm_datalen = srb_sg_bytecount;
2989 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2990 			cm->cm_flags |= AAC_CMD_DATAIN;
2991 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2992 			cm->cm_flags |= AAC_CMD_DATAOUT;
2993 
2994 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2995 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2996 				cm->cm_data, cm->cm_datalen)) != 0)
2997 				goto out;
2998 			/* sync required for bus_dmamem_alloc() alloc. mem.? */
2999 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3000 				BUS_DMASYNC_PREWRITE);
3001 		}
3002 	}
3003 
3004 	/* build the FIB */
3005 	fib->Header.Size = sizeof(struct aac_fib_header) +
3006 		sizeof(struct aac_srb);
3007 	fib->Header.XferState =
3008 		AAC_FIBSTATE_HOSTOWNED   |
3009 		AAC_FIBSTATE_INITIALISED |
3010 		AAC_FIBSTATE_EMPTY	 |
3011 		AAC_FIBSTATE_FROMHOST	 |
3012 		AAC_FIBSTATE_REXPECTED   |
3013 		AAC_FIBSTATE_NORM	 |
3014 		AAC_FIBSTATE_ASYNC;
3015 
3016 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
3017 		ScsiPortCommandU64 : ScsiPortCommand;
3018 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
3019 
3020 	aac_srb_tole(srbcmd);
3021 
3022 	/* send command */
3023 	if (transfer_data) {
3024 		bus_dmamap_load(cm->cm_passthr_dmat,
3025 			cm->cm_datamap, cm->cm_data,
3026 			cm->cm_datalen,
3027 			aacraid_map_command_sg, cm, 0);
3028 	} else {
3029 		aacraid_map_command_sg(cm, NULL, 0, 0);
3030 	}
3031 
3032 	/* wait for completion */
3033 	mtx_lock(&sc->aac_io_lock);
3034 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
3035 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
3036 	mtx_unlock(&sc->aac_io_lock);
3037 
3038 	/* copy data */
3039 	if (transfer_data && (le32toh(srbcmd->flags) & AAC_SRB_FLAGS_DATA_IN)) {
3040 		if ((error = copyout(cm->cm_data,
3041 			(void *)(uintptr_t)srb_sg_address,
3042 			cm->cm_datalen)) != 0)
3043 			goto out;
3044 		/* sync required for bus_dmamem_alloc() allocated mem.? */
3045 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
3046 				BUS_DMASYNC_POSTREAD);
3047 	}
3048 
3049 	/* status */
3050 	aac_srb_response_toh((struct aac_srb_response *)fib->data);
3051 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3052 
3053 out:
3054 	if (cm && cm->cm_data) {
3055 		if (transfer_data)
3056 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3057 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3058 		cm->cm_datamap = orig_map;
3059 	}
3060 	if (cm && cm->cm_passthr_dmat)
3061 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3062 	if (cm) {
3063 		mtx_lock(&sc->aac_io_lock);
3064 		aacraid_release_command(cm);
3065 		mtx_unlock(&sc->aac_io_lock);
3066 	}
3067 	return(error);
3068 }
3069 
3070 /*
3071  * Request an AIF from the controller (new comm. type1)
3072  */
3073 static void
3074 aac_request_aif(struct aac_softc *sc)
3075 {
3076 	struct aac_command *cm;
3077 	struct aac_fib *fib;
3078 
3079 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3080 
3081 	if (aacraid_alloc_command(sc, &cm)) {
3082 		sc->aif_pending = 1;
3083 		return;
3084 	}
3085 	sc->aif_pending = 0;
3086 
3087 	/* build the FIB */
3088 	fib = cm->cm_fib;
3089 	fib->Header.Size = sizeof(struct aac_fib);
3090 	fib->Header.XferState =
3091 		AAC_FIBSTATE_HOSTOWNED   |
3092 		AAC_FIBSTATE_INITIALISED |
3093 		AAC_FIBSTATE_EMPTY	 |
3094 		AAC_FIBSTATE_FROMHOST	 |
3095 		AAC_FIBSTATE_REXPECTED   |
3096 		AAC_FIBSTATE_NORM	 |
3097 		AAC_FIBSTATE_ASYNC;
3098 	/* set AIF marker */
3099 	fib->Header.Handle = 0x00800000;
3100 	fib->Header.Command = AifRequest;
3101 	((struct aac_aif_command *)fib->data)->command = htole32(AifReqEvent);
3102 
3103 	aacraid_map_command_sg(cm, NULL, 0, 0);
3104 }
3105 
3106 
3107 /*
3108  * cdevpriv interface private destructor.
3109  */
3110 static void
3111 aac_cdevpriv_dtor(void *arg)
3112 {
3113 	struct aac_softc *sc;
3114 
3115 	sc = arg;
3116 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3117 	device_unbusy(sc->aac_dev);
3118 }
3119 
3120 /*
3121  * Handle an AIF sent to us by the controller; queue it for later reference.
3122  * If the queue fills up, then drop the older entries.
3123  */
3124 static void
3125 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3126 {
3127 	struct aac_aif_command *aif;
3128 	struct aac_container *co, *co_next;
3129 	struct aac_fib_context *ctx;
3130 	struct aac_fib *sync_fib;
3131 	struct aac_mntinforesp mir;
3132 	int next, current, found;
3133 	int count = 0, changed = 0, i = 0;
3134 	u_int32_t channel, uid;
3135 
3136 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3137 
3138 	aif = (struct aac_aif_command*)&fib->data[0];
3139 	aacraid_print_aif(sc, aif);
3140 
3141 	/* Is it an event that we should care about? */
3142 	switch (le32toh(aif->command)) {
3143 	case AifCmdEventNotify:
3144 		switch (le32toh(aif->data.EN.type)) {
3145 		case AifEnAddContainer:
3146 		case AifEnDeleteContainer:
3147 			/*
3148 			 * A container was added or deleted, but the message
3149 			 * doesn't tell us anything else!  Re-enumerate the
3150 			 * containers and sort things out.
3151 			 */
3152 			aac_alloc_sync_fib(sc, &sync_fib);
3153 			do {
3154 				/*
3155 				 * Ask the controller for its containers one at
3156 				 * a time.
3157 				 * XXX What if the controller's list changes
3158 				 * midway through this enumeration?
3159 				 * XXX This should be done async.
3160 				 */
3161 				if (aac_get_container_info(sc, sync_fib, i,
3162 					&mir, &uid) != 0)
3163 					continue;
3164 				if (i == 0)
3165 					count = mir.MntRespCount;
3166 				/*
3167 				 * Check the container against our list.
3168 				 * co->co_found was already set to 0 in a
3169 				 * previous run.
3170 				 */
3171 				if ((mir.Status == ST_OK) &&
3172 				    (mir.MntTable[0].VolType != CT_NONE)) {
3173 					found = 0;
3174 					TAILQ_FOREACH(co,
3175 						      &sc->aac_container_tqh,
3176 						      co_link) {
3177 						if (co->co_mntobj.ObjectId ==
3178 						    mir.MntTable[0].ObjectId) {
3179 							co->co_found = 1;
3180 							found = 1;
3181 							break;
3182 						}
3183 					}
3184 					/*
3185 					 * If the container matched, continue
3186 					 * in the list.
3187 					 */
3188 					if (found) {
3189 						i++;
3190 						continue;
3191 					}
3192 
3193 					/*
3194 					 * This is a new container.  Do all the
3195 					 * appropriate things to set it up.
3196 					 */
3197 					aac_add_container(sc, &mir, 1, uid);
3198 					changed = 1;
3199 				}
3200 				i++;
3201 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3202 			aac_release_sync_fib(sc);
3203 
3204 			/*
3205 			 * Go through our list of containers and see which ones
3206 			 * were not marked 'found'.  Since the controller didn't
3207 			 * list them they must have been deleted.  Do the
3208 			 * appropriate steps to destroy the device.  Also reset
3209 			 * the co->co_found field.
3210 			 */
3211 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3212 			while (co != NULL) {
3213 				if (co->co_found == 0) {
3214 					co_next = TAILQ_NEXT(co, co_link);
3215 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3216 						     co_link);
3217 					free(co, M_AACRAIDBUF);
3218 					changed = 1;
3219 					co = co_next;
3220 				} else {
3221 					co->co_found = 0;
3222 					co = TAILQ_NEXT(co, co_link);
3223 				}
3224 			}
3225 
3226 			/* Attach the newly created containers */
3227 			if (changed) {
3228 				if (sc->cam_rescan_cb != NULL)
3229 					sc->cam_rescan_cb(sc, 0,
3230 				    	AAC_CAM_TARGET_WILDCARD);
3231 			}
3232 
3233 			break;
3234 
3235 		case AifEnEnclosureManagement:
3236 			switch (le32toh(aif->data.EN.data.EEE.eventType)) {
3237 			case AIF_EM_DRIVE_INSERTION:
3238 			case AIF_EM_DRIVE_REMOVAL:
3239 				channel = le32toh(aif->data.EN.data.EEE.unitID);
3240 				if (sc->cam_rescan_cb != NULL)
3241 					sc->cam_rescan_cb(sc,
3242 					    ((channel>>24) & 0xF) + 1,
3243 					    (channel & 0xFFFF));
3244 				break;
3245 			}
3246 			break;
3247 
3248 		case AifEnAddJBOD:
3249 		case AifEnDeleteJBOD:
3250 		case AifRawDeviceRemove:
3251 			channel = le32toh(aif->data.EN.data.ECE.container);
3252 			if (sc->cam_rescan_cb != NULL)
3253 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3254 				    AAC_CAM_TARGET_WILDCARD);
3255 			break;
3256 
3257 		default:
3258 			break;
3259 		}
3260 
3261 	default:
3262 		break;
3263 	}
3264 
3265 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3266 	current = sc->aifq_idx;
3267 	next = (current + 1) % AAC_AIFQ_LENGTH;
3268 	if (next == 0)
3269 		sc->aifq_filled = 1;
3270 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3271 	/* Make aifq's FIB header and data LE */
3272 	aac_fib_header_tole(&sc->aac_aifq[current].Header);
3273 	/* modify AIF contexts */
3274 	/* adjust reader contexts that the wrapping write index has overtaken */
3275 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3276 			if (next == ctx->ctx_idx)
3277 				ctx->ctx_wrap = 1;
3278 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3279 				ctx->ctx_idx = next;
3280 		}
3281 	}
3282 	sc->aifq_idx = next;
3283 	/* On the off chance that someone is sleeping for an aif... */
3284 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3285 		wakeup(sc->aac_aifq);
3286 	/* Wakeup any poll()ers */
3287 	selwakeuppri(&sc->rcv_select, PRIBIO);
3288 
3289 	return;
3290 }
3291 
3292 /*
3293  * Return the Revision of the driver to userspace and check to see if the
3294  * userspace app is possibly compatible.  This is extremely bogus since
3295  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3296  * returning what the card reported.
3297  */
3298 static int
3299 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3300 {
3301 	struct aac_rev_check rev_check;
3302 	struct aac_rev_check_resp rev_check_resp;
3303 	int error = 0;
3304 
3305 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3306 
3307 	/*
3308 	 * Copyin the revision struct from userspace
3309 	 */
3310 	if ((error = copyin(udata, (caddr_t)&rev_check,
3311 			sizeof(struct aac_rev_check))) != 0) {
3312 		return error;
3313 	}
3314 
3315 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3316 	      rev_check.callingRevision.buildNumber);
3317 
3318 	/*
3319 	 * Doctor up the response struct.
3320 	 */
3321 	rev_check_resp.possiblyCompatible = 1;
3322 	rev_check_resp.adapterSWRevision.external.comp.major =
3323 	    AAC_DRIVER_MAJOR_VERSION;
3324 	rev_check_resp.adapterSWRevision.external.comp.minor =
3325 	    AAC_DRIVER_MINOR_VERSION;
3326 	rev_check_resp.adapterSWRevision.external.comp.type =
3327 	    AAC_DRIVER_TYPE;
3328 	rev_check_resp.adapterSWRevision.external.comp.dash =
3329 	    AAC_DRIVER_BUGFIX_LEVEL;
3330 	rev_check_resp.adapterSWRevision.buildNumber =
3331 	    AAC_DRIVER_BUILD;
3332 
3333 	return(copyout((caddr_t)&rev_check_resp, udata,
3334 			sizeof(struct aac_rev_check_resp)));
3335 }
3336 
3337 /*
3338  * Pass the fib context to the caller
3339  */
3340 static int
3341 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3342 {
3343 	struct aac_fib_context *fibctx, *ctx;
3344 	int error = 0;
3345 
3346 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3347 
3348 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3349 	if (fibctx == NULL)
3350 		return (ENOMEM);
3351 
3352 	mtx_lock(&sc->aac_io_lock);
3353 	/* all elements are already 0, add to queue */
3354 	if (sc->fibctx == NULL)
3355 		sc->fibctx = fibctx;
3356 	else {
3357 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3358 			;
3359 		ctx->next = fibctx;
3360 		fibctx->prev = ctx;
3361 	}
3362 
3363 	/* pick a handle value and bump it until no other context uses it */
3364 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3365 	ctx = sc->fibctx;
3366 	while (ctx != fibctx) {
3367 		if (ctx->unique == fibctx->unique) {
3368 			fibctx->unique++;
3369 			ctx = sc->fibctx;
3370 		} else {
3371 			ctx = ctx->next;
3372 		}
3373 	}
3374 
3375 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3376 	mtx_unlock(&sc->aac_io_lock);
3377 	if (error)
3378 		aac_close_aif(sc, (caddr_t)ctx);
3379 	return error;
3380 }
3381 
3382 /*
3383  * Close the caller's fib context
3384  */
3385 static int
3386 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3387 {
3388 	struct aac_fib_context *ctx;
3389 
3390 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3391 
3392 	mtx_lock(&sc->aac_io_lock);
3393 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3394 		if (ctx->unique == *(uint32_t *)&arg) {
3395 			if (ctx == sc->fibctx)
3396 				sc->fibctx = NULL;
3397 			else {
3398 				ctx->prev->next = ctx->next;
3399 				if (ctx->next)
3400 					ctx->next->prev = ctx->prev;
3401 			}
3402 			break;
3403 		}
3404 	}
3405 	if (ctx)
3406 		free(ctx, M_AACRAIDBUF);
3407 
3408 	mtx_unlock(&sc->aac_io_lock);
3409 	return 0;
3410 }
3411 
3412 /*
3413  * Pass the caller the next AIF in their queue
3414  */
3415 static int
3416 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3417 {
3418 	struct get_adapter_fib_ioctl agf;
3419 	struct aac_fib_context *ctx;
3420 	int error;
3421 
3422 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3423 
3424 	mtx_lock(&sc->aac_io_lock);
3425 #ifdef COMPAT_FREEBSD32
3426 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3427 		struct get_adapter_fib_ioctl32 agf32;
3428 		error = copyin(arg, &agf32, sizeof(agf32));
3429 		if (error == 0) {
3430 			agf.AdapterFibContext = agf32.AdapterFibContext;
3431 			agf.Wait = agf32.Wait;
3432 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3433 		}
3434 	} else
3435 #endif
3436 		error = copyin(arg, &agf, sizeof(agf));
3437 	if (error == 0) {
3438 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3439 			if (agf.AdapterFibContext == ctx->unique)
3440 				break;
3441 		}
3442 		if (!ctx) {
3443 			mtx_unlock(&sc->aac_io_lock);
3444 			return (EFAULT);
3445 		}
3446 
3447 		error = aac_return_aif(sc, ctx, agf.AifFib);
3448 		if (error == EAGAIN && agf.Wait) {
3449 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3450 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3451 			while (error == EAGAIN) {
3452 				mtx_unlock(&sc->aac_io_lock);
3453 				error = tsleep(sc->aac_aifq, PRIBIO |
3454 					       PCATCH, "aacaif", 0);
3455 				mtx_lock(&sc->aac_io_lock);
3456 				if (error == 0)
3457 					error = aac_return_aif(sc, ctx, agf.AifFib);
3458 			}
3459 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3460 		}
3461 	}
3462 	mtx_unlock(&sc->aac_io_lock);
3463 	return(error);
3464 }
3465 
3466 /*
3467  * Hand the next AIF off the top of the queue out to userspace.
3468  */
3469 static int
3470 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3471 {
3472 	int current, error;
3473 
3474 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3475 
3476 	current = ctx->ctx_idx;
3477 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3478 		/* empty */
3479 		return (EAGAIN);
3480 	}
3481 	error = copyout(&sc->aac_aifq[current], (void *)uptr,
3482 	    sizeof(struct aac_fib));
3483 	if (error)
3484 		device_printf(sc->aac_dev,
3485 		    "aac_return_aif: copyout returned %d\n", error);
3486 	else {
3487 		ctx->ctx_wrap = 0;
3488 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3489 	}
3490 	return(error);
3491 }
3492 
3493 static int
3494 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3495 {
3496 	struct aac_pci_info {
3497 		u_int32_t bus;
3498 		u_int32_t slot;
3499 	} pciinf;
3500 	int error;
3501 
3502 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3503 
3504 	pciinf.bus = pci_get_bus(sc->aac_dev);
3505 	pciinf.slot = pci_get_slot(sc->aac_dev);
3506 
3507 	error = copyout((caddr_t)&pciinf, uptr,
3508 			sizeof(struct aac_pci_info));
3509 
3510 	return (error);
3511 }
3512 
3513 static int
3514 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3515 {
3516 	struct aac_features f;
3517 	int error;
3518 
3519 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3520 
3521 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3522 		return (error);
3523 
3524 	/*
3525 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3526 	 * ALL zero in the featuresState, the driver will return the current
3527 	 * state of all the supported features, the data field will not be
3528 	 * valid.
3529 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3530 	 * a specific bit set in the featuresState, the driver will return the
3531 	 * current state of this specific feature and whatever data that are
3532 	 * associated with the feature in the data field or perform whatever
3533 	 * action needed indicates in the data field.
3534 	 */
3535 	if (f.feat.fValue == 0) {
3536 		f.feat.fBits.largeLBA =
3537 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3538 		f.feat.fBits.JBODSupport = 1;
3539 		/* TODO: In the future, add other features state here as well */
3540 	} else {
3541 		if (f.feat.fBits.largeLBA)
3542 			f.feat.fBits.largeLBA =
3543 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3544 		/* TODO: Add other features state and data in the future */
3545 	}
3546 
3547 	error = copyout(&f, uptr, sizeof (f));
3548 	return (error);
3549 }
3550 
3551 /*
3552  * Give the userland some information about the container.  The AAC arch
3553  * expects the driver to be a SCSI passthrough type driver, so it expects
3554  * the containers to have b:t:l numbers.  Fake it.
3555  */
3556 static int
3557 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3558 {
3559 	struct aac_query_disk query_disk;
3560 	struct aac_container *co;
3561 	int error, id;
3562 
3563 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3564 
3565 	mtx_lock(&sc->aac_io_lock);
3566 	error = copyin(uptr, (caddr_t)&query_disk,
3567 		       sizeof(struct aac_query_disk));
3568 	if (error) {
3569 		mtx_unlock(&sc->aac_io_lock);
3570 		return (error);
3571 	}
3572 
3573 	id = query_disk.ContainerNumber;
3574 	if (id == -1) {
3575 		mtx_unlock(&sc->aac_io_lock);
3576 		return (EINVAL);
3577 	}
3578 
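	/* Find the container with the matching ObjectId. */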
3579 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3580 		if (co->co_mntobj.ObjectId == id)
3581 			break;
3582 	}
3583 
3584 	if (co == NULL) {
3585 		query_disk.Valid = 0;
3586 		query_disk.Locked = 0;
3587 		query_disk.Deleted = 1;		/* XXX is this right? */
3588 	} else {
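		/*
		 * Fake the b:t:l address: report the adapter's unit number
		 * as the bus and place the container at target 0, LUN 0.
		 */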
3589 		query_disk.Valid = 1;
3590 		query_disk.Locked = 1;
3591 		query_disk.Deleted = 0;
3592 		query_disk.Bus = device_get_unit(sc->aac_dev);
3593 		query_disk.Target = 0;
3594 		query_disk.Lun = 0;
3595 		query_disk.UnMapped = 0;
3596 	}
3597 
3598 	error = copyout((caddr_t)&query_disk, uptr,
3599 			sizeof(struct aac_query_disk));
3600 
3601 	mtx_unlock(&sc->aac_io_lock);
3602 	return (error);
3603 }
3604 
3605 static void
3606 aac_container_bus(struct aac_softc *sc)
3607 {
3608 	struct aac_sim *sim;
3609 	device_t child;
3610 
3611 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3612 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3613 	if (sim == NULL) {
3614 		device_printf(sc->aac_dev,
3615 		    "No memory to add container bus\n");
3616 		panic("Out of memory?!");
3617 	}
3618 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3619 	if (child == NULL) {
3620 		device_printf(sc->aac_dev,
3621 		    "device_add_child failed for container bus\n");
3622 		free(sim, M_AACRAIDBUF);
3623 		panic("Out of memory?!");
3624 	}
3625 
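	/*
	 * Describe the pseudo bus on which the RAID containers are exposed;
	 * it is bus 0 with room for AAC_MAX_CONTAINERS targets.
	 */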
3626 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3627 	sim->BusNumber = 0;
3628 	sim->BusType = CONTAINER_BUS;
3629 	sim->InitiatorBusId = -1;
3630 	sim->aac_sc = sc;
3631 	sim->sim_dev = child;
3632 	sim->aac_cam = NULL;
3633 
3634 	device_set_ivars(child, sim);
3635 	device_set_desc(child, "Container Bus");
3636 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3637 	/*
3638 	device_set_desc(child, aac_describe_code(aac_container_types,
3639 			mir->MntTable[0].VolType));
3640 	*/
3641 	bus_generic_attach(sc->aac_dev);
3642 }
3643 
3644 static void
3645 aac_get_bus_info(struct aac_softc *sc)
3646 {
3647 	struct aac_fib *fib;
3648 	struct aac_ctcfg *c_cmd;
3649 	struct aac_ctcfg_resp *c_resp;
3650 	struct aac_vmioctl *vmi;
3651 	struct aac_vmi_businf_resp *vmi_resp;
3652 	struct aac_getbusinf businfo;
3653 	struct aac_sim *caminf;
3654 	device_t child;
3655 	int i, error;
3656 
3657 	mtx_lock(&sc->aac_io_lock);
3658 	aac_alloc_sync_fib(sc, &fib);
3659 	c_cmd = (struct aac_ctcfg *)&fib->data[0];
3660 	bzero(c_cmd, sizeof(struct aac_ctcfg));
3661 
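	/* First ask the firmware which SCSI method ID to use (CT_GET_SCSI_METHOD). */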
3662 	c_cmd->Command = VM_ContainerConfig;
3663 	c_cmd->cmd = CT_GET_SCSI_METHOD;
3664 	c_cmd->param = 0;
3665 
3666 	aac_ctcfg_tole(c_cmd);
3667 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3668 	    sizeof(struct aac_ctcfg));
3669 	if (error) {
3670 		device_printf(sc->aac_dev, "Error %d sending "
3671 		    "VM_ContainerConfig command\n", error);
3672 		aac_release_sync_fib(sc);
3673 		mtx_unlock(&sc->aac_io_lock);
3674 		return;
3675 	}
3676 
3677 	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
3678 	aac_ctcfg_resp_toh(c_resp);
3679 	if (c_resp->Status != ST_OK) {
3680 		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
3681 		    c_resp->Status);
3682 		aac_release_sync_fib(sc);
3683 		mtx_unlock(&sc->aac_io_lock);
3684 		return;
3685 	}
3686 
3687 	sc->scsi_method_id = c_resp->param;
3688 
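	/* Now query the physical bus information via VM_Ioctl/GetBusInfo. */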
3689 	vmi = (struct aac_vmioctl *)&fib->data[0];
3690 	bzero(vmi, sizeof(struct aac_vmioctl));
3691 
3692 	vmi->Command = VM_Ioctl;
3693 	vmi->ObjType = FT_DRIVE;
3694 	vmi->MethId = sc->scsi_method_id;
3695 	vmi->ObjId = 0;
3696 	vmi->IoctlCmd = GetBusInfo;
3697 
3698 	aac_vmioctl_tole(vmi);
3699 	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
3700 	    sizeof(struct aac_vmi_businf_resp));
3701 	if (error) {
3702 		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
3703 		    error);
3704 		aac_release_sync_fib(sc);
3705 		mtx_unlock(&sc->aac_io_lock);
3706 		return;
3707 	}
3708 
3709 	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
3710 	aac_vmi_businf_resp_toh(vmi_resp);
3711 	if (vmi_resp->Status != ST_OK) {
3712 		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
3713 		    vmi_resp->Status);
3714 		aac_release_sync_fib(sc);
3715 		mtx_unlock(&sc->aac_io_lock);
3716 		return;
3717 	}
3718 
3719 	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
3720 	aac_release_sync_fib(sc);
3721 	mtx_unlock(&sc->aac_io_lock);
3722 
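	/*
	 * Add an "aacraidp" child for every bus the firmware reports as
	 * valid; these are exposed as SCSI passthrough buses.
	 */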
3723 	for (i = 0; i < businfo.BusCount; i++) {
3724 		if (businfo.BusValid[i] != AAC_BUS_VALID)
3725 			continue;
3726 
3727 		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3728 		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3729 		if (caminf == NULL) {
3730 			device_printf(sc->aac_dev,
3731 			    "No memory to add passthrough bus %d\n", i);
3732 			break;
3733 		}
3734 
3735 		child = device_add_child(sc->aac_dev, "aacraidp", -1);
3736 		if (child == NULL) {
3737 			device_printf(sc->aac_dev,
3738 			    "device_add_child failed for passthrough bus %d\n",
3739 			    i);
3740 			free(caminf, M_AACRAIDBUF);
3741 			break;
3742 		}
3743 
3744 		caminf->TargetsPerBus = businfo.TargetsPerBus;
3745 		caminf->BusNumber = i+1;
3746 		caminf->BusType = PASSTHROUGH_BUS;
3747 		caminf->InitiatorBusId = -1;
3748 		caminf->aac_sc = sc;
3749 		caminf->sim_dev = child;
3750 		caminf->aac_cam = NULL;
3751 
3752 		device_set_ivars(child, caminf);
3753 		device_set_desc(child, "SCSI Passthrough Bus");
3754 		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
3755 	}
3756 }
3757 
3758 /*
3759  * Check to see if the kernel is up and running. If we are in a
3760  * BlinkLED state, return the BlinkLED code.
3761  */
3762 static u_int32_t
3763 aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
3764 {
3765 	u_int32_t ret;
3766 
3767 	ret = AAC_GET_FWSTATUS(sc);
3768 
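	/* On a kernel panic the BlinkLED code is in bits 16-23 of the status. */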
3769 	if (ret & AAC_UP_AND_RUNNING)
3770 		ret = 0;
3771 	else if ((ret & AAC_KERNEL_PANIC) && bled)
3772 		*bled = (ret >> 16) & 0xff;
3773 
3774 	return (ret);
3775 }
3776 
3777 /*
3778  * After an IOP reset the card basically has to be re-initialized as if
3779  * it were coming up from a cold boot, and the driver is responsible for
3780  * any I/O that was outstanding to the adapter at the time of the IOP
3781  * reset.  The init code is kept modular so it can be called from
3782  * multiple places, which prepares the driver for an IOP reset.
3783  */
3784 static int
3785 aac_reset_adapter(struct aac_softc *sc)
3786 {
3787 	struct aac_command *cm;
3788 	struct aac_fib *fib;
3789 	struct aac_pause_command *pc;
3790 	u_int32_t status, reset_mask, waitCount, max_msix_orig;
3791 	int ret, msi_enabled_orig;
3792 
3793 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3794 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
3795 
3796 	if (sc->aac_state & AAC_STATE_RESET) {
3797 		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
3798 		return (EINVAL);
3799 	}
3800 	sc->aac_state |= AAC_STATE_RESET;
3801 
3802 	/* disable interrupt */
3803 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
3804 
3805 	/*
3806 	 * Abort all pending commands:
3807 	 * a) on the controller
3808 	 */
3809 	while ((cm = aac_dequeue_busy(sc)) != NULL) {
3810 		cm->cm_flags |= AAC_CMD_RESET;
3811 
3812 		/* is there a completion handler? */
3813 		if (cm->cm_complete != NULL) {
3814 			cm->cm_complete(cm);
3815 		} else {
3816 			/* assume that someone is sleeping on this
3817 			 * command
3818 			 */
3819 			wakeup(cm);
3820 		}
3821 	}
3822 
3823 	/* b) in the waiting queues */
3824 	while ((cm = aac_dequeue_ready(sc)) != NULL) {
3825 		cm->cm_flags |= AAC_CMD_RESET;
3826 
3827 		/* is there a completion handler? */
3828 		if (cm->cm_complete != NULL) {
3829 			cm->cm_complete(cm);
3830 		} else {
3831 			/* assume that someone is sleeping on this
3832 			 * command
3833 			 */
3834 			wakeup(cm);
3835 		}
3836 	}
3837 
3838 	/* flush drives */
3839 	if (aac_check_adapter_health(sc, NULL) == 0) {
3840 		mtx_unlock(&sc->aac_io_lock);
3841 		(void) aacraid_shutdown(sc->aac_dev);
3842 		mtx_lock(&sc->aac_io_lock);
3843 	}
3844 
3845 	/* execute IOP reset */
3846 	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
3847 		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);
3848 
3849 		/* We need to wait 5 seconds before accessing the MU again;
3850 		 * each block of 10000 * 100us = 1,000,000us = 1000ms = 1s.
3851 		 */
3852 		waitCount = 5 * 10000;
3853 		while (waitCount) {
3854 			DELAY(100);			/* delay 100 microseconds */
3855 			waitCount--;
3856 		}
3857 	} else {
3858 		ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS,
3859 			0, 0, 0, 0, &status, &reset_mask);
3860 		if (ret && !sc->doorbell_mask) {
3861 			/* call IOP_RESET for older firmware */
3862 		if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0, 0, 0, 0,
3863 			    &status, NULL)) != 0) {
3864 				if (status == AAC_SRB_STS_INVALID_REQUEST) {
3865 					device_printf(sc->aac_dev,
3866 					    "IOP_RESET not supported\n");
3867 				} else {
3868 					/* probably timeout */
3869 					device_printf(sc->aac_dev,
3870 					    "IOP_RESET failed\n");
3871 				}
3872 
3873 				/* unwind aac_shutdown() */
3874 				aac_alloc_sync_fib(sc, &fib);
3875 				pc = (struct aac_pause_command *)&fib->data[0];
3876 				pc->Command = VM_ContainerConfig;
3877 				pc->Type = CT_PAUSE_IO;
3878 				pc->Timeout = 1;
3879 				pc->Min = 1;
3880 				pc->NoRescan = 1;
3881 
3882 				aac_pause_command_tole(pc);
3883 				(void) aac_sync_fib(sc, ContainerCommand, 0,
3884 				    fib, sizeof (struct aac_pause_command));
3885 				aac_release_sync_fib(sc);
3886 
3887 				goto finish;
3888 			}
3889 		} else if (sc->doorbell_mask) {
3890 			ret = 0;
3891 			reset_mask = sc->doorbell_mask;
3892 		}
3893 		if (!ret &&
3894 		    (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) {
3895 			AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
3896 			/*
3897 			 * We need to wait 5 seconds before accessing the
3898 			 * doorbell again; each block of
3899 			 * 10000 * 100us = 1,000,000us = 1000ms = 1s.
3900 			 */
3901 			waitCount = 5 * 10000;
3902 			while (waitCount) {
3903 				DELAY(100);	/* delay 100 microseconds */
3904 				waitCount--;
3905 			}
3906 		}
3907 	}
3908 
3909 	/*
3910 	 * Initialize the adapter.
3911 	 */
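	/*
	 * Probe the firmware again with MSI temporarily disabled; if the
	 * controller does not have to stay in sync mode, restore the saved
	 * MSI-X settings and re-run the full initialization.
	 */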
3912 	max_msix_orig = sc->aac_max_msix;
3913 	msi_enabled_orig = sc->msi_enabled;
3914 	sc->msi_enabled = FALSE;
3915 	if (aac_check_firmware(sc) != 0)
3916 		goto finish;
3917 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
3918 		sc->aac_max_msix = max_msix_orig;
3919 		if (msi_enabled_orig) {
3920 			sc->msi_enabled = msi_enabled_orig;
3921 			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
3922 		}
3923 		mtx_unlock(&sc->aac_io_lock);
3924 		aac_init(sc);
3925 		mtx_lock(&sc->aac_io_lock);
3926 	}
3927 
3928 finish:
3929 	sc->aac_state &= ~AAC_STATE_RESET;
3930 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
3931 	aacraid_startio(sc);
3932 	return (0);
3933 }
3934