xref: /freebsd/sys/dev/aacraid/aacraid.c (revision 148a8da8)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000 Michael Smith
5  * Copyright (c) 2001 Scott Long
6  * Copyright (c) 2000 BSDi
7  * Copyright (c) 2001-2010 Adaptec, Inc.
8  * Copyright (c) 2010-2012 PMC-Sierra, Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers
38  */
39 #define AAC_DRIVERNAME			"aacraid"
40 
41 #include "opt_aacraid.h"
42 
43 /* #include <stddef.h> */
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/proc.h>
50 #include <sys/sysctl.h>
51 #include <sys/sysent.h>
52 #include <sys/poll.h>
53 #include <sys/ioccom.h>
54 
55 #include <sys/bus.h>
56 #include <sys/conf.h>
57 #include <sys/signalvar.h>
58 #include <sys/time.h>
59 #include <sys/eventhandler.h>
60 #include <sys/rman.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 
65 #include <dev/pci/pcireg.h>
66 #include <dev/pci/pcivar.h>
67 
68 #include <dev/aacraid/aacraid_reg.h>
69 #include <sys/aac_ioctl.h>
70 #include <dev/aacraid/aacraid_debug.h>
71 #include <dev/aacraid/aacraid_var.h>
72 
73 #ifndef FILTER_HANDLED
74 #define FILTER_HANDLED	0x02
75 #endif
76 
77 static void	aac_add_container(struct aac_softc *sc,
78 				  struct aac_mntinforesp *mir, int f,
79 				  u_int32_t uid);
80 static void	aac_get_bus_info(struct aac_softc *sc);
81 static void	aac_container_bus(struct aac_softc *sc);
82 static void	aac_daemon(void *arg);
83 static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
84 							  int pages, int nseg, int nseg_new);
85 
86 /* Command Processing */
87 static void	aac_timeout(struct aac_softc *sc);
88 static void	aac_command_thread(struct aac_softc *sc);
89 static int	aac_sync_fib(struct aac_softc *sc, u_int32_t command,
90 				     u_int32_t xferstate, struct aac_fib *fib,
91 				     u_int16_t datasize);
92 /* Command Buffer Management */
93 static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
94 				       int nseg, int error);
95 static int	aac_alloc_commands(struct aac_softc *sc);
96 static void	aac_free_commands(struct aac_softc *sc);
97 static void	aac_unmap_command(struct aac_command *cm);
98 
99 /* Hardware Interface */
100 static int	aac_alloc(struct aac_softc *sc);
101 static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
102 			       int error);
103 static int	aac_check_firmware(struct aac_softc *sc);
104 static void	aac_define_int_mode(struct aac_softc *sc);
105 static int	aac_init(struct aac_softc *sc);
106 static int	aac_find_pci_capability(struct aac_softc *sc, int cap);
107 static int	aac_setup_intr(struct aac_softc *sc);
108 static int	aac_check_config(struct aac_softc *sc);
109 
110 /* PMC SRC interface */
111 static int	aac_src_get_fwstatus(struct aac_softc *sc);
112 static void	aac_src_qnotify(struct aac_softc *sc, int qbit);
113 static int	aac_src_get_istatus(struct aac_softc *sc);
114 static void	aac_src_clear_istatus(struct aac_softc *sc, int mask);
115 static void	aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command,
116 				    u_int32_t arg0, u_int32_t arg1,
117 				    u_int32_t arg2, u_int32_t arg3);
118 static int	aac_src_get_mailbox(struct aac_softc *sc, int mb);
119 static void	aac_src_access_devreg(struct aac_softc *sc, int mode);
120 static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm);
121 static int aac_src_get_outb_queue(struct aac_softc *sc);
122 static void aac_src_set_outb_queue(struct aac_softc *sc, int index);
123 
124 struct aac_interface aacraid_src_interface = {
125 	aac_src_get_fwstatus,
126 	aac_src_qnotify,
127 	aac_src_get_istatus,
128 	aac_src_clear_istatus,
129 	aac_src_set_mailbox,
130 	aac_src_get_mailbox,
131 	aac_src_access_devreg,
132 	aac_src_send_command,
133 	aac_src_get_outb_queue,
134 	aac_src_set_outb_queue
135 };
136 
137 /* PMC SRCv interface */
138 static void	aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command,
139 				    u_int32_t arg0, u_int32_t arg1,
140 				    u_int32_t arg2, u_int32_t arg3);
141 static int	aac_srcv_get_mailbox(struct aac_softc *sc, int mb);
142 
143 struct aac_interface aacraid_srcv_interface = {
144 	aac_src_get_fwstatus,
145 	aac_src_qnotify,
146 	aac_src_get_istatus,
147 	aac_src_clear_istatus,
148 	aac_srcv_set_mailbox,
149 	aac_srcv_get_mailbox,
150 	aac_src_access_devreg,
151 	aac_src_send_command,
152 	aac_src_get_outb_queue,
153 	aac_src_set_outb_queue
154 };
155 
156 /* Debugging and Diagnostics */
157 static struct aac_code_lookup aac_cpu_variant[] = {
158 	{"i960JX",		CPUI960_JX},
159 	{"i960CX",		CPUI960_CX},
160 	{"i960HX",		CPUI960_HX},
161 	{"i960RX",		CPUI960_RX},
162 	{"i960 80303",		CPUI960_80303},
163 	{"StrongARM SA110",	CPUARM_SA110},
164 	{"PPC603e",		CPUPPC_603e},
165 	{"XScale 80321",	CPU_XSCALE_80321},
166 	{"MIPS 4KC",		CPU_MIPS_4KC},
167 	{"MIPS 5KC",		CPU_MIPS_5KC},
168 	{"Unknown StrongARM",	CPUARM_xxx},
169 	{"Unknown PowerPC",	CPUPPC_xxx},
170 	{NULL, 0},
171 	{"Unknown processor",	0}
172 };
173 
174 static struct aac_code_lookup aac_battery_platform[] = {
175 	{"required battery present",		PLATFORM_BAT_REQ_PRESENT},
176 	{"REQUIRED BATTERY NOT PRESENT",	PLATFORM_BAT_REQ_NOTPRESENT},
177 	{"optional battery present",		PLATFORM_BAT_OPT_PRESENT},
178 	{"optional battery not installed",	PLATFORM_BAT_OPT_NOTPRESENT},
179 	{"no battery support",			PLATFORM_BAT_NOT_SUPPORTED},
180 	{NULL, 0},
181 	{"unknown battery platform",		0}
182 };
183 static void	aac_describe_controller(struct aac_softc *sc);
184 static char	*aac_describe_code(struct aac_code_lookup *table,
185 				   u_int32_t code);
186 
187 /* Management Interface */
188 static d_open_t		aac_open;
189 static d_ioctl_t	aac_ioctl;
190 static d_poll_t		aac_poll;
191 #if __FreeBSD_version >= 702000
192 static void		aac_cdevpriv_dtor(void *arg);
193 #else
194 static d_close_t	aac_close;
195 #endif
196 static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
197 static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
198 static void	aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib);
199 static void	aac_request_aif(struct aac_softc *sc);
200 static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
201 static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
202 static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
203 static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
204 static int	aac_return_aif(struct aac_softc *sc,
205 			       struct aac_fib_context *ctx, caddr_t uptr);
206 static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
207 static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
208 static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
209 static void	aac_ioctl_event(struct aac_softc *sc,
210 				struct aac_event *event, void *arg);
211 static int	aac_reset_adapter(struct aac_softc *sc);
212 static int	aac_get_container_info(struct aac_softc *sc,
213 				       struct aac_fib *fib, int cid,
214 				       struct aac_mntinforesp *mir,
215 				       u_int32_t *uid);
216 static u_int32_t
217 	aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled);
218 
219 static struct cdevsw aacraid_cdevsw = {
220 	.d_version =	D_VERSION,
221 	.d_flags =	D_NEEDGIANT,
222 	.d_open =	aac_open,
223 #if __FreeBSD_version < 702000
224 	.d_close =	aac_close,
225 #endif
226 	.d_ioctl =	aac_ioctl,
227 	.d_poll =	aac_poll,
228 	.d_name =	"aacraid",
229 };
230 
231 MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver");
232 
233 /* sysctl node */
234 SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters");
235 
236 /*
237  * Device Interface
238  */
239 
240 /*
241  * Initialize the controller and softc
242  */
243 int
244 aacraid_attach(struct aac_softc *sc)
245 {
246 	int error, unit;
247 	struct aac_fib *fib;
248 	struct aac_mntinforesp mir;
249 	int count = 0, i = 0;
250 	u_int32_t uid;
251 
252 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
253 	sc->hint_flags = device_get_flags(sc->aac_dev);
254 	/*
255 	 * Initialize per-controller queues.
256 	 */
257 	aac_initq_free(sc);
258 	aac_initq_ready(sc);
259 	aac_initq_busy(sc);
260 
261 	/* mark controller as suspended until we get ourselves organised */
262 	sc->aac_state |= AAC_STATE_SUSPEND;
263 
264 	/*
265 	 * Check that the firmware on the card is supported.
266 	 */
267 	sc->msi_enabled = FALSE;
268 	if ((error = aac_check_firmware(sc)) != 0)
269 		return(error);
270 
271 	/*
272 	 * Initialize locks
273 	 */
274 	mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF);
275 	TAILQ_INIT(&sc->aac_container_tqh);
276 	TAILQ_INIT(&sc->aac_ev_cmfree);
277 
278 #if __FreeBSD_version >= 800000
279 	/* Initialize the clock daemon callout. */
280 	callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0);
281 #endif
282 	/*
283 	 * Initialize the adapter.
284 	 */
285 	if ((error = aac_alloc(sc)) != 0)
286 		return(error);
287 	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
288 		aac_define_int_mode(sc);
289 		if ((error = aac_init(sc)) != 0)
290 			return(error);
291 	}
292 
293 	/*
294 	 * Allocate and connect our interrupt.
295 	 */
296 	if ((error = aac_setup_intr(sc)) != 0)
297 		return(error);
298 
299 	/*
300 	 * Print a little information about the controller.
301 	 */
302 	aac_describe_controller(sc);
303 
304 	/*
305 	 * Make the control device.
306 	 */
307 	unit = device_get_unit(sc->aac_dev);
308 	sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR,
309 				 0640, "aacraid%d", unit);
310 	sc->aac_dev_t->si_drv1 = sc;
311 
312 	/* Create the AIF thread */
313 	if (aac_kthread_create((void(*)(void *))aac_command_thread, sc,
314 		   &sc->aifthread, 0, 0, "aacraid%daif", unit))
315 		panic("Could not create AIF thread");
316 
317 	/* Register the shutdown method to only be called post-dump */
318 	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown,
319 	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
320 		device_printf(sc->aac_dev,
321 			      "shutdown event registration failed\n");
322 
323 	/* Find containers */
324 	mtx_lock(&sc->aac_io_lock);
325 	aac_alloc_sync_fib(sc, &fib);
326 	/* loop over possible containers */
327 	do {
328 		if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0)
329 			continue;
330 		if (i == 0)
331 			count = mir.MntRespCount;
332 		aac_add_container(sc, &mir, 0, uid);
333 		i++;
334 	} while ((i < count) && (i < AAC_MAX_CONTAINERS));
335 	aac_release_sync_fib(sc);
336 	mtx_unlock(&sc->aac_io_lock);
337 
338 	/* Register with CAM for the containers */
339 	TAILQ_INIT(&sc->aac_sim_tqh);
340 	aac_container_bus(sc);
341 	/* Register with CAM for the non-DASD devices */
342 	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0)
343 		aac_get_bus_info(sc);
344 
345 	/* poke the bus to actually attach the child devices */
346 	bus_generic_attach(sc->aac_dev);
347 
348 	/* mark the controller up */
349 	sc->aac_state &= ~AAC_STATE_SUSPEND;
350 
351 	/* enable interrupts now */
352 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
353 
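	/*
	 * Arm the periodic daemon: aac_daemon() first fires after 60 seconds
	 * and then re-arms itself every 30 minutes to push the host time to
	 * the controller.
	 */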
354 #if __FreeBSD_version >= 800000
355 	mtx_lock(&sc->aac_io_lock);
356 	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
357 	mtx_unlock(&sc->aac_io_lock);
358 #else
359 	{
360 		struct timeval tv;
361 		tv.tv_sec = 60;
362 		tv.tv_usec = 0;
363 		sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
364 	}
365 #endif
366 
367 	return(0);
368 }
369 
370 static void
371 aac_daemon(void *arg)
372 {
373 	struct aac_softc *sc;
374 	struct timeval tv;
375 	struct aac_command *cm;
376 	struct aac_fib *fib;
377 
378 	sc = arg;
379 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
380 
381 #if __FreeBSD_version >= 800000
382 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
383 	if (callout_pending(&sc->aac_daemontime) ||
384 	    callout_active(&sc->aac_daemontime) == 0)
385 		return;
386 #else
387 	mtx_lock(&sc->aac_io_lock);
388 #endif
389 	getmicrotime(&tv);
390 
391 	if (!aacraid_alloc_command(sc, &cm)) {
392 		fib = cm->cm_fib;
393 		cm->cm_timestamp = time_uptime;
394 		cm->cm_datalen = 0;
395 		cm->cm_flags |= AAC_CMD_WAIT;
396 
397 		fib->Header.Size =
398 			sizeof(struct aac_fib_header) + sizeof(u_int32_t);
399 		fib->Header.XferState =
400 			AAC_FIBSTATE_HOSTOWNED   |
401 			AAC_FIBSTATE_INITIALISED |
402 			AAC_FIBSTATE_EMPTY	 |
403 			AAC_FIBSTATE_FROMHOST	 |
404 			AAC_FIBSTATE_REXPECTED   |
405 			AAC_FIBSTATE_NORM	 |
406 			AAC_FIBSTATE_ASYNC	 |
407 			AAC_FIBSTATE_FAST_RESPONSE;
408 		fib->Header.Command = SendHostTime;
409 		*(uint32_t *)fib->data = tv.tv_sec;
410 
411 		aacraid_map_command_sg(cm, NULL, 0, 0);
412 		aacraid_release_command(cm);
413 	}
414 
415 #if __FreeBSD_version >= 800000
416 	callout_schedule(&sc->aac_daemontime, 30 * 60 * hz);
417 #else
418 	mtx_unlock(&sc->aac_io_lock);
419 	tv.tv_sec = 30 * 60;
420 	tv.tv_usec = 0;
421 	sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv));
422 #endif
423 }
424 
425 void
426 aacraid_add_event(struct aac_softc *sc, struct aac_event *event)
427 {
428 
429 	switch (event->ev_type & AAC_EVENT_MASK) {
430 	case AAC_EVENT_CMFREE:
431 		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
432 		break;
433 	default:
434 		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
435 		    event->ev_type);
436 		break;
437 	}
438 
439 	return;
440 }
441 
442 /*
443  * Request information about container #cid
444  */
445 static int
446 aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid,
447 		       struct aac_mntinforesp *mir, u_int32_t *uid)
448 {
449 	struct aac_command *cm;
450 	struct aac_fib *fib;
451 	struct aac_mntinfo *mi;
452 	struct aac_cnt_config *ccfg;
453 	int rval;
454 
455 	if (sync_fib == NULL) {
456 		if (aacraid_alloc_command(sc, &cm)) {
457 			device_printf(sc->aac_dev,
458 				"Warning, no free command available\n");
459 			return (-1);
460 		}
461 		fib = cm->cm_fib;
462 	} else {
463 		fib = sync_fib;
464 	}
465 
466 	mi = (struct aac_mntinfo *)&fib->data[0];
467 	/* 4KB support? 64-bit LBA? */
468 	if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)
469 		mi->Command = VM_NameServeAllBlk;
470 	else if (sc->flags & AAC_FLAGS_LBA_64BIT)
471 		mi->Command = VM_NameServe64;
472 	else
473 		mi->Command = VM_NameServe;
474 	mi->MntType = FT_FILESYS;
475 	mi->MntCount = cid;
476 
477 	if (sync_fib) {
478 		if (aac_sync_fib(sc, ContainerCommand, 0, fib,
479 			 sizeof(struct aac_mntinfo))) {
480 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
481 			return (-1);
482 		}
483 	} else {
484 		cm->cm_timestamp = time_uptime;
485 		cm->cm_datalen = 0;
486 
487 		fib->Header.Size =
488 			sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo);
489 		fib->Header.XferState =
490 			AAC_FIBSTATE_HOSTOWNED   |
491 			AAC_FIBSTATE_INITIALISED |
492 			AAC_FIBSTATE_EMPTY	 |
493 			AAC_FIBSTATE_FROMHOST	 |
494 			AAC_FIBSTATE_REXPECTED   |
495 			AAC_FIBSTATE_NORM	 |
496 			AAC_FIBSTATE_ASYNC	 |
497 			AAC_FIBSTATE_FAST_RESPONSE;
498 		fib->Header.Command = ContainerCommand;
499 		if (aacraid_wait_command(cm) != 0) {
500 			device_printf(sc->aac_dev, "Error probing container %d\n", cid);
501 			aacraid_release_command(cm);
502 			return (-1);
503 		}
504 	}
505 	bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp));
506 
507 	/* UID */
508 	*uid = cid;
509 	if (mir->MntTable[0].VolType != CT_NONE &&
510 		!(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) {
511 		if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) {
512 			mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200;
513 			mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0;
514 		}
515 		ccfg = (struct aac_cnt_config *)&fib->data[0];
516 		bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
517 		ccfg->Command = VM_ContainerConfig;
518 		ccfg->CTCommand.command = CT_CID_TO_32BITS_UID;
519 		ccfg->CTCommand.param[0] = cid;
520 
521 		if (sync_fib) {
522 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
523 				sizeof(struct aac_cnt_config));
524 			if (rval == 0 && ccfg->Command == ST_OK &&
525 				ccfg->CTCommand.param[0] == CT_OK &&
526 				mir->MntTable[0].VolType != CT_PASSTHRU)
527 				*uid = ccfg->CTCommand.param[1];
528 		} else {
529 			fib->Header.Size =
530 				sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config);
531 			fib->Header.XferState =
532 				AAC_FIBSTATE_HOSTOWNED   |
533 				AAC_FIBSTATE_INITIALISED |
534 				AAC_FIBSTATE_EMPTY	 |
535 				AAC_FIBSTATE_FROMHOST	 |
536 				AAC_FIBSTATE_REXPECTED   |
537 				AAC_FIBSTATE_NORM	 |
538 				AAC_FIBSTATE_ASYNC	 |
539 				AAC_FIBSTATE_FAST_RESPONSE;
540 			fib->Header.Command = ContainerCommand;
541 			rval = aacraid_wait_command(cm);
542 			if (rval == 0 && ccfg->Command == ST_OK &&
543 				ccfg->CTCommand.param[0] == CT_OK &&
544 				mir->MntTable[0].VolType != CT_PASSTHRU)
545 				*uid = ccfg->CTCommand.param[1];
546 			aacraid_release_command(cm);
547 		}
548 	}
549 
550 	return (0);
551 }
552 
553 /*
554  * Create a device to represent a new container
555  */
556 static void
557 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f,
558 		  u_int32_t uid)
559 {
560 	struct aac_container *co;
561 
562 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
563 
564 	/*
565 	 * Check container volume type for validity.  Note that many of
566 	 * the possible types may never show up.
567 	 */
568 	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
569 		co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF,
570 		       M_NOWAIT | M_ZERO);
571 		if (co == NULL) {
572 			panic("Out of memory?!");
573 		}
574 
575 		co->co_found = f;
576 		bcopy(&mir->MntTable[0], &co->co_mntobj,
577 		      sizeof(struct aac_mntobj));
578 		co->co_uid = uid;
579 		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
580 	}
581 }
582 
583 /*
584  * Allocate resources associated with (sc)
585  */
586 static int
587 aac_alloc(struct aac_softc *sc)
588 {
589 	bus_size_t maxsize;
590 
591 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
592 
593 	/*
594 	 * Create DMA tag for mapping buffers into controller-addressable space.
595 	 */
596 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
597 			       1, 0, 			/* algnmnt, boundary */
598 			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
599 			       BUS_SPACE_MAXADDR :
600 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
601 			       BUS_SPACE_MAXADDR, 	/* highaddr */
602 			       NULL, NULL, 		/* filter, filterarg */
603 			       sc->aac_max_sectors << 9, /* maxsize */
604 			       sc->aac_sg_tablesize,	/* nsegments */
605 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
606 			       BUS_DMA_ALLOCNOW,	/* flags */
607 			       busdma_lock_mutex,	/* lockfunc */
608 			       &sc->aac_io_lock,	/* lockfuncarg */
609 			       &sc->aac_buffer_dmat)) {
610 		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
611 		return (ENOMEM);
612 	}
613 
614 	/*
615 	 * Create DMA tag for mapping FIBs into controller-addressable space.
616 	 */
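	/*
	 * Each FIB is over-allocated by 31 bytes so that aac_alloc_commands()
	 * can align it on a 32-byte boundary; TYPE1 interfaces also reserve
	 * room for the per-FIB transport header in front of the aligned FIB.
	 */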
617 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
618 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size +
619 			sizeof(struct aac_fib_xporthdr) + 31);
620 	else
621 		maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31);
622 	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
623 			       1, 0, 			/* algnmnt, boundary */
624 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
625 			       BUS_SPACE_MAXADDR_32BIT :
626 			       0x7fffffff,		/* lowaddr */
627 			       BUS_SPACE_MAXADDR, 	/* highaddr */
628 			       NULL, NULL, 		/* filter, filterarg */
629 			       maxsize,  		/* maxsize */
630 			       1,			/* nsegments */
631 			       maxsize,			/* maxsegsize */
632 			       0,			/* flags */
633 			       NULL, NULL,		/* No locking needed */
634 			       &sc->aac_fib_dmat)) {
635 		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
636 		return (ENOMEM);
637 	}
638 
639 	/*
640 	 * Create DMA tag for the common structure and allocate it.
641 	 */
642 	maxsize = sizeof(struct aac_common);
643 	maxsize += sc->aac_max_fibs * sizeof(u_int32_t);
644 	if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
645 			       1, 0,			/* algnmnt, boundary */
646 			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
647 			       BUS_SPACE_MAXADDR_32BIT :
648 			       0x7fffffff,		/* lowaddr */
649 			       BUS_SPACE_MAXADDR, 	/* highaddr */
650 			       NULL, NULL, 		/* filter, filterarg */
651 			       maxsize, 		/* maxsize */
652 			       1,			/* nsegments */
653 			       maxsize,			/* maxsegsize */
654 			       0,			/* flags */
655 			       NULL, NULL,		/* No locking needed */
656 			       &sc->aac_common_dmat)) {
657 		device_printf(sc->aac_dev,
658 			      "can't allocate common structure DMA tag\n");
659 		return (ENOMEM);
660 	}
661 	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
662 			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
663 		device_printf(sc->aac_dev, "can't allocate common structure\n");
664 		return (ENOMEM);
665 	}
666 
667 	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
668 			sc->aac_common, maxsize,
669 			aac_common_map, sc, 0);
670 	bzero(sc->aac_common, maxsize);
671 
672 	/* Allocate some FIBs and associated command structs */
673 	TAILQ_INIT(&sc->aac_fibmap_tqh);
674 	sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command),
675 				  M_AACRAIDBUF, M_WAITOK|M_ZERO);
676 	mtx_lock(&sc->aac_io_lock);
677 	while (sc->total_fibs < sc->aac_max_fibs) {
678 		if (aac_alloc_commands(sc) != 0)
679 			break;
680 	}
681 	mtx_unlock(&sc->aac_io_lock);
682 	if (sc->total_fibs == 0)
683 		return (ENOMEM);
684 
685 	return (0);
686 }
687 
688 /*
689  * Free all of the resources associated with (sc)
690  *
691  * Should not be called if the controller is active.
692  */
693 void
694 aacraid_free(struct aac_softc *sc)
695 {
696 	int i;
697 
698 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
699 
700 	/* remove the control device */
701 	if (sc->aac_dev_t != NULL)
702 		destroy_dev(sc->aac_dev_t);
703 
704 	/* throw away any FIB buffers, discard the FIB DMA tag */
705 	aac_free_commands(sc);
706 	if (sc->aac_fib_dmat)
707 		bus_dma_tag_destroy(sc->aac_fib_dmat);
708 
709 	free(sc->aac_commands, M_AACRAIDBUF);
710 
711 	/* destroy the common area */
712 	if (sc->aac_common) {
713 		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
714 		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
715 				sc->aac_common_dmamap);
716 	}
717 	if (sc->aac_common_dmat)
718 		bus_dma_tag_destroy(sc->aac_common_dmat);
719 
720 	/* disconnect the interrupt handler */
721 	for (i = 0; i < AAC_MAX_MSIX; ++i) {
722 		if (sc->aac_intr[i])
723 			bus_teardown_intr(sc->aac_dev,
724 				sc->aac_irq[i], sc->aac_intr[i]);
725 		if (sc->aac_irq[i])
726 			bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
727 				sc->aac_irq_rid[i], sc->aac_irq[i]);
728 		else
729 			break;
730 	}
731 	if (sc->msi_enabled)
732 		pci_release_msi(sc->aac_dev);
733 
734 	/* destroy data-transfer DMA tag */
735 	if (sc->aac_buffer_dmat)
736 		bus_dma_tag_destroy(sc->aac_buffer_dmat);
737 
738 	/* destroy the parent DMA tag */
739 	if (sc->aac_parent_dmat)
740 		bus_dma_tag_destroy(sc->aac_parent_dmat);
741 
742 	/* release the register window mapping */
743 	if (sc->aac_regs_res0 != NULL)
744 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
745 				     sc->aac_regs_rid0, sc->aac_regs_res0);
746 	if (sc->aac_regs_res1 != NULL)
747 		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
748 				     sc->aac_regs_rid1, sc->aac_regs_res1);
749 }
750 
751 /*
752  * Disconnect from the controller completely, in preparation for unload.
753  */
754 int
755 aacraid_detach(device_t dev)
756 {
757 	struct aac_softc *sc;
758 	struct aac_container *co;
759 	struct aac_sim	*sim;
760 	int error;
761 
762 	sc = device_get_softc(dev);
763 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
764 
765 #if __FreeBSD_version >= 800000
766 	callout_drain(&sc->aac_daemontime);
767 #else
768 	untimeout(aac_daemon, (void *)sc, sc->timeout_id);
769 #endif
770 	/* Remove the child containers */
771 	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
772 		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
773 		free(co, M_AACRAIDBUF);
774 	}
775 
776 	/* Remove the CAM SIMs */
777 	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
778 		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
779 		error = device_delete_child(dev, sim->sim_dev);
780 		if (error)
781 			return (error);
782 		free(sim, M_AACRAIDBUF);
783 	}
784 
785 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
786 		sc->aifflags |= AAC_AIFFLAGS_EXIT;
787 		wakeup(sc->aifthread);
788 		tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz);
789 	}
790 
791 	if (sc->aifflags & AAC_AIFFLAGS_RUNNING)
792 		panic("Cannot shutdown AIF thread");
793 
794 	if ((error = aacraid_shutdown(dev)))
795 		return(error);
796 
797 	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);
798 
799 	aacraid_free(sc);
800 
801 	mtx_destroy(&sc->aac_io_lock);
802 
803 	return(0);
804 }
805 
806 /*
807  * Bring the controller down to a dormant state and detach all child devices.
808  *
809  * This function is called before detach or system shutdown.
810  *
811  * Note that we can assume that the bioq on the controller is empty, as we won't
812  * allow shutdown if any device is open.
813  */
814 int
815 aacraid_shutdown(device_t dev)
816 {
817 	struct aac_softc *sc;
818 	struct aac_fib *fib;
819 	struct aac_close_command *cc;
820 
821 	sc = device_get_softc(dev);
822 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
823 
824 	sc->aac_state |= AAC_STATE_SUSPEND;
825 
826 	/*
827 	 * Send a Container shutdown followed by a HostShutdown FIB to the
828 	 * controller to convince it that we don't want to talk to it anymore.
829 	 * We've been closed and all I/O completed already.
830 	 */
831 	device_printf(sc->aac_dev, "shutting down controller...");
832 
833 	mtx_lock(&sc->aac_io_lock);
834 	aac_alloc_sync_fib(sc, &fib);
835 	cc = (struct aac_close_command *)&fib->data[0];
836 
837 	bzero(cc, sizeof(struct aac_close_command));
838 	cc->Command = VM_CloseAll;
839 	cc->ContainerId = 0xfffffffe;
840 	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
841 	    sizeof(struct aac_close_command)))
842 		printf("FAILED.\n");
843 	else
844 		printf("done\n");
845 
846 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
847 	aac_release_sync_fib(sc);
848 	mtx_unlock(&sc->aac_io_lock);
849 
850 	return(0);
851 }
852 
853 /*
854  * Bring the controller to a quiescent state, ready for system suspend.
855  */
856 int
857 aacraid_suspend(device_t dev)
858 {
859 	struct aac_softc *sc;
860 
861 	sc = device_get_softc(dev);
862 
863 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
864 	sc->aac_state |= AAC_STATE_SUSPEND;
865 
866 	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);
867 	return(0);
868 }
869 
870 /*
871  * Bring the controller back to a state ready for operation.
872  */
873 int
874 aacraid_resume(device_t dev)
875 {
876 	struct aac_softc *sc;
877 
878 	sc = device_get_softc(dev);
879 
880 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
881 	sc->aac_state &= ~AAC_STATE_SUSPEND;
882 	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
883 	return(0);
884 }
885 
886 /*
887  * Interrupt handler for the NEW_COMM_TYPE1, NEW_COMM_TYPE2 and NEW_COMM_TYPE34 interfaces.
888  */
889 void
890 aacraid_new_intr_type1(void *arg)
891 {
892 	struct aac_msix_ctx *ctx;
893 	struct aac_softc *sc;
894 	int vector_no;
895 	struct aac_command *cm;
896 	struct aac_fib *fib;
897 	u_int32_t bellbits, bellbits_shifted, index, handle;
898 	int isFastResponse, isAif, noMoreAif, mode;
899 
900 	ctx = (struct aac_msix_ctx *)arg;
901 	sc = ctx->sc;
902 	vector_no = ctx->vector_no;
903 
904 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
905 	mtx_lock(&sc->aac_io_lock);
906 
907 	if (sc->msi_enabled) {
908 		mode = AAC_INT_MODE_MSI;
909 		if (vector_no == 0) {
910 			bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
911 			if (bellbits & 0x40000)
912 				mode |= AAC_INT_MODE_AIF;
913 			else if (bellbits & 0x1000)
914 				mode |= AAC_INT_MODE_SYNC;
915 		}
916 	} else {
917 		mode = AAC_INT_MODE_INTX;
918 		bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
919 		if (bellbits & AAC_DB_RESPONSE_SENT_NS) {
920 			bellbits = AAC_DB_RESPONSE_SENT_NS;
921 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
922 		} else {
923 			bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT);
924 			AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits);
925 			if (bellbits_shifted & AAC_DB_AIF_PENDING)
926 				mode |= AAC_INT_MODE_AIF;
927 			else if (bellbits_shifted & AAC_DB_SYNC_COMMAND)
928 				mode |= AAC_INT_MODE_SYNC;
929 		}
930 		/* ODR readback, Prep #238630 */
931 		AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R);
932 	}
933 
934 	if (mode & AAC_INT_MODE_SYNC) {
935 		if (sc->aac_sync_cm) {
936 			cm = sc->aac_sync_cm;
937 			cm->cm_flags |= AAC_CMD_COMPLETED;
938 			/* is there a completion handler? */
939 			if (cm->cm_complete != NULL) {
940 				cm->cm_complete(cm);
941 			} else {
942 				/* assume that someone is sleeping on this command */
943 				wakeup(cm);
944 			}
945 			sc->flags &= ~AAC_QUEUE_FRZN;
946 			sc->aac_sync_cm = NULL;
947 		}
948 		mode = 0;
949 	}
950 
951 	if (mode & AAC_INT_MODE_AIF) {
952 		if (mode & AAC_INT_MODE_INTX) {
953 			aac_request_aif(sc);
954 			mode = 0;
955 		}
956 	}
957 
958 	if (mode) {
959 		/* handle async. status */
960 		index = sc->aac_host_rrq_idx[vector_no];
961 		for (;;) {
962 			isFastResponse = isAif = noMoreAif = 0;
963 			/* remove toggle bit (31) */
964 			handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff);
965 			/* check fast response bit (30) */
966 			if (handle & 0x40000000)
967 				isFastResponse = 1;
968 			/* check AIF bit (23) */
969 			else if (handle & 0x00800000)
970 				isAif = TRUE;
971 			handle &= 0x0000ffff;
972 			if (handle == 0)
973 				break;
974 
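			/* the handle is the command index + 1 (see aacraid_map_command_sg()) */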
975 			cm = sc->aac_commands + (handle - 1);
976 			fib = cm->cm_fib;
977 			sc->aac_rrq_outstanding[vector_no]--;
978 			if (isAif) {
979 				noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0;
980 				if (!noMoreAif)
981 					aac_handle_aif(sc, fib);
982 				aac_remove_busy(cm);
983 				aacraid_release_command(cm);
984 			} else {
985 				if (isFastResponse) {
986 					fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
987 					*((u_int32_t *)(fib->data)) = ST_OK;
988 					cm->cm_flags |= AAC_CMD_FASTRESP;
989 				}
990 				aac_remove_busy(cm);
991 				aac_unmap_command(cm);
992 				cm->cm_flags |= AAC_CMD_COMPLETED;
993 
994 				/* is there a completion handler? */
995 				if (cm->cm_complete != NULL) {
996 					cm->cm_complete(cm);
997 				} else {
998 					/* assume that someone is sleeping on this command */
999 					wakeup(cm);
1000 				}
1001 				sc->flags &= ~AAC_QUEUE_FRZN;
1002 			}
1003 
1004 			sc->aac_common->ac_host_rrq[index++] = 0;
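			/* wrap around within this vector's slice of the host RRQ */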
1005 			if (index == (vector_no + 1) * sc->aac_vector_cap)
1006 				index = vector_no * sc->aac_vector_cap;
1007 			sc->aac_host_rrq_idx[vector_no] = index;
1008 
1009 			if ((isAif && !noMoreAif) || sc->aif_pending)
1010 				aac_request_aif(sc);
1011 		}
1012 	}
1013 
1014 	if (mode & AAC_INT_MODE_AIF) {
1015 		aac_request_aif(sc);
1016 		AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT);
1017 		mode = 0;
1018 	}
1019 
1020 	/* see if we can start some more I/O */
1021 	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
1022 		aacraid_startio(sc);
1023 	mtx_unlock(&sc->aac_io_lock);
1024 }
1025 
1026 /*
1027  * Handle notification of one or more FIBs coming from the controller.
1028  */
1029 static void
1030 aac_command_thread(struct aac_softc *sc)
1031 {
1032 	int retval;
1033 
1034 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1035 
1036 	mtx_lock(&sc->aac_io_lock);
1037 	sc->aifflags = AAC_AIFFLAGS_RUNNING;
1038 
1039 	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {
1040 
1041 		retval = 0;
1042 		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
1043 			retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO,
1044 					"aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz);
1045 
1046 		/*
1047 		 * First see if any FIBs need to be allocated.  This needs
1048 		 * to be called without the driver lock because contigmalloc
1049 		 * will grab Giant, which would result in an LOR.
1050 		 */
1051 		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
1052 			aac_alloc_commands(sc);
1053 			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
1054 			aacraid_startio(sc);
1055 		}
1056 
1057 		/*
1058 		 * While we're here, check to see if any commands are stuck.
1059 		 * This is pretty low-priority, so it's ok if it doesn't
1060 		 * always fire.
1061 		 */
1062 		if (retval == EWOULDBLOCK)
1063 			aac_timeout(sc);
1064 
1065 		/* Check the hardware printf message buffer */
1066 		if (sc->aac_common->ac_printf[0] != 0)
1067 			aac_print_printf(sc);
1068 	}
1069 	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
1070 	mtx_unlock(&sc->aac_io_lock);
1071 	wakeup(sc->aac_dev);
1072 
1073 	aac_kthread_exit(0);
1074 }
1075 
1076 /*
1077  * Submit a command to the controller, return when it completes.
1078  * XXX This is very dangerous!  If the card has gone out to lunch, we could
1079  *     be stuck here forever.  At the same time, signals are not caught
1080  *     because there is a risk that a signal could wakeup the sleep before
1081  *     the card has a chance to complete the command.  Since there is no way
1082  *     to cancel a command that is in progress, we can't protect against the
1083  *     card completing a command late and spamming the command and data
1084  *     memory.  So, we are held hostage until the command completes.
1085  */
1086 int
1087 aacraid_wait_command(struct aac_command *cm)
1088 {
1089 	struct aac_softc *sc;
1090 	int error;
1091 
1092 	sc = cm->cm_sc;
1093 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1094 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1095 
1096 	/* Put the command on the ready queue and get things going */
1097 	aac_enqueue_ready(cm);
1098 	aacraid_startio(sc);
1099 	error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0);
1100 	return(error);
1101 }
1102 
1103 /*
1104  * Command Buffer Management
1105  */
1106 
1107 /*
1108  * Allocate a command.
1109  */
1110 int
1111 aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
1112 {
1113 	struct aac_command *cm;
1114 
1115 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1116 
1117 	if ((cm = aac_dequeue_free(sc)) == NULL) {
1118 		if (sc->total_fibs < sc->aac_max_fibs) {
1119 			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
1120 			wakeup(sc->aifthread);
1121 		}
1122 		return (EBUSY);
1123 	}
1124 
1125 	*cmp = cm;
1126 	return(0);
1127 }
1128 
1129 /*
1130  * Release a command back to the freelist.
1131  */
1132 void
1133 aacraid_release_command(struct aac_command *cm)
1134 {
1135 	struct aac_event *event;
1136 	struct aac_softc *sc;
1137 
1138 	sc = cm->cm_sc;
1139 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1140 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1141 
1142 	/* (re)initialize the command/FIB */
1143 	cm->cm_sgtable = NULL;
1144 	cm->cm_flags = 0;
1145 	cm->cm_complete = NULL;
1146 	cm->cm_ccb = NULL;
1147 	cm->cm_passthr_dmat = 0;
1148 	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
1149 	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
1150 	cm->cm_fib->Header.Unused = 0;
1151 	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;
1152 
1153 	/*
1154 	 * These are duplicated in aac_start to cover the case where an
1155 	 * intermediate stage may have destroyed them.  They're left
1156 	 * initialized here for debugging purposes only.
1157 	 */
1158 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1159 	cm->cm_fib->Header.Handle = 0;
1160 
1161 	aac_enqueue_free(cm);
1162 
1163 	/*
1164 	 * Dequeue all events so that there's no risk of events getting
1165 	 * stranded.
1166 	 */
1167 	while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
1168 		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
1169 		event->ev_callback(sc, event, event->ev_arg);
1170 	}
1171 }
1172 
1173 /*
1174  * Map helper for command/FIB allocation.
1175  */
1176 static void
1177 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1178 {
1179 	uint64_t	*fibphys;
1180 
1181 	fibphys = (uint64_t *)arg;
1182 
1183 	*fibphys = segs[0].ds_addr;
1184 }
1185 
1186 /*
1187  * Allocate and initialize commands/FIBs for this adapter.
1188  */
1189 static int
1190 aac_alloc_commands(struct aac_softc *sc)
1191 {
1192 	struct aac_command *cm;
1193 	struct aac_fibmap *fm;
1194 	uint64_t fibphys;
1195 	int i, error;
1196 	u_int32_t maxsize;
1197 
1198 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1199 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1200 
1201 	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
1202 		return (ENOMEM);
1203 
1204 	fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1205 	if (fm == NULL)
1206 		return (ENOMEM);
1207 
1208 	mtx_unlock(&sc->aac_io_lock);
1209 	/* allocate the FIBs in DMAable memory and load them */
1210 	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
1211 			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
1212 		device_printf(sc->aac_dev,
1213 			      "Not enough contiguous memory available.\n");
1214 		free(fm, M_AACRAIDBUF);
1215 		mtx_lock(&sc->aac_io_lock);
1216 		return (ENOMEM);
1217 	}
1218 
1219 	maxsize = sc->aac_max_fib_size + 31;
1220 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1221 		maxsize += sizeof(struct aac_fib_xporthdr);
1222 	/* Ignore errors since this doesn't bounce */
1223 	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
1224 			      sc->aac_max_fibs_alloc * maxsize,
1225 			      aac_map_command_helper, &fibphys, 0);
1226 	mtx_lock(&sc->aac_io_lock);
1227 
1228 	/* initialize constant fields in the command structure */
1229 	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize);
1230 	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
1231 		cm = sc->aac_commands + sc->total_fibs;
1232 		fm->aac_commands = cm;
1233 		cm->cm_sc = sc;
1234 		cm->cm_fib = (struct aac_fib *)
1235 			((u_int8_t *)fm->aac_fibs + i * maxsize);
1236 		cm->cm_fibphys = fibphys + i * maxsize;
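		/*
		 * Round the FIB address up to a 32-byte boundary; TYPE1
		 * interfaces additionally leave room for the per-FIB
		 * transport header in front of the aligned FIB.
		 */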
1237 		if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1238 			u_int64_t fibphys_aligned;
1239 			fibphys_aligned =
1240 				(cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31;
1241 			cm->cm_fib = (struct aac_fib *)
1242 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1243 			cm->cm_fibphys = fibphys_aligned;
1244 		} else {
1245 			u_int64_t fibphys_aligned;
1246 			fibphys_aligned = (cm->cm_fibphys + 31) & ~31;
1247 			cm->cm_fib = (struct aac_fib *)
1248 				((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys));
1249 			cm->cm_fibphys = fibphys_aligned;
1250 		}
1251 		cm->cm_index = sc->total_fibs;
1252 
1253 		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
1254 					       &cm->cm_datamap)) != 0)
1255 			break;
1256 		if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1)
1257 			aacraid_release_command(cm);
1258 		sc->total_fibs++;
1259 	}
1260 
1261 	if (i > 0) {
1262 		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
1263 		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
1264 		return (0);
1265 	}
1266 
1267 	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1268 	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1269 	free(fm, M_AACRAIDBUF);
1270 	return (ENOMEM);
1271 }
1272 
1273 /*
1274  * Free FIBs owned by this adapter.
1275  */
1276 static void
1277 aac_free_commands(struct aac_softc *sc)
1278 {
1279 	struct aac_fibmap *fm;
1280 	struct aac_command *cm;
1281 	int i;
1282 
1283 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1284 
1285 	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {
1286 
1287 		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
1288 		/*
1289 		 * We check against total_fibs to handle partially
1290 		 * allocated blocks.
1291 		 */
1292 		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
1293 			cm = fm->aac_commands + i;
1294 			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
1295 		}
1296 		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
1297 		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
1298 		free(fm, M_AACRAIDBUF);
1299 	}
1300 }
1301 
1302 /*
1303  * Command-mapping helper function - populate this command's s/g table.
1304  */
1305 void
1306 aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1307 {
1308 	struct aac_softc *sc;
1309 	struct aac_command *cm;
1310 	struct aac_fib *fib;
1311 	int i;
1312 
1313 	cm = (struct aac_command *)arg;
1314 	sc = cm->cm_sc;
1315 	fib = cm->cm_fib;
1316 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg);
1317 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
1318 
1319 	/* copy into the FIB */
1320 	if (cm->cm_sgtable != NULL) {
1321 		if (fib->Header.Command == RawIo2) {
1322 			struct aac_raw_io2 *raw;
1323 			struct aac_sge_ieee1212 *sg;
1324 			u_int32_t min_size = PAGE_SIZE, cur_size;
1325 			int conformable = TRUE;
1326 
1327 			raw = (struct aac_raw_io2 *)&fib->data[0];
1328 			sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable;
1329 			raw->sgeCnt = nseg;
1330 
1331 			for (i = 0; i < nseg; i++) {
1332 				cur_size = segs[i].ds_len;
1333 				sg[i].addrHigh = 0;
1334 				*(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr;
1335 				sg[i].length = cur_size;
1336 				sg[i].flags = 0;
1337 				if (i == 0) {
1338 					raw->sgeFirstSize = cur_size;
1339 				} else if (i == 1) {
1340 					raw->sgeNominalSize = cur_size;
1341 					min_size = cur_size;
1342 				} else if ((i+1) < nseg &&
1343 					cur_size != raw->sgeNominalSize) {
1344 					conformable = FALSE;
1345 					if (cur_size < min_size)
1346 						min_size = cur_size;
1347 				}
1348 			}
1349 
1350 			/* not conformable: evaluate required sg elements */
1351 			if (!conformable) {
1352 				int j, err_found, nseg_new = nseg;
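				/*
				 * Find the largest chunk size (in pages) that
				 * evenly divides every middle s/g entry and
				 * count the entries a conformant list needs.
				 */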
1353 				for (i = min_size / PAGE_SIZE; i >= 1; --i) {
1354 					err_found = FALSE;
1355 					nseg_new = 2;
1356 					for (j = 1; j < nseg - 1; ++j) {
1357 						if (sg[j].length % (i*PAGE_SIZE)) {
1358 							err_found = TRUE;
1359 							break;
1360 						}
1361 						nseg_new += (sg[j].length / (i*PAGE_SIZE));
1362 					}
1363 					if (!err_found)
1364 						break;
1365 				}
1366 				if (i>0 && nseg_new<=sc->aac_sg_tablesize &&
1367 					!(sc->hint_flags & 4))
1368 					nseg = aac_convert_sgraw2(sc,
1369 						raw, i, nseg, nseg_new);
1370 			} else {
1371 				raw->flags |= RIO2_SGL_CONFORMANT;
1372 			}
1373 
1374 			/* update the FIB size for the s/g count */
1375 			fib->Header.Size += nseg *
1376 				sizeof(struct aac_sge_ieee1212);
1377 
1378 		} else if (fib->Header.Command == RawIo) {
1379 			struct aac_sg_tableraw *sg;
1380 			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
1381 			sg->SgCount = nseg;
1382 			for (i = 0; i < nseg; i++) {
1383 				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
1384 				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
1385 				sg->SgEntryRaw[i].Next = 0;
1386 				sg->SgEntryRaw[i].Prev = 0;
1387 				sg->SgEntryRaw[i].Flags = 0;
1388 			}
1389 			/* update the FIB size for the s/g count */
1390 			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
1391 		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
1392 			struct aac_sg_table *sg;
1393 			sg = cm->cm_sgtable;
1394 			sg->SgCount = nseg;
1395 			for (i = 0; i < nseg; i++) {
1396 				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
1397 				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
1398 			}
1399 			/* update the FIB size for the s/g count */
1400 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
1401 		} else {
1402 			struct aac_sg_table64 *sg;
1403 			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
1404 			sg->SgCount = nseg;
1405 			for (i = 0; i < nseg; i++) {
1406 				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
1407 				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
1408 			}
1409 			/* update the FIB size for the s/g count */
1410 			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
1411 		}
1412 	}
1413 
1414 	/* Fix up the address values in the FIB.  Use the command array index
1415 	 * instead of a pointer since these fields are only 32 bits.  Shift
1416 	 * the SenderFibAddress over to make room for the fast response bit
1417 	 * and for the AIF bit
1418 	 */
1419 	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
1420 	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
1421 
1422 	/* save a pointer to the command for speedy reverse-lookup */
1423 	cm->cm_fib->Header.Handle += cm->cm_index + 1;
1424 
1425 	if (cm->cm_passthr_dmat == 0) {
1426 		if (cm->cm_flags & AAC_CMD_DATAIN)
1427 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1428 							BUS_DMASYNC_PREREAD);
1429 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1430 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1431 							BUS_DMASYNC_PREWRITE);
1432 	}
1433 
1434 	cm->cm_flags |= AAC_CMD_MAPPED;
1435 
1436 	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
1437 		u_int32_t wait = 0;
1438 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL);
1439 	} else if (cm->cm_flags & AAC_CMD_WAIT) {
1440 		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL);
1441 	} else {
1442 		int count = 10000000L;
1443 		while (AAC_SEND_COMMAND(sc, cm) != 0) {
1444 			if (--count == 0) {
1445 				aac_unmap_command(cm);
1446 				sc->flags |= AAC_QUEUE_FRZN;
1447 				aac_requeue_ready(cm);
1448 			}
1449 			DELAY(5);			/* wait 5 usec. */
1450 		}
1451 	}
1452 }
1453 
1454 
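/*
 * Convert a non-conformant RawIo2 s/g list into a conformant one by
 * splitting each middle entry into equal chunks of 'pages' pages, so that
 * all entries except the first and the last have the same length.
 */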
1455 static int
1456 aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
1457 				   int pages, int nseg, int nseg_new)
1458 {
1459 	struct aac_sge_ieee1212 *sge;
1460 	int i, j, pos;
1461 	u_int32_t addr_low;
1462 
1463 	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
1464 		M_AACRAIDBUF, M_NOWAIT|M_ZERO);
1465 	if (sge == NULL)
1466 		return nseg;
1467 
1468 	for (i = 1, pos = 1; i < nseg - 1; ++i) {
1469 		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
1470 			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
1471 			sge[pos].addrLow = addr_low;
1472 			sge[pos].addrHigh = raw->sge[i].addrHigh;
1473 			if (addr_low < raw->sge[i].addrLow)
1474 				sge[pos].addrHigh++;
1475 			sge[pos].length = pages * PAGE_SIZE;
1476 			sge[pos].flags = 0;
1477 			pos++;
1478 		}
1479 	}
1480 	sge[pos] = raw->sge[nseg-1];
1481 	for (i = 1; i < nseg_new; ++i)
1482 		raw->sge[i] = sge[i];
1483 
1484 	free(sge, M_AACRAIDBUF);
1485 	raw->sgeCnt = nseg_new;
1486 	raw->flags |= RIO2_SGL_CONFORMANT;
1487 	raw->sgeNominalSize = pages * PAGE_SIZE;
1488 	return nseg_new;
1489 }
1490 
1491 
1492 /*
1493  * Unmap a command from controller-visible space.
1494  */
1495 static void
1496 aac_unmap_command(struct aac_command *cm)
1497 {
1498 	struct aac_softc *sc;
1499 
1500 	sc = cm->cm_sc;
1501 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1502 
1503 	if (!(cm->cm_flags & AAC_CMD_MAPPED))
1504 		return;
1505 
1506 	if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) {
1507 		if (cm->cm_flags & AAC_CMD_DATAIN)
1508 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1509 					BUS_DMASYNC_POSTREAD);
1510 		if (cm->cm_flags & AAC_CMD_DATAOUT)
1511 			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
1512 					BUS_DMASYNC_POSTWRITE);
1513 
1514 		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
1515 	}
1516 	cm->cm_flags &= ~AAC_CMD_MAPPED;
1517 }
1518 
1519 /*
1520  * Hardware Interface
1521  */
1522 
1523 /*
1524  * Initialize the adapter.
1525  */
1526 static void
1527 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1528 {
1529 	struct aac_softc *sc;
1530 
1531 	sc = (struct aac_softc *)arg;
1532 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1533 
1534 	sc->aac_common_busaddr = segs[0].ds_addr;
1535 }
1536 
1537 static int
1538 aac_check_firmware(struct aac_softc *sc)
1539 {
1540 	u_int32_t code, major, minor, maxsize;
1541 	u_int32_t options = 0, atu_size = 0, status, waitCount;
1542 	time_t then;
1543 
1544 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1545 
1546 	/* check if flash update is running */
1547 	if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) {
1548 		then = time_uptime;
1549 		do {
1550 			code = AAC_GET_FWSTATUS(sc);
1551 			if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) {
1552 				device_printf(sc->aac_dev,
1553 						  "FATAL: controller not coming ready, "
1554 						   "status %x\n", code);
1555 				return(ENXIO);
1556 			}
1557 		} while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED));
1558 		/*
1559 		 * Delay 10 seconds. The firmware is doing a soft reset right now,
1560 		 * so do not read the scratch pad register during this time.
1561 		 */
1562 		waitCount = 10 * 10000;
1563 		while (waitCount) {
1564 			DELAY(100);		/* delay 100 microseconds */
1565 			waitCount--;
1566 		}
1567 	}
1568 
1569 	/*
1570 	 * Wait for the adapter to come ready.
1571 	 */
1572 	then = time_uptime;
1573 	do {
1574 		code = AAC_GET_FWSTATUS(sc);
1575 		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
1576 			device_printf(sc->aac_dev,
1577 				      "FATAL: controller not coming ready, "
1578 					   "status %x\n", code);
1579 			return(ENXIO);
1580 		}
1581 	} while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff);
1582 
1583 	/*
1584 	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
1585 	 * firmware version 1.x are not compatible with this driver.
1586 	 */
1587 	if (sc->flags & AAC_FLAGS_PERC2QC) {
1588 		if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
1589 				     NULL, NULL)) {
1590 			device_printf(sc->aac_dev,
1591 				      "Error reading firmware version\n");
1592 			return (EIO);
1593 		}
1594 
1595 		/* These numbers are stored as ASCII! */
1596 		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
1597 		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
1598 		if (major == 1) {
1599 			device_printf(sc->aac_dev,
1600 			    "Firmware version %d.%d is not supported.\n",
1601 			    major, minor);
1602 			return (EINVAL);
1603 		}
1604 	}
1605 	/*
1606 	 * Retrieve the capabilities/supported options word so we know what
1607 	 * work-arounds to enable.  Some firmware revs don't support this
1608 	 * command.
1609 	 */
1610 	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) {
1611 		if (status != AAC_SRB_STS_INVALID_REQUEST) {
1612 			device_printf(sc->aac_dev,
1613 			     "RequestAdapterInfo failed\n");
1614 			return (EIO);
1615 		}
1616 	} else {
1617 		options = AAC_GET_MAILBOX(sc, 1);
1618 		atu_size = AAC_GET_MAILBOX(sc, 2);
1619 		sc->supported_options = options;
1620 
1621 		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
1622 		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
1623 			sc->flags |= AAC_FLAGS_4GB_WINDOW;
1624 		if (options & AAC_SUPPORTED_NONDASD)
1625 			sc->flags |= AAC_FLAGS_ENABLE_CAM;
1626 		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
1627 			&& (sizeof(bus_addr_t) > 4)
1628 			&& (sc->hint_flags & 0x1)) {
1629 			device_printf(sc->aac_dev,
1630 			    "Enabling 64-bit address support\n");
1631 			sc->flags |= AAC_FLAGS_SG_64BIT;
1632 		}
1633 		if (sc->aac_if.aif_send_command) {
1634 			if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) ||
1635 				(options & AAC_SUPPORTED_NEW_COMM_TYPE4))
1636 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34;
1637 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1)
1638 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1;
1639 			else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2)
1640 				sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2;
1641 		}
1642 		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
1643 			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
1644 	}
1645 
1646 	if (!(sc->flags & AAC_FLAGS_NEW_COMM)) {
1647 		device_printf(sc->aac_dev, "Communication interface not supported!\n");
1648 		return (ENXIO);
1649 	}
1650 
1651 	if (sc->hint_flags & 2) {
1652 		device_printf(sc->aac_dev,
1653 			"Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n");
1654 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1655 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) {
1656 		device_printf(sc->aac_dev,
1657 			"Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n");
1658 		sc->flags |= AAC_FLAGS_SYNC_MODE;
1659 	}
1660 
1661 	/* Check for broken hardware that supports a lower number of commands */
1662 	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);
1663 
1664 	/* Remap mem. resource, if required */
1665 	if (atu_size > rman_get_size(sc->aac_regs_res0)) {
1666 		bus_release_resource(
1667 			sc->aac_dev, SYS_RES_MEMORY,
1668 			sc->aac_regs_rid0, sc->aac_regs_res0);
1669 		sc->aac_regs_res0 = bus_alloc_resource_anywhere(
1670 			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0,
1671 			atu_size, RF_ACTIVE);
1672 		if (sc->aac_regs_res0 == NULL) {
1673 			sc->aac_regs_res0 = bus_alloc_resource_any(
1674 				sc->aac_dev, SYS_RES_MEMORY,
1675 				&sc->aac_regs_rid0, RF_ACTIVE);
1676 			if (sc->aac_regs_res0 == NULL) {
1677 				device_printf(sc->aac_dev,
1678 					"couldn't allocate register window\n");
1679 				return (ENXIO);
1680 			}
1681 		}
1682 		sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0);
1683 		sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0);
1684 	}
1685 
1686 	/* Read preferred settings */
1687 	sc->aac_max_fib_size = sizeof(struct aac_fib);
1688 	sc->aac_max_sectors = 128;				/* 64KB */
1689 	sc->aac_max_aif = 1;
1690 	if (sc->flags & AAC_FLAGS_SG_64BIT)
1691 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1692 		 - sizeof(struct aac_blockwrite64))
1693 		 / sizeof(struct aac_sg_entry64);
1694 	else
1695 		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
1696 		 - sizeof(struct aac_blockwrite))
1697 		 / sizeof(struct aac_sg_entry);
1698 
1699 	if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) {
1700 		options = AAC_GET_MAILBOX(sc, 1);
1701 		sc->aac_max_fib_size = (options & 0xFFFF);
1702 		sc->aac_max_sectors = (options >> 16) << 1;
1703 		options = AAC_GET_MAILBOX(sc, 2);
1704 		sc->aac_sg_tablesize = (options >> 16);
1705 		options = AAC_GET_MAILBOX(sc, 3);
1706 		sc->aac_max_fibs = ((options >> 16) & 0xFFFF);
1707 		if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV)
1708 			sc->aac_max_fibs = (options & 0xFFFF);
1709 		options = AAC_GET_MAILBOX(sc, 4);
1710 		sc->aac_max_aif = (options & 0xFFFF);
1711 		options = AAC_GET_MAILBOX(sc, 5);
1712 		sc->aac_max_msix = (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0;
1713 	}
1714 
1715 	maxsize = sc->aac_max_fib_size + 31;
1716 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1)
1717 		maxsize += sizeof(struct aac_fib_xporthdr);
1718 	if (maxsize > PAGE_SIZE) {
1719 		sc->aac_max_fib_size -= (maxsize - PAGE_SIZE);
1720 		maxsize = PAGE_SIZE;
1721 	}
1722 	sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize;
1723 
1724 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1725 		sc->flags |= AAC_FLAGS_RAW_IO;
1726 		device_printf(sc->aac_dev, "Enable Raw I/O\n");
1727 	}
1728 	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
1729 	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
1730 		sc->flags |= AAC_FLAGS_LBA_64BIT;
1731 		device_printf(sc->aac_dev, "Enable 64-bit array\n");
1732 	}
1733 
1734 #ifdef AACRAID_DEBUG
1735 	aacraid_get_fw_debug_buffer(sc);
1736 #endif
1737 	return (0);
1738 }
1739 
1740 static int
1741 aac_init(struct aac_softc *sc)
1742 {
1743 	struct aac_adapter_init	*ip;
1744 	int i, error;
1745 
1746 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
1747 
1748 	/* reset rrq index */
1749 	sc->aac_fibs_pushed_no = 0;
1750 	for (i = 0; i < sc->aac_max_msix; i++)
1751 		sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap;
1752 
1753 	/*
1754 	 * Fill in the init structure.  This tells the adapter about the
1755 	 * physical location of various important shared data structures.
1756 	 */
1757 	ip = &sc->aac_common->ac_init;
1758 	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
1759 	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
1760 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
1761 		sc->flags |= AAC_FLAGS_RAW_IO;
1762 	}
1763 	ip->NoOfMSIXVectors = sc->aac_max_msix;
1764 
1765 	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
1766 					 offsetof(struct aac_common, ac_fibs);
1767 	ip->AdapterFibsVirtualAddress = 0;
1768 	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
1769 	ip->AdapterFibAlign = sizeof(struct aac_fib);
1770 
1771 	ip->PrintfBufferAddress = sc->aac_common_busaddr +
1772 				  offsetof(struct aac_common, ac_printf);
1773 	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;
1774 
1775 	/*
1776 	 * The adapter assumes that pages are 4K in size, except on some
1777 	 * broken firmware versions that do the page->byte conversion twice
1778 	 * and therefore treat this value as if it were in 16MB units (2^24).
1779 	 * Round up, since the granularity is so coarse.
1780 	 */
1781 	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
1782 	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
1783 		ip->HostPhysMemPages =
1784 		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
1785 	}
1786 	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */
1787 
1788 	ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED;
1789 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) {
1790 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6;
1791 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
1792 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1793 		device_printf(sc->aac_dev, "New comm. interface type1 enabled\n");
1794 	} else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
1795 		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7;
1796 		ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
1797 			AAC_INITFLAGS_FAST_JBOD_SUPPORTED);
1798 		device_printf(sc->aac_dev, "New comm. interface type2 enabled\n");
1799 	}
1800 	ip->MaxNumAif = sc->aac_max_aif;
1801 	ip->HostRRQ_AddrLow =
1802 		sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq);
1803 	/* always 32-bit address */
1804 	ip->HostRRQ_AddrHigh = 0;
1805 
1806 	if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
1807 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM;
1808 		ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME;
1809 		device_printf(sc->aac_dev, "Power Management enabled\n");
1810 	}
1811 
1812 	ip->MaxIoCommands = sc->aac_max_fibs;
1813 	ip->MaxIoSize = sc->aac_max_sectors << 9;
1814 	ip->MaxFibSize = sc->aac_max_fib_size;
1815 
1816 	/*
1817 	 * Do controller-type-specific initialisation
1818 	 */
1819 	AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0);
1820 
1821 	/*
1822 	 * Give the init structure to the controller.
1823 	 */
1824 	if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT,
1825 			     sc->aac_common_busaddr +
1826 			     offsetof(struct aac_common, ac_init), 0, 0, 0,
1827 			     NULL, NULL)) {
1828 		device_printf(sc->aac_dev,
1829 			      "error establishing init structure\n");
1830 		error = EIO;
1831 		goto out;
1832 	}
1833 
1834 	/*
1835 	 * Check configuration issues
1836 	 */
1837 	if ((error = aac_check_config(sc)) != 0)
1838 		goto out;
1839 
1840 	error = 0;
1841 out:
1842 	return(error);
1843 }
1844 
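/*
 * Select the interrupt delivery mode: prefer MSI-X, fall back to MSI and
 * finally to legacy INTx, then split the FIB budget evenly across the
 * vectors that were actually granted.
 */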
1845 static void
1846 aac_define_int_mode(struct aac_softc *sc)
1847 {
1848 	device_t dev;
1849 	int cap, msi_count, error = 0;
1850 	uint32_t val;
1851 
1852 	dev = sc->aac_dev;
1853 
1854 	/* max. vectors from AAC_MONKER_GETCOMMPREF */
1855 	if (sc->aac_max_msix == 0) {
1856 		sc->aac_max_msix = 1;
1857 		sc->aac_vector_cap = sc->aac_max_fibs;
1858 		return;
1859 	}
1860 
1861 	/* OS capability */
1862 	msi_count = pci_msix_count(dev);
1863 	if (msi_count > AAC_MAX_MSIX)
1864 		msi_count = AAC_MAX_MSIX;
1865 	if (msi_count > sc->aac_max_msix)
1866 		msi_count = sc->aac_max_msix;
1867 	if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) {
1868 		device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; "
1869 				   "will try MSI\n", msi_count, error);
1870 		pci_release_msi(dev);
1871 	} else {
1872 		sc->msi_enabled = TRUE;
1873 		device_printf(dev, "using MSI-X interrupts (%u vectors)\n",
1874 			msi_count);
1875 	}
1876 
1877 	if (!sc->msi_enabled) {
1878 		msi_count = 1;
1879 		if ((error = pci_alloc_msi(dev, &msi_count)) != 0) {
1880 			device_printf(dev, "alloc msi failed - err=%d; "
1881 				           "will use INTx\n", error);
1882 			pci_release_msi(dev);
1883 		} else {
1884 			sc->msi_enabled = TRUE;
1885 			device_printf(dev, "using MSI interrupts\n");
1886 		}
1887 	}
1888 
1889 	if (sc->msi_enabled) {
1890 		/* now read controller capability from PCI config. space */
1891 		cap = aac_find_pci_capability(sc, PCIY_MSIX);
1892 		val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0);
1893 		if (!(val & AAC_PCI_MSI_ENABLE)) {
1894 			pci_release_msi(dev);
1895 			sc->msi_enabled = FALSE;
1896 		}
1897 	}
1898 
1899 	if (!sc->msi_enabled) {
1900 		device_printf(dev, "using legacy interrupts\n");
1901 		sc->aac_max_msix = 1;
1902 	} else {
1903 		AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
1904 		if (sc->aac_max_msix > msi_count)
1905 			sc->aac_max_msix = msi_count;
1906 	}
1907 	sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix;
1908 
1909 	fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d",
1910 		sc->msi_enabled, sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix);
1911 }
1912 
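/*
 * Walk the PCI capability list and return the config-space offset of the
 * requested capability, or 0 if it is not present.
 */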
1913 static int
1914 aac_find_pci_capability(struct aac_softc *sc, int cap)
1915 {
1916 	device_t dev;
1917 	uint32_t status;
1918 	uint8_t ptr;
1919 
1920 	dev = sc->aac_dev;
1921 
1922 	status = pci_read_config(dev, PCIR_STATUS, 2);
1923 	if (!(status & PCIM_STATUS_CAPPRESENT))
1924 		return (0);
1925 
1926 	status = pci_read_config(dev, PCIR_HDRTYPE, 1);
1927 	switch (status & PCIM_HDRTYPE) {
1928 	case 0:
1929 	case 1:
1930 		ptr = PCIR_CAP_PTR;
1931 		break;
1932 	case 2:
1933 		ptr = PCIR_CAP_PTR_2;
1934 		break;
1935 	default:
1936 		return (0);
1937 		break;
1938 	}
1939 	ptr = pci_read_config(dev, ptr, 1);
1940 
1941 	while (ptr != 0) {
1942 		int next, val;
1943 		next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
1944 		val = pci_read_config(dev, ptr + PCICAP_ID, 1);
1945 		if (val == cap)
1946 			return (ptr);
1947 		ptr = next;
1948 	}
1949 
1950 	return (0);
1951 }
1952 
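/*
 * Allocate and hook up one IRQ resource per configured vector.  The
 * resource IDs start at 1 for MSI/MSI-X and at 0 for legacy interrupts.
 */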
1953 static int
1954 aac_setup_intr(struct aac_softc *sc)
1955 {
1956 	int i, msi_count, rid;
1957 	struct resource *res;
1958 	void *tag;
1959 
1960 	msi_count = sc->aac_max_msix;
1961 	rid = (sc->msi_enabled ? 1:0);
1962 
1963 	for (i = 0; i < msi_count; i++, rid++) {
1964 		if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid,
1965 			RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1966 			device_printf(sc->aac_dev,"can't allocate interrupt\n");
1967 			return (EINVAL);
1968 		}
1969 		sc->aac_irq_rid[i] = rid;
1970 		sc->aac_irq[i] = res;
1971 		if (aac_bus_setup_intr(sc->aac_dev, res,
1972 			INTR_MPSAFE | INTR_TYPE_BIO, NULL,
1973 			aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) {
1974 			device_printf(sc->aac_dev, "can't set up interrupt\n");
1975 			return (EINVAL);
1976 		}
1977 		sc->aac_msix[i].vector_no = i;
1978 		sc->aac_msix[i].sc = sc;
1979 		sc->aac_intr[i] = tag;
1980 	}
1981 
1982 	return (0);
1983 }
1984 
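/*
 * Query the controller's configuration status and, if it reports that an
 * automatic commit is safe, commit the configuration.
 */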
1985 static int
1986 aac_check_config(struct aac_softc *sc)
1987 {
1988 	struct aac_fib *fib;
1989 	struct aac_cnt_config *ccfg;
1990 	struct aac_cf_status_hdr *cf_shdr;
1991 	int rval;
1992 
1993 	mtx_lock(&sc->aac_io_lock);
1994 	aac_alloc_sync_fib(sc, &fib);
1995 
1996 	ccfg = (struct aac_cnt_config *)&fib->data[0];
1997 	bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
1998 	ccfg->Command = VM_ContainerConfig;
1999 	ccfg->CTCommand.command = CT_GET_CONFIG_STATUS;
2000 	ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr);
2001 
2002 	rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2003 		sizeof (struct aac_cnt_config));
2004 	cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data;
2005 	if (rval == 0 && ccfg->Command == ST_OK &&
2006 		ccfg->CTCommand.param[0] == CT_OK) {
2007 		if (cf_shdr->action <= CFACT_PAUSE) {
2008 			bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE);
2009 			ccfg->Command = VM_ContainerConfig;
2010 			ccfg->CTCommand.command = CT_COMMIT_CONFIG;
2011 
2012 			rval = aac_sync_fib(sc, ContainerCommand, 0, fib,
2013 				sizeof (struct aac_cnt_config));
2014 			if (rval == 0 && ccfg->Command == ST_OK &&
2015 				ccfg->CTCommand.param[0] == CT_OK) {
2016 				/* successful completion */
2017 				rval = 0;
2018 			} else {
2019 				/* auto commit aborted due to error(s) */
2020 				rval = -2;
2021 			}
2022 		} else {
2023 			/* auto commit aborted due to adapter indicating
2024 			   config. issues too dangerous to auto commit  */
2025 			rval = -3;
2026 		}
2027 	} else {
2028 		/* error */
2029 		rval = -1;
2030 	}
2031 
2032 	aac_release_sync_fib(sc);
2033 	mtx_unlock(&sc->aac_io_lock);
2034 	return(rval);
2035 }
2036 
2037 /*
2038  * Send a synchronous command to the controller and wait for a result.
2039  * Indicate if the controller completed the command with an error status.
2040  */
2041 int
2042 aacraid_sync_command(struct aac_softc *sc, u_int32_t command,
2043 		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
2044 		 u_int32_t *sp, u_int32_t *r1)
2045 {
2046 	time_t then;
2047 	u_int32_t status;
2048 
2049 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2050 
2051 	/* populate the mailbox */
2052 	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);
2053 
2054 	/* ensure the sync command doorbell flag is cleared */
2055 	if (!sc->msi_enabled)
2056 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2057 
2058 	/* then set it to signal the adapter */
2059 	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);
2060 
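	/*
	 * For AAC_MONKER_SYNCFIB requests issued with a caller-supplied
	 * status word initialised to zero, the command is fired without
	 * polling for completion; everything else spins on the doorbell.
	 */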
2061 	if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) {
2062 		/* spin waiting for the command to complete */
2063 		then = time_uptime;
2064 		do {
2065 			if (time_uptime > (then + AAC_SYNC_TIMEOUT)) {
2066 				fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
2067 				return(EIO);
2068 			}
2069 		} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));
2070 
2071 		/* clear the completion flag */
2072 		AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);
2073 
2074 		/* get the command status */
2075 		status = AAC_GET_MAILBOX(sc, 0);
2076 		if (sp != NULL)
2077 			*sp = status;
2078 
2079 		/* return parameter */
2080 		if (r1 != NULL)
2081 			*r1 = AAC_GET_MAILBOX(sc, 1);
2082 
2083 		if (status != AAC_SRB_STS_SUCCESS)
2084 			return (-1);
2085 	}
2086 	return(0);
2087 }
2088 
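/*
 * Build a synchronous FIB in the adapter-visible common area and hand it
 * to the controller with AAC_MONKER_SYNCFIB, waiting for the reply.
 */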
2089 static int
2090 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
2091 		 struct aac_fib *fib, u_int16_t datasize)
2092 {
2093 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2094 	mtx_assert(&sc->aac_io_lock, MA_OWNED);
2095 
2096 	if (datasize > AAC_FIB_DATASIZE)
2097 		return(EINVAL);
2098 
2099 	/*
2100 	 * Set up the sync FIB
2101 	 */
2102 	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
2103 				AAC_FIBSTATE_INITIALISED |
2104 				AAC_FIBSTATE_EMPTY;
2105 	fib->Header.XferState |= xferstate;
2106 	fib->Header.Command = command;
2107 	fib->Header.StructType = AAC_FIBTYPE_TFIB;
2108 	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
2109 	fib->Header.SenderSize = sizeof(struct aac_fib);
2110 	fib->Header.SenderFibAddress = 0;	/* Not needed */
2111 	fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr +
2112 		offsetof(struct aac_common, ac_sync_fib);
2113 
2114 	/*
2115 	 * Give the FIB to the controller, wait for a response.
2116 	 */
2117 	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
2118 		fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) {
2119 		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
2120 		return(EIO);
2121 	}
2122 
2123 	return (0);
2124 }
2125 
2126 /*
2127  * Check for commands that have been outstanding for a suspiciously long time,
2128  * and complain about them.
2129  */
2130 static void
2131 aac_timeout(struct aac_softc *sc)
2132 {
2133 	struct aac_command *cm;
2134 	time_t deadline;
2135 	int timedout;
2136 
2137 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2138 	/*
2139 	 * Traverse the busy command list, bitch about late commands once
2140 	 * only.
2141 	 */
2142 	timedout = 0;
2143 	deadline = time_uptime - AAC_CMD_TIMEOUT;
2144 	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
2145 		if (cm->cm_timestamp < deadline) {
2146 			device_printf(sc->aac_dev,
2147 				      "COMMAND %p TIMEOUT AFTER %d SECONDS\n",
2148 				      cm, (int)(time_uptime-cm->cm_timestamp));
2149 			AAC_PRINT_FIB(sc, cm->cm_fib);
2150 			timedout++;
2151 		}
2152 	}
2153 
2154 	if (timedout)
2155 		aac_reset_adapter(sc);
2156 	aacraid_print_queues(sc);
2157 }
2158 
2159 /*
2160  * Interface Function Vectors
2161  */
2162 
2163 /*
2164  * Read the current firmware status word.
2165  */
2166 static int
2167 aac_src_get_fwstatus(struct aac_softc *sc)
2168 {
2169 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2170 
2171 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR));
2172 }
2173 
2174 /*
2175  * Notify the controller of a change in a given queue
2176  */
2177 static void
2178 aac_src_qnotify(struct aac_softc *sc, int qbit)
2179 {
2180 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2181 
2182 	AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT);
2183 }
2184 
2185 /*
2186  * Get the interrupt reason bits
2187  */
2188 static int
2189 aac_src_get_istatus(struct aac_softc *sc)
2190 {
2191 	int val;
2192 
2193 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2194 
2195 	if (sc->msi_enabled) {
2196 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI);
2197 		if (val & AAC_MSI_SYNC_STATUS)
2198 			val = AAC_DB_SYNC_COMMAND;
2199 		else
2200 			val = 0;
2201 	} else {
2202 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT;
2203 	}
2204 	return(val);
2205 }
2206 
2207 /*
2208  * Clear some interrupt reason bits
2209  */
2210 static void
2211 aac_src_clear_istatus(struct aac_softc *sc, int mask)
2212 {
2213 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2214 
2215 	if (sc->msi_enabled) {
2216 		if (mask == AAC_DB_SYNC_COMMAND)
2217 			AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT);
2218 	} else {
2219 		AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT);
2220 	}
2221 }
2222 
2223 /*
2224  * Populate the mailbox and set the command word
2225  */
2226 static void
2227 aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2228 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2229 {
2230 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2231 
2232 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command);
2233 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0);
2234 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1);
2235 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2);
2236 	AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3);
2237 }
2238 
2239 static void
2240 aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
2241 		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
2242 {
2243 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2244 
2245 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command);
2246 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0);
2247 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1);
2248 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2);
2249 	AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3);
2250 }
2251 
2252 /*
2253  * Fetch the immediate command status word
2254  */
2255 static int
2256 aac_src_get_mailbox(struct aac_softc *sc, int mb)
2257 {
2258 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2259 
2260 	return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4)));
2261 }
2262 
2263 static int
2264 aac_srcv_get_mailbox(struct aac_softc *sc, int mb)
2265 {
2266 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2267 
2268 	return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4)));
2269 }
2270 
2271 /*
2272  * Set/clear interrupt masks
2273  */
2274 static void
2275 aac_src_access_devreg(struct aac_softc *sc, int mode)
2276 {
2277 	u_int32_t val;
2278 
2279 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2280 
2281 	switch (mode) {
2282 	case AAC_ENABLE_INTERRUPT:
2283 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2284 			(sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX :
2285 				           AAC_INT_ENABLE_TYPE1_INTX));
2286 		break;
2287 
2288 	case AAC_DISABLE_INTERRUPT:
2289 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL);
2290 		break;
2291 
2292 	case AAC_ENABLE_MSIX:
2293 		/* set bit 6 */
2294 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2295 		val |= 0x40;
2296 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2297 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2298 		/* unmask int. */
2299 		val = PMC_ALL_INTERRUPT_BITS;
2300 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2301 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2302 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2303 			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
2304 		break;
2305 
2306 	case AAC_DISABLE_MSIX:
2307 		/* reset bit 6 */
2308 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2309 		val &= ~0x40;
2310 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2311 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2312 		break;
2313 
2314 	case AAC_CLEAR_AIF_BIT:
2315 		/* set bit 5 */
2316 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2317 		val |= 0x20;
2318 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2319 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2320 		break;
2321 
2322 	case AAC_CLEAR_SYNC_BIT:
2323 		/* set bit 4 */
2324 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2325 		val |= 0x10;
2326 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2327 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2328 		break;
2329 
2330 	case AAC_ENABLE_INTX:
2331 		/* set bit 7 */
2332 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2333 		val |= 0x80;
2334 		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val);
2335 		AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR);
2336 		/* unmask int. */
2337 		val = PMC_ALL_INTERRUPT_BITS;
2338 		AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val);
2339 		val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR);
2340 		AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR,
2341 			val & (~(PMC_GLOBAL_INT_BIT2)));
2342 		break;
2343 
2344 	default:
2345 		break;
2346 	}
2347 }
2348 
2349 /*
2350  * New comm. interface: Send command functions
2351  */
2352 static int
2353 aac_src_send_command(struct aac_softc *sc, struct aac_command *cm)
2354 {
2355 	struct aac_fib_xporthdr *pFibX;
2356 	u_int32_t fibsize, high_addr;
2357 	u_int64_t address;
2358 
2359 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)");
2360 
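	/*
	 * With MSI-X enabled, spread non-AIF commands across the available
	 * vectors: pick the next vector (round-robin) that still has room
	 * under its per-vector cap and encode it in the upper 16 bits of
	 * the FIB handle so the response comes back on that vector.
	 */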
2361 	if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest &&
2362 		sc->aac_max_msix > 1) {
2363 		u_int16_t vector_no, first_choice = 0xffff;
2364 
2365 		vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix;
2366 		do {
2367 			vector_no += 1;
2368 			if (vector_no == sc->aac_max_msix)
2369 				vector_no = 1;
2370 			if (sc->aac_rrq_outstanding[vector_no] <
2371 				sc->aac_vector_cap)
2372 				break;
2373 			if (0xffff == first_choice)
2374 				first_choice = vector_no;
2375 			else if (vector_no == first_choice)
2376 				break;
2377 		} while (1);
2378 		if (vector_no == first_choice)
2379 			vector_no = 0;
2380 		sc->aac_rrq_outstanding[vector_no]++;
2381 		if (sc->aac_fibs_pushed_no == 0xffffffff)
2382 			sc->aac_fibs_pushed_no = 0;
2383 		else
2384 			sc->aac_fibs_pushed_no++;
2385 
2386 		cm->cm_fib->Header.Handle += (vector_no << 16);
2387 	}
2388 
2389 	if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) {
2390 		/* Calculate the amount to the fibsize bits */
2391 		fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1;
2392 		/* Fill new FIB header */
2393 		address = cm->cm_fibphys;
2394 		high_addr = (u_int32_t)(address >> 32);
2395 		if (high_addr == 0L) {
2396 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2;
2397 			cm->cm_fib->Header.u.TimeStamp = 0L;
2398 		} else {
2399 			cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64;
2400 			cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr;
2401 		}
2402 		cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address;
2403 	} else {
2404 		/* Calculate the amount to the fibsize bits */
2405 		fibsize = (sizeof(struct aac_fib_xporthdr) +
2406 		   cm->cm_fib->Header.Size + 127) / 128 - 1;
2407 		/* Fill XPORT header */
2408 		pFibX = (struct aac_fib_xporthdr *)
2409 			((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr));
2410 		pFibX->Handle = cm->cm_fib->Header.Handle;
2411 		pFibX->HostAddress = cm->cm_fibphys;
2412 		pFibX->Size = cm->cm_fib->Header.Size;
2413 		address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr);
2414 		high_addr = (u_int32_t)(address >> 32);
2415 	}
2416 
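	/*
	 * The size code shares the low-order bits of the FIB address written
	 * to the inbound queue register, so clamp it to 31.
	 */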
2417 	if (fibsize > 31)
2418 		fibsize = 31;
2419 	aac_enqueue_busy(cm);
2420 	if (high_addr) {
2421 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr);
2422 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize);
2423 	} else {
2424 		AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize);
2425 	}
2426 	return 0;
2427 }
2428 
2429 /*
2430  * New comm. interface: get, set outbound queue index
2431  */
2432 static int
2433 aac_src_get_outb_queue(struct aac_softc *sc)
2434 {
2435 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2436 
2437 	return(-1);
2438 }
2439 
2440 static void
2441 aac_src_set_outb_queue(struct aac_softc *sc, int index)
2442 {
2443 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2444 }
2445 
2446 /*
2447  * Debugging and Diagnostics
2448  */
2449 
2450 /*
2451  * Print some information about the controller.
2452  */
2453 static void
2454 aac_describe_controller(struct aac_softc *sc)
2455 {
2456 	struct aac_fib *fib;
2457 	struct aac_adapter_info	*info;
2458 	char *adapter_type = "Adaptec RAID controller";
2459 
2460 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2461 
2462 	mtx_lock(&sc->aac_io_lock);
2463 	aac_alloc_sync_fib(sc, &fib);
2464 
2465 	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
2466 		fib->data[0] = 0;
2467 		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
2468 			device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n");
2469 		else {
2470 			struct aac_supplement_adapter_info *supp_info;
2471 
2472 			supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]);
2473 			adapter_type = (char *)supp_info->AdapterTypeText;
2474 			sc->aac_feature_bits = supp_info->FeatureBits;
2475 			sc->aac_support_opt2 = supp_info->SupportedOptions2;
2476 		}
2477 	}
2478 	device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n",
2479 		adapter_type,
2480 		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
2481 		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);
2482 
2483 	fib->data[0] = 0;
2484 	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
2485 		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
2486 		aac_release_sync_fib(sc);
2487 		mtx_unlock(&sc->aac_io_lock);
2488 		return;
2489 	}
2490 
2491 	/* save the kernel revision structure for later use */
2492 	info = (struct aac_adapter_info *)&fib->data[0];
2493 	sc->aac_revision = info->KernelRevision;
2494 
2495 	if (bootverbose) {
2496 		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
2497 		    "(%dMB cache, %dMB execution), %s\n",
2498 		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
2499 		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
2500 		    info->BufferMem / (1024 * 1024),
2501 		    info->ExecutionMem / (1024 * 1024),
2502 		    aac_describe_code(aac_battery_platform,
2503 		    info->batteryPlatform));
2504 
2505 		device_printf(sc->aac_dev,
2506 		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
2507 		    info->KernelRevision.external.comp.major,
2508 		    info->KernelRevision.external.comp.minor,
2509 		    info->KernelRevision.external.comp.dash,
2510 		    info->KernelRevision.buildNumber,
2511 		    (u_int32_t)(info->SerialNumber & 0xffffff));
2512 
2513 		device_printf(sc->aac_dev, "Supported Options=%b\n",
2514 			      sc->supported_options,
2515 			      "\20"
2516 			      "\1SNAPSHOT"
2517 			      "\2CLUSTERS"
2518 			      "\3WCACHE"
2519 			      "\4DATA64"
2520 			      "\5HOSTTIME"
2521 			      "\6RAID50"
2522 			      "\7WINDOW4GB"
2523 			      "\10SCSIUPGD"
2524 			      "\11SOFTERR"
2525 			      "\12NORECOND"
2526 			      "\13SGMAP64"
2527 			      "\14ALARM"
2528 			      "\15NONDASD"
2529 			      "\16SCSIMGT"
2530 			      "\17RAIDSCSI"
2531 			      "\21ADPTINFO"
2532 			      "\22NEWCOMM"
2533 			      "\23ARRAY64BIT"
2534 			      "\24HEATSENSOR");
2535 	}
2536 
2537 	aac_release_sync_fib(sc);
2538 	mtx_unlock(&sc->aac_io_lock);
2539 }
2540 
2541 /*
2542  * Look up a text description of a numeric error code and return a pointer to
2543  * same.
2544  */
2545 static char *
2546 aac_describe_code(struct aac_code_lookup *table, u_int32_t code)
2547 {
2548 	int i;
2549 
2550 	for (i = 0; table[i].string != NULL; i++)
2551 		if (table[i].code == code)
2552 			return(table[i].string);
2553 	return(table[i + 1].string);
2554 }
2555 
2556 /*
2557  * Management Interface
2558  */
2559 
2560 static int
2561 aac_open(struct cdev *dev, int flags, int fmt, struct thread *td)
2562 {
2563 	struct aac_softc *sc;
2564 
2565 	sc = dev->si_drv1;
2566 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2567 #if __FreeBSD_version >= 702000
2568 	device_busy(sc->aac_dev);
2569 	devfs_set_cdevpriv(sc, aac_cdevpriv_dtor);
2570 #endif
2571 	return 0;
2572 }
2573 
2574 static int
2575 aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
2576 {
2577 	union aac_statrequest *as;
2578 	struct aac_softc *sc;
2579 	int error = 0;
2580 
2581 	as = (union aac_statrequest *)arg;
2582 	sc = dev->si_drv1;
2583 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2584 
2585 	switch (cmd) {
2586 	case AACIO_STATS:
2587 		switch (as->as_item) {
2588 		case AACQ_FREE:
2589 		case AACQ_READY:
2590 		case AACQ_BUSY:
2591 			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
2592 			      sizeof(struct aac_qstat));
2593 			break;
2594 		default:
2595 			error = ENOENT;
2596 			break;
2597 		}
2598 		break;
2599 
2600 	case FSACTL_SENDFIB:
2601 	case FSACTL_SEND_LARGE_FIB:
2602 		arg = *(caddr_t*)arg;
2603 	case FSACTL_LNX_SENDFIB:
2604 	case FSACTL_LNX_SEND_LARGE_FIB:
2605 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
2606 		error = aac_ioctl_sendfib(sc, arg);
2607 		break;
2608 	case FSACTL_SEND_RAW_SRB:
2609 		arg = *(caddr_t*)arg;
2610 	case FSACTL_LNX_SEND_RAW_SRB:
2611 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
2612 		error = aac_ioctl_send_raw_srb(sc, arg);
2613 		break;
2614 	case FSACTL_AIF_THREAD:
2615 	case FSACTL_LNX_AIF_THREAD:
2616 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
2617 		error = EINVAL;
2618 		break;
2619 	case FSACTL_OPEN_GET_ADAPTER_FIB:
2620 		arg = *(caddr_t*)arg;
2621 	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
2622 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
2623 		error = aac_open_aif(sc, arg);
2624 		break;
2625 	case FSACTL_GET_NEXT_ADAPTER_FIB:
2626 		arg = *(caddr_t*)arg;
2627 	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
2628 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
2629 		error = aac_getnext_aif(sc, arg);
2630 		break;
2631 	case FSACTL_CLOSE_GET_ADAPTER_FIB:
2632 		arg = *(caddr_t*)arg;
2633 	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
2634 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
2635 		error = aac_close_aif(sc, arg);
2636 		break;
2637 	case FSACTL_MINIPORT_REV_CHECK:
2638 		arg = *(caddr_t*)arg;
2639 	case FSACTL_LNX_MINIPORT_REV_CHECK:
2640 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
2641 		error = aac_rev_check(sc, arg);
2642 		break;
2643 	case FSACTL_QUERY_DISK:
2644 		arg = *(caddr_t*)arg;
2645 	case FSACTL_LNX_QUERY_DISK:
2646 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
2647 		error = aac_query_disk(sc, arg);
2648 		break;
2649 	case FSACTL_DELETE_DISK:
2650 	case FSACTL_LNX_DELETE_DISK:
2651 		/*
2652 		 * We don't trust userland to tell us when to delete a
2653 		 * container; instead we rely on an AIF coming from the
2654 		 * controller.
2655 		 */
2656 		error = 0;
2657 		break;
2658 	case FSACTL_GET_PCI_INFO:
2659 		arg = *(caddr_t*)arg;
2660 	case FSACTL_LNX_GET_PCI_INFO:
2661 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
2662 		error = aac_get_pci_info(sc, arg);
2663 		break;
2664 	case FSACTL_GET_FEATURES:
2665 		arg = *(caddr_t*)arg;
2666 	case FSACTL_LNX_GET_FEATURES:
2667 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
2668 		error = aac_supported_features(sc, arg);
2669 		break;
2670 	default:
2671 		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
2672 		error = EINVAL;
2673 		break;
2674 	}
2675 	return(error);
2676 }
2677 
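/*
 * poll(2) support: report the device readable as soon as any open AIF
 * context still has queued entries to deliver.
 */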
2678 static int
2679 aac_poll(struct cdev *dev, int poll_events, struct thread *td)
2680 {
2681 	struct aac_softc *sc;
2682 	struct aac_fib_context *ctx;
2683 	int revents;
2684 
2685 	sc = dev->si_drv1;
2686 	revents = 0;
2687 
2688 	mtx_lock(&sc->aac_io_lock);
2689 	if ((poll_events & (POLLRDNORM | POLLIN)) != 0) {
2690 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
2691 			if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) {
2692 				revents |= poll_events & (POLLIN | POLLRDNORM);
2693 				break;
2694 			}
2695 		}
2696 	}
2697 	mtx_unlock(&sc->aac_io_lock);
2698 
2699 	if (revents == 0) {
2700 		if (poll_events & (POLLIN | POLLRDNORM))
2701 			selrecord(td, &sc->rcv_select);
2702 	}
2703 
2704 	return (revents);
2705 }
2706 
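/*
 * Event callback used by the ioctl paths while waiting for a free command;
 * if allocation still fails the event re-queues itself, otherwise the
 * sleeping caller is woken up.
 */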
2707 static void
2708 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
2709 {
2710 
2711 	switch (event->ev_type) {
2712 	case AAC_EVENT_CMFREE:
2713 		mtx_assert(&sc->aac_io_lock, MA_OWNED);
2714 		if (aacraid_alloc_command(sc, (struct aac_command **)arg)) {
2715 			aacraid_add_event(sc, event);
2716 			return;
2717 		}
2718 		free(event, M_AACRAIDBUF);
2719 		wakeup(arg);
2720 		break;
2721 	default:
2722 		break;
2723 	}
2724 }
2725 
2726 /*
2727  * Send a FIB supplied from userspace
2728  */
2729 static int
2730 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
2731 {
2732 	struct aac_command *cm;
2733 	int size, error;
2734 
2735 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2736 
2737 	cm = NULL;
2738 
2739 	/*
2740 	 * Get a command
2741 	 */
2742 	mtx_lock(&sc->aac_io_lock);
2743 	if (aacraid_alloc_command(sc, &cm)) {
2744 		struct aac_event *event;
2745 
2746 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2747 		    M_NOWAIT | M_ZERO);
2748 		if (event == NULL) {
2749 			error = EBUSY;
2750 			mtx_unlock(&sc->aac_io_lock);
2751 			goto out;
2752 		}
2753 		event->ev_type = AAC_EVENT_CMFREE;
2754 		event->ev_callback = aac_ioctl_event;
2755 		event->ev_arg = &cm;
2756 		aacraid_add_event(sc, event);
2757 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0);
2758 	}
2759 	mtx_unlock(&sc->aac_io_lock);
2760 
2761 	/*
2762 	 * Fetch the FIB header, then re-copy to get data as well.
2763 	 */
2764 	if ((error = copyin(ufib, cm->cm_fib,
2765 			    sizeof(struct aac_fib_header))) != 0)
2766 		goto out;
2767 	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
2768 	if (size > sc->aac_max_fib_size) {
2769 		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
2770 			      size, sc->aac_max_fib_size);
2771 		size = sc->aac_max_fib_size;
2772 	}
2773 	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
2774 		goto out;
2775 	cm->cm_fib->Header.Size = size;
2776 	cm->cm_timestamp = time_uptime;
2777 	cm->cm_datalen = 0;
2778 
2779 	/*
2780 	 * Pass the FIB to the controller, wait for it to complete.
2781 	 */
2782 	mtx_lock(&sc->aac_io_lock);
2783 	error = aacraid_wait_command(cm);
2784 	mtx_unlock(&sc->aac_io_lock);
2785 	if (error != 0) {
2786 		device_printf(sc->aac_dev,
2787 			      "aacraid_wait_command return %d\n", error);
2788 		goto out;
2789 	}
2790 
2791 	/*
2792 	 * Copy the FIB and data back out to the caller.
2793 	 */
2794 	size = cm->cm_fib->Header.Size;
2795 	if (size > sc->aac_max_fib_size) {
2796 		device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n",
2797 			      size, sc->aac_max_fib_size);
2798 		size = sc->aac_max_fib_size;
2799 	}
2800 	error = copyout(cm->cm_fib, ufib, size);
2801 
2802 out:
2803 	if (cm != NULL) {
2804 		mtx_lock(&sc->aac_io_lock);
2805 		aacraid_release_command(cm);
2806 		mtx_unlock(&sc->aac_io_lock);
2807 	}
2808 	return(error);
2809 }
2810 
2811 /*
2812  * Send a passthrough FIB supplied from userspace
2813  */
2814 static int
2815 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg)
2816 {
2817 	struct aac_command *cm;
2818 	struct aac_fib *fib;
2819 	struct aac_srb *srbcmd;
2820 	struct aac_srb *user_srb = (struct aac_srb *)arg;
2821 	void *user_reply;
2822 	int error, transfer_data = 0;
2823 	bus_dmamap_t orig_map = 0;
2824 	u_int32_t fibsize = 0;
2825 	u_int64_t srb_sg_address;
2826 	u_int32_t srb_sg_bytecount;
2827 
2828 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
2829 
2830 	cm = NULL;
2831 
2832 	mtx_lock(&sc->aac_io_lock);
2833 	if (aacraid_alloc_command(sc, &cm)) {
2834 		struct aac_event *event;
2835 
2836 		event = malloc(sizeof(struct aac_event), M_AACRAIDBUF,
2837 		    M_NOWAIT | M_ZERO);
2838 		if (event == NULL) {
2839 			error = EBUSY;
2840 			mtx_unlock(&sc->aac_io_lock);
2841 			goto out;
2842 		}
2843 		event->ev_type = AAC_EVENT_CMFREE;
2844 		event->ev_callback = aac_ioctl_event;
2845 		event->ev_arg = &cm;
2846 		aacraid_add_event(sc, event);
2847 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0);
2848 	}
2849 	mtx_unlock(&sc->aac_io_lock);
2850 
2851 	cm->cm_data = NULL;
2852 	/* save original dma map */
2853 	orig_map = cm->cm_datamap;
2854 
2855 	fib = cm->cm_fib;
2856 	srbcmd = (struct aac_srb *)fib->data;
2857 	if ((error = copyin((void *)&user_srb->data_len, &fibsize,
2858 		sizeof (u_int32_t))) != 0)
2859 		goto out;
2860 	if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) {
2861 		error = EINVAL;
2862 		goto out;
2863 	}
2864 	if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0)
2865 		goto out;
2866 
2867 	srbcmd->function = 0;		/* SRBF_ExecuteScsi */
2868 	srbcmd->retry_limit = 0;	/* obsolete */
2869 
2870 	/* only one sg element from userspace supported */
2871 	if (srbcmd->sg_map.SgCount > 1) {
2872 		error = EINVAL;
2873 		goto out;
2874 	}
2875 	/* check fibsize */
2876 	if (fibsize == (sizeof(struct aac_srb) +
2877 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
2878 		struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry;
2879 		struct aac_sg_entry sg;
2880 
2881 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2882 			goto out;
2883 
2884 		srb_sg_bytecount = sg.SgByteCount;
2885 		srb_sg_address = (u_int64_t)sg.SgAddress;
2886 	} else if (fibsize == (sizeof(struct aac_srb) +
2887 		srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
2888 #ifdef __LP64__
2889 		struct aac_sg_entry64 *sgp =
2890 			(struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
2891 		struct aac_sg_entry64 sg;
2892 
2893 		if ((error = copyin(sgp, &sg, sizeof(sg))) != 0)
2894 			goto out;
2895 
2896 		srb_sg_bytecount = sg.SgByteCount;
2897 		srb_sg_address = sg.SgAddress;
2898 		if (srb_sg_address > 0xffffffffull &&
2899 			!(sc->flags & AAC_FLAGS_SG_64BIT))
2900 #endif
2901 		{
2902 			error = EINVAL;
2903 			goto out;
2904 		}
2905 	} else {
2906 		error = EINVAL;
2907 		goto out;
2908 	}
2909 	user_reply = (char *)arg + fibsize;
2910 	srbcmd->data_len = srb_sg_bytecount;
2911 	if (srbcmd->sg_map.SgCount == 1)
2912 		transfer_data = 1;
2913 
2914 	if (transfer_data) {
2915 		/*
2916 		 * Create DMA tag for the passthr. data buffer and allocate it.
2917 		 */
2918 		if (bus_dma_tag_create(sc->aac_parent_dmat, 	/* parent */
2919 			1, 0,			/* algnmnt, boundary */
2920 			(sc->flags & AAC_FLAGS_SG_64BIT) ?
2921 			BUS_SPACE_MAXADDR_32BIT :
2922 			0x7fffffff,		/* lowaddr */
2923 			BUS_SPACE_MAXADDR, 	/* highaddr */
2924 			NULL, NULL, 		/* filter, filterarg */
2925 			srb_sg_bytecount, 	/* size */
2926 			sc->aac_sg_tablesize,	/* nsegments */
2927 			srb_sg_bytecount, 	/* maxsegsize */
2928 			0,			/* flags */
2929 			NULL, NULL,		/* No locking needed */
2930 			&cm->cm_passthr_dmat)) {
2931 			error = ENOMEM;
2932 			goto out;
2933 		}
2934 		if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data,
2935 			BUS_DMA_NOWAIT, &cm->cm_datamap)) {
2936 			error = ENOMEM;
2937 			goto out;
2938 		}
2939 		/* fill some cm variables */
2940 		cm->cm_datalen = srb_sg_bytecount;
2941 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
2942 			cm->cm_flags |= AAC_CMD_DATAIN;
2943 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT)
2944 			cm->cm_flags |= AAC_CMD_DATAOUT;
2945 
2946 		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
2947 			if ((error = copyin((void *)(uintptr_t)srb_sg_address,
2948 				cm->cm_data, cm->cm_datalen)) != 0)
2949 				goto out;
2950 			/* is a sync needed for memory from bus_dmamem_alloc()? */
2951 			bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2952 				BUS_DMASYNC_PREWRITE);
2953 		}
2954 	}
2955 
2956 	/* build the FIB */
2957 	fib->Header.Size = sizeof(struct aac_fib_header) +
2958 		sizeof(struct aac_srb);
2959 	fib->Header.XferState =
2960 		AAC_FIBSTATE_HOSTOWNED   |
2961 		AAC_FIBSTATE_INITIALISED |
2962 		AAC_FIBSTATE_EMPTY	 |
2963 		AAC_FIBSTATE_FROMHOST	 |
2964 		AAC_FIBSTATE_REXPECTED   |
2965 		AAC_FIBSTATE_NORM	 |
2966 		AAC_FIBSTATE_ASYNC;
2967 
2968 	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ?
2969 		ScsiPortCommandU64 : ScsiPortCommand;
2970 	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
2971 
2972 	/* send command */
2973 	if (transfer_data) {
2974 		bus_dmamap_load(cm->cm_passthr_dmat,
2975 			cm->cm_datamap, cm->cm_data,
2976 			cm->cm_datalen,
2977 			aacraid_map_command_sg, cm, 0);
2978 	} else {
2979 		aacraid_map_command_sg(cm, NULL, 0, 0);
2980 	}
2981 
2982 	/* wait for completion */
2983 	mtx_lock(&sc->aac_io_lock);
2984 	while (!(cm->cm_flags & AAC_CMD_COMPLETED))
2985 		msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0);
2986 	mtx_unlock(&sc->aac_io_lock);
2987 
2988 	/* copy data */
2989 	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) {
2990 		if ((error = copyout(cm->cm_data,
2991 			(void *)(uintptr_t)srb_sg_address,
2992 			cm->cm_datalen)) != 0)
2993 			goto out;
2994 		/* is a sync needed for memory from bus_dmamem_alloc()? */
2995 		bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap,
2996 				BUS_DMASYNC_POSTREAD);
2997 	}
2998 
2999 	/* status */
3000 	error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response));
3001 
3002 out:
3003 	if (cm && cm->cm_data) {
3004 		if (transfer_data)
3005 			bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap);
3006 		bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap);
3007 		cm->cm_datamap = orig_map;
3008 	}
3009 	if (cm && cm->cm_passthr_dmat)
3010 		bus_dma_tag_destroy(cm->cm_passthr_dmat);
3011 	if (cm) {
3012 		mtx_lock(&sc->aac_io_lock);
3013 		aacraid_release_command(cm);
3014 		mtx_unlock(&sc->aac_io_lock);
3015 	}
3016 	return(error);
3017 }
3018 
3019 /*
3020  * Request an AIF from the controller (new comm. type1)
3021  */
3022 static void
3023 aac_request_aif(struct aac_softc *sc)
3024 {
3025 	struct aac_command *cm;
3026 	struct aac_fib *fib;
3027 
3028 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3029 
3030 	if (aacraid_alloc_command(sc, &cm)) {
3031 		sc->aif_pending = 1;
3032 		return;
3033 	}
3034 	sc->aif_pending = 0;
3035 
3036 	/* build the FIB */
3037 	fib = cm->cm_fib;
3038 	fib->Header.Size = sizeof(struct aac_fib);
3039 	fib->Header.XferState =
3040 		AAC_FIBSTATE_HOSTOWNED   |
3041 		AAC_FIBSTATE_INITIALISED |
3042 		AAC_FIBSTATE_EMPTY	 |
3043 		AAC_FIBSTATE_FROMHOST	 |
3044 		AAC_FIBSTATE_REXPECTED   |
3045 		AAC_FIBSTATE_NORM	 |
3046 		AAC_FIBSTATE_ASYNC;
3047 	/* set AIF marker */
3048 	fib->Header.Handle = 0x00800000;
3049 	fib->Header.Command = AifRequest;
3050 	((struct aac_aif_command *)fib->data)->command = AifReqEvent;
3051 
3052 	aacraid_map_command_sg(cm, NULL, 0, 0);
3053 }
3054 
3055 
3056 #if __FreeBSD_version >= 702000
3057 /*
3058  * cdevpriv interface private destructor.
3059  */
3060 static void
3061 aac_cdevpriv_dtor(void *arg)
3062 {
3063 	struct aac_softc *sc;
3064 
3065 	sc = arg;
3066 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3067 	mtx_lock(&Giant);
3068 	device_unbusy(sc->aac_dev);
3069 	mtx_unlock(&Giant);
3070 }
3071 #else
3072 static int
3073 aac_close(struct cdev *dev, int flags, int fmt, struct thread *td)
3074 {
3075 	struct aac_softc *sc;
3076 
3077 	sc = dev->si_drv1;
3078 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3079 	return 0;
3080 }
3081 #endif
3082 
3083 /*
3084  * Handle an AIF sent to us by the controller; queue it for later reference.
3085  * If the queue fills up, then drop the older entries.
3086  */
3087 static void
3088 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
3089 {
3090 	struct aac_aif_command *aif;
3091 	struct aac_container *co, *co_next;
3092 	struct aac_fib_context *ctx;
3093 	struct aac_fib *sync_fib;
3094 	struct aac_mntinforesp mir;
3095 	int next, current, found;
3096 	int count = 0, changed = 0, i = 0;
3097 	u_int32_t channel, uid;
3098 
3099 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3100 
3101 	aif = (struct aac_aif_command*)&fib->data[0];
3102 	aacraid_print_aif(sc, aif);
3103 
3104 	/* Is it an event that we should care about? */
3105 	switch (aif->command) {
3106 	case AifCmdEventNotify:
3107 		switch (aif->data.EN.type) {
3108 		case AifEnAddContainer:
3109 		case AifEnDeleteContainer:
3110 			/*
3111 			 * A container was added or deleted, but the message
3112 			 * doesn't tell us anything else!  Re-enumerate the
3113 			 * containers and sort things out.
3114 			 */
3115 			aac_alloc_sync_fib(sc, &sync_fib);
3116 			do {
3117 				/*
3118 				 * Ask the controller for its containers one at
3119 				 * a time.
3120 				 * XXX What if the controller's list changes
3121 				 * midway through this enumeration?
3122 				 * XXX This should be done async.
3123 				 */
3124 				if (aac_get_container_info(sc, sync_fib, i,
3125 					&mir, &uid) != 0)
3126 					continue;
3127 				if (i == 0)
3128 					count = mir.MntRespCount;
3129 				/*
3130 				 * Check the container against our list.
3131 				 * co->co_found was already set to 0 in a
3132 				 * previous run.
3133 				 */
3134 				if ((mir.Status == ST_OK) &&
3135 				    (mir.MntTable[0].VolType != CT_NONE)) {
3136 					found = 0;
3137 					TAILQ_FOREACH(co,
3138 						      &sc->aac_container_tqh,
3139 						      co_link) {
3140 						if (co->co_mntobj.ObjectId ==
3141 						    mir.MntTable[0].ObjectId) {
3142 							co->co_found = 1;
3143 							found = 1;
3144 							break;
3145 						}
3146 					}
3147 					/*
3148 					 * If the container matched, continue
3149 					 * in the list.
3150 					 */
3151 					if (found) {
3152 						i++;
3153 						continue;
3154 					}
3155 
3156 					/*
3157 					 * This is a new container.  Do all the
3158 					 * appropriate things to set it up.
3159 					 */
3160 					aac_add_container(sc, &mir, 1, uid);
3161 					changed = 1;
3162 				}
3163 				i++;
3164 			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
3165 			aac_release_sync_fib(sc);
3166 
3167 			/*
3168 			 * Go through our list of containers and see which ones
3169 			 * were not marked 'found'.  Since the controller didn't
3170 			 * list them they must have been deleted.  Do the
3171 			 * appropriate steps to destroy the device.  Also reset
3172 			 * the co->co_found field.
3173 			 */
3174 			co = TAILQ_FIRST(&sc->aac_container_tqh);
3175 			while (co != NULL) {
3176 				if (co->co_found == 0) {
3177 					co_next = TAILQ_NEXT(co, co_link);
3178 					TAILQ_REMOVE(&sc->aac_container_tqh, co,
3179 						     co_link);
3180 					free(co, M_AACRAIDBUF);
3181 					changed = 1;
3182 					co = co_next;
3183 				} else {
3184 					co->co_found = 0;
3185 					co = TAILQ_NEXT(co, co_link);
3186 				}
3187 			}
3188 
3189 			/* Attach the newly created containers */
3190 			if (changed) {
3191 				if (sc->cam_rescan_cb != NULL)
3192 					sc->cam_rescan_cb(sc, 0,
3193 				    	AAC_CAM_TARGET_WILDCARD);
3194 			}
3195 
3196 			break;
3197 
3198 		case AifEnEnclosureManagement:
3199 			switch (aif->data.EN.data.EEE.eventType) {
3200 			case AIF_EM_DRIVE_INSERTION:
3201 			case AIF_EM_DRIVE_REMOVAL:
3202 				channel = aif->data.EN.data.EEE.unitID;
3203 				if (sc->cam_rescan_cb != NULL)
3204 					sc->cam_rescan_cb(sc,
3205 					    ((channel>>24) & 0xF) + 1,
3206 					    (channel & 0xFFFF));
3207 				break;
3208 			}
3209 			break;
3210 
3211 		case AifEnAddJBOD:
3212 		case AifEnDeleteJBOD:
3213 		case AifRawDeviceRemove:
3214 			channel = aif->data.EN.data.ECE.container;
3215 			if (sc->cam_rescan_cb != NULL)
3216 				sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1,
3217 				    AAC_CAM_TARGET_WILDCARD);
3218 			break;
3219 
3220 		default:
3221 			break;
3222 		}
3223 
3224 	default:
3225 		break;
3226 	}
3227 
3228 	/* Copy the AIF data to the AIF queue for ioctl retrieval */
3229 	current = sc->aifq_idx;
3230 	next = (current + 1) % AAC_AIFQ_LENGTH;
3231 	if (next == 0)
3232 		sc->aifq_filled = 1;
3233 	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
3234 	/* modify AIF contexts */
3235 	if (sc->aifq_filled) {
3236 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3237 			if (next == ctx->ctx_idx)
3238 				ctx->ctx_wrap = 1;
3239 			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
3240 				ctx->ctx_idx = next;
3241 		}
3242 	}
3243 	sc->aifq_idx = next;
3244 	/* On the off chance that someone is sleeping for an aif... */
3245 	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
3246 		wakeup(sc->aac_aifq);
3247 	/* Wakeup any poll()ers */
3248 	selwakeuppri(&sc->rcv_select, PRIBIO);
3249 
3250 	return;
3251 }
3252 
3253 /*
3254  * Return the Revision of the driver to userspace and check to see if the
3255  * userspace app is possibly compatible.  This is extremely bogus since
3256  * our driver doesn't follow Adaptec's versioning system.  Cheat by just
3257  * returning what the card reported.
3258  */
3259 static int
3260 aac_rev_check(struct aac_softc *sc, caddr_t udata)
3261 {
3262 	struct aac_rev_check rev_check;
3263 	struct aac_rev_check_resp rev_check_resp;
3264 	int error = 0;
3265 
3266 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3267 
3268 	/*
3269 	 * Copyin the revision struct from userspace
3270 	 */
3271 	if ((error = copyin(udata, (caddr_t)&rev_check,
3272 			sizeof(struct aac_rev_check))) != 0) {
3273 		return error;
3274 	}
3275 
3276 	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
3277 	      rev_check.callingRevision.buildNumber);
3278 
3279 	/*
3280 	 * Doctor up the response struct.
3281 	 */
3282 	rev_check_resp.possiblyCompatible = 1;
3283 	rev_check_resp.adapterSWRevision.external.comp.major =
3284 	    AAC_DRIVER_MAJOR_VERSION;
3285 	rev_check_resp.adapterSWRevision.external.comp.minor =
3286 	    AAC_DRIVER_MINOR_VERSION;
3287 	rev_check_resp.adapterSWRevision.external.comp.type =
3288 	    AAC_DRIVER_TYPE;
3289 	rev_check_resp.adapterSWRevision.external.comp.dash =
3290 	    AAC_DRIVER_BUGFIX_LEVEL;
3291 	rev_check_resp.adapterSWRevision.buildNumber =
3292 	    AAC_DRIVER_BUILD;
3293 
3294 	return(copyout((caddr_t)&rev_check_resp, udata,
3295 			sizeof(struct aac_rev_check_resp)));
3296 }
3297 
3298 /*
3299  * Pass the fib context to the caller
3300  */
3301 static int
3302 aac_open_aif(struct aac_softc *sc, caddr_t arg)
3303 {
3304 	struct aac_fib_context *fibctx, *ctx;
3305 	int error = 0;
3306 
3307 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3308 
3309 	fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO);
3310 	if (fibctx == NULL)
3311 		return (ENOMEM);
3312 
3313 	mtx_lock(&sc->aac_io_lock);
3314 	/* all elements are already 0, add to queue */
3315 	if (sc->fibctx == NULL)
3316 		sc->fibctx = fibctx;
3317 	else {
3318 		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
3319 			;
3320 		ctx->next = fibctx;
3321 		fibctx->prev = ctx;
3322 	}
3323 
3324 	/* derive a unique handle from the context pointer; bump it on collision */
3325 	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
3326 	ctx = sc->fibctx;
3327 	while (ctx != fibctx) {
3328 		if (ctx->unique == fibctx->unique) {
3329 			fibctx->unique++;
3330 			ctx = sc->fibctx;
3331 		} else {
3332 			ctx = ctx->next;
3333 		}
3334 	}
3335 
3336 	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
3337 	mtx_unlock(&sc->aac_io_lock);
3338 	if (error)
3339 		aac_close_aif(sc, (caddr_t)ctx);
3340 	return error;
3341 }
3342 
3343 /*
3344  * Close the caller's fib context
3345  */
3346 static int
3347 aac_close_aif(struct aac_softc *sc, caddr_t arg)
3348 {
3349 	struct aac_fib_context *ctx;
3350 
3351 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3352 
3353 	mtx_lock(&sc->aac_io_lock);
3354 	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3355 		if (ctx->unique == *(uint32_t *)&arg) {
3356 			if (ctx == sc->fibctx)
3357 				sc->fibctx = NULL;
3358 			else {
3359 				ctx->prev->next = ctx->next;
3360 				if (ctx->next)
3361 					ctx->next->prev = ctx->prev;
3362 			}
3363 			break;
3364 		}
3365 	}
3366 	if (ctx)
3367 		free(ctx, M_AACRAIDBUF);
3368 
3369 	mtx_unlock(&sc->aac_io_lock);
3370 	return 0;
3371 }
3372 
3373 /*
3374  * Pass the caller the next AIF in their queue
3375  */
3376 static int
3377 aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
3378 {
3379 	struct get_adapter_fib_ioctl agf;
3380 	struct aac_fib_context *ctx;
3381 	int error;
3382 
3383 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3384 
3385 	mtx_lock(&sc->aac_io_lock);
3386 #ifdef COMPAT_FREEBSD32
3387 	if (SV_CURPROC_FLAG(SV_ILP32)) {
3388 		struct get_adapter_fib_ioctl32 agf32;
3389 		error = copyin(arg, &agf32, sizeof(agf32));
3390 		if (error == 0) {
3391 			agf.AdapterFibContext = agf32.AdapterFibContext;
3392 			agf.Wait = agf32.Wait;
3393 			agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib;
3394 		}
3395 	} else
3396 #endif
3397 		error = copyin(arg, &agf, sizeof(agf));
3398 	if (error == 0) {
3399 		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
3400 			if (agf.AdapterFibContext == ctx->unique)
3401 				break;
3402 		}
3403 		if (!ctx) {
3404 			mtx_unlock(&sc->aac_io_lock);
3405 			return (EFAULT);
3406 		}
3407 
3408 		error = aac_return_aif(sc, ctx, agf.AifFib);
3409 		if (error == EAGAIN && agf.Wait) {
3410 			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
3411 			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
3412 			while (error == EAGAIN) {
3413 				mtx_unlock(&sc->aac_io_lock);
3414 				error = tsleep(sc->aac_aifq, PRIBIO |
3415 					       PCATCH, "aacaif", 0);
3416 				mtx_lock(&sc->aac_io_lock);
3417 				if (error == 0)
3418 					error = aac_return_aif(sc, ctx, agf.AifFib);
3419 			}
3420 			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
3421 		}
3422 	}
3423 	mtx_unlock(&sc->aac_io_lock);
3424 	return(error);
3425 }
3426 
3427 /*
3428  * Hand the next AIF off the top of the queue out to userspace.
3429  */
3430 static int
3431 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
3432 {
3433 	int current, error;
3434 
3435 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3436 
3437 	current = ctx->ctx_idx;
3438 	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
3439 		/* empty */
3440 		return (EAGAIN);
3441 	}
3442 	error =
3443 		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
3444 	if (error)
3445 		device_printf(sc->aac_dev,
3446 		    "aac_return_aif: copyout returned %d\n", error);
3447 	else {
3448 		ctx->ctx_wrap = 0;
3449 		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
3450 	}
3451 	return(error);
3452 }
3453 
3454 static int
3455 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
3456 {
3457 	struct aac_pci_info {
3458 		u_int32_t bus;
3459 		u_int32_t slot;
3460 	} pciinf;
3461 	int error;
3462 
3463 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3464 
3465 	pciinf.bus = pci_get_bus(sc->aac_dev);
3466 	pciinf.slot = pci_get_slot(sc->aac_dev);
3467 
3468 	error = copyout((caddr_t)&pciinf, uptr,
3469 			sizeof(struct aac_pci_info));
3470 
3471 	return (error);
3472 }
3473 
3474 static int
3475 aac_supported_features(struct aac_softc *sc, caddr_t uptr)
3476 {
3477 	struct aac_features f;
3478 	int error;
3479 
3480 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3481 
3482 	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
3483 		return (error);
3484 
3485 	/*
3486 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3487 	 * ALL zero in the featuresState, the driver will return the current
3488 	 * state of all the supported features, the data field will not be
3489 	 * valid.
3490 	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
3491 	 * a specific bit set in the featuresState, the driver will return the
3492 	 * current state of that specific feature and any data associated with
3493 	 * it in the data field, or perform whatever action the data field
3494 	 * indicates.
3495 	 */
3496 	if (f.feat.fValue == 0) {
3497 		f.feat.fBits.largeLBA =
3498 		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3499 		f.feat.fBits.JBODSupport = 1;
3500 		/* TODO: In the future, add other features state here as well */
3501 	} else {
3502 		if (f.feat.fBits.largeLBA)
3503 			f.feat.fBits.largeLBA =
3504 			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
3505 		/* TODO: Add other features state and data in the future */
3506 	}
3507 
3508 	error = copyout(&f, uptr, sizeof (f));
3509 	return (error);
3510 }
3511 
3512 /*
3513  * Give the userland some information about the container.  The AAC arch
3514  * expects the driver to be a SCSI passthrough type driver, so it expects
3515  * the containers to have b:t:l numbers.  Fake it.
3516  */
3517 static int
3518 aac_query_disk(struct aac_softc *sc, caddr_t uptr)
3519 {
3520 	struct aac_query_disk query_disk;
3521 	struct aac_container *co;
3522 	int error, id;
3523 
3524 	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
3525 
3526 	mtx_lock(&sc->aac_io_lock);
3527 	error = copyin(uptr, (caddr_t)&query_disk,
3528 		       sizeof(struct aac_query_disk));
3529 	if (error) {
3530 		mtx_unlock(&sc->aac_io_lock);
3531 		return (error);
3532 	}
3533 
3534 	id = query_disk.ContainerNumber;
3535 	if (id == -1) {
3536 		mtx_unlock(&sc->aac_io_lock);
3537 		return (EINVAL);
3538 	}
3539 
3540 	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
3541 		if (co->co_mntobj.ObjectId == id)
3542 			break;
3543 	}
3544 
3545 	if (co == NULL) {
3546 		query_disk.Valid = 0;
3547 		query_disk.Locked = 0;
3548 		query_disk.Deleted = 1;		/* XXX is this right? */
3549 	} else {
3550 		query_disk.Valid = 1;
3551 		query_disk.Locked = 1;
3552 		query_disk.Deleted = 0;
3553 		query_disk.Bus = device_get_unit(sc->aac_dev);
3554 		query_disk.Target = 0;
3555 		query_disk.Lun = 0;
3556 		query_disk.UnMapped = 0;
3557 	}
3558 
3559 	error = copyout((caddr_t)&query_disk, uptr,
3560 			sizeof(struct aac_query_disk));
3561 
3562 	mtx_unlock(&sc->aac_io_lock);
3563 	return (error);
3564 }
3565 
3566 static void
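/*
 * Create a child "aacraidp" device representing the virtual container bus
 * and describe it with an aac_sim structure so the CAM attachment can
 * expose the RAID containers.
 */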
3567 aac_container_bus(struct aac_softc *sc)
3568 {
3569 	struct aac_sim *sim;
3570 	device_t child;
3571 
3572 	sim = (struct aac_sim *)malloc(sizeof(struct aac_sim),
3573 		M_AACRAIDBUF, M_NOWAIT | M_ZERO);
3574 	if (sim == NULL) {
3575 		device_printf(sc->aac_dev,
3576 	    	"No memory to add container bus\n");
3577 		panic("Out of memory?!");
3578 	}
3579 	child = device_add_child(sc->aac_dev, "aacraidp", -1);
3580 	if (child == NULL) {
3581 		device_printf(sc->aac_dev,
3582 	    	"device_add_child failed for container bus\n");
3583 		free(sim, M_AACRAIDBUF);
3584 		panic("Out of memory?!");
3585 	}
3586 
3587 	sim->TargetsPerBus = AAC_MAX_CONTAINERS;
3588 	sim->BusNumber = 0;
3589 	sim->BusType = CONTAINER_BUS;
3590 	sim->InitiatorBusId = -1;
3591 	sim->aac_sc = sc;
3592 	sim->sim_dev = child;
3593 	sim->aac_cam = NULL;
3594 
3595 	device_set_ivars(child, sim);
3596 	device_set_desc(child, "Container Bus");
3597 	TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link);
3598 	/*
3599 	device_set_desc(child, aac_describe_code(aac_container_types,
3600 			mir->MntTable[0].VolType));
3601 	*/
3602 	bus_generic_attach(sc->aac_dev);
3603 }
3604 
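/*
 * Ask the firmware which physical (pass-through) SCSI buses are present
 * and register an "aacraidp" child device for each valid one.
 */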
static void
aac_get_bus_info(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_ctcfg *c_cmd;
	struct aac_ctcfg_resp *c_resp;
	struct aac_vmioctl *vmi;
	struct aac_vmi_businf_resp *vmi_resp;
	struct aac_getbusinf businfo;
	struct aac_sim *caminf;
	device_t child;
	int i, error;

	mtx_lock(&sc->aac_io_lock);
	aac_alloc_sync_fib(sc, &fib);
	c_cmd = (struct aac_ctcfg *)&fib->data[0];
	bzero(c_cmd, sizeof(struct aac_ctcfg));

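	/* First get the SCSI method id with a CT_GET_SCSI_METHOD request. */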
	c_cmd->Command = VM_ContainerConfig;
	c_cmd->cmd = CT_GET_SCSI_METHOD;
	c_cmd->param = 0;

	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_ctcfg));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending "
		    "VM_ContainerConfig command\n", error);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
	if (c_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
		    c_resp->Status);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	sc->scsi_method_id = c_resp->param;

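	/*
	 * Now issue a VM_Ioctl GetBusInfo request, using the method id we
	 * just obtained, to learn how many buses the adapter exposes.
	 */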
	vmi = (struct aac_vmioctl *)&fib->data[0];
	bzero(vmi, sizeof(struct aac_vmioctl));

	vmi->Command = VM_Ioctl;
	vmi->ObjType = FT_DRIVE;
	vmi->MethId = sc->scsi_method_id;
	vmi->ObjId = 0;
	vmi->IoctlCmd = GetBusInfo;

	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_vmi_businf_resp));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
		    error);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
	if (vmi_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
		    vmi_resp->Status);
		aac_release_sync_fib(sc);
		mtx_unlock(&sc->aac_io_lock);
		return;
	}

	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
	aac_release_sync_fib(sc);
	mtx_unlock(&sc->aac_io_lock);

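	/*
	 * Register a SIM for each valid pass-through bus.  Bus numbers start
	 * at 1 because bus 0 is taken by the container bus.
	 */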
	for (i = 0; i < businfo.BusCount; i++) {
		if (businfo.BusValid[i] != AAC_BUS_VALID)
			continue;

		caminf = (struct aac_sim *)malloc(sizeof(struct aac_sim),
		    M_AACRAIDBUF, M_NOWAIT | M_ZERO);
		if (caminf == NULL) {
			device_printf(sc->aac_dev,
			    "No memory to add passthrough bus %d\n", i);
			break;
		}

		child = device_add_child(sc->aac_dev, "aacraidp", -1);
		if (child == NULL) {
			device_printf(sc->aac_dev,
			    "device_add_child failed for passthrough bus %d\n",
			    i);
			free(caminf, M_AACRAIDBUF);
			break;
		}

		caminf->TargetsPerBus = businfo.TargetsPerBus;
		caminf->BusNumber = i + 1;
		caminf->BusType = PASSTHROUGH_BUS;
		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
		caminf->aac_sc = sc;
		caminf->sim_dev = child;
		caminf->aac_cam = NULL;

		device_set_ivars(child, caminf);
		device_set_desc(child, "SCSI Passthrough Bus");
		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);
	}
}

/*
 * Check to see if the kernel is up and running. If we are in a
 * BlinkLED state, return the BlinkLED code.
 */
static u_int32_t
aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled)
{
	u_int32_t ret;

	ret = AAC_GET_FWSTATUS(sc);

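	/*
	 * A healthy adapter reports AAC_UP_AND_RUNNING.  On an adapter
	 * kernel panic the BlinkLED code is carried in bits 23:16 of the
	 * firmware status.
	 */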
	if (ret & AAC_UP_AND_RUNNING)
		ret = 0;
	else if ((ret & AAC_KERNEL_PANIC) && bled)
		*bled = (ret >> 16) & 0xff;

	return (ret);
}

/*
 * After an IOP reset the card must be re-initialized as if it were coming
 * up from a cold boot, and the driver is responsible for any I/O that was
 * outstanding to the adapter at the time of the reset.  The init code is
 * kept modular so that it can be called from multiple places.
 */
static int
aac_reset_adapter(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_pause_command *pc;
	u_int32_t status, reset_mask, waitCount, max_msix_orig;
	int msi_enabled_orig;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	mtx_assert(&sc->aac_io_lock, MA_OWNED);

	if (sc->aac_state & AAC_STATE_RESET) {
		device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n");
		return (EINVAL);
	}
	sc->aac_state |= AAC_STATE_RESET;

	/* disable interrupt */
	AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT);

	/*
	 * Abort all pending commands:
	 * a) on the controller
	 */
	while ((cm = aac_dequeue_busy(sc)) != NULL) {
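		/*
		 * Flag the command as having been interrupted by the reset
		 * before completing it or waking up its owner.
		 */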
		cm->cm_flags |= AAC_CMD_RESET;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* b) in the waiting queues */
	while ((cm = aac_dequeue_ready(sc)) != NULL) {
		cm->cm_flags |= AAC_CMD_RESET;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* If the adapter is still healthy, shut it down to flush the drives */
	if (aac_check_adapter_health(sc, NULL) == 0) {
		mtx_unlock(&sc->aac_io_lock);
		(void) aacraid_shutdown(sc->aac_dev);
		mtx_lock(&sc->aac_io_lock);
	}

	/* execute IOP reset */
	if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) {
		AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST);

		/*
		 * Wait 5 seconds before accessing the MU again:
		 * 5 * 10000 iterations * 100us = 5,000,000us = 5s.
		 */
		waitCount = 5 * 10000;
		while (waitCount) {
			DELAY(100);			/* delay 100 microseconds */
			waitCount--;
		}
	} else if ((aacraid_sync_command(sc,
		AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) {
		/* call IOP_RESET for older firmware */
		if ((aacraid_sync_command(sc,
			AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) {

			if (status == AAC_SRB_STS_INVALID_REQUEST)
				device_printf(sc->aac_dev, "IOP_RESET not supported\n");
			else
				/* probably timeout */
				device_printf(sc->aac_dev, "IOP_RESET failed\n");

			/* unwind aac_shutdown() */
			aac_alloc_sync_fib(sc, &fib);
			pc = (struct aac_pause_command *)&fib->data[0];
			pc->Command = VM_ContainerConfig;
			pc->Type = CT_PAUSE_IO;
			pc->Timeout = 1;
			pc->Min = 1;
			pc->NoRescan = 1;

			(void) aac_sync_fib(sc, ContainerCommand, 0, fib,
				sizeof (struct aac_pause_command));
			aac_release_sync_fib(sc);

			goto finish;
		}
	} else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) {
		AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask);
		/*
		 * Wait 5 seconds before accessing the doorbell again:
		 * 5 * 10000 iterations * 100us = 5,000,000us = 5s.
		 */
		waitCount = 5 * 10000;
		while (waitCount) {
			DELAY(100);		/* delay 100 microseconds */
			waitCount--;
		}
	}

	/*
	 * Initialize the adapter.
	 */
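	/*
	 * MSI is turned off while aac_check_firmware() re-reads the adapter
	 * capabilities; the saved MSI/MSI-X settings are restored below
	 * before aac_init() brings the adapter back up.
	 */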
	max_msix_orig = sc->aac_max_msix;
	msi_enabled_orig = sc->msi_enabled;
	sc->msi_enabled = FALSE;
	if (aac_check_firmware(sc) != 0)
		goto finish;
	if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) {
		sc->aac_max_msix = max_msix_orig;
		if (msi_enabled_orig) {
			sc->msi_enabled = msi_enabled_orig;
			AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX);
		}
		mtx_unlock(&sc->aac_io_lock);
		aac_init(sc);
		mtx_lock(&sc->aac_io_lock);
	}

finish:
	sc->aac_state &= ~AAC_STATE_RESET;
	AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT);
	aacraid_startio(sc);
	return (0);
}
