1 /*- 2 * Copyright (c) 2000 Michael Smith 3 * Copyright (c) 2001 Scott Long 4 * Copyright (c) 2000 BSDi 5 * Copyright (c) 2001 Adaptec, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD: head/sys/dev/aac/aac.c 260044 2013-12-29 17:37:32Z marius $ 30 */ 31 32 /* 33 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters. 
34 */ 35 #define AAC_DRIVERNAME "aac" 36 37 #include "opt_aac.h" 38 39 /* #include <stddef.h> */ 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/malloc.h> 43 #include <sys/kernel.h> 44 #include <sys/kthread.h> 45 #include <sys/poll.h> 46 47 #include <sys/bus.h> 48 #include <sys/conf.h> 49 #include <sys/signalvar.h> 50 #include <sys/time.h> 51 #include <sys/eventhandler.h> 52 #include <sys/rman.h> 53 54 #include <sys/bus_dma.h> 55 #include <sys/device.h> 56 #include <sys/mplock2.h> 57 58 #include <bus/pci/pcireg.h> 59 #include <bus/pci/pcivar.h> 60 61 #include <dev/raid/aac/aacreg.h> 62 #include <dev/raid/aac/aac_ioctl.h> 63 #include <dev/raid/aac/aacvar.h> 64 #include <dev/raid/aac/aac_tables.h> 65 66 static void aac_startup(void *arg); 67 static void aac_add_container(struct aac_softc *sc, 68 struct aac_mntinforesp *mir, int f); 69 static void aac_get_bus_info(struct aac_softc *sc); 70 static void aac_daemon(void *arg); 71 72 /* Command Processing */ 73 static void aac_timeout(struct aac_softc *sc); 74 static void aac_complete(void *context, int pending); 75 static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp); 76 static void aac_bio_complete(struct aac_command *cm); 77 static int aac_wait_command(struct aac_command *cm); 78 static void aac_command_thread(void *arg); 79 80 /* Command Buffer Management */ 81 static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, 82 int nseg, int error); 83 static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, 84 int nseg, int error); 85 static int aac_alloc_commands(struct aac_softc *sc); 86 static void aac_free_commands(struct aac_softc *sc); 87 static void aac_unmap_command(struct aac_command *cm); 88 89 /* Hardware Interface */ 90 static int aac_alloc(struct aac_softc *sc); 91 static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, 92 int error); 93 static int aac_check_firmware(struct aac_softc *sc); 94 static int aac_init(struct aac_softc 
*sc); 95 static int aac_sync_command(struct aac_softc *sc, u_int32_t command, 96 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, 97 u_int32_t arg3, u_int32_t *sp); 98 static int aac_setup_intr(struct aac_softc *sc); 99 static int aac_enqueue_fib(struct aac_softc *sc, int queue, 100 struct aac_command *cm); 101 static int aac_dequeue_fib(struct aac_softc *sc, int queue, 102 u_int32_t *fib_size, struct aac_fib **fib_addr); 103 static int aac_enqueue_response(struct aac_softc *sc, int queue, 104 struct aac_fib *fib); 105 106 /* StrongARM interface */ 107 static int aac_sa_get_fwstatus(struct aac_softc *sc); 108 static void aac_sa_qnotify(struct aac_softc *sc, int qbit); 109 static int aac_sa_get_istatus(struct aac_softc *sc); 110 static void aac_sa_clear_istatus(struct aac_softc *sc, int mask); 111 static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 112 u_int32_t arg0, u_int32_t arg1, 113 u_int32_t arg2, u_int32_t arg3); 114 static int aac_sa_get_mailbox(struct aac_softc *sc, int mb); 115 static void aac_sa_set_interrupts(struct aac_softc *sc, int enable); 116 117 const struct aac_interface aac_sa_interface = { 118 aac_sa_get_fwstatus, 119 aac_sa_qnotify, 120 aac_sa_get_istatus, 121 aac_sa_clear_istatus, 122 aac_sa_set_mailbox, 123 aac_sa_get_mailbox, 124 aac_sa_set_interrupts, 125 NULL, NULL, NULL 126 }; 127 128 /* i960Rx interface */ 129 static int aac_rx_get_fwstatus(struct aac_softc *sc); 130 static void aac_rx_qnotify(struct aac_softc *sc, int qbit); 131 static int aac_rx_get_istatus(struct aac_softc *sc); 132 static void aac_rx_clear_istatus(struct aac_softc *sc, int mask); 133 static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 134 u_int32_t arg0, u_int32_t arg1, 135 u_int32_t arg2, u_int32_t arg3); 136 static int aac_rx_get_mailbox(struct aac_softc *sc, int mb); 137 static void aac_rx_set_interrupts(struct aac_softc *sc, int enable); 138 static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm); 139 
static int aac_rx_get_outb_queue(struct aac_softc *sc); 140 static void aac_rx_set_outb_queue(struct aac_softc *sc, int index); 141 142 const struct aac_interface aac_rx_interface = { 143 aac_rx_get_fwstatus, 144 aac_rx_qnotify, 145 aac_rx_get_istatus, 146 aac_rx_clear_istatus, 147 aac_rx_set_mailbox, 148 aac_rx_get_mailbox, 149 aac_rx_set_interrupts, 150 aac_rx_send_command, 151 aac_rx_get_outb_queue, 152 aac_rx_set_outb_queue 153 }; 154 155 /* Rocket/MIPS interface */ 156 static int aac_rkt_get_fwstatus(struct aac_softc *sc); 157 static void aac_rkt_qnotify(struct aac_softc *sc, int qbit); 158 static int aac_rkt_get_istatus(struct aac_softc *sc); 159 static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask); 160 static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, 161 u_int32_t arg0, u_int32_t arg1, 162 u_int32_t arg2, u_int32_t arg3); 163 static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb); 164 static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable); 165 static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm); 166 static int aac_rkt_get_outb_queue(struct aac_softc *sc); 167 static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index); 168 169 const struct aac_interface aac_rkt_interface = { 170 aac_rkt_get_fwstatus, 171 aac_rkt_qnotify, 172 aac_rkt_get_istatus, 173 aac_rkt_clear_istatus, 174 aac_rkt_set_mailbox, 175 aac_rkt_get_mailbox, 176 aac_rkt_set_interrupts, 177 aac_rkt_send_command, 178 aac_rkt_get_outb_queue, 179 aac_rkt_set_outb_queue 180 }; 181 182 /* Debugging and Diagnostics */ 183 static void aac_describe_controller(struct aac_softc *sc); 184 static const char *aac_describe_code(const struct aac_code_lookup *table, 185 u_int32_t code); 186 187 /* Management Interface */ 188 static d_open_t aac_open; 189 static d_close_t aac_close; 190 static d_ioctl_t aac_ioctl; 191 static d_kqfilter_t aac_kqfilter; 192 static void aac_filter_detach(struct knote *kn); 193 static int 
aac_filter_read(struct knote *kn, long hint);
static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void	aac_handle_aif(struct aac_softc *sc,
			       struct aac_fib *fib);
static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_return_aif(struct aac_softc *sc,
			       struct aac_fib_context *ctx, caddr_t uptr);
static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void	aac_ioctl_event(struct aac_softc *sc,
				struct aac_event *event, void *arg);
static struct aac_mntinforesp *
	aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

/* Character-device entry points for the management interface (/dev/aac%d). */
static struct dev_ops aac_ops = {
	{ "aac", 0, 0 },
	.d_open =	aac_open,
	.d_close =	aac_close,
	.d_ioctl =	aac_ioctl,
	.d_kqfilter =	aac_kqfilter
};

static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc.
 *
 * Returns 0 on success or an errno on failure.  On failure, resources
 * allocated up to the failure point are NOT released here; the bus detach
 * path is expected to perform the cleanup.
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task (run from taskqueue_swi by
	 * aac_filter() when the controller signals RESPONSE_READY).
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	lockinit(&sc->aac_aifq_lock, "AAC AIF lock", 0, LK_CANRECURSE);
	lockinit(&sc->aac_io_lock, "AAC I/O lock", 0, LK_CANRECURSE);
	lockinit(&sc->aac_container_lock, "AAC container lock", 0, LK_CANRECURSE);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout. */
	callout_init_mp(&sc->aac_daemontime);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Add sysctls.
	 */
	SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev),
		       SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)),
		       OID_AUTO, "firmware_build", CTLFLAG_RD,
		       &sc->aac_revision.buildNumber, 0,
		       "firmware build number");

	/*
	 * Register to probe our containers later (aac_startup runs once
	 * interrupts are available, then disestablishes the hook).
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	sc->aac_ich.ich_desc = "aac";
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
			      "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_ops, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aac%d", unit);
	/* legacy names used by older management tools */
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kthread_create(aac_command_thread, sc,
			   &sc->aifthread, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	/* first clock-sync tick after ~60s; aac_daemon reschedules itself */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	return(0);
}

/*
 * Periodic callout: push the current host time to the controller via a
 * SendHostTime FIB, then reschedule for 30 minutes later.  Bails out (and
 * does not reschedule) if the callout has been stopped or re-armed.
 */
static void
aac_daemon(void *arg)
{
	struct timeval tv;
	struct aac_softc *sc;
	struct aac_fib *fib;

	sc = arg;
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);

	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0) {
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}
	getmicrotime(&tv);
	aac_alloc_sync_fib(sc, &fib);
	*(uint32_t *)fib->data = tv.tv_sec;
	aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
	aac_release_sync_fib(sc);
	callout_reset(&sc->aac_daemontime, 30 * 60 * hz, aac_daemon, sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Queue an event callback; only AAC_EVENT_CMFREE (command became free)
 * is currently supported.
 */
void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
			      event->ev_type);
		break;
	}
}

/*
 * Request information of container #cid
 *
 * Returns a pointer into fib->data (valid until the FIB is reused), or
 * NULL if the synchronous ContainerCommand failed.
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
			 sizeof(struct aac_mntinfo))) {
		device_printf(sc->aac_dev, "Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}

/*
 * Probe for containers, create disks.
 *
 * Runs once from the config_intrhook; unmasks interrupts and clears the
 * SUSPEND state when done.
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinforesp *mir;
	int count = 0, i = 0;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);

	/* loop over possible containers */
	do {
		/*
		 * NOTE(review): 'continue' does not advance i, so a
		 * persistently failing probe of container i > 0 retries the
		 * same index forever — TODO confirm this matches upstream
		 * intent.
		 */
		if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
			continue;
		if (i == 0)
			count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	/* poke the bus to actually attach the child devices */
	if (bus_generic_attach(sc->aac_dev))
		device_printf(sc->aac_dev, "bus_generic_attach failed\n");

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device
to represent a new container 456 */ 457 static void 458 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f) 459 { 460 struct aac_container *co; 461 device_t child; 462 463 /* 464 * Check container volume type for validity. Note that many of 465 * the possible types may never show up. 466 */ 467 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { 468 co = (struct aac_container *)kmalloc(sizeof *co, M_AACBUF, 469 M_INTWAIT | M_ZERO); 470 fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d", 471 mir->MntTable[0].ObjectId, 472 mir->MntTable[0].FileSystemName, 473 mir->MntTable[0].Capacity, mir->MntTable[0].VolType); 474 475 if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL) 476 device_printf(sc->aac_dev, "device_add_child failed\n"); 477 else 478 device_set_ivars(child, co); 479 device_set_desc(child, aac_describe_code(aac_container_types, 480 mir->MntTable[0].VolType)); 481 co->co_disk = child; 482 co->co_found = f; 483 bcopy(&mir->MntTable[0], &co->co_mntobj, 484 sizeof(struct aac_mntobj)); 485 lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE); 486 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); 487 lockmgr(&sc->aac_container_lock, LK_RELEASE); 488 } 489 } 490 491 /* 492 * Allocate resources associated with (sc) 493 */ 494 static int 495 aac_alloc(struct aac_softc *sc) 496 { 497 498 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 499 500 /* 501 * Create DMA tag for mapping buffers into controller-addressable space. 502 */ 503 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 504 1, 0, /* algnmnt, boundary */ 505 (sc->flags & AAC_FLAGS_SG_64BIT) ? 
506 BUS_SPACE_MAXADDR : 507 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 508 BUS_SPACE_MAXADDR, /* highaddr */ 509 NULL, NULL, /* filter, filterarg */ 510 MAXBSIZE, /* maxsize */ 511 sc->aac_sg_tablesize, /* nsegments */ 512 MAXBSIZE, /* maxsegsize */ 513 BUS_DMA_ALLOCNOW, /* flags */ 514 &sc->aac_buffer_dmat)) { 515 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); 516 return (ENOMEM); 517 } 518 519 /* 520 * Create DMA tag for mapping FIBs into controller-addressable space.. 521 */ 522 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 523 1, 0, /* algnmnt, boundary */ 524 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 525 BUS_SPACE_MAXADDR_32BIT : 526 0x7fffffff, /* lowaddr */ 527 BUS_SPACE_MAXADDR, /* highaddr */ 528 NULL, NULL, /* filter, filterarg */ 529 sc->aac_max_fibs_alloc * 530 sc->aac_max_fib_size, /* maxsize */ 531 1, /* nsegments */ 532 sc->aac_max_fibs_alloc * 533 sc->aac_max_fib_size, /* maxsize */ 534 0, /* flags */ 535 &sc->aac_fib_dmat)) { 536 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); 537 return (ENOMEM); 538 } 539 540 /* 541 * Create DMA tag for the common structure and allocate it. 542 */ 543 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 544 1, 0, /* algnmnt, boundary */ 545 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 
546 BUS_SPACE_MAXADDR_32BIT : 547 0x7fffffff, /* lowaddr */ 548 BUS_SPACE_MAXADDR, /* highaddr */ 549 NULL, NULL, /* filter, filterarg */ 550 8192 + sizeof(struct aac_common), /* maxsize */ 551 1, /* nsegments */ 552 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 553 0, /* flags */ 554 &sc->aac_common_dmat)) { 555 device_printf(sc->aac_dev, 556 "can't allocate common structure DMA tag\n"); 557 return (ENOMEM); 558 } 559 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, 560 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { 561 device_printf(sc->aac_dev, "can't allocate common structure\n"); 562 return (ENOMEM); 563 } 564 565 /* 566 * Work around a bug in the 2120 and 2200 that cannot DMA commands 567 * below address 8192 in physical memory. 568 * XXX If the padding is not needed, can it be put to use instead 569 * of ignored? 570 */ 571 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, 572 sc->aac_common, 8192 + sizeof(*sc->aac_common), 573 aac_common_map, sc, 0); 574 575 if (sc->aac_common_busaddr < 8192) { 576 sc->aac_common = (struct aac_common *) 577 ((uint8_t *)sc->aac_common + 8192); 578 sc->aac_common_busaddr += 8192; 579 } 580 bzero(sc->aac_common, sizeof(*sc->aac_common)); 581 582 /* Allocate some FIBs and associated command structs */ 583 TAILQ_INIT(&sc->aac_fibmap_tqh); 584 sc->aac_commands = kmalloc(sc->aac_max_fibs * sizeof(struct aac_command), 585 M_AACBUF, M_WAITOK|M_ZERO); 586 while (sc->total_fibs < sc->aac_max_fibs) { 587 if (aac_alloc_commands(sc) != 0) 588 break; 589 } 590 if (sc->total_fibs == 0) 591 return (ENOMEM); 592 593 return (0); 594 } 595 596 /* 597 * Free all of the resources associated with (sc) 598 * 599 * Should not be called if the controller is active. 
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	kfree(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL) {
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
				     rman_get_rid(sc->aac_irq), sc->aac_irq);
		/* MSI vectors must be explicitly returned to the system */
		if (sc->aac_irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(sc->aac_dev);
	}

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
	/* NARK hardware maps a second register window */
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
	dev_ops_remove_minor(&aac_ops, device_get_unit(sc->aac_dev));
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim	*sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	callout_stop_sync(&sc->aac_daemontime);

	/*
	 * Ask the AIF thread to exit and wait for it; the thread wakes
	 * sc->aac_dev when it clears AAC_AIFFLAGS_RUNNING.
	 */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		lksleep(sc->aac_dev, &sc->aac_io_lock, 0, "aacdch", 0);
	}
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		kfree(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		kfree(sim, M_AACBUF);
	}

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	lockuninit(&sc->aac_aifq_lock);
	lockuninit(&sc->aac_io_lock);
	lockuninit(&sc->aac_container_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		kprintf("FAILED.\n");
	else
		kprintf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			kprintf("FAILED.\n");
		} else {
			kprintf("done.\n");
		}
	}
#endif

	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aac_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_MASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aac_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_UNMASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM interface.
 *
 * Drains the outbound queue: entries with bit 1 set are adapter-initiated
 * AIFs (copied out of MEM1 registers and handed to aac_handle_aif());
 * all others are completions for commands we issued, indexed by
 * (index >> 2) into sc->aac_commands.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	while (1) {
		index = AAC_GET_OUTB_QUEUE(sc);
		/* 0xffffffff means "queue empty"; re-read once before quitting */
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
		if (index & 2) {
			if (index == 0xfffffffe) {
				/* XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF */
			fib = (struct aac_fib *)kmalloc(sizeof *fib, M_AACBUF,
				   M_INTWAIT | M_ZERO);
			index &= ~2;
			/* copy the FIB out of the register window, 32 bits at a time */
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			kfree(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			/* bit 0 set = "fast response": no status in the FIB */
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Interrupt filter for !NEW_COMM interface.
 */
void
aac_filter(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue(taskqueue_swi, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
		if ((reason & AAC_DB_PRINTF) &&
			(sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * lksleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
}

/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		if (cm->cm_datalen != 0) {
			error = bus_dmamap_load(sc->aac_buffer_dmat,
						cm->cm_datamap, cm->cm_data,
						cm->cm_datalen,
						aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
				error = 0;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				      "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 */
static void
aac_command_thread(void *arg)
{
	struct aac_softc *sc = arg;
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* The driver I/O lock is held for the lifetime of this loop. */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		retval = 0;
		/*
		 * Sleep until woken or until the periodic interval expires;
		 * lksleep drops and reacquires aac_io_lock across the sleep.
		 * EWOULDBLOCK from the timeout drives the stuck-command scan
		 * below.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = lksleep(sc->aifthread, &sc->aac_io_lock, 0,
					 "aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * can sleep.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			lockmgr(&sc->aac_io_lock, LK_RELEASE);
			aac_alloc_commands(sc);
			lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/*
		 * Also check to see if the adapter has a command for us.
		 * With the new comm interface, adapter commands arrive via
		 * the interrupt path instead of this polled queue.
		 */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
					    &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
					      "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
						     AAC_ADAP_NORM_RESP_QUEUE,
						     fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	/* let whoever asked us to exit (waiting on aac_dev) proceed */
	wakeup(sc->aac_dev);
}

/*
 * Process completed commands.
 *
 * Drains the host-normal response queue, unmaps each completed command and
 * either runs its completion handler or wakes a sleeper, then tries to
 * restart deferred I/O.
 */
static void
aac_complete(void *context, int pending)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t fib_size;

	sc = (struct aac_softc *)context;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);

	/* pull completed commands off the queue */
	for (;;) {
		/* look for completed FIBs on our queue */
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
				    &fib))
			break;	/* nothing to do */

		/*
		 * Get the command, unmap and hand off for processing.
		 * NOTE(review): pointer arithmetic on sc->aac_commands can
		 * never yield NULL, so this check looks ineffective as a
		 * guard against a bogus SenderData index — confirm intent.
		 */
		cm = sc->aac_commands + fib->Header.SenderData;
		if (cm == NULL) {
			AAC_PRINT_FIB(sc, fib);
			break;
		}
		if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
			device_printf(sc->aac_dev,
			    "COMMAND %p COMPLETED AFTER %d SECONDS\n",
			    cm, (int)(time_uptime - cm->cm_timestamp));

		aac_remove_busy(cm);

		aac_unmap_command(cm);
		cm->cm_flags |= AAC_CMD_COMPLETED;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* see if we can start some more I/O */
	sc->flags &= ~AAC_QUEUE_FRZN;
	aac_startio(sc);

	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Handle a bio submitted from a disk device: queue it and kick the
 * I/O start machinery.
 */
void
aac_submit_bio(struct aac_disk *ad, struct bio *bio)
{
	struct aac_softc *sc;

	bio->bio_driver_info = ad;
	sc = ad->ad_controller;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* queue the BIO and try to get some work done */
	aac_enqueue_bio(sc, bio);
	aac_startio(sc);
}

/*
 * Get a bio and build a command to go with it.
 *
 * Builds one of three FIB formats depending on adapter capabilities:
 * raw I/O (AAC_FLAGS_RAW_IO), 32-bit block read/write, or 64-bit block
 * read/write.  On failure both the command and the bio are returned to
 * their queues and ENOMEM is returned.
 */
static int
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_disk *ad;
	struct bio *bio;
	struct buf *bp;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the resources we will need */
	cm = NULL;
	bio = NULL;
	if (aac_alloc_command(sc, &cm))	/* get a command */
		goto fail;
	if ((bio = aac_dequeue_bio(sc)) == NULL)
		goto fail;

	/* fill out the command */
	bp = bio->bio_buf;
	cm->cm_data = (void *)bp->b_data;
	cm->cm_datalen = bp->b_bcount;
	cm->cm_complete = aac_bio_complete;
	cm->cm_private = bio;
	cm->cm_timestamp = time_uptime;

	/* build the FIB */
	fib = cm->cm_fib;
	fib->Header.Size = sizeof(struct aac_fib_header);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY |
		AAC_FIBSTATE_FROMHOST |
		AAC_FIBSTATE_REXPECTED |
		AAC_FIBSTATE_NORM |
		AAC_FIBSTATE_ASYNC |
		AAC_FIBSTATE_FAST_RESPONSE;

	/* build the read/write request */
	ad = (struct aac_disk *)bio->bio_driver_info;

	if (sc->flags & AAC_FLAGS_RAW_IO) {
		/* raw I/O: single request format for both directions */
		struct aac_raw_io *raw;
		raw = (struct aac_raw_io *)&fib->data[0];
		fib->Header.Command = RawIo;
		raw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
		raw->ByteCount = bp->b_bcount;
		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
		raw->BpTotal = 0;
		raw->BpComplete = 0;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		if (bp->b_cmd == BUF_CMD_READ) {
			raw->Flags = 1;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			raw->Flags = 0;
			cm->cm_flags |= AAC_CMD_DATAOUT;
		}
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		/* 32-bit scatter/gather container command */
		fib->Header.Command = ContainerCommand;
		if (bp->b_cmd == BUF_CMD_READ) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
			br->ByteCount = bp->b_bcount;
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
			bw->ByteCount = bp->b_bcount;
			bw->Stable = CUNSTABLE;
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		/* 64-bit scatter/gather container command */
		fib->Header.Command = ContainerCommand64;
		if (bp->b_cmd == BUF_CMD_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE;
br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE; 1266 br->Pad = 0; 1267 br->Flags = 0; 1268 fib->Header.Size += sizeof(struct aac_blockread64); 1269 cm->cm_flags |= AAC_CMD_DATAIN; 1270 cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64; 1271 } else { 1272 struct aac_blockwrite64 *bw; 1273 bw = (struct aac_blockwrite64 *)&fib->data[0]; 1274 bw->Command = VM_CtHostWrite64; 1275 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1276 bw->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE; 1277 bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE; 1278 bw->Pad = 0; 1279 bw->Flags = 0; 1280 fib->Header.Size += sizeof(struct aac_blockwrite64); 1281 cm->cm_flags |= AAC_CMD_DATAOUT; 1282 cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64; 1283 } 1284 } 1285 1286 *cmp = cm; 1287 return(0); 1288 1289 fail: 1290 if (bio != NULL) 1291 aac_enqueue_bio(sc, bio); 1292 if (cm != NULL) 1293 aac_release_command(cm); 1294 return(ENOMEM); 1295 } 1296 1297 /* 1298 * Handle a bio-instigated command that has been completed. 1299 */ 1300 static void 1301 aac_bio_complete(struct aac_command *cm) 1302 { 1303 struct aac_blockread_response *brr; 1304 struct aac_blockwrite_response *bwr; 1305 struct bio *bio; 1306 struct buf *bp; 1307 const char *code; 1308 AAC_FSAStatus status; 1309 1310 /* fetch relevant status and then release the command */ 1311 bio = (struct bio *)cm->cm_private; 1312 bp = bio->bio_buf; 1313 if (bp->b_cmd == BUF_CMD_READ) { 1314 brr = (struct aac_blockread_response *)&cm->cm_fib->data[0]; 1315 status = brr->Status; 1316 } else { 1317 bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0]; 1318 status = bwr->Status; 1319 } 1320 aac_release_command(cm); 1321 1322 /* fix up the bio based on status */ 1323 if (status == ST_OK) { 1324 bp->b_resid = 0; 1325 code = NULL; 1326 } else { 1327 bp->b_error = EIO; 1328 bp->b_flags |= B_ERROR; 1329 } 1330 aac_biodone(bio, code); 1331 } 1332 1333 /* 1334 * Submit a command to the controller, return when it completes. 
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aac_startio(sc);
	/*
	 * NOTE(review): lksleep() is given aac_io_lock, so the caller is
	 * expected to already hold it; the lock is dropped across the sleep
	 * and reacquired on wakeup — confirm against callers.
	 */
	error = lksleep(cm, &sc->aac_io_lock, 0, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
 *
 * Returns 0 with *cmp set on success.  Returns EBUSY when the freelist is
 * empty; in that case, if the FIB limit has not been reached, the aif
 * thread is asked (via AAC_AIFFLAGS_ALLOCFIBS) to allocate more.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			/* ask the command thread to allocate more FIBs */
			lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			lockmgr(&sc->aac_io_lock, LK_RELEASE);
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
1389 */ 1390 void 1391 aac_release_command(struct aac_command *cm) 1392 { 1393 struct aac_event *event; 1394 struct aac_softc *sc; 1395 1396 sc = cm->cm_sc; 1397 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1398 1399 /* (re)initialize the command/FIB */ 1400 cm->cm_sgtable = NULL; 1401 cm->cm_flags = 0; 1402 cm->cm_complete = NULL; 1403 cm->cm_private = NULL; 1404 cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; 1405 cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; 1406 cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; 1407 cm->cm_fib->Header.Flags = 0; 1408 cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; 1409 1410 /* 1411 * These are duplicated in aac_start to cover the case where an 1412 * intermediate stage may have destroyed them. They're left 1413 * initialized here for debugging purposes only. 1414 */ 1415 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; 1416 cm->cm_fib->Header.SenderData = 0; 1417 1418 aac_enqueue_free(cm); 1419 1420 if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { 1421 TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); 1422 event->ev_callback(sc, event, event->ev_arg); 1423 } 1424 } 1425 1426 /* 1427 * Map helper for command/FIB allocation. 1428 */ 1429 static void 1430 aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1431 { 1432 uint64_t *fibphys; 1433 1434 fibphys = (uint64_t *)arg; 1435 1436 *fibphys = segs[0].ds_addr; 1437 } 1438 1439 /* 1440 * Allocate and initialize commands/FIBs for this adapter. 
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* never exceed the adapter's FIB limit */
	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = kmalloc(sizeof(struct aac_fibmap), M_AACBUF, M_INTWAIT | M_ZERO);

	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		kfree(fm, M_AACBUF);
		return (ENOMEM);
	}

	/*
	 * Ignore errors since this doesn't bounce.
	 * NOTE(review): 'fibphys' is assumed valid after this returns, i.e.
	 * the callback is expected to run synchronously — confirm with the
	 * bus_dma contract for BUS_DMA_NOWAIT loads of wired memory.
	 */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
			      aac_map_command_helper, &fibphys, 0);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		/*
		 * NOTE(review): this is reassigned every iteration, so after
		 * the loop fm->aac_commands points at the LAST command of
		 * this map, yet aac_free_commands() indexes forward from it
		 * as if it were the first — looks off by (i) entries at
		 * detach time; confirm against upstream.
		 */
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
		cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		/* publish the command under the I/O lock */
		lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
		aac_release_command(cm);
		sc->total_fibs++;
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
	}

	/* success if at least one command was set up */
	if (i > 0) {
		lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return (0);
	}

	/* nothing usable: undo the DMA allocation */
	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	kfree(fm, M_AACBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 * NOTE(review): the post-decrement in the condition leaves
		 * total_fibs at -1 once it reaches zero — harmless at final
		 * teardown but worth confirming.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		kfree(fm, M_AACBUF);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
1536 */ 1537 static void 1538 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1539 { 1540 struct aac_softc *sc; 1541 struct aac_command *cm; 1542 struct aac_fib *fib; 1543 int i; 1544 1545 cm = (struct aac_command *)arg; 1546 sc = cm->cm_sc; 1547 fib = cm->cm_fib; 1548 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1549 1550 /* copy into the FIB */ 1551 if (cm->cm_sgtable != NULL) { 1552 if (fib->Header.Command == RawIo) { 1553 struct aac_sg_tableraw *sg; 1554 sg = (struct aac_sg_tableraw *)cm->cm_sgtable; 1555 sg->SgCount = nseg; 1556 for (i = 0; i < nseg; i++) { 1557 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; 1558 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; 1559 sg->SgEntryRaw[i].Next = 0; 1560 sg->SgEntryRaw[i].Prev = 0; 1561 sg->SgEntryRaw[i].Flags = 0; 1562 } 1563 /* update the FIB size for the s/g count */ 1564 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); 1565 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 1566 struct aac_sg_table *sg; 1567 sg = cm->cm_sgtable; 1568 sg->SgCount = nseg; 1569 for (i = 0; i < nseg; i++) { 1570 sg->SgEntry[i].SgAddress = segs[i].ds_addr; 1571 sg->SgEntry[i].SgByteCount = segs[i].ds_len; 1572 } 1573 /* update the FIB size for the s/g count */ 1574 fib->Header.Size += nseg*sizeof(struct aac_sg_entry); 1575 } else { 1576 struct aac_sg_table64 *sg; 1577 sg = (struct aac_sg_table64 *)cm->cm_sgtable; 1578 sg->SgCount = nseg; 1579 for (i = 0; i < nseg; i++) { 1580 sg->SgEntry64[i].SgAddress = segs[i].ds_addr; 1581 sg->SgEntry64[i].SgByteCount = segs[i].ds_len; 1582 } 1583 /* update the FIB size for the s/g count */ 1584 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); 1585 } 1586 } 1587 1588 /* Fix up the address values in the FIB. Use the command array index 1589 * instead of a pointer since these fields are only 32 bits. 
Shift 1590 * the SenderFibAddress over to make room for the fast response bit 1591 * and for the AIF bit 1592 */ 1593 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); 1594 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; 1595 1596 /* save a pointer to the command for speedy reverse-lookup */ 1597 cm->cm_fib->Header.SenderData = cm->cm_index; 1598 1599 if (cm->cm_flags & AAC_CMD_DATAIN) 1600 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1601 BUS_DMASYNC_PREREAD); 1602 if (cm->cm_flags & AAC_CMD_DATAOUT) 1603 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1604 BUS_DMASYNC_PREWRITE); 1605 cm->cm_flags |= AAC_CMD_MAPPED; 1606 1607 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1608 int count = 10000000L; 1609 while (AAC_SEND_COMMAND(sc, cm) != 0) { 1610 if (--count == 0) { 1611 aac_unmap_command(cm); 1612 sc->flags |= AAC_QUEUE_FRZN; 1613 aac_requeue_ready(cm); 1614 } 1615 DELAY(5); /* wait 5 usec. */ 1616 } 1617 } else { 1618 /* Put the FIB on the outbound queue */ 1619 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) { 1620 aac_unmap_command(cm); 1621 sc->flags |= AAC_QUEUE_FRZN; 1622 aac_requeue_ready(cm); 1623 } 1624 } 1625 } 1626 1627 /* 1628 * Unmap a command from controller-visible space. 1629 */ 1630 static void 1631 aac_unmap_command(struct aac_command *cm) 1632 { 1633 struct aac_softc *sc; 1634 1635 sc = cm->cm_sc; 1636 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1637 1638 if (!(cm->cm_flags & AAC_CMD_MAPPED)) 1639 return; 1640 1641 if (cm->cm_datalen != 0) { 1642 if (cm->cm_flags & AAC_CMD_DATAIN) 1643 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1644 BUS_DMASYNC_POSTREAD); 1645 if (cm->cm_flags & AAC_CMD_DATAOUT) 1646 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1647 BUS_DMASYNC_POSTWRITE); 1648 1649 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); 1650 } 1651 cm->cm_flags &= ~AAC_CMD_MAPPED; 1652 } 1653 1654 /* 1655 * Hardware Interface 1656 */ 1657 1658 /* 1659 * Initialize the adapter. 
 */
/* Map callback: record the bus address of the shared 'common' area. */
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_common_busaddr = segs[0].ds_addr;
}

/*
 * Wait for the adapter firmware to come up, reject unsupported firmware,
 * negotiate capabilities (64-bit s/g, new comm interface, raw I/O, ...)
 * and read the adapter's preferred operating parameters into the softc.
 */
static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, options = 0, atu_size = 0;
	int rid, status;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_uptime;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (code & AAC_SELF_TEST_FAILED) {
			device_printf(sc->aac_dev, "FATAL: selftest failed\n");
			return(ENXIO);
		}
		if (code & AAC_KERNEL_PANIC) {
			device_printf(sc->aac_dev,
				      "FATAL: controller kernel panic");
			return(ENXIO);
		}
		if (time_uptime > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
				      "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING));

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
				     NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}

	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
			    "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		    && (sizeof(bus_addr_t) > 4)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		if ((options & AAC_SUPPORTED_NEW_COMM)
		    && sc->aac_if->aif_send_command)
			sc->flags |= AAC_FLAGS_NEW_COMM;
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/*
	 * Remap mem. resource, if required: the new comm interface may need
	 * a larger register window than the one initially mapped.  If the
	 * larger mapping fails, fall back to the old interface.
	 */
	if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
	    atu_size > rman_get_size(sc->aac_regs_res1)) {
		rid = rman_get_rid(sc->aac_regs_res1);
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid,
				     sc->aac_regs_res1);
		sc->aac_regs_res1 = bus_alloc_resource(sc->aac_dev,
		    SYS_RES_MEMORY, &rid, 0ul, ~0ul, atu_size, RF_ACTIVE);
		if (sc->aac_regs_res1 == NULL) {
			sc->aac_regs_res1 = bus_alloc_resource_any(
			    sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
			if (sc->aac_regs_res1 == NULL) {
				device_printf(sc->aac_dev,
				    "couldn't allocate register window\n");
				return (ENXIO);
			}
			sc->flags &= ~AAC_FLAGS_NEW_COMM;
		}
		sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
		sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);

		if (sc->aac_hwif == AAC_HWIF_NARK) {
			sc->aac_regs_res0 = sc->aac_regs_res1;
			sc->aac_btag0 = sc->aac_btag1;
			sc->aac_bhandle0 = sc->aac_bhandle1;
		}
	}

	/* Read preferred settings */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;				/* 64KB */
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		    - sizeof(struct aac_blockwrite64))
		    / sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
		    - sizeof(struct aac_blockwrite))
		    / sizeof(struct aac_sg_entry);

	/* let the adapter override the defaults, clamped to one page */
	if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = (options & 0xFFFF);
	}
	if (sc->aac_max_fib_size > PAGE_SIZE)
		sc->aac_max_fib_size = PAGE_SIZE;
	sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;

	/* FIBs larger than the classic struct imply raw I/O support */
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

	return (0);
}

/*
 * Populate the adapter init structure and the shared communication area
 * (FIB queues, printf buffer), then hand them to the controller.
 */
static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init *ip;
	u_int32_t qoffset;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
				  offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_uptime;	/* reset later if invalid */

	ip->InitFlags = 0;
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
		device_printf(sc->aac_dev, "New comm. interface enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Initialize FIB queues.  Note that it appears that the layout of
	 * the indexes and the segmentation of the entries may be mandated by
	 * the adapter, which is only told about the base of the queue index
	 * fields.
	 *
	 * The initial values of the indices are assumed to inform the
	 * adapter of the sizes of the respective queues, and theoretically
	 * it could work out the entire layout of the queue structures from
	 * this.  We take the easy route and just lay this area out like
	 * everyone else does.
	 *
	 * The Linux driver uses a much more complex scheme whereby several
	 * header records are kept for each queue.  We use a couple of
	 * generic list manipulation functions which 'know' the size of each
	 * list by virtue of a table.
	 */
	/* align the queue table inside the shared area */
	qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN;
	qoffset &= ~(AAC_QUEUE_ALIGN - 1);
	sc->aac_queues =
	    (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset);
	ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset;

	/*
	 * Seed every producer/consumer index pair with its queue's entry
	 * count (the indices never reach zero; see the queue helpers).
	 */
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_HOST_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_HOST_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_HOST_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_HOST_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_ADAP_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_ADAP_NORM_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] =
		AAC_ADAP_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] =
		AAC_ADAP_HIGH_CMD_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_HOST_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_HOST_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_HOST_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_HOST_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_ADAP_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_ADAP_NORM_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]=
		AAC_ADAP_HIGH_RESP_ENTRIES;
	sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]=
		AAC_ADAP_HIGH_RESP_ENTRIES;
	sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] =
		&sc->aac_queues->qt_HostNormCmdQueue[0];
	sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] =
		&sc->aac_queues->qt_HostHighCmdQueue[0];
	sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] =
		&sc->aac_queues->qt_AdapNormCmdQueue[0];
	sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] =
		&sc->aac_queues->qt_AdapHighCmdQueue[0];
	sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] =
		&sc->aac_queues->qt_HostNormRespQueue[0];
	sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] =
		&sc->aac_queues->qt_HostHighRespQueue[0];
	sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] =
		&sc->aac_queues->qt_AdapNormRespQueue[0];
	sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] =
		&sc->aac_queues->qt_AdapHighRespQueue[0];

	/*
	 * Do controller-type-specific initialisation
	 */
	switch (sc->aac_hwif) {
	case AAC_HWIF_I960RX:
		AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0);
		break;
	case AAC_HWIF_RKT:
		AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0);
		break;
	default:
		break;
	}

	/*
	 * Give the init structure to the controller.
 */
	if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT,
			     sc->aac_common_busaddr +
			     offsetof(struct aac_common, ac_init), 0, 0, 0,
			     NULL)) {
		device_printf(sc->aac_dev,
			      "error establishing init structure\n");
		error = EIO;
		goto out;
	}

	error = 0;
out:
	return(error);
}

/*
 * Hook up the interrupt handler: a regular MPSAFE handler for the new
 * comm interface, or the legacy filter otherwise.  Returns 0 on success
 * or EINVAL if the handler could not be established.
 */
static int
aac_setup_intr(struct aac_softc *sc)
{

	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
				   INTR_MPSAFE,
				   aac_new_intr, sc, &sc->aac_intr, NULL)) {
			device_printf(sc->aac_dev, "can't set up interrupt\n");
			return (EINVAL);
		}
	} else {
		if (bus_setup_intr(sc->aac_dev, sc->aac_irq,
				   0, aac_filter,
				   sc, &sc->aac_intr, NULL)) {
			device_printf(sc->aac_dev,
				      "can't set up interrupt filter\n");
			return (EINVAL);
		}
	}
	return (0);
}

/*
 * Send a synchronous command to the controller and wait for a result.
 * Indicate if the controller completed the command with an error status.
 */
static int
aac_sync_command(struct aac_softc *sc, u_int32_t command,
		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
		 u_int32_t *sp)
{
	time_t then;
	u_int32_t status;

	if (sp != NULL)
		*sp = 0;	/* avoid gcc warnings */
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* populate the mailbox */
	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);

	/* ensure the sync command doorbell flag is cleared */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* then set it to signal the adapter */
	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);

	/*
	 * Spin waiting for the command to complete; EIO after
	 * AAC_IMMEDIATE_TIMEOUT seconds without the doorbell firing.
	 */
	then = time_uptime;
	do {
		if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) {
			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
			return(EIO);
		}
	} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));

	/* clear the completion flag */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* get the command status */
	status = AAC_GET_MAILBOX(sc, 0);
	if (sp != NULL)
		*sp = status;

	/* note: -1 (not an errno) signals an adapter-reported failure */
	if (status != AAC_SRB_STS_SUCCESS)
		return (-1);
	return(0);
}

/*
 * Build a sync FIB in the shared ac_sync_fib slot and submit it via the
 * SYNCFIB mailbox command.  Returns EINVAL if the payload is too large,
 * EIO on adapter failure, 0 on success.
 */
int
aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
	     struct aac_fib *fib, u_int16_t datasize)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
#if 0 /* XXX swildner */
	KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0);
#endif

	if (datasize > AAC_FIB_DATASIZE)
		return(EINVAL);

	/*
	 * Set up the sync FIB
	 */
	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
				AAC_FIBSTATE_INITIALISED |
				AAC_FIBSTATE_EMPTY;
	fib->Header.XferState |= xferstate;
	fib->Header.Command = command;
	fib->Header.StructType = AAC_FIBTYPE_TFIB;
	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
	fib->Header.SenderSize = sizeof(struct aac_fib);
	fib->Header.SenderFibAddress = 0;	/* Not needed */
	fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common,
						  ac_sync_fib);

	/*
	 * Give the FIB to the controller, wait for a response.
	 */
	if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
			     fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
		return(EIO);
	}

	return (0);
}

/*
 * Adapter-space FIB queue manipulation
 *
 * Note that the queue implementation here is a little funky; neither the PI or
 * CI will ever be zero.  This behaviour is a controller feature.
 */
/* per-queue entry count and doorbell to ring on insert (0 = none) */
static const struct {
	int		size;
	int		notify;
} aac_qinfo[] = {
	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};

/*
 * Atomically insert an entry into the nominated queue, returns 0 on success or
 * EBUSY if the queue is full.
 *
 * Note: it would be more efficient to defer notifying the controller in
 * the case where we may be inserting several entries in rapid succession,
 * but implementing this usefully may be difficult (it would involve a
 * separate queue/notify interface).
 */
static int
aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	fib_size = cm->cm_fib->Header.Size;
	fib_addr = cm->cm_fib->Header.ReceiverFibAddress;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/*
	 * To avoid a race with its completion interrupt, place this command on
	 * the busy queue prior to advertising it to the controller.
	 */
	aac_enqueue_busy(cm);

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index; write it only after the entry is filled in */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}

/*
 * Atomically remove one entry from the nominated queue, returns 0 on
 * success or ENOENT if the queue is empty.
 */
static int
aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
		struct aac_fib **fib_addr)
{
	u_int32_t pi, ci;
	u_int32_t fib_index;
	int error;
	int notify;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* check for queue empty */
	if (ci == pi) {
		error = ENOENT;
		goto out;
	}

	/* wrap the pi so the following test works */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/*
	 * Remember whether this removal takes the queue from full to
	 * not-full; if so the adapter is notified below.
	 */
	notify = 0;
	if (ci == pi + 1)
		notify++;

	/* wrap the queue? */
	if (ci >= aac_qinfo[queue].size)
		ci = 0;

	/* fetch the entry */
	*fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;

	switch (queue) {
	case AAC_HOST_NORM_CMD_QUEUE:
	case AAC_HOST_HIGH_CMD_QUEUE:
		/*
		 * The aq_fib_addr is only 32 bits wide so it can't be counted
		 * on to hold an address.  For AIF's, the adapter assumes
		 * that it's giving us an address into the array of AIF fibs.
		 * Therefore, we have to convert it to an index.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
			sizeof(struct aac_fib);
		*fib_addr = &sc->aac_common->ac_fibs[fib_index];
		break;

	case AAC_HOST_NORM_RESP_QUEUE:
	case AAC_HOST_HIGH_RESP_QUEUE:
	{
		struct aac_command *cm;

		/*
		 * As above, an index is used instead of an actual address.
		 * Gotta shift the index to account for the fast response
		 * bit.  No other correction is needed since this value was
		 * originally provided by the driver via the SenderFibAddress
		 * field.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
		cm = sc->aac_commands + (fib_index >> 2);
		*fib_addr = cm->cm_fib;

		/*
		 * Is this a fast response? If it is, update the fib fields in
		 * local memory since the whole fib isn't DMA'd back up.
		 */
		if (fib_index & 0x01) {
			(*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
			*((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
		}
		break;
	}
	default:
		panic("Invalid queue in aac_dequeue_fib()");
		break;
	}

	/* update consumer index */
	sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;

	/* if we have made the queue un-full, notify the adapter */
	if (notify && (aac_qinfo[queue].notify != 0))
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
	error = 0;

out:
	return(error);
}

/*
 * Put our response to an Adapter Initialed Fib on the response queue.
 * Returns 0 on success or EBUSY if the queue is full.
 */
static int
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Tell the adapter where the FIB is */
	fib_size = fib->Header.Size;
	fib_addr = fib->Header.SenderFibAddress;
	fib->Header.ReceiverFibAddress = fib_addr;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}

/*
 * Check for commands that have been outstanding for a suspiciously long time,
 * and complain about them.
 */
static void
aac_timeout(struct aac_softc *sc)
{
	struct aac_command *cm;
	time_t deadline;
	int timedout, code;

	/*
	 * Traverse the busy command list, bitch about late commands once
	 * only (AAC_CMD_TIMEDOUT marks a command already reported).
	 */
	timedout = 0;
	deadline = time_uptime - AAC_CMD_TIMEOUT;
	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
		if ((cm->cm_timestamp < deadline)
		    && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
			cm->cm_flags |= AAC_CMD_TIMEDOUT;
			device_printf(sc->aac_dev,
			    "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
			    cm, cm->cm_fib->Header.Command,
			    (int)(time_uptime-cm->cm_timestamp));
			AAC_PRINT_FIB(sc, cm->cm_fib);
			timedout++;
		}
	}

	/* if anything timed out, check whether the firmware itself died */
	if (timedout) {
		code = AAC_GET_FWSTATUS(sc);
		if (code != AAC_UP_AND_RUNNING) {
			device_printf(sc->aac_dev, "WARNING! Controller is no "
				      "longer running! code= 0x%x\n", code);
		}
	}
}

/*
 * Interface Function Vectors
 */

/*
 * Read the current firmware status word.
 */
static int
aac_sa_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS));
}

static int
aac_rx_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* new-comm adapters report status through OMR0 instead */
	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
	    AAC_RX_OMR0 : AAC_RX_FWSTATUS));
}

static int
aac_rkt_get_fwstatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* new-comm adapters report status through OMR0 instead */
	return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
	    AAC_RKT_OMR0 : AAC_RKT_FWSTATUS));
}

/*
 * Notify the controller of a change in a given queue
 */

static void
aac_sa_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit);
}

static void
aac_rx_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit);
}

static void
aac_rkt_qnotify(struct aac_softc *sc, int qbit)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit);
}

/*
 * Get the interrupt reason bits
 */
static int
aac_sa_get_istatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0));
}

static int
aac_rx_get_istatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR));
}

static int
aac_rkt_get_istatus(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR));
}
/*
 * Clear some interrupt reason bits
 */
static void
aac_sa_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
}

static void
aac_rx_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask);
}

static void
aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask);
}

/*
 * Populate the mailbox and set the command word
 *
 * The mailbox is five consecutive 32-bit registers: the command word
 * followed by four arguments.
 */
static void
aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
		   u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
		   u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
}

static void
aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
		   u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
		   u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
}

static void
aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
}

/*
 * Fetch the immediate command status word; 'mb' is the mailbox word index.
 */
static int
aac_sa_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
}

static int
aac_rx_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
}

static int
aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
}

/*
 * Set/clear interrupt masks
 */
static void
aac_sa_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
	}
}

static void
aac_rx_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		/* OIMR is a mask register: zero bits are unmasked */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
		else
			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0);
	}
}

static void
aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		/* OIMR is a mask register: zero bits are unmasked */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
		else
			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0);
	}
}

/*
 * New comm. interface: Send command functions
 */
static int
aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

	/* ask the adapter for a message slot; retry once on 0xffffffff */
	index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		return index;	/* no slot available; returns -1 as int */
	aac_enqueue_busy(cm);
	/* slot layout: 64-bit FIB physical address, then FIB size */
	device = index;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index);
	return 0;
}

static int
aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

	/* ask the adapter for a message slot; retry once on 0xffffffff */
	index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		return index;	/* no slot available; returns -1 as int */
	aac_enqueue_busy(cm);
	/* slot layout: 64-bit FIB physical address, then FIB size */
	device = index;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index);
	return 0;
}

/*
 * New comm.
 * interface: get, set outbound queue index
 */
static int
aac_rx_get_outb_queue(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE));
}

static int
aac_rkt_get_outb_queue(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE));
}

static void
aac_rx_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index);
}

static void
aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index);
}

/*
 * Debugging and Diagnostics
 */

/*
 * Print some information about the controller.
 *
 * Queries the adapter via RequestAdapterInfo (and, when supported,
 * RequestSupplementAdapterInfo) using a sync FIB; saves the firmware
 * kernel revision in sc->aac_revision for later use.
 */
static void
aac_describe_controller(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_adapter_info *info;
	char *adapter_type = "Adaptec RAID controller";

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);

	fib->data[0] = 0;
	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	/* save the kernel revision structure for later use */
	info = (struct aac_adapter_info *)&fib->data[0];
	sc->aac_revision = info->KernelRevision;

	if (bootverbose) {
		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
		    "(%dMB cache, %dMB execution), %s\n",
		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
		    info->BufferMem / (1024 * 1024),
		    info->ExecutionMem / (1024 * 1024),
		    aac_describe_code(aac_battery_platform,
				      info->batteryPlatform));

		device_printf(sc->aac_dev,
		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
		    info->KernelRevision.external.comp.major,
		    info->KernelRevision.external.comp.minor,
		    info->KernelRevision.external.comp.dash,
		    info->KernelRevision.buildNumber,
		    (u_int32_t)(info->SerialNumber & 0xffffff));

		device_printf(sc->aac_dev, "Supported Options=%b\n",
			      sc->supported_options,
			      "\20"
			      "\1SNAPSHOT"
			      "\2CLUSTERS"
			      "\3WCACHE"
			      "\4DATA64"
			      "\5HOSTTIME"
			      "\6RAID50"
			      "\7WINDOW4GB"
			      "\10SCSIUPGD"
			      "\11SOFTERR"
			      "\12NORECOND"
			      "\13SGMAP64"
			      "\14ALARM"
			      "\15NONDASD"
			      "\16SCSIMGT"
			      "\17RAIDSCSI"
			      "\21ADPTINFO"
			      "\22NEWCOMM"
			      "\23ARRAY64BIT"
			      "\24HEATSENSOR");
	}

	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
		fib->data[0] = 0;
		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
			device_printf(sc->aac_dev,
			    "RequestSupplementAdapterInfo failed\n");
		else
			adapter_type = ((struct aac_supplement_adapter_info *)
			    &fib->data[0])->AdapterTypeText;
	}
	device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n",
		adapter_type,
		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);

	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Look up a text description of a numeric error code and return a pointer to
 * same.
 */
static const char *
aac_describe_code(const struct aac_code_lookup *table, u_int32_t code)
{
	int i;

	for (i = 0; table[i].string != NULL; i++)
		if (table[i].code == code)
			return(table[i].string);
	/*
	 * No match: return the entry just past the NULL sentinel, which
	 * is expected to hold a default description (see aac_tables.h).
	 */
	return(table[i + 1].string);
}

/*
 * Management Interface
 */

static int
aac_open(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct aac_softc *sc;

	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/* keep the device busy while the control node is open */
	device_busy(sc->aac_dev);

	return 0;
}

/*
 * Management-interface ioctl dispatcher.
 *
 * For each FSACTL_* request the native (FreeBSD-style) variant carries a
 * pointer to the real argument, which is dereferenced and then falls
 * through into the Linux-compat (FSACTL_LNX_*) case that does the work.
 */
static int
aac_ioctl(struct dev_ioctl_args *ap)
{
	caddr_t arg = ap->a_data;
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	union aac_statrequest *as;
	struct aac_softc *sc;
	int error = 0;

	as = (union aac_statrequest *)arg;
	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	switch (cmd) {
	case AACIO_STATS:
		switch (as->as_item) {
		case AACQ_FREE:
		case AACQ_BIO:
		case AACQ_READY:
		case AACQ_BUSY:
			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
			      sizeof(struct aac_qstat));
			break;
		default:
			error = ENOENT;
			break;
		}
		break;

	case FSACTL_SENDFIB:
	case FSACTL_SEND_LARGE_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SENDFIB:
	case FSACTL_LNX_SEND_LARGE_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
		error = aac_ioctl_sendfib(sc, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SEND_RAW_SRB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
		error = aac_ioctl_send_raw_srb(sc, arg);
		break;
	case FSACTL_AIF_THREAD:
	case FSACTL_LNX_AIF_THREAD:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
		error = EINVAL;
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
		error = aac_open_aif(sc, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
		error = aac_getnext_aif(sc, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
		error = aac_close_aif(sc, arg);
		break;
	case FSACTL_MINIPORT_REV_CHECK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_MINIPORT_REV_CHECK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
		error = aac_rev_check(sc, arg);
		break;
	case FSACTL_QUERY_DISK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_QUERY_DISK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
		error = aac_query_disk(sc, arg);
		break;
	case FSACTL_DELETE_DISK:
	case FSACTL_LNX_DELETE_DISK:
		/*
		 * We don't trust the underland to tell us when to delete a
		 * container, rather we rely on an AIF coming from the
		 * controller
		 */
		error = 0;
		break;
	case FSACTL_GET_PCI_INFO:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_PCI_INFO:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
		error = aac_get_pci_info(sc, arg);
		break;
	case FSACTL_GET_FEATURES:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_FEATURES:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
		error = aac_supported_features(sc, arg);
		break;
	default:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
		error = EINVAL;
		break;
	}
	return(error);
}

static struct filterops aac_filterops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, aac_filter_detach, aac_filter_read };

/*
 * kqueue attach: only EVFILT_READ is supported; the knote is hung off the
 * softc's receive klist and fires when new AIFs are queued.
 */
static int
aac_kqfilter(struct dev_kqfilter_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct aac_softc *sc = dev->si_drv1;
	struct knote *kn = ap->a_kn;
	struct klist *klist;

	ap->a_result = 0;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &aac_filterops;
		kn->kn_hook = (caddr_t)sc;
		break;
	default:
		ap->a_result = EOPNOTSUPP;
		return (0);
	}

	klist = &sc->rcv_kq.ki_note;
	knote_insert(klist, kn);

	return (0);
}

static void
aac_filter_detach(struct knote *kn)
{
	struct aac_softc *sc = (struct aac_softc *)kn->kn_hook;
	struct klist *klist;

	klist = &sc->rcv_kq.ki_note;
	knote_remove(klist, kn);
}

/*
 * Event filter: readable when any registered AIF context still has
 * unconsumed entries in the AIF queue.
 */
static int
aac_filter_read(struct knote *kn, long hint)
{
	struct aac_softc *sc;
	struct aac_fib_context *ctx;
	int ret = 0;

	sc = (struct aac_softc *)kn->kn_hook;

	lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE);
	for (ctx = sc->fibctx; ctx; ctx = ctx->next)
		if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap)
			ret = 1;
	lockmgr(&sc->aac_aifq_lock, LK_RELEASE);

	return(ret);
}

/*
 * Deferred-command event callback: once a command slot is free, hand it to
 * the waiter (arg points at its struct aac_command *) and wake it up.
 * If allocation still fails, re-queue the event and try again later.
 */
static void
aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg)
{

	switch (event->ev_type) {
	case AAC_EVENT_CMFREE:
		KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0);
		if (aac_alloc_command(sc, (struct aac_command **)arg)) {
			aac_add_event(sc, event);
			return;
		}
		kfree(event, M_AACBUF);
		/* NB: sleepers must wait on 'arg' (== &cm) for this wakeup */
		wakeup(arg);
		break;
	default:
		break;
	}
}

/*
 * Send a FIB supplied from userspace
 */
static int
aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib)
{
	struct aac_command *cm;
	int size, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	cm = NULL;

	/*
	 * Get a command; if none is free, register a CMFREE event and
	 * sleep on &cm until aac_ioctl_event hands us one.
	 */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	if (aac_alloc_command(sc, &cm)) {
		struct aac_event *event;

		event = kmalloc(sizeof(struct aac_event), M_AACBUF,
		    M_INTWAIT | M_ZERO);
		event->ev_type = AAC_EVENT_CMFREE;
		event->ev_callback = aac_ioctl_event;
		event->ev_arg = &cm;
		aac_add_event(sc, event);
		lksleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0);
	}
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	/*
	 * Fetch the FIB header, then re-copy to get data as well.
	 */
	if ((error = copyin(ufib, cm->cm_fib,
			    sizeof(struct aac_fib_header))) != 0)
		goto out;
	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
			      size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
		goto out;
	cm->cm_fib->Header.Size = size;
	cm->cm_timestamp = time_uptime;

	/*
	 * Pass the FIB to the controller, wait for it to complete.
	 */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	error = aac_wait_command(cm);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	if (error != 0) {
		device_printf(sc->aac_dev,
			      "aac_wait_command return %d\n", error);
		goto out;
	}

	/*
	 * Copy the FIB and data back out to the caller.
3041 */ 3042 size = cm->cm_fib->Header.Size; 3043 if (size > sc->aac_max_fib_size) { 3044 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", 3045 size, sc->aac_max_fib_size); 3046 size = sc->aac_max_fib_size; 3047 } 3048 error = copyout(cm->cm_fib, ufib, size); 3049 3050 out: 3051 if (cm != NULL) { 3052 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3053 aac_release_command(cm); 3054 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3055 } 3056 return(error); 3057 } 3058 3059 /* 3060 * Send a passthrough FIB supplied from userspace 3061 */ 3062 static int 3063 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) 3064 { 3065 struct aac_command *cm; 3066 struct aac_event *event; 3067 struct aac_fib *fib; 3068 struct aac_srb *srbcmd, *user_srb; 3069 struct aac_sg_entry *sge; 3070 #ifdef __x86_64__ 3071 struct aac_sg_entry64 *sge64; 3072 #endif 3073 void *srb_sg_address, *ureply; 3074 uint32_t fibsize, srb_sg_bytecount; 3075 int error, transfer_data; 3076 3077 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3078 3079 cm = NULL; 3080 transfer_data = 0; 3081 fibsize = 0; 3082 user_srb = (struct aac_srb *)arg; 3083 3084 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3085 if (aac_alloc_command(sc, &cm)) { 3086 event = kmalloc(sizeof(struct aac_event), M_AACBUF, 3087 M_NOWAIT | M_ZERO); 3088 if (event == NULL) { 3089 error = EBUSY; 3090 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3091 goto out; 3092 } 3093 event->ev_type = AAC_EVENT_CMFREE; 3094 event->ev_callback = aac_ioctl_event; 3095 event->ev_arg = &cm; 3096 aac_add_event(sc, event); 3097 lksleep(cm, &sc->aac_io_lock, 0, "aacraw", 0); 3098 } 3099 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3100 3101 cm->cm_data = NULL; 3102 fib = cm->cm_fib; 3103 srbcmd = (struct aac_srb *)fib->data; 3104 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); 3105 if (error != 0) 3106 goto out; 3107 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { 3108 error = EINVAL; 3109 goto out; 3110 } 3111 error = 
copyin(user_srb, srbcmd, fibsize); 3112 if (error != 0) 3113 goto out; 3114 srbcmd->function = 0; 3115 srbcmd->retry_limit = 0; 3116 if (srbcmd->sg_map.SgCount > 1) { 3117 error = EINVAL; 3118 goto out; 3119 } 3120 3121 /* Retrieve correct SG entries. */ 3122 if (fibsize == (sizeof(struct aac_srb) + 3123 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { 3124 sge = srbcmd->sg_map.SgEntry; 3125 srb_sg_bytecount = sge->SgByteCount; 3126 srb_sg_address = (void *)(uintptr_t)sge->SgAddress; 3127 } 3128 #ifdef __x86_64__ 3129 else if (fibsize == (sizeof(struct aac_srb) + 3130 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { 3131 sge = NULL; 3132 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; 3133 srb_sg_bytecount = sge64->SgByteCount; 3134 srb_sg_address = (void *)sge64->SgAddress; 3135 if (sge64->SgAddress > 0xffffffffull && 3136 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 3137 error = EINVAL; 3138 goto out; 3139 } 3140 } 3141 #endif 3142 else { 3143 error = EINVAL; 3144 goto out; 3145 } 3146 ureply = (char *)arg + fibsize; 3147 srbcmd->data_len = srb_sg_bytecount; 3148 if (srbcmd->sg_map.SgCount == 1) 3149 transfer_data = 1; 3150 3151 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; 3152 if (transfer_data) { 3153 cm->cm_datalen = srb_sg_bytecount; 3154 cm->cm_data = kmalloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); 3155 if (cm->cm_data == NULL) { 3156 error = ENOMEM; 3157 goto out; 3158 } 3159 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) 3160 cm->cm_flags |= AAC_CMD_DATAIN; 3161 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { 3162 cm->cm_flags |= AAC_CMD_DATAOUT; 3163 error = copyin(srb_sg_address, cm->cm_data, 3164 cm->cm_datalen); 3165 if (error != 0) 3166 goto out; 3167 } 3168 } 3169 3170 fib->Header.Size = sizeof(struct aac_fib_header) + 3171 sizeof(struct aac_srb); 3172 fib->Header.XferState = 3173 AAC_FIBSTATE_HOSTOWNED | 3174 AAC_FIBSTATE_INITIALISED | 3175 AAC_FIBSTATE_EMPTY | 3176 AAC_FIBSTATE_FROMHOST | 3177 AAC_FIBSTATE_REXPECTED | 3178 
AAC_FIBSTATE_NORM | 3179 AAC_FIBSTATE_ASYNC | 3180 AAC_FIBSTATE_FAST_RESPONSE; 3181 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ? 3182 ScsiPortCommandU64 : ScsiPortCommand; 3183 3184 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3185 aac_wait_command(cm); 3186 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3187 3188 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) { 3189 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen); 3190 if (error != 0) 3191 goto out; 3192 } 3193 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response)); 3194 out: 3195 if (cm != NULL) { 3196 if (cm->cm_data != NULL) 3197 kfree(cm->cm_data, M_AACBUF); 3198 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3199 aac_release_command(cm); 3200 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3201 } 3202 return(error); 3203 } 3204 3205 static int 3206 aac_close(struct dev_close_args *ap) 3207 { 3208 cdev_t dev = ap->a_head.a_dev; 3209 struct aac_softc *sc; 3210 3211 sc = dev->si_drv1; 3212 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3213 get_mplock(); 3214 device_unbusy(sc->aac_dev); 3215 rel_mplock(); 3216 3217 return 0; 3218 } 3219 3220 /* 3221 * Handle an AIF sent to us by the controller; queue it for later reference. 3222 * If the queue fills up, then drop the older entries. 3223 */ 3224 static void 3225 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) 3226 { 3227 struct aac_aif_command *aif; 3228 struct aac_container *co, *co_next; 3229 struct aac_fib_context *ctx; 3230 struct aac_mntinforesp *mir; 3231 int next, current, found; 3232 int count = 0, added = 0, i = 0; 3233 uint32_t channel; 3234 3235 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3236 3237 aif = (struct aac_aif_command*)&fib->data[0]; 3238 aac_print_aif(sc, aif); 3239 3240 /* Is it an event that we should care about? 
*/ 3241 switch (aif->command) { 3242 case AifCmdEventNotify: 3243 switch (aif->data.EN.type) { 3244 case AifEnAddContainer: 3245 case AifEnDeleteContainer: 3246 /* 3247 * A container was added or deleted, but the message 3248 * doesn't tell us anything else! Re-enumerate the 3249 * containers and sort things out. 3250 */ 3251 aac_alloc_sync_fib(sc, &fib); 3252 do { 3253 /* 3254 * Ask the controller for its containers one at 3255 * a time. 3256 * XXX What if the controller's list changes 3257 * midway through this enumaration? 3258 * XXX This should be done async. 3259 */ 3260 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 3261 continue; 3262 if (i == 0) 3263 count = mir->MntRespCount; 3264 /* 3265 * Check the container against our list. 3266 * co->co_found was already set to 0 in a 3267 * previous run. 3268 */ 3269 if ((mir->Status == ST_OK) && 3270 (mir->MntTable[0].VolType != CT_NONE)) { 3271 found = 0; 3272 TAILQ_FOREACH(co, 3273 &sc->aac_container_tqh, 3274 co_link) { 3275 if (co->co_mntobj.ObjectId == 3276 mir->MntTable[0].ObjectId) { 3277 co->co_found = 1; 3278 found = 1; 3279 break; 3280 } 3281 } 3282 /* 3283 * If the container matched, continue 3284 * in the list. 3285 */ 3286 if (found) { 3287 i++; 3288 continue; 3289 } 3290 3291 /* 3292 * This is a new container. Do all the 3293 * appropriate things to set it up. 3294 */ 3295 aac_add_container(sc, mir, 1); 3296 added = 1; 3297 } 3298 i++; 3299 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 3300 aac_release_sync_fib(sc); 3301 3302 /* 3303 * Go through our list of containers and see which ones 3304 * were not marked 'found'. Since the controller didn't 3305 * list them they must have been deleted. Do the 3306 * appropriate steps to destroy the device. Also reset 3307 * the co->co_found field. 
3308 */ 3309 co = TAILQ_FIRST(&sc->aac_container_tqh); 3310 while (co != NULL) { 3311 if (co->co_found == 0) { 3312 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3313 get_mplock(); 3314 device_delete_child(sc->aac_dev, 3315 co->co_disk); 3316 rel_mplock(); 3317 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3318 co_next = TAILQ_NEXT(co, co_link); 3319 lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE); 3320 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3321 co_link); 3322 lockmgr(&sc->aac_container_lock, LK_RELEASE); 3323 kfree(co, M_AACBUF); 3324 co = co_next; 3325 } else { 3326 co->co_found = 0; 3327 co = TAILQ_NEXT(co, co_link); 3328 } 3329 } 3330 3331 /* Attach the newly created containers */ 3332 if (added) { 3333 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3334 get_mplock(); 3335 bus_generic_attach(sc->aac_dev); 3336 rel_mplock(); 3337 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3338 } 3339 3340 break; 3341 3342 case AifEnEnclosureManagement: 3343 switch (aif->data.EN.data.EEE.eventType) { 3344 case AIF_EM_DRIVE_INSERTION: 3345 case AIF_EM_DRIVE_REMOVAL: 3346 channel = aif->data.EN.data.EEE.unitID; 3347 if (sc->cam_rescan_cb != NULL) 3348 sc->cam_rescan_cb(sc, 3349 (channel >> 24) & 0xF, 3350 (channel & 0xFFFF)); 3351 break; 3352 } 3353 break; 3354 3355 case AifEnAddJBOD: 3356 case AifEnDeleteJBOD: 3357 channel = aif->data.EN.data.ECE.container; 3358 if (sc->cam_rescan_cb != NULL) 3359 sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, 3360 AAC_CAM_TARGET_WILDCARD); 3361 break; 3362 3363 default: 3364 break; 3365 } 3366 3367 default: 3368 break; 3369 } 3370 3371 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3372 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3373 current = sc->aifq_idx; 3374 next = (current + 1) % AAC_AIFQ_LENGTH; 3375 if (next == 0) 3376 sc->aifq_filled = 1; 3377 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3378 /* modify AIF contexts */ 3379 if (sc->aifq_filled) { 3380 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3381 if (next == ctx->ctx_idx) 3382 
ctx->ctx_wrap = 1; 3383 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3384 ctx->ctx_idx = next; 3385 } 3386 } 3387 sc->aifq_idx = next; 3388 /* On the off chance that someone is sleeping for an aif... */ 3389 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3390 wakeup(sc->aac_aifq); 3391 /* token may have been lost */ 3392 /* Wakeup any poll()ers */ 3393 KNOTE(&sc->rcv_kq.ki_note, 0); 3394 /* token may have been lost */ 3395 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3396 } 3397 3398 /* 3399 * Return the Revision of the driver to userspace and check to see if the 3400 * userspace app is possibly compatible. This is extremely bogus since 3401 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3402 * returning what the card reported. 3403 */ 3404 static int 3405 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3406 { 3407 struct aac_rev_check rev_check; 3408 struct aac_rev_check_resp rev_check_resp; 3409 int error = 0; 3410 3411 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3412 3413 /* 3414 * Copyin the revision struct from userspace 3415 */ 3416 if ((error = copyin(udata, (caddr_t)&rev_check, 3417 sizeof(struct aac_rev_check))) != 0) { 3418 return error; 3419 } 3420 3421 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", 3422 rev_check.callingRevision.buildNumber); 3423 3424 /* 3425 * Doctor up the response struct. 
3426 */ 3427 rev_check_resp.possiblyCompatible = 1; 3428 rev_check_resp.adapterSWRevision.external.comp.major = 3429 AAC_DRIVER_MAJOR_VERSION; 3430 rev_check_resp.adapterSWRevision.external.comp.minor = 3431 AAC_DRIVER_MINOR_VERSION; 3432 rev_check_resp.adapterSWRevision.external.comp.type = 3433 AAC_DRIVER_TYPE; 3434 rev_check_resp.adapterSWRevision.external.comp.dash = 3435 AAC_DRIVER_BUGFIX_LEVEL; 3436 rev_check_resp.adapterSWRevision.buildNumber = 3437 AAC_DRIVER_BUILD; 3438 3439 return(copyout((caddr_t)&rev_check_resp, udata, 3440 sizeof(struct aac_rev_check_resp))); 3441 } 3442 3443 /* 3444 * Pass the fib context to the caller 3445 */ 3446 static int 3447 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3448 { 3449 struct aac_fib_context *fibctx, *ctx; 3450 int error = 0; 3451 3452 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3453 3454 fibctx = kmalloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); 3455 if (fibctx == NULL) 3456 return (ENOMEM); 3457 3458 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3459 /* all elements are already 0, add to queue */ 3460 if (sc->fibctx == NULL) 3461 sc->fibctx = fibctx; 3462 else { 3463 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3464 ; 3465 ctx->next = fibctx; 3466 fibctx->prev = ctx; 3467 } 3468 3469 /* evaluate unique value */ 3470 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3471 ctx = sc->fibctx; 3472 while (ctx != fibctx) { 3473 if (ctx->unique == fibctx->unique) { 3474 fibctx->unique++; 3475 ctx = sc->fibctx; 3476 } else { 3477 ctx = ctx->next; 3478 } 3479 } 3480 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3481 3482 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3483 if (error) 3484 aac_close_aif(sc, (caddr_t)ctx); 3485 return error; 3486 } 3487 3488 /* 3489 * Close the caller's fib context 3490 */ 3491 static int 3492 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3493 { 3494 struct aac_fib_context *ctx; 3495 3496 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3497 
3498 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3499 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3500 if (ctx->unique == *(uint32_t *)&arg) { 3501 if (ctx == sc->fibctx) 3502 sc->fibctx = NULL; 3503 else { 3504 ctx->prev->next = ctx->next; 3505 if (ctx->next) 3506 ctx->next->prev = ctx->prev; 3507 } 3508 break; 3509 } 3510 } 3511 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3512 if (ctx) 3513 kfree(ctx, M_AACBUF); 3514 3515 return 0; 3516 } 3517 3518 /* 3519 * Pass the caller the next AIF in their queue 3520 */ 3521 static int 3522 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3523 { 3524 struct get_adapter_fib_ioctl agf; 3525 struct aac_fib_context *ctx; 3526 int error; 3527 3528 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3529 3530 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { 3531 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3532 if (agf.AdapterFibContext == ctx->unique) 3533 break; 3534 } 3535 if (!ctx) 3536 return (EFAULT); 3537 3538 error = aac_return_aif(sc, ctx, agf.AifFib); 3539 if (error == EAGAIN && agf.Wait) { 3540 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); 3541 sc->aac_state |= AAC_STATE_AIF_SLEEPER; 3542 while (error == EAGAIN) { 3543 error = tsleep(sc->aac_aifq, 3544 PCATCH, "aacaif", 0); 3545 if (error == 0) 3546 error = aac_return_aif(sc, ctx, agf.AifFib); 3547 } 3548 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3549 } 3550 } 3551 return(error); 3552 } 3553 3554 /* 3555 * Hand the next AIF off the top of the queue out to userspace. 
3556 */ 3557 static int 3558 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3559 { 3560 int current, error; 3561 3562 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3563 3564 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3565 current = ctx->ctx_idx; 3566 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3567 /* empty */ 3568 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3569 return (EAGAIN); 3570 } 3571 error = 3572 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3573 if (error) 3574 device_printf(sc->aac_dev, 3575 "aac_return_aif: copyout returned %d\n", error); 3576 else { 3577 ctx->ctx_wrap = 0; 3578 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3579 } 3580 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3581 return(error); 3582 } 3583 3584 static int 3585 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3586 { 3587 struct aac_pci_info { 3588 u_int32_t bus; 3589 u_int32_t slot; 3590 } pciinf; 3591 int error; 3592 3593 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3594 3595 pciinf.bus = pci_get_bus(sc->aac_dev); 3596 pciinf.slot = pci_get_slot(sc->aac_dev); 3597 3598 error = copyout((caddr_t)&pciinf, uptr, 3599 sizeof(struct aac_pci_info)); 3600 3601 return (error); 3602 } 3603 3604 static int 3605 aac_supported_features(struct aac_softc *sc, caddr_t uptr) 3606 { 3607 struct aac_features f; 3608 int error; 3609 3610 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3611 3612 if ((error = copyin(uptr, &f, sizeof (f))) != 0) 3613 return (error); 3614 3615 /* 3616 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3617 * ALL zero in the featuresState, the driver will return the current 3618 * state of all the supported features, the data field will not be 3619 * valid. 
	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
	 * a specific bit set in the featuresState, the driver will return the
	 * current state of this specific feature and whatever data that are
	 * associated with the feature in the data field or perform whatever
	 * action needed indicates in the data field.
	 */
	if (f.feat.fValue == 0) {
		/* Report the state of every supported feature. */
		f.feat.fBits.largeLBA =
		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: In the future, add other features state here as well */
	} else {
		/* Report only the feature(s) the caller asked about. */
		if (f.feat.fBits.largeLBA)
			f.feat.fBits.largeLBA =
			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: Add other features state and data in the future */
	}

	error = copyout(&f, uptr, sizeof (f));
	return (error);
}

/*
 * Give the userland some information about the container.  The AAC arch
 * expects the driver to be a SCSI passthrough type driver, so it expects
 * the containers to have b:t:l numbers.  Fake it.
 */
static int
aac_query_disk(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_query_disk query_disk;
	struct aac_container *co;
	struct aac_disk *disk;
	int error, id;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	disk = NULL;

	error = copyin(uptr, (caddr_t)&query_disk,
		       sizeof(struct aac_query_disk));
	if (error)
		return (error);

	id = query_disk.ContainerNumber;
	if (id == -1)
		return (EINVAL);

	/* Look the container up by ObjectId under the container lock. */
	lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE);
	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
		if (co->co_mntobj.ObjectId == id)
			break;
	}

	if (co == NULL) {
		query_disk.Valid = 0;
		query_disk.Locked = 0;
		query_disk.Deleted = 1;		/* XXX is this right? */
	} else {
		disk = device_get_softc(co->co_disk);
		query_disk.Valid = 1;
		query_disk.Locked =
			(disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0;
		query_disk.Deleted = 0;
		query_disk.Bus = device_get_unit(sc->aac_dev);
		query_disk.Target = disk->unit;
		query_disk.Lun = 0;
		query_disk.UnMapped = 0;
		/*
		 * NOTE(review): fixed 10-byte copy of the device name;
		 * assumes diskDeviceName holds at least 10 bytes and that
		 * si_name fits (or truncation is acceptable) — confirm
		 * against the struct definitions.
		 */
		bcopy(disk->ad_dev_t->si_name,
		      &query_disk.diskDeviceName[0], 10);
	}
	lockmgr(&sc->aac_container_lock, LK_RELEASE);

	error = copyout((caddr_t)&query_disk, uptr,
			sizeof(struct aac_query_disk));

	return (error);
}

/*
 * Ask the controller which SCSI passthrough buses it has and create an
 * "aacp" child device for each valid bus.  Uses a synchronous FIB for the
 * three firmware round-trips; any firmware error aborts the enumeration.
 */
static void
aac_get_bus_info(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_ctcfg *c_cmd;
	struct aac_ctcfg_resp *c_resp;
	struct aac_vmioctl *vmi;
	struct aac_vmi_businf_resp *vmi_resp;
	struct aac_getbusinf businfo;
	struct aac_sim *caminf;
	device_t child;
	int i, found, error;

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);
	c_cmd = (struct aac_ctcfg *)&fib->data[0];
	bzero(c_cmd, sizeof(struct aac_ctcfg));

	/* Step 1: query the SCSI method id used for passthrough ioctls. */
	c_cmd->Command = VM_ContainerConfig;
	c_cmd->cmd = CT_GET_SCSI_METHOD;
	c_cmd->param = 0;

	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
			     sizeof(struct aac_ctcfg));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending "
			      "VM_ContainerConfig command\n", error);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
	if (c_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
			      c_resp->Status);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	sc->scsi_method_id = c_resp->param;

	/* Step 2: fetch the bus inventory via VM_Ioctl/GetBusInfo. */
	vmi = (struct aac_vmioctl *)&fib->data[0];
	bzero(vmi, sizeof(struct aac_vmioctl));

	vmi->Command = VM_Ioctl;
	vmi->ObjType = FT_DRIVE;
	vmi->MethId = sc->scsi_method_id;
	vmi->ObjId = 0;
	vmi->IoctlCmd = GetBusInfo;

	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
			     sizeof(struct aac_vmi_businf_resp));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
			      error);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
	if (vmi_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
			      vmi_resp->Status);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	/* Copy the bus info out before releasing the sync FIB it lives in. */
	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	/* Step 3: create a passthrough child device per valid bus. */
	found = 0;
	for (i = 0; i < businfo.BusCount; i++) {
		if (businfo.BusValid[i] != AAC_BUS_VALID)
			continue;

		caminf = (struct aac_sim *)kmalloc(sizeof(struct aac_sim),
			 M_AACBUF, M_INTWAIT | M_ZERO);

		child = device_add_child(sc->aac_dev, "aacp", -1);
		if (child == NULL) {
			device_printf(sc->aac_dev,
				      "device_add_child failed for passthrough bus %d\n",
				      i);
			kfree(caminf, M_AACBUF);
			break;
		}

		caminf->TargetsPerBus = businfo.TargetsPerBus;
		caminf->BusNumber = i;
		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
		caminf->aac_sc = sc;
		caminf->sim_dev = child;

		device_set_ivars(child, caminf);
		device_set_desc(child, "SCSI Passthrough Bus");
		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);

		found = 1;
	}

	if (found)
		bus_generic_attach(sc->aac_dev);
}