1 /*- 2 * Copyright (c) 2000 Michael Smith 3 * Copyright (c) 2001 Scott Long 4 * Copyright (c) 2000 BSDi 5 * Copyright (c) 2001 Adaptec, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD: head/sys/dev/aac/aac.c 260044 2013-12-29 17:37:32Z marius $ 30 */ 31 32 /* 33 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters. 
 */

#define AAC_DRIVERNAME		"aac"

#include "opt_aac.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/poll.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <sys/bus_dma.h>
#include <sys/device.h>
#include <sys/mplock2.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/raid/aac/aacreg.h>
#include <dev/raid/aac/aac_ioctl.h>
#include <dev/raid/aac/aacvar.h>
#include <dev/raid/aac/aac_tables.h>

static void	aac_startup(void *arg);
static void	aac_add_container(struct aac_softc *sc,
				  struct aac_mntinforesp *mir, int f);
static void	aac_get_bus_info(struct aac_softc *sc);
static void	aac_daemon(void *arg);

/* Command Processing */
static void	aac_timeout(struct aac_softc *sc);
static void	aac_complete(void *context, int pending);
static int	aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
static void	aac_bio_complete(struct aac_command *cm);
static int	aac_wait_command(struct aac_command *cm);
static void	aac_command_thread(void *arg);

/* Command Buffer Management */
static void	aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
				   int nseg, int error);
static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
				       int nseg, int error);
static int	aac_alloc_commands(struct aac_softc *sc);
static void	aac_free_commands(struct aac_softc *sc);
static void	aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int	aac_alloc(struct aac_softc *sc);
static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
			       int error);
static int	aac_check_firmware(struct aac_softc *sc);
static int	aac_init(struct aac_softc *sc);
static int	aac_sync_command(struct aac_softc *sc, u_int32_t command,
				 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
				 u_int32_t arg3, u_int32_t *sp);
static int	aac_setup_intr(struct aac_softc *sc);
static int	aac_enqueue_fib(struct aac_softc *sc, int queue,
				struct aac_command *cm);
static int	aac_dequeue_fib(struct aac_softc *sc, int queue,
				u_int32_t *fib_size, struct aac_fib **fib_addr);
static int	aac_enqueue_response(struct aac_softc *sc, int queue,
				     struct aac_fib *fib);

/* StrongARM interface */
static int	aac_sa_get_fwstatus(struct aac_softc *sc);
static void	aac_sa_qnotify(struct aac_softc *sc, int qbit);
static int	aac_sa_get_istatus(struct aac_softc *sc);
static void	aac_sa_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_sa_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_sa_set_interrupts(struct aac_softc *sc, int enable);

/*
 * Register-access method table for StrongARM-based controllers.  The last
 * three slots (send_command / get_outb_queue / set_outb_queue) are NULL:
 * this hardware does not support the NEW_COMM queue interface.
 */
const struct aac_interface aac_sa_interface = {
	aac_sa_get_fwstatus,
	aac_sa_qnotify,
	aac_sa_get_istatus,
	aac_sa_clear_istatus,
	aac_sa_set_mailbox,
	aac_sa_get_mailbox,
	aac_sa_set_interrupts,
	NULL, NULL, NULL
};

/* i960Rx interface */
static int	aac_rx_get_fwstatus(struct aac_softc *sc);
static void	aac_rx_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rx_get_istatus(struct aac_softc *sc);
static void	aac_rx_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_rx_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rx_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rx_get_outb_queue(struct aac_softc *sc);
static void	aac_rx_set_outb_queue(struct aac_softc *sc, int index);

/* Register-access method table for i960Rx-based controllers. */
const struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_qnotify,
	aac_rx_get_istatus,
	aac_rx_clear_istatus,
	aac_rx_set_mailbox,
	aac_rx_get_mailbox,
	aac_rx_set_interrupts,
	aac_rx_send_command,
	aac_rx_get_outb_queue,
	aac_rx_set_outb_queue
};

/* Rocket/MIPS interface */
static int	aac_rkt_get_fwstatus(struct aac_softc *sc);
static void	aac_rkt_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rkt_get_istatus(struct aac_softc *sc);
static void	aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
				    u_int32_t arg0, u_int32_t arg1,
				    u_int32_t arg2, u_int32_t arg3);
static int	aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rkt_get_outb_queue(struct aac_softc *sc);
static void	aac_rkt_set_outb_queue(struct aac_softc *sc, int index);

/* Register-access method table for Rocket/MIPS-based controllers. */
const struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_qnotify,
	aac_rkt_get_istatus,
	aac_rkt_clear_istatus,
	aac_rkt_set_mailbox,
	aac_rkt_get_mailbox,
	aac_rkt_set_interrupts,
	aac_rkt_send_command,
	aac_rkt_get_outb_queue,
	aac_rkt_set_outb_queue
};

/* Debugging and Diagnostics */
static void		aac_describe_controller(struct aac_softc *sc);
static const char	*aac_describe_code(const struct aac_code_lookup *table,
					   u_int32_t code);

/* Management Interface */
static d_open_t		aac_open;
static d_close_t	aac_close;
static d_ioctl_t	aac_ioctl;
static d_kqfilter_t	aac_kqfilter;
static void		aac_filter_detach(struct knote *kn);
static int
aac_filter_read(struct knote *kn, long hint);
static int	aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int	aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void	aac_handle_aif(struct aac_softc *sc,
			       struct aac_fib *fib);
static int	aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int	aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int	aac_return_aif(struct aac_softc *sc,
			       struct aac_fib_context *ctx, caddr_t uptr);
static int	aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int	aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int	aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void	aac_ioctl_event(struct aac_softc *sc,
				struct aac_event *event, void *arg);
static struct aac_mntinforesp *
	aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

/* Character-device entry points for /dev/aac%d (management interface). */
static struct dev_ops aac_ops = {
	{ "aac", 0, 0 },
	.d_open =	aac_open,
	.d_close =	aac_close,
	.d_ioctl =	aac_ioctl,
	.d_kqfilter =	aac_kqfilter
};

static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task.
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks.  These must exist before aac_alloc()/aac_init()
	 * run, and before the AIF thread or daemon callout can fire.
	 */
	lockinit(&sc->aac_aifq_lock, "AAC AIF lock", 0, LK_CANRECURSE);
	lockinit(&sc->aac_io_lock, "AAC I/O lock", 0, LK_CANRECURSE);
	lockinit(&sc->aac_container_lock, "AAC container lock", 0, LK_CANRECURSE);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout. */
	callout_init_mp(&sc->aac_daemontime);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Add sysctls.
	 */
	sysctl_ctx_init(&sc->aac_sysctl_ctx);
	sc->aac_sysctl_tree = SYSCTL_ADD_NODE(&sc->aac_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->aac_dev), CTLFLAG_RD, 0, "");
	if (sc->aac_sysctl_tree == NULL) {
		device_printf(sc->aac_dev, "can't add sysctl node\n");
		return (EINVAL);
	}
	SYSCTL_ADD_INT(&sc->aac_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->aac_sysctl_tree),
	    OID_AUTO, "firmware_build", CTLFLAG_RD,
	    &sc->aac_revision.buildNumber, 0,
	    "firmware build number");

	/*
	 * Register to probe our containers later.
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	sc->aac_ich.ich_desc = "aac";
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
			      "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_ops, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kthread_create(aac_command_thread, sc,
			   &sc->aifthread, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	/* First daemon tick one minute from now. */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	return(0);
}

/*
 * Periodic housekeeping callout: pushes the current host time to the
 * controller via a SendHostTime synchronous FIB, then reschedules itself
 * to run again in 30 minutes.  Bails out early if the callout has been
 * rescheduled or stopped (e.g. during detach).
 */
static void
aac_daemon(void *arg)
{
	struct timeval tv;
	struct aac_softc *sc;
	struct aac_fib *fib;

	sc = arg;
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);

	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0) {
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}
	getmicrotime(&tv);
	aac_alloc_sync_fib(sc, &fib);
	*(uint32_t *)fib->data = tv.tv_sec;
	aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
	aac_release_sync_fib(sc);
	callout_reset(&sc->aac_daemontime, 30 * 60 * hz, aac_daemon, sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Queue an event callback for later delivery.  Only AAC_EVENT_CMFREE
 * (notify when a command structure becomes free) is currently supported;
 * anything else is logged and dropped.
 */
void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
			      event->ev_type);
		break;
	}
}

/*
 * Request information of container #cid
 *
 * Issues a VM_NameServe (or VM_NameServe64 when 64-bit LBA is enabled)
 * ContainerCommand through the supplied sync FIB.  Returns a pointer into
 * the FIB's own data area, or NULL if the sync FIB failed; the result is
 * only valid until the FIB is reused.
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_mntinfo))) {
		device_printf(sc->aac_dev, "Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}

/*
 * Probe for containers, create disks.
420 */ 421 static void 422 aac_startup(void *arg) 423 { 424 struct aac_softc *sc; 425 struct aac_fib *fib; 426 struct aac_mntinforesp *mir; 427 int count = 0, i = 0; 428 429 sc = (struct aac_softc *)arg; 430 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 431 432 /* disconnect ourselves from the intrhook chain */ 433 config_intrhook_disestablish(&sc->aac_ich); 434 435 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 436 aac_alloc_sync_fib(sc, &fib); 437 438 /* loop over possible containers */ 439 do { 440 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 441 continue; 442 if (i == 0) 443 count = mir->MntRespCount; 444 aac_add_container(sc, mir, 0); 445 i++; 446 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 447 448 aac_release_sync_fib(sc); 449 lockmgr(&sc->aac_io_lock, LK_RELEASE); 450 451 /* poke the bus to actually attach the child devices */ 452 if (bus_generic_attach(sc->aac_dev)) 453 device_printf(sc->aac_dev, "bus_generic_attach failed\n"); 454 455 /* mark the controller up */ 456 sc->aac_state &= ~AAC_STATE_SUSPEND; 457 458 /* enable interrupts now */ 459 AAC_UNMASK_INTERRUPTS(sc); 460 } 461 462 /* 463 * Create a device to represent a new container 464 */ 465 static void 466 aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f) 467 { 468 struct aac_container *co; 469 device_t child; 470 471 /* 472 * Check container volume type for validity. Note that many of 473 * the possible types may never show up. 
474 */ 475 if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { 476 co = (struct aac_container *)kmalloc(sizeof *co, M_AACBUF, 477 M_INTWAIT | M_ZERO); 478 fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d", 479 mir->MntTable[0].ObjectId, 480 mir->MntTable[0].FileSystemName, 481 mir->MntTable[0].Capacity, mir->MntTable[0].VolType); 482 483 if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL) 484 device_printf(sc->aac_dev, "device_add_child failed\n"); 485 else 486 device_set_ivars(child, co); 487 device_set_desc(child, aac_describe_code(aac_container_types, 488 mir->MntTable[0].VolType)); 489 co->co_disk = child; 490 co->co_found = f; 491 bcopy(&mir->MntTable[0], &co->co_mntobj, 492 sizeof(struct aac_mntobj)); 493 lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE); 494 TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); 495 lockmgr(&sc->aac_container_lock, LK_RELEASE); 496 } 497 } 498 499 /* 500 * Allocate resources associated with (sc) 501 */ 502 static int 503 aac_alloc(struct aac_softc *sc) 504 { 505 506 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 507 508 /* 509 * Create DMA tag for mapping buffers into controller-addressable space. 510 */ 511 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 512 1, 0, /* algnmnt, boundary */ 513 (sc->flags & AAC_FLAGS_SG_64BIT) ? 514 BUS_SPACE_MAXADDR : 515 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 516 BUS_SPACE_MAXADDR, /* highaddr */ 517 NULL, NULL, /* filter, filterarg */ 518 MAXBSIZE, /* maxsize */ 519 sc->aac_sg_tablesize, /* nsegments */ 520 MAXBSIZE, /* maxsegsize */ 521 BUS_DMA_ALLOCNOW, /* flags */ 522 &sc->aac_buffer_dmat)) { 523 device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); 524 return (ENOMEM); 525 } 526 527 /* 528 * Create DMA tag for mapping FIBs into controller-addressable space.. 529 */ 530 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 531 1, 0, /* algnmnt, boundary */ 532 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 
533 BUS_SPACE_MAXADDR_32BIT : 534 0x7fffffff, /* lowaddr */ 535 BUS_SPACE_MAXADDR, /* highaddr */ 536 NULL, NULL, /* filter, filterarg */ 537 sc->aac_max_fibs_alloc * 538 sc->aac_max_fib_size, /* maxsize */ 539 1, /* nsegments */ 540 sc->aac_max_fibs_alloc * 541 sc->aac_max_fib_size, /* maxsize */ 542 0, /* flags */ 543 &sc->aac_fib_dmat)) { 544 device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); 545 return (ENOMEM); 546 } 547 548 /* 549 * Create DMA tag for the common structure and allocate it. 550 */ 551 if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 552 1, 0, /* algnmnt, boundary */ 553 (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 554 BUS_SPACE_MAXADDR_32BIT : 555 0x7fffffff, /* lowaddr */ 556 BUS_SPACE_MAXADDR, /* highaddr */ 557 NULL, NULL, /* filter, filterarg */ 558 8192 + sizeof(struct aac_common), /* maxsize */ 559 1, /* nsegments */ 560 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 561 0, /* flags */ 562 &sc->aac_common_dmat)) { 563 device_printf(sc->aac_dev, 564 "can't allocate common structure DMA tag\n"); 565 return (ENOMEM); 566 } 567 if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, 568 BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { 569 device_printf(sc->aac_dev, "can't allocate common structure\n"); 570 return (ENOMEM); 571 } 572 573 /* 574 * Work around a bug in the 2120 and 2200 that cannot DMA commands 575 * below address 8192 in physical memory. 576 * XXX If the padding is not needed, can it be put to use instead 577 * of ignored? 
578 */ 579 (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, 580 sc->aac_common, 8192 + sizeof(*sc->aac_common), 581 aac_common_map, sc, 0); 582 583 if (sc->aac_common_busaddr < 8192) { 584 sc->aac_common = (struct aac_common *) 585 ((uint8_t *)sc->aac_common + 8192); 586 sc->aac_common_busaddr += 8192; 587 } 588 bzero(sc->aac_common, sizeof(*sc->aac_common)); 589 590 /* Allocate some FIBs and associated command structs */ 591 TAILQ_INIT(&sc->aac_fibmap_tqh); 592 sc->aac_commands = kmalloc(sc->aac_max_fibs * sizeof(struct aac_command), 593 M_AACBUF, M_WAITOK|M_ZERO); 594 while (sc->total_fibs < sc->aac_max_fibs) { 595 if (aac_alloc_commands(sc) != 0) 596 break; 597 } 598 if (sc->total_fibs == 0) 599 return (ENOMEM); 600 601 return (0); 602 } 603 604 /* 605 * Free all of the resources associated with (sc) 606 * 607 * Should not be called if the controller is active. 608 */ 609 void 610 aac_free(struct aac_softc *sc) 611 { 612 613 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 614 615 /* remove the control device */ 616 if (sc->aac_dev_t != NULL) 617 destroy_dev(sc->aac_dev_t); 618 619 /* throw away any FIB buffers, discard the FIB DMA tag */ 620 aac_free_commands(sc); 621 if (sc->aac_fib_dmat) 622 bus_dma_tag_destroy(sc->aac_fib_dmat); 623 624 kfree(sc->aac_commands, M_AACBUF); 625 626 /* destroy the common area */ 627 if (sc->aac_common) { 628 bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); 629 bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, 630 sc->aac_common_dmamap); 631 } 632 if (sc->aac_common_dmat) 633 bus_dma_tag_destroy(sc->aac_common_dmat); 634 635 /* disconnect the interrupt handler */ 636 if (sc->aac_intr) 637 bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr); 638 if (sc->aac_irq != NULL) { 639 bus_release_resource(sc->aac_dev, SYS_RES_IRQ, 640 rman_get_rid(sc->aac_irq), sc->aac_irq); 641 if (sc->aac_irq_type == PCI_INTR_TYPE_MSI) 642 pci_release_msi(sc->aac_dev); 643 } 644 645 /* destroy data-transfer DMA 
	   tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
	/* NARK controllers use a second register window */
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
	dev_ops_remove_minor(&aac_ops, device_get_unit(sc->aac_dev));

	sysctl_ctx_free(&sc->aac_sysctl_ctx);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim *sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	callout_stop_sync(&sc->aac_daemontime);

	/*
	 * Ask the AIF thread to exit and wait for it; the thread wakes us
	 * via wakeup(sc->aac_dev) when it clears AAC_AIFFLAGS_RUNNING.
	 */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		lksleep(sc->aac_dev, &sc->aac_io_lock, 0, "aacdch", 0);
	}
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		kfree(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		kfree(sim, M_AACBUF);
	}

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	lockuninit(&sc->aac_aifq_lock);
	lockuninit(&sc->aac_io_lock);
	lockuninit(&sc->aac_container_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		kprintf("FAILED.\n");
	else
		kprintf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			kprintf("FAILED.\n");
		} else {
			kprintf("done.\n");
		}
	}
#endif

	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aac_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_MASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aac_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_UNMASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM interface.
 *
 * Outbound queue entries are encoded in 'index': 0xffffffff means empty
 * (read twice before giving up), bit 1 set marks an adapter-initiated
 * FIB (AIF), otherwise index >> 2 is the slot in sc->aac_commands and
 * bit 0 marks a fast-path completion.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	while (1) {
		index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
		if (index & 2) {
			if (index == 0xfffffffe) {
				/* XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF: copy the FIB out of adapter memory word by word */
			fib = (struct aac_fib *)kmalloc(sizeof *fib, M_AACBUF,
				   M_INTWAIT | M_ZERO);
			index &= ~2;
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			kfree(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			/* normal command completion; bit 0 = fast path */
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Interrupt filter for !NEW_COMM interface.
 */
void
aac_filter(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue(taskqueue_swi, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
		if ((reason & AAC_DB_PRINTF) &&
		    (sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * lksleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
}

/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
		 */
		if (cm->cm_datalen != 0) {
			/* aac_map_command_sg is called back with the S/G list */
			error = bus_dmamap_load(sc->aac_buffer_dmat,
						cm->cm_datamap, cm->cm_data,
						cm->cm_datalen,
						aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
				error = 0;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				      "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 *
 * Kernel thread (created in aac_attach).  Sleeps on sc->aifthread with
 * an AAC_PERIODIC_INTERVAL timeout; the timeout doubles as the trigger
 * for stuck-command detection (aac_timeout).  Exits when
 * AAC_AIFFLAGS_EXIT is set, waking the detach path via wakeup(sc->aac_dev).
 */
static void
aac_command_thread(void *arg)
{
	struct aac_softc *sc = arg;
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = lksleep(sc->aifthread, &sc->aac_io_lock, 0,
					 "aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * can sleep.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			lockmgr(&sc->aac_io_lock, LK_RELEASE);
			aac_alloc_commands(sc);
			lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/*
		 * Also check to see if the adapter has a command for us.
		 * (NEW_COMM adapters deliver these through the interrupt
		 * path instead, so skip the queue scan there.)
		 */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
					    &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
					      "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
						     AAC_ADAP_NORM_RESP_QUEUE,
						     fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	wakeup(sc->aac_dev);
}

/*
 * Process completed commands.
1109 */ 1110 static void 1111 aac_complete(void *context, int pending) 1112 { 1113 struct aac_softc *sc; 1114 struct aac_command *cm; 1115 struct aac_fib *fib; 1116 u_int32_t fib_size; 1117 1118 sc = (struct aac_softc *)context; 1119 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1120 1121 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 1122 1123 /* pull completed commands off the queue */ 1124 for (;;) { 1125 /* look for completed FIBs on our queue */ 1126 if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, 1127 &fib)) 1128 break; /* nothing to do */ 1129 1130 /* get the command, unmap and hand off for processing */ 1131 cm = sc->aac_commands + fib->Header.SenderData; 1132 if (cm == NULL) { 1133 AAC_PRINT_FIB(sc, fib); 1134 break; 1135 } 1136 if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0) 1137 device_printf(sc->aac_dev, 1138 "COMMAND %p COMPLETED AFTER %d SECONDS\n", 1139 cm, (int)(time_uptime - cm->cm_timestamp)); 1140 1141 aac_remove_busy(cm); 1142 1143 aac_unmap_command(cm); 1144 cm->cm_flags |= AAC_CMD_COMPLETED; 1145 1146 /* is there a completion handler? */ 1147 if (cm->cm_complete != NULL) { 1148 cm->cm_complete(cm); 1149 } else { 1150 /* assume that someone is sleeping on this command */ 1151 wakeup(cm); 1152 } 1153 } 1154 1155 /* see if we can start some more I/O */ 1156 sc->flags &= ~AAC_QUEUE_FRZN; 1157 aac_startio(sc); 1158 1159 lockmgr(&sc->aac_io_lock, LK_RELEASE); 1160 } 1161 1162 /* 1163 * Handle a bio submitted from a disk device. 1164 */ 1165 void 1166 aac_submit_bio(struct aac_disk *ad, struct bio *bio) 1167 { 1168 struct aac_softc *sc; 1169 1170 bio->bio_driver_info = ad; 1171 sc = ad->ad_controller; 1172 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1173 1174 /* queue the BIO and try to get some work done */ 1175 aac_enqueue_bio(sc, bio); 1176 aac_startio(sc); 1177 } 1178 1179 /* 1180 * Get a bio and build a command to go with it. 
1181 */ 1182 static int 1183 aac_bio_command(struct aac_softc *sc, struct aac_command **cmp) 1184 { 1185 struct aac_command *cm; 1186 struct aac_fib *fib; 1187 struct aac_disk *ad; 1188 struct bio *bio; 1189 struct buf *bp; 1190 1191 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1192 1193 /* get the resources we will need */ 1194 cm = NULL; 1195 bio = NULL; 1196 if (aac_alloc_command(sc, &cm)) /* get a command */ 1197 goto fail; 1198 if ((bio = aac_dequeue_bio(sc)) == NULL) 1199 goto fail; 1200 1201 /* fill out the command */ 1202 bp = bio->bio_buf; 1203 cm->cm_data = (void *)bp->b_data; 1204 cm->cm_datalen = bp->b_bcount; 1205 cm->cm_complete = aac_bio_complete; 1206 cm->cm_private = bio; 1207 cm->cm_timestamp = time_uptime; 1208 1209 /* build the FIB */ 1210 fib = cm->cm_fib; 1211 fib->Header.Size = sizeof(struct aac_fib_header); 1212 fib->Header.XferState = 1213 AAC_FIBSTATE_HOSTOWNED | 1214 AAC_FIBSTATE_INITIALISED | 1215 AAC_FIBSTATE_EMPTY | 1216 AAC_FIBSTATE_FROMHOST | 1217 AAC_FIBSTATE_REXPECTED | 1218 AAC_FIBSTATE_NORM | 1219 AAC_FIBSTATE_ASYNC | 1220 AAC_FIBSTATE_FAST_RESPONSE; 1221 1222 /* build the read/write request */ 1223 ad = (struct aac_disk *)bio->bio_driver_info; 1224 1225 if (sc->flags & AAC_FLAGS_RAW_IO) { 1226 struct aac_raw_io *raw; 1227 raw = (struct aac_raw_io *)&fib->data[0]; 1228 fib->Header.Command = RawIo; 1229 raw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE; 1230 raw->ByteCount = bp->b_bcount; 1231 raw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1232 raw->BpTotal = 0; 1233 raw->BpComplete = 0; 1234 fib->Header.Size += sizeof(struct aac_raw_io); 1235 cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw; 1236 if (bp->b_cmd == BUF_CMD_READ) { 1237 raw->Flags = 1; 1238 cm->cm_flags |= AAC_CMD_DATAIN; 1239 } else { 1240 raw->Flags = 0; 1241 cm->cm_flags |= AAC_CMD_DATAOUT; 1242 } 1243 } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 1244 fib->Header.Command = ContainerCommand; 1245 if (bp->b_cmd == BUF_CMD_READ) { 1246 
struct aac_blockread *br; 1247 br = (struct aac_blockread *)&fib->data[0]; 1248 br->Command = VM_CtBlockRead; 1249 br->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1250 br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE; 1251 br->ByteCount = bp->b_bcount; 1252 fib->Header.Size += sizeof(struct aac_blockread); 1253 cm->cm_sgtable = &br->SgMap; 1254 cm->cm_flags |= AAC_CMD_DATAIN; 1255 } else { 1256 struct aac_blockwrite *bw; 1257 bw = (struct aac_blockwrite *)&fib->data[0]; 1258 bw->Command = VM_CtBlockWrite; 1259 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1260 bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE; 1261 bw->ByteCount = bp->b_bcount; 1262 bw->Stable = CUNSTABLE; 1263 fib->Header.Size += sizeof(struct aac_blockwrite); 1264 cm->cm_flags |= AAC_CMD_DATAOUT; 1265 cm->cm_sgtable = &bw->SgMap; 1266 } 1267 } else { 1268 fib->Header.Command = ContainerCommand64; 1269 if (bp->b_cmd == BUF_CMD_READ) { 1270 struct aac_blockread64 *br; 1271 br = (struct aac_blockread64 *)&fib->data[0]; 1272 br->Command = VM_CtHostRead64; 1273 br->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1274 br->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE; 1275 br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE; 1276 br->Pad = 0; 1277 br->Flags = 0; 1278 fib->Header.Size += sizeof(struct aac_blockread64); 1279 cm->cm_flags |= AAC_CMD_DATAIN; 1280 cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64; 1281 } else { 1282 struct aac_blockwrite64 *bw; 1283 bw = (struct aac_blockwrite64 *)&fib->data[0]; 1284 bw->Command = VM_CtHostWrite64; 1285 bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; 1286 bw->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE; 1287 bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE; 1288 bw->Pad = 0; 1289 bw->Flags = 0; 1290 fib->Header.Size += sizeof(struct aac_blockwrite64); 1291 cm->cm_flags |= AAC_CMD_DATAOUT; 1292 cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64; 1293 } 1294 } 1295 1296 *cmp = cm; 1297 return(0); 1298 1299 fail: 1300 if 
(bio != NULL) 1301 aac_enqueue_bio(sc, bio); 1302 if (cm != NULL) 1303 aac_release_command(cm); 1304 return(ENOMEM); 1305 } 1306 1307 /* 1308 * Handle a bio-instigated command that has been completed. 1309 */ 1310 static void 1311 aac_bio_complete(struct aac_command *cm) 1312 { 1313 struct aac_blockread_response *brr; 1314 struct aac_blockwrite_response *bwr; 1315 struct bio *bio; 1316 struct buf *bp; 1317 const char *code; 1318 AAC_FSAStatus status; 1319 1320 /* fetch relevant status and then release the command */ 1321 bio = (struct bio *)cm->cm_private; 1322 bp = bio->bio_buf; 1323 if (bp->b_cmd == BUF_CMD_READ) { 1324 brr = (struct aac_blockread_response *)&cm->cm_fib->data[0]; 1325 status = brr->Status; 1326 } else { 1327 bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0]; 1328 status = bwr->Status; 1329 } 1330 aac_release_command(cm); 1331 1332 /* fix up the bio based on status */ 1333 if (status == ST_OK) { 1334 bp->b_resid = 0; 1335 code = NULL; 1336 } else { 1337 bp->b_error = EIO; 1338 bp->b_flags |= B_ERROR; 1339 } 1340 aac_biodone(bio, code); 1341 } 1342 1343 /* 1344 * Submit a command to the controller, return when it completes. 1345 * XXX This is very dangerous! If the card has gone out to lunch, we could 1346 * be stuck here forever. At the same time, signals are not caught 1347 * because there is a risk that a signal could wakeup the sleep before 1348 * the card has a chance to complete the command. Since there is no way 1349 * to cancel a command that is in progress, we can't protect against the 1350 * card completing a command late and spamming the command and data 1351 * memory. So, we are held hostage until the command completes. 
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aac_startio(sc);
	/* sleep with no timeout; aac_complete() wakes us via wakeup(cm) */
	error = lksleep(cm, &sc->aac_io_lock, 0, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
 *
 * Returns 0 with *cmp filled in, or EBUSY if the freelist is empty.  In
 * the EBUSY case, if more FIBs may still be allocated, the command thread
 * is asked (via AAC_AIFFLAGS_ALLOCFIBS) to grow the pool asynchronously.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			lockmgr(&sc->aac_io_lock, LK_RELEASE);
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
 */
void
aac_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* (re)initialize the command/FIB */
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Flags = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.SenderData = 0;

	aac_enqueue_free(cm);

	/* fire the first queued free-command event waiter, if any */
	if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}

/*
 * Map helper for command/FIB allocation.
 * Busdma load callback: records the bus address of the single FIB
 * segment.  nseg/error are ignored (the load below cannot bounce).
 */
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint64_t *fibphys;

	fibphys = (uint64_t *)arg;

	*fibphys = segs[0].ds_addr;
}

/*
 * Allocate and initialize commands/FIBs for this adapter.
 *
 * Allocates one block of aac_max_fibs_alloc FIBs in DMAable memory and
 * threads the corresponding command structures onto the freelist.
 * Returns 0 if at least one command was set up, ENOMEM otherwise.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = kmalloc(sizeof(struct aac_fibmap), M_AACBUF, M_INTWAIT | M_ZERO);

	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		kfree(fm, M_AACBUF);
		return (ENOMEM);
	}

	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
			      aac_map_command_helper, &fibphys, 0);

	/*
	 * Initialize constant fields in the command structure.
	 * NOTE(review): fm->aac_commands is re-assigned on every iteration
	 * and thus ends up pointing at the LAST command of this batch,
	 * while aac_free_commands() indexes forward from it as if it were
	 * the first — looks suspicious; confirm against the detach path.
	 */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
		cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
		aac_release_command(cm);
		sc->total_fibs++;
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
	}

	if (i > 0) {
		lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return (0);
	}

	/* nothing usable was set up; unwind the DMA allocation */
	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	kfree(fm, M_AACBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 * NOTE(review): the post-decrement in the condition leaves
		 * total_fibs one below zero when it runs out — harmless on
		 * this teardown path, but verify before reusing the count.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		kfree(fm, M_AACBUF);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
1546 */ 1547 static void 1548 aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1549 { 1550 struct aac_softc *sc; 1551 struct aac_command *cm; 1552 struct aac_fib *fib; 1553 int i; 1554 1555 cm = (struct aac_command *)arg; 1556 sc = cm->cm_sc; 1557 fib = cm->cm_fib; 1558 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1559 1560 /* copy into the FIB */ 1561 if (cm->cm_sgtable != NULL) { 1562 if (fib->Header.Command == RawIo) { 1563 struct aac_sg_tableraw *sg; 1564 sg = (struct aac_sg_tableraw *)cm->cm_sgtable; 1565 sg->SgCount = nseg; 1566 for (i = 0; i < nseg; i++) { 1567 sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; 1568 sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; 1569 sg->SgEntryRaw[i].Next = 0; 1570 sg->SgEntryRaw[i].Prev = 0; 1571 sg->SgEntryRaw[i].Flags = 0; 1572 } 1573 /* update the FIB size for the s/g count */ 1574 fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); 1575 } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 1576 struct aac_sg_table *sg; 1577 sg = cm->cm_sgtable; 1578 sg->SgCount = nseg; 1579 for (i = 0; i < nseg; i++) { 1580 sg->SgEntry[i].SgAddress = segs[i].ds_addr; 1581 sg->SgEntry[i].SgByteCount = segs[i].ds_len; 1582 } 1583 /* update the FIB size for the s/g count */ 1584 fib->Header.Size += nseg*sizeof(struct aac_sg_entry); 1585 } else { 1586 struct aac_sg_table64 *sg; 1587 sg = (struct aac_sg_table64 *)cm->cm_sgtable; 1588 sg->SgCount = nseg; 1589 for (i = 0; i < nseg; i++) { 1590 sg->SgEntry64[i].SgAddress = segs[i].ds_addr; 1591 sg->SgEntry64[i].SgByteCount = segs[i].ds_len; 1592 } 1593 /* update the FIB size for the s/g count */ 1594 fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); 1595 } 1596 } 1597 1598 /* Fix up the address values in the FIB. Use the command array index 1599 * instead of a pointer since these fields are only 32 bits. 
Shift 1600 * the SenderFibAddress over to make room for the fast response bit 1601 * and for the AIF bit 1602 */ 1603 cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); 1604 cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; 1605 1606 /* save a pointer to the command for speedy reverse-lookup */ 1607 cm->cm_fib->Header.SenderData = cm->cm_index; 1608 1609 if (cm->cm_flags & AAC_CMD_DATAIN) 1610 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1611 BUS_DMASYNC_PREREAD); 1612 if (cm->cm_flags & AAC_CMD_DATAOUT) 1613 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1614 BUS_DMASYNC_PREWRITE); 1615 cm->cm_flags |= AAC_CMD_MAPPED; 1616 1617 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1618 int count = 10000000L; 1619 while (AAC_SEND_COMMAND(sc, cm) != 0) { 1620 if (--count == 0) { 1621 aac_unmap_command(cm); 1622 sc->flags |= AAC_QUEUE_FRZN; 1623 aac_requeue_ready(cm); 1624 } 1625 DELAY(5); /* wait 5 usec. */ 1626 } 1627 } else { 1628 /* Put the FIB on the outbound queue */ 1629 if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) { 1630 aac_unmap_command(cm); 1631 sc->flags |= AAC_QUEUE_FRZN; 1632 aac_requeue_ready(cm); 1633 } 1634 } 1635 } 1636 1637 /* 1638 * Unmap a command from controller-visible space. 1639 */ 1640 static void 1641 aac_unmap_command(struct aac_command *cm) 1642 { 1643 struct aac_softc *sc; 1644 1645 sc = cm->cm_sc; 1646 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1647 1648 if (!(cm->cm_flags & AAC_CMD_MAPPED)) 1649 return; 1650 1651 if (cm->cm_datalen != 0) { 1652 if (cm->cm_flags & AAC_CMD_DATAIN) 1653 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1654 BUS_DMASYNC_POSTREAD); 1655 if (cm->cm_flags & AAC_CMD_DATAOUT) 1656 bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, 1657 BUS_DMASYNC_POSTWRITE); 1658 1659 bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); 1660 } 1661 cm->cm_flags &= ~AAC_CMD_MAPPED; 1662 } 1663 1664 /* 1665 * Hardware Interface 1666 */ 1667 1668 /* 1669 * Initialize the adapter. 
1670 */ 1671 static void 1672 aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1673 { 1674 struct aac_softc *sc; 1675 1676 sc = (struct aac_softc *)arg; 1677 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1678 1679 sc->aac_common_busaddr = segs[0].ds_addr; 1680 } 1681 1682 static int 1683 aac_check_firmware(struct aac_softc *sc) 1684 { 1685 u_int32_t code, major, minor, options = 0, atu_size = 0; 1686 int rid, status; 1687 time_t then; 1688 1689 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1690 /* 1691 * Wait for the adapter to come ready. 1692 */ 1693 then = time_uptime; 1694 do { 1695 code = AAC_GET_FWSTATUS(sc); 1696 if (code & AAC_SELF_TEST_FAILED) { 1697 device_printf(sc->aac_dev, "FATAL: selftest failed\n"); 1698 return(ENXIO); 1699 } 1700 if (code & AAC_KERNEL_PANIC) { 1701 device_printf(sc->aac_dev, 1702 "FATAL: controller kernel panic"); 1703 return(ENXIO); 1704 } 1705 if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { 1706 device_printf(sc->aac_dev, 1707 "FATAL: controller not coming ready, " 1708 "status %x\n", code); 1709 return(ENXIO); 1710 } 1711 } while (!(code & AAC_UP_AND_RUNNING)); 1712 1713 /* 1714 * Retrieve the firmware version numbers. Dell PERC2/QC cards with 1715 * firmware version 1.x are not compatible with this driver. 1716 */ 1717 if (sc->flags & AAC_FLAGS_PERC2QC) { 1718 if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, 1719 NULL)) { 1720 device_printf(sc->aac_dev, 1721 "Error reading firmware version\n"); 1722 return (EIO); 1723 } 1724 1725 /* These numbers are stored as ASCII! */ 1726 major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; 1727 minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; 1728 if (major == 1) { 1729 device_printf(sc->aac_dev, 1730 "Firmware version %d.%d is not supported.\n", 1731 major, minor); 1732 return (EINVAL); 1733 } 1734 } 1735 1736 /* 1737 * Retrieve the capabilities/supported options word so we know what 1738 * work-arounds to enable. 
Some firmware revs don't support this 1739 * command. 1740 */ 1741 if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) { 1742 if (status != AAC_SRB_STS_INVALID_REQUEST) { 1743 device_printf(sc->aac_dev, 1744 "RequestAdapterInfo failed\n"); 1745 return (EIO); 1746 } 1747 } else { 1748 options = AAC_GET_MAILBOX(sc, 1); 1749 atu_size = AAC_GET_MAILBOX(sc, 2); 1750 sc->supported_options = options; 1751 1752 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && 1753 (sc->flags & AAC_FLAGS_NO4GB) == 0) 1754 sc->flags |= AAC_FLAGS_4GB_WINDOW; 1755 if (options & AAC_SUPPORTED_NONDASD) 1756 sc->flags |= AAC_FLAGS_ENABLE_CAM; 1757 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 1758 && (sizeof(bus_addr_t) > 4)) { 1759 device_printf(sc->aac_dev, 1760 "Enabling 64-bit address support\n"); 1761 sc->flags |= AAC_FLAGS_SG_64BIT; 1762 } 1763 if ((options & AAC_SUPPORTED_NEW_COMM) 1764 && sc->aac_if->aif_send_command) 1765 sc->flags |= AAC_FLAGS_NEW_COMM; 1766 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) 1767 sc->flags |= AAC_FLAGS_ARRAY_64BIT; 1768 } 1769 1770 /* Check for broken hardware that does a lower number of commands */ 1771 sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); 1772 1773 /* Remap mem. 
resource, if required */ 1774 if ((sc->flags & AAC_FLAGS_NEW_COMM) && 1775 atu_size > rman_get_size(sc->aac_regs_res1)) { 1776 rid = rman_get_rid(sc->aac_regs_res1); 1777 bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid, 1778 sc->aac_regs_res1); 1779 sc->aac_regs_res1 = bus_alloc_resource(sc->aac_dev, 1780 SYS_RES_MEMORY, &rid, 0ul, ~0ul, atu_size, RF_ACTIVE); 1781 if (sc->aac_regs_res1 == NULL) { 1782 sc->aac_regs_res1 = bus_alloc_resource_any( 1783 sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); 1784 if (sc->aac_regs_res1 == NULL) { 1785 device_printf(sc->aac_dev, 1786 "couldn't allocate register window\n"); 1787 return (ENXIO); 1788 } 1789 sc->flags &= ~AAC_FLAGS_NEW_COMM; 1790 } 1791 sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1); 1792 sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1); 1793 1794 if (sc->aac_hwif == AAC_HWIF_NARK) { 1795 sc->aac_regs_res0 = sc->aac_regs_res1; 1796 sc->aac_btag0 = sc->aac_btag1; 1797 sc->aac_bhandle0 = sc->aac_bhandle1; 1798 } 1799 } 1800 1801 /* Read preferred settings */ 1802 sc->aac_max_fib_size = sizeof(struct aac_fib); 1803 sc->aac_max_sectors = 128; /* 64KB */ 1804 if (sc->flags & AAC_FLAGS_SG_64BIT) 1805 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE 1806 - sizeof(struct aac_blockwrite64)) 1807 / sizeof(struct aac_sg_entry64); 1808 else 1809 sc->aac_sg_tablesize = (AAC_FIB_DATASIZE 1810 - sizeof(struct aac_blockwrite)) 1811 / sizeof(struct aac_sg_entry); 1812 1813 if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) { 1814 options = AAC_GET_MAILBOX(sc, 1); 1815 sc->aac_max_fib_size = (options & 0xFFFF); 1816 sc->aac_max_sectors = (options >> 16) << 1; 1817 options = AAC_GET_MAILBOX(sc, 2); 1818 sc->aac_sg_tablesize = (options >> 16); 1819 options = AAC_GET_MAILBOX(sc, 3); 1820 sc->aac_max_fibs = (options & 0xFFFF); 1821 } 1822 if (sc->aac_max_fib_size > PAGE_SIZE) 1823 sc->aac_max_fib_size = PAGE_SIZE; 1824 sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size; 1825 1826 if 
(sc->aac_max_fib_size > sizeof(struct aac_fib)) { 1827 sc->flags |= AAC_FLAGS_RAW_IO; 1828 device_printf(sc->aac_dev, "Enable Raw I/O\n"); 1829 } 1830 if ((sc->flags & AAC_FLAGS_RAW_IO) && 1831 (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { 1832 sc->flags |= AAC_FLAGS_LBA_64BIT; 1833 device_printf(sc->aac_dev, "Enable 64-bit array\n"); 1834 } 1835 1836 return (0); 1837 } 1838 1839 static int 1840 aac_init(struct aac_softc *sc) 1841 { 1842 struct aac_adapter_init *ip; 1843 u_int32_t qoffset; 1844 int error; 1845 1846 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 1847 1848 /* 1849 * Fill in the init structure. This tells the adapter about the 1850 * physical location of various important shared data structures. 1851 */ 1852 ip = &sc->aac_common->ac_init; 1853 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; 1854 if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { 1855 ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; 1856 sc->flags |= AAC_FLAGS_RAW_IO; 1857 } 1858 ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION; 1859 1860 ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + 1861 offsetof(struct aac_common, ac_fibs); 1862 ip->AdapterFibsVirtualAddress = 0; 1863 ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); 1864 ip->AdapterFibAlign = sizeof(struct aac_fib); 1865 1866 ip->PrintfBufferAddress = sc->aac_common_busaddr + 1867 offsetof(struct aac_common, ac_printf); 1868 ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; 1869 1870 /* 1871 * The adapter assumes that pages are 4K in size, except on some 1872 * broken firmware versions that do the page->byte conversion twice, 1873 * therefore 'assuming' that this value is in 16MB units (2^24). 1874 * Round up since the granularity is so high. 
1875 */ 1876 ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; 1877 if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { 1878 ip->HostPhysMemPages = 1879 (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; 1880 } 1881 ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ 1882 1883 ip->InitFlags = 0; 1884 if (sc->flags & AAC_FLAGS_NEW_COMM) { 1885 ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED; 1886 device_printf(sc->aac_dev, "New comm. interface enabled\n"); 1887 } 1888 1889 ip->MaxIoCommands = sc->aac_max_fibs; 1890 ip->MaxIoSize = sc->aac_max_sectors << 9; 1891 ip->MaxFibSize = sc->aac_max_fib_size; 1892 1893 /* 1894 * Initialize FIB queues. Note that it appears that the layout of the 1895 * indexes and the segmentation of the entries may be mandated by the 1896 * adapter, which is only told about the base of the queue index fields. 1897 * 1898 * The initial values of the indices are assumed to inform the adapter 1899 * of the sizes of the respective queues, and theoretically it could 1900 * work out the entire layout of the queue structures from this. We 1901 * take the easy route and just lay this area out like everyone else 1902 * does. 1903 * 1904 * The Linux driver uses a much more complex scheme whereby several 1905 * header records are kept for each queue. We use a couple of generic 1906 * list manipulation functions which 'know' the size of each list by 1907 * virtue of a table. 
1908 */ 1909 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; 1910 qoffset &= ~(AAC_QUEUE_ALIGN - 1); 1911 sc->aac_queues = 1912 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); 1913 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; 1914 1915 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1916 AAC_HOST_NORM_CMD_ENTRIES; 1917 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1918 AAC_HOST_NORM_CMD_ENTRIES; 1919 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1920 AAC_HOST_HIGH_CMD_ENTRIES; 1921 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1922 AAC_HOST_HIGH_CMD_ENTRIES; 1923 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1924 AAC_ADAP_NORM_CMD_ENTRIES; 1925 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1926 AAC_ADAP_NORM_CMD_ENTRIES; 1927 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1928 AAC_ADAP_HIGH_CMD_ENTRIES; 1929 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1930 AAC_ADAP_HIGH_CMD_ENTRIES; 1931 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1932 AAC_HOST_NORM_RESP_ENTRIES; 1933 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1934 AAC_HOST_NORM_RESP_ENTRIES; 1935 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1936 AAC_HOST_HIGH_RESP_ENTRIES; 1937 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1938 AAC_HOST_HIGH_RESP_ENTRIES; 1939 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1940 AAC_ADAP_NORM_RESP_ENTRIES; 1941 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1942 AAC_ADAP_NORM_RESP_ENTRIES; 1943 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1944 AAC_ADAP_HIGH_RESP_ENTRIES; 1945 
sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1946 AAC_ADAP_HIGH_RESP_ENTRIES; 1947 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = 1948 &sc->aac_queues->qt_HostNormCmdQueue[0]; 1949 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = 1950 &sc->aac_queues->qt_HostHighCmdQueue[0]; 1951 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = 1952 &sc->aac_queues->qt_AdapNormCmdQueue[0]; 1953 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = 1954 &sc->aac_queues->qt_AdapHighCmdQueue[0]; 1955 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = 1956 &sc->aac_queues->qt_HostNormRespQueue[0]; 1957 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = 1958 &sc->aac_queues->qt_HostHighRespQueue[0]; 1959 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = 1960 &sc->aac_queues->qt_AdapNormRespQueue[0]; 1961 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = 1962 &sc->aac_queues->qt_AdapHighRespQueue[0]; 1963 1964 /* 1965 * Do controller-type-specific initialisation 1966 */ 1967 switch (sc->aac_hwif) { 1968 case AAC_HWIF_I960RX: 1969 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); 1970 break; 1971 case AAC_HWIF_RKT: 1972 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); 1973 break; 1974 default: 1975 break; 1976 } 1977 1978 /* 1979 * Give the init structure to the controller. 
1980 */ 1981 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, 1982 sc->aac_common_busaddr + 1983 offsetof(struct aac_common, ac_init), 0, 0, 0, 1984 NULL)) { 1985 device_printf(sc->aac_dev, 1986 "error establishing init structure\n"); 1987 error = EIO; 1988 goto out; 1989 } 1990 1991 error = 0; 1992 out: 1993 return(error); 1994 } 1995 1996 static int 1997 aac_setup_intr(struct aac_softc *sc) 1998 { 1999 2000 if (sc->flags & AAC_FLAGS_NEW_COMM) { 2001 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2002 INTR_MPSAFE, 2003 aac_new_intr, sc, &sc->aac_intr, NULL)) { 2004 device_printf(sc->aac_dev, "can't set up interrupt\n"); 2005 return (EINVAL); 2006 } 2007 } else { 2008 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2009 0, aac_filter, 2010 sc, &sc->aac_intr, NULL)) { 2011 device_printf(sc->aac_dev, 2012 "can't set up interrupt filter\n"); 2013 return (EINVAL); 2014 } 2015 } 2016 return (0); 2017 } 2018 2019 /* 2020 * Send a synchronous command to the controller and wait for a result. 2021 * Indicate if the controller completed the command with an error status. 
 */
static int
aac_sync_command(struct aac_softc *sc, u_int32_t command,
		 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3,
		 u_int32_t *sp)
{
	time_t then;
	u_int32_t status;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* populate the mailbox */
	AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3);

	/* ensure the sync command doorbell flag is cleared */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* then set it to signal the adapter */
	AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND);

	/* spin waiting for the command to complete */
	then = time_uptime;
	do {
		if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) {
			fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out");
			return(EIO);
		}
	} while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND));

	/* clear the completion flag */
	AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND);

	/* get the command status */
	status = AAC_GET_MAILBOX(sc, 0);
	if (sp != NULL)
		*sp = status;

	/*
	 * NOTE(review): failure here returns -1 rather than an errno,
	 * unlike the EIO timeout path above; callers only test for
	 * non-zero, so this is safe but inconsistent.
	 */
	if (status != AAC_SRB_STS_SUCCESS)
		return (-1);
	return(0);
}

/*
 * Build a synchronous FIB in the adapter's dedicated sync-FIB slot and
 * submit it via the sync command mailbox.  Returns 0 on success, EINVAL
 * if datasize exceeds the FIB payload, or EIO on adapter failure.
 */
int
aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate,
	     struct aac_fib *fib, u_int16_t datasize)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
#if 0 /* XXX swildner */
	KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0);
#endif

	if (datasize > AAC_FIB_DATASIZE)
		return(EINVAL);

	/*
	 * Set up the sync FIB
	 */
	fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED |
				AAC_FIBSTATE_INITIALISED |
				AAC_FIBSTATE_EMPTY;
	fib->Header.XferState |= xferstate;
	fib->Header.Command = command;
	fib->Header.StructType = AAC_FIBTYPE_TFIB;
	fib->Header.Size = sizeof(struct aac_fib_header) + datasize;
	fib->Header.SenderSize = sizeof(struct aac_fib);
	fib->Header.SenderFibAddress = 0;	/* Not needed */
	fib->Header.ReceiverFibAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common,
						  ac_sync_fib);

	/*
	 * Give the FIB to the controller, wait for a response.
	 */
	if (aac_sync_command(sc, AAC_MONKER_SYNCFIB,
			     fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) {
		fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error");
		return(EIO);
	}

	return (0);
}

/*
 * Adapter-space FIB queue manipulation
 *
 * Note that the queue implementation here is a little funky; neither the PI or
 * CI will ever be zero.  This behaviour is a controller feature.
 */
static const struct {
	int size;	/* number of entries in the queue */
	int notify;	/* doorbell to ring after insert; 0 = none */
} aac_qinfo[] = {
	{AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
	{AAC_HOST_HIGH_CMD_ENTRIES, 0},
	{AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
	{AAC_ADAP_HIGH_CMD_ENTRIES, 0},
	{AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
	{AAC_HOST_HIGH_RESP_ENTRIES, 0},
	{AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
	{AAC_ADAP_HIGH_RESP_ENTRIES, 0}
};

/*
 * Atomically insert an entry into the nominated queue, returns 0 on success or
 * EBUSY if the queue is full.
 *
 * Note: it would be more efficient to defer notifying the controller in
 * the case where we may be inserting several entries in rapid succession,
 * but implementing this usefully may be difficult (it would involve a
 * separate queue/notify interface).
 */
static int
aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	fib_size = cm->cm_fib->Header.Size;
	fib_addr = cm->cm_fib->Header.ReceiverFibAddress;

	/*
	 * Get the producer/consumer indices.  These live in memory shared
	 * with the adapter (sc->aac_queues).
	 */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/*
	 * To avoid a race with its completion interrupt, place this command on
	 * the busy queue prior to advertising it to the controller.
	 */
	aac_enqueue_busy(cm);

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/*
	 * Update producer index.  This is the publication point: the
	 * adapter may consume the entry as soon as this store lands.
	 */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}

/*
 * Atomically remove one entry from the nominated queue, returns 0 on
 * success or ENOENT if the queue is empty.
 */
static int
aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
		struct aac_fib **fib_addr)
{
	u_int32_t pi, ci;
	u_int32_t fib_index;
	int error;
	int notify;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* check for queue empty */
	if (ci == pi) {
		error = ENOENT;
		goto out;
	}

	/* wrap the pi so the following test works */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/*
	 * If we are about to drain the queue (only one entry left), we
	 * will want to tell the adapter it is no longer full below.
	 */
	notify = 0;
	if (ci == pi + 1)
		notify++;

	/* wrap the queue? */
	if (ci >= aac_qinfo[queue].size)
		ci = 0;

	/* fetch the entry */
	*fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;

	switch (queue) {
	case AAC_HOST_NORM_CMD_QUEUE:
	case AAC_HOST_HIGH_CMD_QUEUE:
		/*
		 * The aq_fib_addr is only 32 bits wide so it can't be counted
		 * on to hold an address.  For AIF's, the adapter assumes
		 * that it's giving us an address into the array of AIF fibs.
		 * Therefore, we have to convert it to an index.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
			sizeof(struct aac_fib);
		*fib_addr = &sc->aac_common->ac_fibs[fib_index];
		break;

	case AAC_HOST_NORM_RESP_QUEUE:
	case AAC_HOST_HIGH_RESP_QUEUE:
	{
		struct aac_command *cm;

		/*
		 * As above, an index is used instead of an actual address.
		 * Gotta shift the index to account for the fast response
		 * bit.  No other correction is needed since this value was
		 * originally provided by the driver via the SenderFibAddress
		 * field.
		 */
		fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
		cm = sc->aac_commands + (fib_index >> 2);
		*fib_addr = cm->cm_fib;

		/*
		 * Is this a fast response? If it is, update the fib fields in
		 * local memory since the whole fib isn't DMA'd back up.
		 */
		if (fib_index & 0x01) {
			(*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP;
			*((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL;
		}
		break;
	}
	default:
		panic("Invalid queue in aac_dequeue_fib()");
		break;
	}

	/* update consumer index */
	sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;

	/* if we have made the queue un-full, notify the adapter */
	if (notify && (aac_qinfo[queue].notify != 0))
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
	error = 0;

out:
	return(error);
}

/*
 * Put our response to an Adapter Initiated Fib (AIF) on the response
 * queue.  Returns 0 on success or EBUSY if the queue is full.
 */
static int
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
{
	u_int32_t pi, ci;
	int error;
	u_int32_t fib_size;
	u_int32_t fib_addr;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Tell the adapter where the FIB is */
	fib_size = fib->Header.Size;
	fib_addr = fib->Header.SenderFibAddress;
	fib->Header.ReceiverFibAddress = fib_addr;

	/* get the producer/consumer indices */
	pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
	ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];

	/* wrap the queue? */
	if (pi >= aac_qinfo[queue].size)
		pi = 0;

	/* check for queue full */
	if ((pi + 1) == ci) {
		error = EBUSY;
		goto out;
	}

	/* populate queue entry */
	(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
	(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;

	/* update producer index */
	sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;

	/* notify the adapter if we know how */
	if (aac_qinfo[queue].notify != 0)
		AAC_QNOTIFY(sc, aac_qinfo[queue].notify);

	error = 0;

out:
	return(error);
}

/*
 * Check for commands that have been outstanding for a suspiciously long time,
 * and complain about them.
 */
static void
aac_timeout(struct aac_softc *sc)
{
	struct aac_command *cm;
	time_t deadline;
	int timedout, code;

	/*
	 * Traverse the busy command list, bitch about late commands once
	 * only.  AAC_CMD_TIMEDOUT marks a command already reported so we
	 * do not spam the console every sweep.
	 */
	timedout = 0;
	deadline = time_uptime - AAC_CMD_TIMEOUT;
	TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) {
		if ((cm->cm_timestamp < deadline)
		    && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) {
			cm->cm_flags |= AAC_CMD_TIMEDOUT;
			device_printf(sc->aac_dev,
			    "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n",
			    cm, cm->cm_fib->Header.Command,
			    (int)(time_uptime-cm->cm_timestamp));
			AAC_PRINT_FIB(sc, cm->cm_fib);
			timedout++;
		}
	}

	/*
	 * If anything timed out, check whether the controller itself has
	 * died; a late command is often the first symptom.
	 */
	if (timedout) {
		code = AAC_GET_FWSTATUS(sc);
		if (code != AAC_UP_AND_RUNNING) {
			device_printf(sc->aac_dev, "WARNING! Controller is no "
				      "longer running! code= 0x%x\n", code);
		}
	}
}

/*
 * Interface Function Vectors
 */

/*
 * Read the current firmware status word.
2374 */ 2375 static int 2376 aac_sa_get_fwstatus(struct aac_softc *sc) 2377 { 2378 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2379 2380 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS)); 2381 } 2382 2383 static int 2384 aac_rx_get_fwstatus(struct aac_softc *sc) 2385 { 2386 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2387 2388 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 2389 AAC_RX_OMR0 : AAC_RX_FWSTATUS)); 2390 } 2391 2392 static int 2393 aac_rkt_get_fwstatus(struct aac_softc *sc) 2394 { 2395 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2396 2397 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 2398 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); 2399 } 2400 2401 /* 2402 * Notify the controller of a change in a given queue 2403 */ 2404 2405 static void 2406 aac_sa_qnotify(struct aac_softc *sc, int qbit) 2407 { 2408 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2409 2410 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); 2411 } 2412 2413 static void 2414 aac_rx_qnotify(struct aac_softc *sc, int qbit) 2415 { 2416 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2417 2418 AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); 2419 } 2420 2421 static void 2422 aac_rkt_qnotify(struct aac_softc *sc, int qbit) 2423 { 2424 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2425 2426 AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); 2427 } 2428 2429 /* 2430 * Get the interrupt reason bits 2431 */ 2432 static int 2433 aac_sa_get_istatus(struct aac_softc *sc) 2434 { 2435 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2436 2437 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); 2438 } 2439 2440 static int 2441 aac_rx_get_istatus(struct aac_softc *sc) 2442 { 2443 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2444 2445 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); 2446 } 2447 2448 static int 2449 aac_rkt_get_istatus(struct aac_softc *sc) 2450 { 2451 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2452 2453 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); 2454 } 2455 
/*
 * Clear some interrupt reason bits
 */
static void
aac_sa_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask);
}

static void
aac_rx_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask);
}

static void
aac_rkt_clear_istatus(struct aac_softc *sc, int mask)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask);
}

/*
 * Populate the mailbox and set the command word.  The mailbox is five
 * consecutive 32-bit registers: command word followed by four arguments.
 */
static void
aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
		   u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3);
}

static void
aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
		   u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3);
}

static void
aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0,
		    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2);
	AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3);
}

/*
 * Fetch the immediate command status word.  'mb' is the mailbox slot
 * index (0 = status, 1..4 = the argument registers).
 */
static int
aac_sa_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4)));
}

static int
aac_rx_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4)));
}

static int
aac_rkt_get_mailbox(struct aac_softc *sc, int mb)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4)));
}

/*
 * Set/clear interrupt masks
 */
static void
aac_sa_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0);
	}
}

static void
aac_rx_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		/* OIMR is a mask register: 0 bits are the enabled sources */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM);
		else
			AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0);
	}
}

static void
aac_rkt_set_interrupts(struct aac_softc *sc, int enable)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis");

	if (enable) {
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM);
		else
			AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS);
	} else {
		AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0);
	}
}

/*
 * New comm. interface: Send command functions
 */
static int
aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

	/*
	 * Ask the adapter for a message slot; 0xffffffff means none
	 * available, so retry once before giving up and returning the
	 * sentinel to the caller.
	 */
	index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE);
	if (index == 0xffffffffL)
		return index;
	aac_enqueue_busy(cm);
	/* write 64-bit FIB physical address (low, high) then its size */
	device = index;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	/* hand the slot back to the adapter to fire the command */
	AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index);
	return 0;
}

static int
aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm)
{
	u_int32_t index, device;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)");

	/* same slot-allocate/fill/submit protocol as the Rx variant */
	index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE);
	if (index == 0xffffffffL)
		return index;
	aac_enqueue_busy(cm);
	device = index;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32));
	device += 4;
	AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size);
	AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index);
	return 0;
}

/*
 * New comm.
 * interface: get, set outbound queue index
 */
static int
aac_rx_get_outb_queue(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE));
}

static int
aac_rkt_get_outb_queue(struct aac_softc *sc)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE));
}

static void
aac_rx_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index);
}

static void
aac_rkt_set_outb_queue(struct aac_softc *sc, int index)
{
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index);
}

/*
 * Debugging and Diagnostics
 */

/*
 * Print some information about the controller.
 * Queries the adapter via two sync FIBs (RequestAdapterInfo and, when
 * supported, RequestSupplementAdapterInfo) and caches the firmware
 * revision in sc->aac_revision for later use.
 */
static void
aac_describe_controller(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_adapter_info *info;
	char *adapter_type = "Adaptec RAID controller";

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);

	fib->data[0] = 0;
	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) {
		device_printf(sc->aac_dev, "RequestAdapterInfo failed\n");
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	/* save the kernel revision structure for later use */
	info = (struct aac_adapter_info *)&fib->data[0];
	sc->aac_revision = info->KernelRevision;

	if (bootverbose) {
		device_printf(sc->aac_dev, "%s %dMHz, %dMB memory "
		    "(%dMB cache, %dMB execution), %s\n",
		    aac_describe_code(aac_cpu_variant, info->CpuVariant),
		    info->ClockSpeed, info->TotalMem / (1024 * 1024),
		    info->BufferMem / (1024 * 1024),
		    info->ExecutionMem / (1024 * 1024),
		    aac_describe_code(aac_battery_platform,
		    info->batteryPlatform));

		device_printf(sc->aac_dev,
		    "Kernel %d.%d-%d, Build %d, S/N %6X\n",
		    info->KernelRevision.external.comp.major,
		    info->KernelRevision.external.comp.minor,
		    info->KernelRevision.external.comp.dash,
		    info->KernelRevision.buildNumber,
		    (u_int32_t)(info->SerialNumber & 0xffffff));

		/* %b decodes the option bitmask using the control string */
		device_printf(sc->aac_dev, "Supported Options=%b\n",
			      sc->supported_options,
			      "\20"
			      "\1SNAPSHOT"
			      "\2CLUSTERS"
			      "\3WCACHE"
			      "\4DATA64"
			      "\5HOSTTIME"
			      "\6RAID50"
			      "\7WINDOW4GB"
			      "\10SCSIUPGD"
			      "\11SOFTERR"
			      "\12NORECOND"
			      "\13SGMAP64"
			      "\14ALARM"
			      "\15NONDASD"
			      "\16SCSIMGT"
			      "\17RAIDSCSI"
			      "\21ADPTINFO"
			      "\22NEWCOMM"
			      "\23ARRAY64BIT"
			      "\24HEATSENSOR");
	}

	if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) {
		fib->data[0] = 0;
		if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1))
			device_printf(sc->aac_dev,
			    "RequestSupplementAdapterInfo failed\n");
		else
			adapter_type = ((struct aac_supplement_adapter_info *)
			    &fib->data[0])->AdapterTypeText;
	}
	device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n",
		adapter_type,
		AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION,
		AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD);

	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Look up a text description of a numeric error code and return a pointer to
 * same.
 */
static const char *
aac_describe_code(const struct aac_code_lookup *table, u_int32_t code)
{
	int i;

	/*
	 * The table is terminated by an entry with a NULL string; by
	 * convention the entry AFTER the terminator holds the default
	 * ("unknown") description, hence the i + 1 below.
	 */
	for (i = 0; table[i].string != NULL; i++)
		if (table[i].code == code)
			return(table[i].string);
	return(table[i + 1].string);
}

/*
 * Management Interface
 */

static int
aac_open(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct aac_softc *sc;

	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/* keep the device attached while the control node is open */
	device_busy(sc->aac_dev);

	return 0;
}

/*
 * Control-device ioctl dispatcher.  Native FSACTL_* commands carry a
 * user pointer in *arg which is dereferenced before falling through to
 * the Linux-compatible (FSACTL_LNX_*) handler for the same operation,
 * where arg itself is already the user pointer.
 */
static int
aac_ioctl(struct dev_ioctl_args *ap)
{
	caddr_t arg = ap->a_data;
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	union aac_statrequest *as;
	struct aac_softc *sc;
	int error = 0;

	as = (union aac_statrequest *)arg;
	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	switch (cmd) {
	case AACIO_STATS:
		switch (as->as_item) {
		case AACQ_FREE:
		case AACQ_BIO:
		case AACQ_READY:
		case AACQ_BUSY:
			bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat,
			      sizeof(struct aac_qstat));
			break;
		default:
			error = ENOENT;
			break;
		}
		break;

	case FSACTL_SENDFIB:
	case FSACTL_SEND_LARGE_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SENDFIB:
	case FSACTL_LNX_SEND_LARGE_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB");
		error = aac_ioctl_sendfib(sc, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_SEND_RAW_SRB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB");
		error = aac_ioctl_send_raw_srb(sc, arg);
		break;
	case FSACTL_AIF_THREAD:
	case FSACTL_LNX_AIF_THREAD:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD");
		error = EINVAL;
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_OPEN_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB");
		error = aac_open_aif(sc, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_NEXT_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB");
		error = aac_getnext_aif(sc, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB");
		error = aac_close_aif(sc, arg);
		break;
	case FSACTL_MINIPORT_REV_CHECK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_MINIPORT_REV_CHECK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK");
		error = aac_rev_check(sc, arg);
		break;
	case FSACTL_QUERY_DISK:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_QUERY_DISK:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK");
		error = aac_query_disk(sc, arg);
		break;
	case FSACTL_DELETE_DISK:
	case FSACTL_LNX_DELETE_DISK:
		/*
		 * We don't trust the underland to tell us when to delete a
		 * container, rather we rely on an AIF coming from the
		 * controller
		 */
		error = 0;
		break;
	case FSACTL_GET_PCI_INFO:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_PCI_INFO:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO");
		error = aac_get_pci_info(sc, arg);
		break;
	case FSACTL_GET_FEATURES:
		arg = *(caddr_t*)arg;
		/* FALLTHROUGH */
	case FSACTL_LNX_GET_FEATURES:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES");
		error = aac_supported_features(sc, arg);
		break;
	default:
		fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd);
		error = EINVAL;
		break;
	}
return(error); 2910 } 2911 2912 static struct filterops aac_filterops = 2913 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, aac_filter_detach, aac_filter_read }; 2914 2915 static int 2916 aac_kqfilter(struct dev_kqfilter_args *ap) 2917 { 2918 cdev_t dev = ap->a_head.a_dev; 2919 struct aac_softc *sc = dev->si_drv1; 2920 struct knote *kn = ap->a_kn; 2921 struct klist *klist; 2922 2923 ap->a_result = 0; 2924 2925 switch (kn->kn_filter) { 2926 case EVFILT_READ: 2927 kn->kn_fop = &aac_filterops; 2928 kn->kn_hook = (caddr_t)sc; 2929 break; 2930 default: 2931 ap->a_result = EOPNOTSUPP; 2932 return (0); 2933 } 2934 2935 klist = &sc->rcv_kq.ki_note; 2936 knote_insert(klist, kn); 2937 2938 return (0); 2939 } 2940 2941 static void 2942 aac_filter_detach(struct knote *kn) 2943 { 2944 struct aac_softc *sc = (struct aac_softc *)kn->kn_hook; 2945 struct klist *klist; 2946 2947 klist = &sc->rcv_kq.ki_note; 2948 knote_remove(klist, kn); 2949 } 2950 2951 static int 2952 aac_filter_read(struct knote *kn, long hint) 2953 { 2954 struct aac_softc *sc; 2955 struct aac_fib_context *ctx; 2956 2957 sc = (struct aac_softc *)kn->kn_hook; 2958 2959 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 2960 for (ctx = sc->fibctx; ctx; ctx = ctx->next) 2961 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) 2962 return(1); 2963 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 2964 2965 return (0); 2966 } 2967 2968 static void 2969 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 2970 { 2971 2972 switch (event->ev_type) { 2973 case AAC_EVENT_CMFREE: 2974 KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0); 2975 if (aac_alloc_command(sc, (struct aac_command **)arg)) { 2976 aac_add_event(sc, event); 2977 return; 2978 } 2979 kfree(event, M_AACBUF); 2980 wakeup(arg); 2981 break; 2982 default: 2983 break; 2984 } 2985 } 2986 2987 /* 2988 * Send a FIB supplied from userspace 2989 */ 2990 static int 2991 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 2992 { 2993 struct aac_command *cm; 2994 int 
size, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	cm = NULL;

	/*
	 * Get a command; if none is free, register a CMFREE event and
	 * sleep until aac_ioctl_event() allocates one and does wakeup(&cm).
	 */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	if (aac_alloc_command(sc, &cm)) {
		struct aac_event *event;

		event = kmalloc(sizeof(struct aac_event), M_AACBUF,
		    M_INTWAIT | M_ZERO);
		event->ev_type = AAC_EVENT_CMFREE;
		event->ev_callback = aac_ioctl_event;
		event->ev_arg = &cm;
		aac_add_event(sc, event);
		/* sleep channel &cm matches the wakeup(arg) in the callback */
		lksleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0);
	}
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	/*
	 * Fetch the FIB header, then re-copy to get data as well.
	 */
	if ((error = copyin(ufib, cm->cm_fib,
			    sizeof(struct aac_fib_header))) != 0)
		goto out;
	/* clamp the user-supplied size so the copy cannot overrun the FIB */
	size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header);
	if (size > sc->aac_max_fib_size) {
		device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n",
			      size, sc->aac_max_fib_size);
		size = sc->aac_max_fib_size;
	}
	if ((error = copyin(ufib, cm->cm_fib, size)) != 0)
		goto out;
	cm->cm_fib->Header.Size = size;
	cm->cm_timestamp = time_uptime;

	/*
	 * Pass the FIB to the controller, wait for it to complete.
	 */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	error = aac_wait_command(cm);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	if (error != 0) {
		device_printf(sc->aac_dev,
			      "aac_wait_command return %d\n", error);
		goto out;
	}

	/*
	 * Copy the FIB and data back out to the caller.
3048 */ 3049 size = cm->cm_fib->Header.Size; 3050 if (size > sc->aac_max_fib_size) { 3051 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", 3052 size, sc->aac_max_fib_size); 3053 size = sc->aac_max_fib_size; 3054 } 3055 error = copyout(cm->cm_fib, ufib, size); 3056 3057 out: 3058 if (cm != NULL) { 3059 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3060 aac_release_command(cm); 3061 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3062 } 3063 return(error); 3064 } 3065 3066 /* 3067 * Send a passthrough FIB supplied from userspace 3068 */ 3069 static int 3070 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) 3071 { 3072 struct aac_command *cm; 3073 struct aac_event *event; 3074 struct aac_fib *fib; 3075 struct aac_srb *srbcmd, *user_srb; 3076 struct aac_sg_entry *sge; 3077 #ifdef __x86_64__ 3078 struct aac_sg_entry64 *sge64; 3079 #endif 3080 void *srb_sg_address, *ureply; 3081 uint32_t fibsize, srb_sg_bytecount; 3082 int error, transfer_data; 3083 3084 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3085 3086 cm = NULL; 3087 transfer_data = 0; 3088 fibsize = 0; 3089 user_srb = (struct aac_srb *)arg; 3090 3091 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3092 if (aac_alloc_command(sc, &cm)) { 3093 event = kmalloc(sizeof(struct aac_event), M_AACBUF, 3094 M_NOWAIT | M_ZERO); 3095 if (event == NULL) { 3096 error = EBUSY; 3097 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3098 goto out; 3099 } 3100 event->ev_type = AAC_EVENT_CMFREE; 3101 event->ev_callback = aac_ioctl_event; 3102 event->ev_arg = &cm; 3103 aac_add_event(sc, event); 3104 lksleep(cm, &sc->aac_io_lock, 0, "aacraw", 0); 3105 } 3106 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3107 3108 cm->cm_data = NULL; 3109 fib = cm->cm_fib; 3110 srbcmd = (struct aac_srb *)fib->data; 3111 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); 3112 if (error != 0) 3113 goto out; 3114 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { 3115 error = EINVAL; 3116 goto out; 3117 } 3118 error = 
	    copyin(user_srb, srbcmd, fibsize);
	if (error != 0)
		goto out;
	/* Fields the host controls; userland must not dictate them. */
	srbcmd->function = 0;
	srbcmd->retry_limit = 0;
	/* Only a single scatter/gather entry is supported by this path. */
	if (srbcmd->sg_map.SgCount > 1) {
		error = EINVAL;
		goto out;
	}

	/* Retrieve correct SG entries. */
	if (fibsize == (sizeof(struct aac_srb) +
	    srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) {
		/* Request sized for 32-bit SG entries. */
		sge = srbcmd->sg_map.SgEntry;
		srb_sg_bytecount = sge->SgByteCount;
		srb_sg_address = (void *)(uintptr_t)sge->SgAddress;
	}
#ifdef __x86_64__
	else if (fibsize == (sizeof(struct aac_srb) +
	    srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) {
		/* Request sized for 64-bit SG entries (x86_64 only). */
		sge = NULL;
		sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry;
		srb_sg_bytecount = sge64->SgByteCount;
		srb_sg_address = (void *)sge64->SgAddress;
		/* Addresses above 4GB require 64-bit SG support on the card. */
		if (sge64->SgAddress > 0xffffffffull &&
		    (sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			error = EINVAL;
			goto out;
		}
	}
#endif
	else {
		error = EINVAL;
		goto out;
	}
	/* The userland reply area immediately follows the FIB it sent. */
	ureply = (char *)arg + fibsize;
	srbcmd->data_len = srb_sg_bytecount;
	if (srbcmd->sg_map.SgCount == 1)
		transfer_data = 1;

	cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map;
	if (transfer_data) {
		/* Kernel bounce buffer for the user data in the SG entry. */
		cm->cm_datalen = srb_sg_bytecount;
		cm->cm_data = kmalloc(cm->cm_datalen, M_AACBUF, M_NOWAIT);
		if (cm->cm_data == NULL) {
			error = ENOMEM;
			goto out;
		}
		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)
			cm->cm_flags |= AAC_CMD_DATAIN;
		if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) {
			cm->cm_flags |= AAC_CMD_DATAOUT;
			error = copyin(srb_sg_address, cm->cm_data,
			    cm->cm_datalen);
			if (error != 0)
				goto out;
		}
	}

	fib->Header.Size = sizeof(struct aac_fib_header) +
	    sizeof(struct aac_srb);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY |
		AAC_FIBSTATE_FROMHOST |
		AAC_FIBSTATE_REXPECTED |
		AAC_FIBSTATE_NORM |
		AAC_FIBSTATE_ASYNC |
		AAC_FIBSTATE_FAST_RESPONSE;
	fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ?
	    ScsiPortCommandU64 : ScsiPortCommand;

	/* Issue the command and wait synchronously for it to complete. */
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_wait_command(cm);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	/* For DATA_IN requests, hand the controller's data back to userland. */
	if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) {
		error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen);
		if (error != 0)
			goto out;
	}
	/* Copy the SRB response status out to the caller's reply area. */
	error = copyout(fib->data, ureply, sizeof(struct aac_srb_response));
out:
	/* Common cleanup: free the bounce buffer and release the command. */
	if (cm != NULL) {
		if (cm->cm_data != NULL)
			kfree(cm->cm_data, M_AACBUF);
		lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
		aac_release_command(cm);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
	}
	return(error);
}

/*
 * Last close of the control device: drop the busy reference that was taken
 * on open so the device can be detached again.  Always succeeds.
 */
static int
aac_close(struct dev_close_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct aac_softc *sc;

	sc = dev->si_drv1;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	get_mplock();
	device_unbusy(sc->aac_dev);
	rel_mplock();

	return 0;
}

/*
 * Handle an AIF sent to us by the controller; queue it for later reference.
 * If the queue fills up, then drop the older entries.
 */
static void
aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
{
	struct aac_aif_command *aif;
	struct aac_container *co, *co_next;
	struct aac_fib_context *ctx;
	struct aac_mntinforesp *mir;
	int next, current, found;
	int count = 0, added = 0, i = 0;
	uint32_t channel;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	aif = (struct aac_aif_command*)&fib->data[0];
	aac_print_aif(sc, aif);

	/* Is it an event that we should care about? */
	switch (aif->command) {
	case AifCmdEventNotify:
		switch (aif->data.EN.type) {
		case AifEnAddContainer:
		case AifEnDeleteContainer:
			/*
			 * A container was added or deleted, but the message
			 * doesn't tell us anything else!  Re-enumerate the
			 * containers and sort things out.
			 */
			aac_alloc_sync_fib(sc, &fib);
			do {
				/*
				 * Ask the controller for its containers one at
				 * a time.
				 * XXX What if the controller's list changes
				 * midway through this enumeration?
				 * XXX This should be done async.
				 *
				 * NOTE(review): on a NULL response 'i' is not
				 * advanced before 'continue', so a persistent
				 * failure would spin here -- confirm the
				 * firmware guarantees eventual success.
				 */
				if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
					continue;
				if (i == 0)
					count = mir->MntRespCount;
				/*
				 * Check the container against our list.
				 * co->co_found was already set to 0 in a
				 * previous run.
				 */
				if ((mir->Status == ST_OK) &&
				    (mir->MntTable[0].VolType != CT_NONE)) {
					found = 0;
					TAILQ_FOREACH(co,
						      &sc->aac_container_tqh,
						      co_link) {
						if (co->co_mntobj.ObjectId ==
						    mir->MntTable[0].ObjectId) {
							co->co_found = 1;
							found = 1;
							break;
						}
					}
					/*
					 * If the container matched, continue
					 * in the list.
					 */
					if (found) {
						i++;
						continue;
					}

					/*
					 * This is a new container.  Do all the
					 * appropriate things to set it up.
					 */
					aac_add_container(sc, mir, 1);
					added = 1;
				}
				i++;
			} while ((i < count) && (i < AAC_MAX_CONTAINERS));
			aac_release_sync_fib(sc);

			/*
			 * Go through our list of containers and see which ones
			 * were not marked 'found'.  Since the controller didn't
			 * list them they must have been deleted.  Do the
			 * appropriate steps to destroy the device.  Also reset
			 * the co->co_found field.
			 */
			co = TAILQ_FIRST(&sc->aac_container_tqh);
			while (co != NULL) {
				if (co->co_found == 0) {
					/*
					 * Drop the I/O lock around the child
					 * deletion, which needs the MP lock.
					 */
					lockmgr(&sc->aac_io_lock, LK_RELEASE);
					get_mplock();
					device_delete_child(sc->aac_dev,
					    co->co_disk);
					rel_mplock();
					lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
					co_next = TAILQ_NEXT(co, co_link);
					lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE);
					TAILQ_REMOVE(&sc->aac_container_tqh, co,
					    co_link);
					lockmgr(&sc->aac_container_lock, LK_RELEASE);
					kfree(co, M_AACBUF);
					co = co_next;
				} else {
					co->co_found = 0;
					co = TAILQ_NEXT(co, co_link);
				}
			}

			/* Attach the newly created containers */
			if (added) {
				lockmgr(&sc->aac_io_lock, LK_RELEASE);
				get_mplock();
				bus_generic_attach(sc->aac_dev);
				rel_mplock();
				lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
			}

			break;

		case AifEnEnclosureManagement:
			switch (aif->data.EN.data.EEE.eventType) {
			case AIF_EM_DRIVE_INSERTION:
			case AIF_EM_DRIVE_REMOVAL:
				/* unitID encodes channel (high byte) + target */
				channel = aif->data.EN.data.EEE.unitID;
				if (sc->cam_rescan_cb != NULL)
					sc->cam_rescan_cb(sc,
					    (channel >> 24) & 0xF,
					    (channel & 0xFFFF));
				break;
			}
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			/* Rescan the whole bus the JBOD lives on. */
			channel = aif->data.EN.data.ECE.container;
			if (sc->cam_rescan_cb != NULL)
				sc->cam_rescan_cb(sc, (channel >> 24) & 0xF,
				    AAC_CAM_TARGET_WILDCARD);
			break;

		default:
			break;
		}

	/*
	 * NOTE(review): no 'break' at the end of AifCmdEventNotify, so it
	 * falls through into default.  Harmless (default only breaks), but
	 * confirm the fall-through is intentional before restructuring.
	 */
	default:
		break;
	}

	/* Copy the AIF data to the AIF queue for ioctl retrieval */
	lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE);
	current = sc->aifq_idx;
	next = (current + 1) % AAC_AIFQ_LENGTH;
	/* Wrapping back to slot 0 means the circular queue is now full. */
	if (next == 0)
		sc->aifq_filled = 1;
	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
	/* modify AIF contexts: push along any reader about to be lapped */
	if (sc->aifq_filled) {
		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			if (next == ctx->ctx_idx)
				ctx->ctx_wrap = 1;
			else if (current == ctx->ctx_idx && ctx->ctx_wrap)
				ctx->ctx_idx = next;
		}
	}
	sc->aifq_idx = next;
	/* On the off chance that someone is sleeping for an aif... */
	if (sc->aac_state & AAC_STATE_AIF_SLEEPER)
		wakeup(sc->aac_aifq);
	/* token may have been lost */
	/* Wakeup any poll()ers */
	KNOTE(&sc->rcv_kq.ki_note, 0);
	/* token may have been lost */
	lockmgr(&sc->aac_aifq_lock, LK_RELEASE);
}

/*
 * Return the Revision of the driver to userspace and check to see if the
 * userspace app is possibly compatible.  This is extremely bogus since
 * our driver doesn't follow Adaptec's versioning system.  Cheat by just
 * returning what the card reported.
 */
static int
aac_rev_check(struct aac_softc *sc, caddr_t udata)
{
	struct aac_rev_check rev_check;
	struct aac_rev_check_resp rev_check_resp;
	int error = 0;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Copyin the revision struct from userspace
	 */
	if ((error = copyin(udata, (caddr_t)&rev_check,
	    sizeof(struct aac_rev_check))) != 0) {
		return error;
	}

	fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n",
	    rev_check.callingRevision.buildNumber);

	/*
	 * Doctor up the response struct.
	 */
	rev_check_resp.possiblyCompatible = 1;
	rev_check_resp.adapterSWRevision.external.comp.major =
	    AAC_DRIVER_MAJOR_VERSION;
	rev_check_resp.adapterSWRevision.external.comp.minor =
	    AAC_DRIVER_MINOR_VERSION;
	rev_check_resp.adapterSWRevision.external.comp.type =
	    AAC_DRIVER_TYPE;
	rev_check_resp.adapterSWRevision.external.comp.dash =
	    AAC_DRIVER_BUGFIX_LEVEL;
	rev_check_resp.adapterSWRevision.buildNumber =
	    AAC_DRIVER_BUILD;

	return(copyout((caddr_t)&rev_check_resp, udata,
	    sizeof(struct aac_rev_check_resp)));
}

/*
 * Pass the fib context to the caller
 *
 * Allocates a new AIF-tracking context, links it onto the softc's context
 * list and copies its 32-bit "unique" handle out to userland.
 */
static int
aac_open_aif(struct aac_softc *sc, caddr_t arg)
{
	struct aac_fib_context *fibctx, *ctx;
	int error = 0;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	fibctx = kmalloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO);
	if (fibctx == NULL)
		return (ENOMEM);

	lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE);
	/* all elements are already 0, add to queue */
	if (sc->fibctx == NULL)
		sc->fibctx = fibctx;
	else {
		/* append at the tail of the singly-walked list */
		for (ctx = sc->fibctx; ctx->next; ctx = ctx->next)
			;
		ctx->next = fibctx;
		fibctx->prev = ctx;
	}

	/*
	 * evaluate unique value
	 *
	 * NOTE(review): the seed comes from the low 32 bits of the address of
	 * the local pointer variable (not the allocation it points to); only
	 * uniqueness matters, and the loop below resolves collisions -- but
	 * confirm against upstream before "fixing" this.
	 */
	fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff);
	ctx = sc->fibctx;
	while (ctx != fibctx) {
		if (ctx->unique == fibctx->unique) {
			/* collision: bump the handle and restart the scan */
			fibctx->unique++;
			ctx = sc->fibctx;
		} else {
			ctx = ctx->next;
		}
	}
	lockmgr(&sc->aac_aifq_lock, LK_RELEASE);

	error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t));
	if (error)
		/*
		 * NOTE(review): aac_close_aif() extracts the handle from the
		 * low bits of 'arg' itself, yet this passes the context
		 * pointer -- the cleanup may not match the new entry.
		 * Verify against the FreeBSD original.
		 */
		aac_close_aif(sc, (caddr_t)ctx);
	return error;
}

/*
 * Close the caller's fib context
 *
 * Unlinks the context whose handle is passed (by value) in 'arg' and frees
 * it.  Returns 0 even when no matching context exists.
 */
static int
aac_close_aif(struct aac_softc *sc, caddr_t arg)
{
	struct aac_fib_context *ctx;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE);
	/* 'arg' carries the 32-bit handle by value, not a user pointer. */
	for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
		if (ctx->unique == *(uint32_t *)&arg) {
			/*
			 * NOTE(review): removing the head sets sc->fibctx to
			 * NULL rather than ctx->next, dropping any later
			 * contexts from the list -- confirm whether multiple
			 * simultaneous contexts are expected here.
			 */
			if (ctx == sc->fibctx)
				sc->fibctx = NULL;
			else {
				ctx->prev->next = ctx->next;
				if (ctx->next)
					ctx->next->prev = ctx->prev;
			}
			break;
		}
	}
	lockmgr(&sc->aac_aifq_lock, LK_RELEASE);
	if (ctx)
		kfree(ctx, M_AACBUF);

	return 0;
}

/*
 * Pass the caller the next AIF in their queue
 *
 * Looks up the caller's context by handle, returns the next queued AIF, and
 * optionally blocks (interruptibly) until one arrives when agf.Wait is set.
 */
static int
aac_getnext_aif(struct aac_softc *sc, caddr_t arg)
{
	struct get_adapter_fib_ioctl agf;
	struct aac_fib_context *ctx;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((error = copyin(arg, &agf, sizeof(agf))) == 0) {
		/*
		 * Find the context opened via aac_open_aif().
		 * NOTE(review): this walk is done without aac_aifq_lock,
		 * unlike the list updates elsewhere -- confirm it cannot race
		 * a concurrent open/close.
		 */
		for (ctx = sc->fibctx; ctx; ctx = ctx->next) {
			if (agf.AdapterFibContext == ctx->unique)
				break;
		}
		if (!ctx)
			return (EFAULT);

		error = aac_return_aif(sc, ctx, agf.AifFib);
		if (error == EAGAIN && agf.Wait) {
			/* Queue empty and the caller asked to block. */
			fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF");
			sc->aac_state |= AAC_STATE_AIF_SLEEPER;
			while (error == EAGAIN) {
				/* PCATCH: a signal aborts the wait */
				error = tsleep(sc->aac_aifq,
				    PCATCH, "aacaif", 0);
				if (error == 0)
					error = aac_return_aif(sc, ctx, agf.AifFib);
			}
			sc->aac_state &= ~AAC_STATE_AIF_SLEEPER;
		}
	}
	return(error);
}

/*
 * Hand the next AIF off the top of the queue out to userspace.
 */
static int
aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr)
{
	int current, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE);
	current = ctx->ctx_idx;
	/* Caught up with the producer and not wrapped: nothing to deliver. */
	if (current == sc->aifq_idx && !ctx->ctx_wrap) {
		/* empty */
		lockmgr(&sc->aac_aifq_lock, LK_RELEASE);
		return (EAGAIN);
	}
	error =
		copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib));
	if (error)
		device_printf(sc->aac_dev,
		    "aac_return_aif: copyout returned %d\n", error);
	else {
		/* Consumed one entry; advance this context's read cursor. */
		ctx->ctx_wrap = 0;
		ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
	}
	lockmgr(&sc->aac_aifq_lock, LK_RELEASE);
	return(error);
}

/*
 * Report the adapter's PCI bus and slot numbers to userland.
 */
static int
aac_get_pci_info(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_pci_info {
		u_int32_t bus;
		u_int32_t slot;
	} pciinf;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	pciinf.bus = pci_get_bus(sc->aac_dev);
	pciinf.slot = pci_get_slot(sc->aac_dev);

	error = copyout((caddr_t)&pciinf, uptr,
	    sizeof(struct aac_pci_info));

	return (error);
}

/*
 * FSACTL_GET_FEATURES: report which optional driver features are enabled.
 * Currently only the large-LBA (64-bit addressing) feature is reported.
 */
static int
aac_supported_features(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_features f;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((error = copyin(uptr, &f, sizeof (f))) != 0)
		return (error);

	/*
	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
	 * ALL zero in the featuresState, the driver will return the current
	 * state of all the supported features, the data field will not be
	 * valid.
	 * When the management driver receives FSACTL_GET_FEATURES ioctl with
	 * a specific bit set in the featuresState, the driver will return the
	 * current state of this specific feature and whatever data that are
	 * associated with the feature in the data field or perform whatever
	 * action needed indicates in the data field.
	 */
	if (f.feat.fValue == 0) {
		f.feat.fBits.largeLBA =
		    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: In the future, add other features state here as well */
	} else {
		if (f.feat.fBits.largeLBA)
			f.feat.fBits.largeLBA =
			    (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0;
		/* TODO: Add other features state and data in the future */
	}

	error = copyout(&f, uptr, sizeof (f));
	return (error);
}

/*
 * Give the userland some information about the container.  The AAC arch
 * expects the driver to be a SCSI passthrough type driver, so it expects
 * the containers to have b:t:l numbers.  Fake it.
 */
static int
aac_query_disk(struct aac_softc *sc, caddr_t uptr)
{
	struct aac_query_disk query_disk;
	struct aac_container *co;
	struct aac_disk *disk;
	int error, id;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	disk = NULL;

	error = copyin(uptr, (caddr_t)&query_disk,
	    sizeof(struct aac_query_disk));
	if (error)
		return (error);

	id = query_disk.ContainerNumber;
	if (id == -1)
		return (EINVAL);

	/* Find the container with the requested ObjectId, if any. */
	lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE);
	TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) {
		if (co->co_mntobj.ObjectId == id)
			break;
	}

	if (co == NULL) {
		query_disk.Valid = 0;
		query_disk.Locked = 0;
		query_disk.Deleted = 1;		/* XXX is this right? */
	} else {
		disk = device_get_softc(co->co_disk);
		query_disk.Valid = 1;
		query_disk.Locked =
		    (disk->ad_flags & AAC_DISK_OPEN) ?
		    1 : 0;
		query_disk.Deleted = 0;
		/* Fabricate b:t:l from the unit numbers (see header comment). */
		query_disk.Bus = device_get_unit(sc->aac_dev);
		query_disk.Target = disk->unit;
		query_disk.Lun = 0;
		query_disk.UnMapped = 0;
		bcopy(disk->ad_dev_t->si_name,
		    &query_disk.diskDeviceName[0], 10);
	}
	lockmgr(&sc->aac_container_lock, LK_RELEASE);

	error = copyout((caddr_t)&query_disk, uptr,
	    sizeof(struct aac_query_disk));

	return (error);
}

/*
 * Probe the controller for SCSI passthrough buses: query the SCSI method
 * id via VM_ContainerConfig, fetch the bus inventory via VM_Ioctl, and add
 * an "aacp" child device for every valid bus found.
 */
static void
aac_get_bus_info(struct aac_softc *sc)
{
	struct aac_fib *fib;
	struct aac_ctcfg *c_cmd;
	struct aac_ctcfg_resp *c_resp;
	struct aac_vmioctl *vmi;
	struct aac_vmi_businf_resp *vmi_resp;
	struct aac_getbusinf businfo;
	struct aac_sim *caminf;
	device_t child;
	int i, found, error;

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);
	c_cmd = (struct aac_ctcfg *)&fib->data[0];
	bzero(c_cmd, sizeof(struct aac_ctcfg));

	/* Step 1: ask the firmware for the SCSI passthrough method id. */
	c_cmd->Command = VM_ContainerConfig;
	c_cmd->cmd = CT_GET_SCSI_METHOD;
	c_cmd->param = 0;

	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_ctcfg));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending "
		    "VM_ContainerConfig command\n", error);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	c_resp = (struct aac_ctcfg_resp *)&fib->data[0];
	if (c_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n",
		    c_resp->Status);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	sc->scsi_method_id = c_resp->param;

	/* Step 2: reuse the sync FIB to fetch the bus inventory. */
	vmi = (struct aac_vmioctl *)&fib->data[0];
	bzero(vmi, sizeof(struct aac_vmioctl));

	vmi->Command = VM_Ioctl;
	vmi->ObjType = FT_DRIVE;
	vmi->MethId = sc->scsi_method_id;
	vmi->ObjId = 0;
	vmi->IoctlCmd = GetBusInfo;

	/*
	 * NOTE(review): the size passed is that of the *response* struct,
	 * presumably so the firmware has room to write it in place --
	 * matches the original driver; confirm before changing.
	 */
	error = aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_vmi_businf_resp));
	if (error) {
		device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n",
		    error);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0];
	if (vmi_resp->Status != ST_OK) {
		device_printf(sc->aac_dev, "VM_Ioctl returned %d\n",
		    vmi_resp->Status);
		aac_release_sync_fib(sc);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}

	/* Copy the inventory out before the sync FIB is released/reused. */
	bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf));
	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	/* Step 3: create one passthrough child per valid bus. */
	found = 0;
	for (i = 0; i < businfo.BusCount; i++) {
		if (businfo.BusValid[i] != AAC_BUS_VALID)
			continue;

		/* M_INTWAIT: allocation may block but will not fail. */
		caminf = (struct aac_sim *)kmalloc(sizeof(struct aac_sim),
		    M_AACBUF, M_INTWAIT | M_ZERO);

		child = device_add_child(sc->aac_dev, "aacp", -1);
		if (child == NULL) {
			device_printf(sc->aac_dev,
			    "device_add_child failed for passthrough bus %d\n",
			    i);
			kfree(caminf, M_AACBUF);
			break;
		}

		caminf->TargetsPerBus = businfo.TargetsPerBus;
		caminf->BusNumber = i;
		caminf->InitiatorBusId = businfo.InitiatorBusId[i];
		caminf->aac_sc = sc;
		caminf->sim_dev = child;

		device_set_ivars(child, caminf);
		device_set_desc(child, "SCSI Passthrough Bus");
		TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link);

		found = 1;
	}

	/* Probe/attach any buses that were added above. */
	if (found)
		bus_generic_attach(sc->aac_dev);
}