/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap.
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 254263 2013-08-12 23:30:01Z scottl $
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * FreeBSD specific functions not related to CAM, and other
 * miscellaneous functions.
 */


#include <dev/raid/twa/tw_osl_includes.h>
#include <dev/raid/twa/tw_cl_fwif.h>
#include <dev/raid/twa/tw_cl_ioctl.h>
#include <dev/raid/twa/tw_osl_ioctl.h>

#ifdef TW_OSL_DEBUG
TW_INT32	TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG;
TW_INT32	TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG;
#endif /* TW_OSL_DEBUG */

static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands");


static d_open_t		twa_open;
static d_close_t	twa_close;
static d_ioctl_t	twa_ioctl;

static struct dev_ops twa_ops = {
	{ "twa", 0, D_MPSAFE },
	.d_open =	twa_open,
	.d_close =	twa_close,
	.d_ioctl =	twa_ioctl,
};

static devclass_t	twa_devclass;

static int	twa_msi_enable = 0;
TUNABLE_INT("hw.twa.msi.enable", &twa_msi_enable);
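/*
 * Usage note: twa_msi_enable is exported as a tunable, so MSI can typically
 * be requested at boot time from /boot/loader.conf (illustrative):
 *
 *	hw.twa.msi.enable="1"
 *
 * The default of 0 keeps the controller on its legacy INTx interrupt.
 */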


/*
 * Function name:	twa_open
 * Description:		Called when the controller is opened.
 *			Simply marks the controller as open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_open(struct dev_open_args *ap)
{
	cdev_t			dev = ap->a_head.a_dev;
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	sc->open = TW_CL_TRUE;
	return(0);
}



/*
 * Function name:	twa_close
 * Description:		Called when the controller is closed.
 *			Simply marks the controller as not open.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			flags	-- mode of corresponding open
 *			fmt	-- device type (character/block etc.)
 *			proc	-- current process
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_close(struct dev_close_args *ap)
{
	cdev_t			dev = ap->a_head.a_dev;
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);

	tw_osli_dbg_dprintf(5, sc, "entered");
	sc->open = TW_CL_FALSE;
	return(0);
}



/*
 * Function name:	twa_ioctl
 * Description:		Called when an ioctl is posted to the controller.
 *			Handles any OS Layer specific cmds, passes the rest
 *			on to the Common Layer.
 *
 * Input:		dev	-- control device corresponding to the ctlr
 *			cmd	-- ioctl cmd
 *			buf	-- ptr to buffer in kernel memory, which is
 *				   a copy of the input buffer in user-space
 *			flags	-- mode of corresponding open
 *			proc	-- current process
 * Output:		buf	-- ptr to buffer in kernel memory, which will
 *				   be copied to the output buffer in user-space
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t			dev = ap->a_head.a_dev;
	u_long			cmd = ap->a_cmd;
	caddr_t			buf = ap->a_data;
	struct twa_softc	*sc = (struct twa_softc *)(dev->si_drv1);
	TW_INT32		error;

	tw_osli_dbg_dprintf(5, sc, "entered");

	switch (cmd) {
	case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH:
		tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru");
		error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf);
		break;

	case TW_OSL_IOCTL_SCAN_BUS:
		/* Request CAM for a bus scan. */
		tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus");
		error = tw_osli_request_bus_scan(sc);
		break;

	default:
		tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd);
		error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf);
		break;
	}
	return(error);
}
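
/*
 * Illustrative sketch (not compiled into the driver): how a userland tool
 * might ask for a bus rescan through the control device that twa_attach()
 * creates.  The device path, header location and error handling are
 * assumptions; TW_OSL_IOCTL_SCAN_BUS carries no payload in the handler
 * above, so no argument buffer is passed here.
 */
#if 0
#include <sys/ioctl.h>

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#include <dev/raid/twa/tw_osl_ioctl.h>	/* TW_OSL_IOCTL_SCAN_BUS */

int
main(void)
{
	int	fd;

	/* Unit 0 is assumed; the node is created as "twa%d" in twa_attach(). */
	if ((fd = open("/dev/twa0", O_RDWR)) < 0) {
		perror("open(/dev/twa0)");
		return(1);
	}
	if (ioctl(fd, TW_OSL_IOCTL_SCAN_BUS) < 0)
		perror("ioctl(TW_OSL_IOCTL_SCAN_BUS)");
	close(fd);
	return(0);
}
#endif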


static TW_INT32	twa_probe(device_t dev);
static TW_INT32	twa_attach(device_t dev);
static TW_INT32	twa_detach(device_t dev);
static TW_INT32	twa_shutdown(device_t dev);
static TW_VOID	twa_pci_intr(TW_VOID *arg);
static TW_VOID	twa_watchdog(TW_VOID *arg);
int twa_setup_intr(struct twa_softc *sc);
int twa_teardown_intr(struct twa_softc *sc);

static TW_INT32	tw_osli_alloc_mem(struct twa_softc *sc);
static TW_VOID	tw_osli_free_resources(struct twa_softc *sc);

static TW_VOID	twa_map_load_data_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);
static TW_VOID	twa_map_load_callback(TW_VOID *arg,
	bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error);


static device_method_t	twa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		twa_probe),
	DEVMETHOD(device_attach,	twa_attach),
	DEVMETHOD(device_detach,	twa_detach),
	DEVMETHOD(device_shutdown,	twa_shutdown),

	DEVMETHOD_END
};

static driver_t	twa_pci_driver = {
	"twa",
	twa_methods,
	sizeof(struct twa_softc)
};

DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, NULL, NULL);
MODULE_DEPEND(twa, cam, 1, 1, 1);
MODULE_DEPEND(twa, pci, 1, 1, 1);
MODULE_VERSION(twa, 1);


/*
 * Function name:	twa_probe
 * Description:		Called at driver load time.  Claims 9000 ctlrs.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	<= 0	-- success
 *			> 0	-- failure
 */
static TW_INT32
twa_probe(device_t dev)
{
	static TW_UINT8	first_ctlr = 1;

	tw_osli_dbg_printf(3, "entered");

	if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) {
		device_set_desc(dev, TW_OSLI_DEVICE_NAME);
		/* Print the driver version only once. */
		if (first_ctlr) {
			kprintf("3ware device driver for 9000 series storage "
				"controllers, version: %s\n",
				TW_OSL_DRIVER_VERSION_STRING);
			first_ctlr = 0;
		}
		return(0);
	}
	return(ENXIO);
}
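
/*
 * Function name:	twa_setup_intr
 * Description:		Connects twa_pci_intr as the handler for the IRQ
 *			resource allocated in twa_attach.  Does nothing if a
 *			handler is already registered or the IRQ resource is
 *			missing, so it is safe to call from error-recovery
 *			paths.  twa_teardown_intr below is its counterpart.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */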
int twa_setup_intr(struct twa_softc *sc)
{
	int error = 0;

	if (!(sc->intr_handle) && (sc->irq_res)) {
		error = bus_setup_intr(sc->bus_dev, sc->irq_res,
					INTR_MPSAFE,
					twa_pci_intr,
					sc, &sc->intr_handle, NULL);
	}
	return( error );
}


int twa_teardown_intr(struct twa_softc *sc)
{
	int error = 0;

	if ((sc->intr_handle) && (sc->irq_res)) {
		error = bus_teardown_intr(sc->bus_dev,
					sc->irq_res, sc->intr_handle);
		sc->intr_handle = NULL;
	}
	return( error );
}



/*
 * Function name:	twa_attach
 * Description:		Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			(to the Common Layer) to initialize ctlr, and to
 *			attach to CAM.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		irq_flags;
	TW_INT32		error;

	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	tw_osli_dbg_dprintf(3, sc, "entered");
	sc->device_id = pci_get_device(dev);

	/* Initialize the mutexes right here. */
	sc->io_lock = &(sc->io_lock_handle);
	spin_init(sc->io_lock, "twa_iolock");
	sc->q_lock = &(sc->q_lock_handle);
	spin_init(sc->q_lock, "twa_qlock");
	sc->sim_lock = &(sc->sim_lock_handle);
	lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE);

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "driver_version", CTLFLAG_RD,
		TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	pci_enable_busmaster(dev);

	/* Allocate the PCI register window. */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
		&bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201F,
			"Can't get PCI BAR info",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
				&(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
				== NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2002,
			"Can't allocate register window",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	sc->irq_type = pci_alloc_1intr(sc->bus_dev, twa_msi_enable,
	    &sc->irq_res_id, &irq_flags);
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
				&(sc->irq_res_id), 0, ~0, 1,
				irq_flags)) == NULL) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2003,
			"Can't allocate interrupt",
			ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2004,
			"Can't set up interrupt",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2005,
			"Memory allocation failure",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id,
			TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			sc->non_dma_mem, sc->dma_mem,
			sc->dma_mem_phys
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2006,
			"Failed to initialize Common Layer/controller",
			error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device. */
	sc->ctrl_dev = make_dev(&twa_ops, device_get_unit(sc->bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2007,
			"Failed to initialize CAM",
			error);
		return(error);
	}

	sc->watchdog_index = 0;
	callout_init_mp(&(sc->watchdog_callout[0]));
	callout_init_mp(&(sc->watchdog_callout[1]));
	callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle);

	return(0);
}
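
/*
 * Function name:	twa_watchdog
 * Description:		Periodic timer routine.  Looks for busy requests that
 *			have outlived their deadline and, if any are found,
 *			marks the controller as needing a reset.  When a reset
 *			is pending, it re-arms itself 70 seconds out and resets
 *			the controller; otherwise, while the driver is active,
 *			it re-arms itself every 5 seconds.  The two callout
 *			structures are used alternately (watchdog_index selects
 *			the slot used for the next re-arm).
 *
 * Input:		arg	-- ptr to the Common Layer ctlr handle
 * Output:		None
 * Return value:	None
 */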
static TW_VOID
twa_watchdog(TW_VOID *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle =
		(struct tw_cl_ctlr_handle *)arg;
	struct twa_softc		*sc = ctlr_handle->osl_ctlr_ctxt;
	int				i;
	int				i_need_a_reset = 0;
	int				driver_is_active = 0;
	TW_UINT64			current_time;
	struct tw_osli_req_context	*my_req;


//==============================================================================
	current_time = (TW_UINT64) (tw_osl_get_local_time());

	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		my_req = &(sc->req_ctx_buf[i]);

		if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) &&
		    (my_req->deadline) &&
		    (my_req->deadline < current_time)) {
			tw_cl_set_reset_needed(ctlr_handle);
#ifdef TW_OSL_DEBUG
			device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time);
#else /* TW_OSL_DEBUG */
			device_printf((sc)->bus_dev, "Request %d timed out!\n", i);
#endif /* TW_OSL_DEBUG */
			break;
		}
	}
//==============================================================================

	i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle);

	i = (int) ((sc->watchdog_index++) & 1);

	driver_is_active = tw_cl_is_active(ctlr_handle);

	if (i_need_a_reset) {
#ifdef TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n");
#endif /* TW_OSL_DEBUG */
		callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle);
		tw_cl_reset_ctlr(ctlr_handle);
#ifdef TW_OSL_DEBUG
		device_printf((sc)->bus_dev, "Watchdog reset completed!\n");
#endif /* TW_OSL_DEBUG */
	} else if (driver_is_active) {
		callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle);
	}
#ifdef TW_OSL_DEBUG
	if (i_need_a_reset)
		device_printf((sc)->bus_dev, "i_need_a_reset = %d, "
			"driver_is_active = %d\n",
			i_need_a_reset, driver_is_active);
#endif /* TW_OSL_DEBUG */
}


/*
 * Function name:	tw_osli_alloc_mem
 * Description:		Allocates memory needed both by CL and OSL.
 *
 * Input:		sc	-- OSL internal controller context
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
tw_osli_alloc_mem(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_UINT32			max_sg_elements;
	TW_UINT32			non_dma_mem_size;
	TW_UINT32			dma_mem_size;
	TW_INT32			error;
	TW_INT32			i;

	tw_osli_dbg_dprintf(3, sc, "entered");

	sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0;
	sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0;

	max_sg_elements = (sizeof(bus_addr_t) == 8) ?
		TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS;

	if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags,
			sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
			&(sc->alignment), &(sc->sg_size_factor),
			&non_dma_mem_size, &dma_mem_size
			))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2008,
			"Can't get Common Layer's memory requirements",
			error);
		return(error);
	}

	sc->non_dma_mem = kmalloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS,
				M_WAITOK);

	/* Create the parent dma tag. */
	if (bus_dma_tag_create(NULL,			/* parent */
				sc->alignment,		/* alignment */
				TW_OSLI_DMA_BOUNDARY,	/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				0,			/* flags */
				&sc->parent_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200A,
			"Can't allocate parent DMA tag",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				dma_mem_size,		/* maxsize */
				1,			/* nsegments */
				BUS_SPACE_MAXSIZE,	/* maxsegsize */
				0,			/* flags */
				&sc->cmd_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200B,
			"Can't allocate DMA tag for Common Layer's "
			"DMA'able memory",
			ENOMEM);
		return(ENOMEM);
	}

	if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
		BUS_DMA_NOWAIT, &sc->cmd_map)) {
		/* Try a second time. */
		if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
			BUS_DMA_NOWAIT, &sc->cmd_map)) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x200C,
				"Can't allocate DMA'able memory for the "
				"Common Layer",
				ENOMEM);
			return(ENOMEM);
		}
	}

	bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
		dma_mem_size, twa_map_load_callback,
		&sc->dma_mem_phys, 0);

	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				&sc->dma_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x200F,
			"Can't allocate DMA tag for data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/*
	 * Create a dma tag for ioctl data buffers; size will be the maximum
	 * possible I/O size (128kB).
	 */
	if (bus_dma_tag_create(sc->parent_tag,		/* parent */
				sc->alignment,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				TW_CL_MAX_IO_SIZE,	/* maxsize */
				max_sg_elements,	/* nsegments */
				TW_CL_MAX_IO_SIZE,	/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				&sc->ioctl_tag		/* tag */)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2010,
			"Can't allocate DMA tag for ioctl data buffers",
			ENOMEM);
		return(ENOMEM);
	}

	/* Create just one map for all ioctl request data buffers. */
	if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2011,
			"Can't create ioctl map",
			ENOMEM);
		return(ENOMEM);
	}


	/* Initialize request queues. */
	tw_osli_req_q_init(sc, TW_OSLI_FREE_Q);
	tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q);

	sc->req_ctx_buf = kmalloc((sizeof(struct tw_osli_req_context) *
		TW_OSLI_MAX_NUM_REQUESTS), TW_OSLI_MALLOC_CLASS,
		M_WAITOK | M_ZERO);
	for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) {
		req = &(sc->req_ctx_buf[i]);
		req->ctlr = sc;
		if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) {
			tw_osli_printf(sc, "request # = %d, error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2013,
				"Can't create dma map",
				i, ENOMEM);
			return(ENOMEM);
		}

		/* Initialize the ioctl wakeup/timeout mutex. */
		req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle);
		lockinit(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", 0, 0);

		/* Insert request into the free queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}

	return(0);
}



/*
 * Function name:	tw_osli_free_resources
 * Description:		Performs clean-up at the time of going down.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			lockuninit(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		kfree(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		kfree(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
		tw_osli_dbg_dprintf(1, sc,
			"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);

	if (sc->irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(sc->bus_dev);

	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);

	/* Destroy the control device. */
	if (sc->ctrl_dev != NULL)
		destroy_dev(sc->ctrl_dev);
	dev_ops_remove_minor(&twa_ops, device_get_unit(sc->bus_dev));
}



/*
 * Function name:	twa_detach
 * Description:		Called when the controller is being detached from
 *			the pci bus.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_detach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error;

	tw_osli_dbg_dprintf(3, sc, "entered");

	error = EBUSY;
	if (sc->open) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2014,
			"Device open",
			error);
		goto out;
	}

	/* Shut the controller down. */
	if ((error = twa_shutdown(dev)))
		goto out;

	/* Free all resources associated with this controller. */
	tw_osli_free_resources(sc);
	error = 0;

out:
	return(error);
}



/*
 * Function name:	twa_shutdown
 * Description:		Called at unload/shutdown time.  Lets the controller
 *			know that we are going down.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static TW_INT32
twa_shutdown(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Disconnect interrupts. */
	error = twa_teardown_intr(sc);

	/* Stop watchdog task. */
	callout_stop_sync(&(sc->watchdog_callout[0]));
	callout_stop_sync(&(sc->watchdog_callout[1]));

	/* Disconnect from the controller. */
	if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) {
		tw_osli_printf(sc, "error = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x2015,
			"Failed to shutdown Common Layer/controller",
			error);
	}
	return(error);
}



/*
 * Function name:	twa_pci_intr
 * Description:		Interrupt handler.  Wrapper for twa_interrupt.
 *
 * Input:		arg	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_pci_intr(TW_VOID *arg)
{
	struct twa_softc	*sc = (struct twa_softc *)arg;

	tw_osli_dbg_dprintf(10, sc, "entered");
	tw_cl_interrupt(&(sc->ctlr_handle));
}


/*
 * Function name:	tw_osli_fw_passthru
 * Description:		Builds a fw passthru cmd pkt, and submits it to CL.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 *			buf	-- ptr to ioctl pkt understood by CL
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf)
{
	struct tw_osli_req_context		*req;
	struct tw_osli_ioctl_no_data_buf	*user_buf =
		(struct tw_osli_ioctl_no_data_buf *)buf;
	TW_TIME					end_time;
	TW_UINT32				timeout = 60;
	TW_UINT32				data_buf_size_adjusted;
	struct tw_cl_req_packet			*req_pkt;
	struct tw_cl_passthru_req_packet	*pt_req;
	TW_INT32				error;

	tw_osli_dbg_dprintf(5, sc, "ioctl: passthru");

	if ((req = tw_osli_get_request(sc)) == NULL)
		return(EBUSY);

	req->req_handle.osl_req_ctxt = req;
	req->orig_req = buf;
	req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU;

	req_pkt = &(req->req_pkt);
	req_pkt->status = 0;
	req_pkt->tw_osl_callback = tw_osl_complete_passthru;
	/* Let the Common Layer retry the request on cmd queue full. */
	req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY;

	pt_req = &(req_pkt->gen_req_pkt.pt_req);
	/*
	 * Make sure that the data buffer sent to firmware is a
	 * 512 byte multiple in size.
	 */
	data_buf_size_adjusted = roundup2(user_buf->driver_pkt.buffer_length,
		sc->sg_size_factor);
	if ((req->length = data_buf_size_adjusted)) {
		req->data = kmalloc(data_buf_size_adjusted,
			TW_OSLI_MALLOC_CLASS, M_WAITOK);
		/* Copy the payload. */
		if ((error = copyin((TW_VOID *)(user_buf->pdata),
			req->data,
			user_buf->driver_pkt.buffer_length)) != 0) {
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2017,
				"Could not copyin fw_passthru data_buf",
				error);
			goto fw_passthru_err;
		}
		pt_req->sgl_entries = 1; /* will be updated during mapping */
		req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN |
			TW_OSLI_REQ_FLAGS_DATA_OUT);
	} else
		pt_req->sgl_entries = 0; /* no payload */

	pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt));
	pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet);

	if ((error = tw_osli_map_request(req)))
		goto fw_passthru_err;

	end_time = tw_osl_get_local_time() + timeout;
	while (req->state != TW_OSLI_REQ_STATE_COMPLETE) {
		lockmgr(req->ioctl_wake_timeout_lock, LK_EXCLUSIVE);
		req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING;

		error = lksleep(req, req->ioctl_wake_timeout_lock, 0,
			    "twa_passthru", timeout*hz);
		lockmgr(req->ioctl_wake_timeout_lock, LK_RELEASE);

		if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING))
			error = 0;
		req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;

		if (!error) {
			if (((error = req->error_code)) ||
			    ((error = (req->state !=
			    TW_OSLI_REQ_STATE_COMPLETE))) ||
			    ((error = req_pkt->status)))
				goto fw_passthru_err;
			break;
		}

		if (req_pkt->status) {
			error = req_pkt->status;
			goto fw_passthru_err;
		}

		if (error == EWOULDBLOCK) {
			/* Time out! */
			if ((!(req->error_code)) &&
			    (req->state == TW_OSLI_REQ_STATE_COMPLETE) &&
			    (!(req_pkt->status)) ) {
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x7777,
					"FALSE Passthru timeout!",
					req);
#endif /* TW_OSL_DEBUG */
				error = 0; /* False error */
				break;
			}
			if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) {
#ifdef TW_OSL_DEBUG
				tw_osli_printf(sc, "request = %p",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x2018,
					"Passthru request timed out!",
					req);
#else /* TW_OSL_DEBUG */
				device_printf((sc)->bus_dev, "Passthru request timed out!\n");
#endif /* TW_OSL_DEBUG */
				tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle));
			}

			error = 0;
			end_time = tw_osl_get_local_time() + timeout;
			continue;
			/*
			 * Don't touch req after a reset.  It (and any
			 * associated data) will be
			 * unmapped by the callback.
			 */
		}
		/*
		 * Either the request got completed, or we were woken up by a
		 * signal.  Calculate the new timeout, in case it was the latter.
		 */
		timeout = (end_time - tw_osl_get_local_time());
	} /* End of while loop */

	/* If there was a payload, copy it back. */
	if ((!error) && (req->length))
		if ((error = copyout(req->data, user_buf->pdata,
			user_buf->driver_pkt.buffer_length)))
			tw_osli_printf(sc, "error = %d",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x2019,
				"Could not copyout fw_passthru data_buf",
				error);

fw_passthru_err:

	if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
		error = EBUSY;

	user_buf->driver_pkt.os_status = error;
	/* Free resources. */
	if (req->data)
		kfree(req->data, TW_OSLI_MALLOC_CLASS);
	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	return(error);
}



/*
 * Function name:	tw_osl_complete_passthru
 * Description:		Called to complete passthru requests.
 *
 * Input:		req_handle	-- ptr to request handle
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle)
{
	struct tw_osli_req_context	*req = req_handle->osl_req_ctxt;
	struct tw_cl_req_packet		*req_pkt =
		(struct tw_cl_req_packet *)(&req->req_pkt);
	struct twa_softc		*sc = req->ctlr;

	tw_osli_dbg_dprintf(5, sc, "entered");

	if (req->state != TW_OSLI_REQ_STATE_BUSY) {
		tw_osli_printf(sc, "request = %p, status = %d",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201B,
			"Unposted command completed!!",
			req, req->state);
	}

	/*
	 * Remove request from the busy queue.  Just mark it complete.
	 * There's no need to move it into the complete queue as we are
	 * going to be done with it right now.
	 */
	req->state = TW_OSLI_REQ_STATE_COMPLETE;
	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);

	tw_osli_unmap_request(req);

	/*
	 * Don't do a wake up if there was an error even before the request
	 * was sent down to the Common Layer, and we hadn't gotten an
	 * EINPROGRESS.  The request originator will then be returned an
	 * error, and he can do the clean-up.
	 */
	if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)))
		return;

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) {
			/* Wake up the sleeping command originator. */
			tw_osli_dbg_dprintf(5, sc,
				"Waking up originator of request %p", req);
			req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING;
			wakeup_one(req);
		} else {
			/*
			 * If the request completed even before lksleep
			 * was called, simply return.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED)
				return;

			if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET)
				return;

			tw_osli_printf(sc, "request = %p",
				TW_CL_SEVERITY_ERROR_STRING,
				TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
				0x201C,
				"Passthru callback called, "
				"and caller not sleeping",
				req);
		}
	} else {
		tw_osli_printf(sc, "request = %p",
			TW_CL_SEVERITY_ERROR_STRING,
			TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
			0x201D,
			"Passthru callback called for non-passthru request",
			req);
	}
}



/*
 * Function name:	tw_osli_get_request
 * Description:		Gets a request pkt from the free queue.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	ptr to request pkt	-- success
 *			NULL			-- failure
 */
struct tw_osli_req_context *
tw_osli_get_request(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;

	tw_osli_dbg_dprintf(4, sc, "entered");

	/* Get a free request packet. */
	req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);

	/* Initialize some fields to their defaults. */
	if (req) {
		req->req_handle.osl_req_ctxt = NULL;
		req->req_handle.cl_req_ctxt = NULL;
		req->req_handle.is_io = 0;
		req->data = NULL;
		req->length = 0;
		req->deadline = 0;
		req->real_data = NULL;
		req->real_length = 0;
		req->state = TW_OSLI_REQ_STATE_INIT;	/* req being initialized */
		req->flags = 0;
		req->error_code = 0;
		req->orig_req = NULL;

		bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet));

	}
	return(req);
}



/*
 * Function name:	twa_map_load_data_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements.
 *
 * Input:		arg	-- ptr to OSL internal request context
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	struct tw_osli_req_context	*req =
		(struct tw_osli_req_context *)arg;
	struct twa_softc		*sc = req->ctlr;
	struct tw_cl_req_packet		*req_pkt = &(req->req_pkt);

	tw_osli_dbg_dprintf(10, sc, "entered");

	if (error == EINVAL) {
		req->error_code = error;
		return;
	}

	/* Mark the request as currently being processed. */
	req->state = TW_OSLI_REQ_STATE_BUSY;
	/* Move the request into the busy queue. */
	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);

	req->flags |= TW_OSLI_REQ_FLAGS_MAPPED;

	if (error == EFBIG) {
		req->error_code = error;
		goto out;
	}

	if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
		struct tw_cl_passthru_req_packet	*pt_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
				BUS_DMASYNC_PREWRITE);
		}

		pt_req = &(req_pkt->gen_req_pkt.pt_req);
		pt_req->sg_list = (TW_UINT8 *)segs;
		pt_req->sgl_entries += (nsegments - 1);
		error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	} else {
		struct tw_cl_scsi_req_packet	*scsi_req;

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN)
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREREAD);

		if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) {
			/*
			 * If we're using an alignment buffer, and we're
			 * writing data, copy the real data out.
			 */
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
				bcopy(req->real_data, req->data, req->real_length);
			bus_dmamap_sync(sc->dma_tag, req->dma_map,
				BUS_DMASYNC_PREWRITE);
		}

		scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
		scsi_req->sg_list = (TW_UINT8 *)segs;
		scsi_req->sgl_entries += (nsegments - 1);
		error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt,
			&(req->req_handle));
	}

out:
	if (error) {
		req->error_code = error;
		req_pkt->tw_osl_callback(&(req->req_handle));
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS)
			tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);
	}
}



/*
 * Function name:	twa_map_load_callback
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with a cmd pkt.
 *
 * Input:		arg	-- ptr to variable to hold phys addr
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static TW_VOID
twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs,
	TW_INT32 nsegments, TW_INT32 error)
{
	*((bus_addr_t *)arg) = segs[0].ds_addr;
}



/*
 * Function name:	tw_osli_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
TW_INT32
tw_osli_map_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;
	TW_INT32		error = 0;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involves data, map that too. */
	if (req->data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)req->data % sc->sg_size_factor) ||
			(req->length % sc->sg_size_factor)) {
			req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED;
			/* Save original data pointer and length. */
			req->real_data = req->data;
			req->real_length = req->length;
			req->length = roundup2(req->length, sc->sg_size_factor);
			req->data = kmalloc(req->length, TW_OSLI_MALLOC_CLASS,
					M_NOWAIT);
			if (req->data == NULL) {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x201E,
					"Failed to allocate memory "
					"for bounce buffer",
					ENOMEM);
				/* Restore original data pointer and length. */
				req->data = req->real_data;
				req->length = req->real_length;
				return(ENOMEM);
			}
		}

		/*
		 * Map the data buffer into bus space and build the SG list.
		 */
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			spin_lock(sc->io_lock);
			error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
			spin_unlock(sc->io_lock);
		} else {
			/*
			 * There's only one CAM I/O thread running at a time.
			 * So, there's no need to hold the io_lock.
			 */
			error = bus_dmamap_load(sc->dma_tag, req->dma_map,
				req->data, req->length,
				twa_map_load_data_callback, req,
				BUS_DMA_WAITOK);
		}

		if (!error)
			error = req->error_code;
		else {
			if (error == EINPROGRESS) {
				/*
				 * Specifying sc->io_lock as the lockfuncarg
				 * in ...tag_create should protect the access
				 * of ...FLAGS_MAPPED from the callback.
				 */
				spin_lock(sc->io_lock);
				if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED))
					req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS;
				tw_osli_disallow_new_requests(sc, &(req->req_handle));
				spin_unlock(sc->io_lock);
				error = 0;
			} else {
				tw_osli_printf(sc, "error = %d",
					TW_CL_SEVERITY_ERROR_STRING,
					TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
					0x9999,
					"Failed to map DMA memory "
					"for I/O request",
					error);
				req->flags |= TW_OSLI_REQ_FLAGS_FAILED;
				/* Free alignment buffer if it was used. */
				if (req->flags &
					TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
					kfree(req->data, TW_OSLI_MALLOC_CLASS);
					/*
					 * Restore original data pointer
					 * and length.
					 */
					req->data = req->real_data;
					req->length = req->real_length;
				}
			}
		}

	} else {
		/* Mark the request as currently being processed. */
		req->state = TW_OSLI_REQ_STATE_BUSY;
		/* Move the request into the busy queue. */
		tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU)
			error = tw_cl_fw_passthru(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		else
			error = tw_cl_start_io(&sc->ctlr_handle,
					&(req->req_pkt), &(req->req_handle));
		if (error) {
			req->error_code = error;
			req->req_pkt.tw_osl_callback(&(req->req_handle));
		}
	}
	return(error);
}



/*
 * Function name:	tw_osli_unmap_request
 * Description:		Undoes the mapping done by tw_osli_map_request.
 *
 * Input:		req	-- ptr to request pkt
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_unmap_request(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	tw_osli_dbg_dprintf(10, sc, "entered");

	/* If the command involved data, unmap that too. */
	if (req->data != NULL) {
		if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) {
			/* Lock against multiple simultaneous ioctl calls. */
			spin_lock(sc->io_lock);

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->ioctl_tag,
					sc->ioctl_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}

			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map);

			spin_unlock(sc->io_lock);
		} else {
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) {
				bus_dmamap_sync(sc->dma_tag,
					req->dma_map, BUS_DMASYNC_POSTREAD);

				/*
				 * If we are using a bounce buffer, and we are
				 * reading data, copy the real data in.
				 */
				if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED)
					bcopy(req->data, req->real_data,
						req->real_length);
			}
			if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT)
				bus_dmamap_sync(sc->dma_tag, req->dma_map,
					BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->dma_tag, req->dma_map);
		}
	}

	/* Free alignment buffer if it was used. */
	if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) {
		kfree(req->data, TW_OSLI_MALLOC_CLASS);
		/* Restore original data pointer and length. */
		req->data = req->real_data;
		req->length = req->real_length;
	}
}
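
/*
 * The helpers below are built only when TW_OSL_DEBUG is defined.  They are
 * meant to be called by hand from the in-kernel debugger while diagnosing
 * a hung controller, e.g. (illustrative; exact ddb syntax may vary):
 *
 *	db> call twa_report_stats()
 */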


#ifdef TW_OSL_DEBUG

TW_VOID	twa_report_stats(TW_VOID);
TW_VOID	twa_reset_stats(TW_VOID);
TW_VOID	tw_osli_print_ctlr_stats(struct twa_softc *sc);
TW_VOID	twa_print_req_info(struct tw_osli_req_context *req);


/*
 * Function name:	twa_report_stats
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal stats for the controller.
 *
 * Input:		None
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_report_stats(TW_VOID)
{
	struct twa_softc	*sc;
	TW_INT32		i;

	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
		tw_osli_print_ctlr_stats(sc);
		tw_cl_print_ctlr_stats(&sc->ctlr_handle);
	}
}



/*
 * Function name:	tw_osli_print_ctlr_stats
 * Description:		For being called from ddb.  Prints OSL controller stats
 *
 * Input:		sc	-- ptr to OSL internal controller context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	twa_printf(sc, "OSLq type  current  max\n");
	twa_printf(sc, "free      %04d     %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	twa_printf(sc, "busy      %04d     %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}



/*
 * Function name:	twa_print_req_info
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal details for the request.
 *
 * Input:		req	-- ptr to OSL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	twa_printf(sc, "OSL details for request:\n");
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	tw_cl_print_req_info(&(req->req_handle));
}



/*
 * Function name:	twa_reset_stats
 * Description:		For being called from ddb.
 *			Resets some OSL controller stats.
 *
 * Input:		None
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_reset_stats(TW_VOID)
{
	struct twa_softc	*sc;
	TW_INT32		i;

	for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) {
		sc->q_stats[TW_OSLI_FREE_Q].max_len = 0;
		sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0;
		tw_cl_reset_stats(&sc->ctlr_handle);
	}
}

#endif /* TW_OSL_DEBUG */