1 /* 2 * Copyright (c) 2004-07 Applied Micro Circuits Corporation. 3 * Copyright (c) 2004-05 Vinod Kashyap. 4 * Copyright (c) 2000 Michael Smith 5 * Copyright (c) 2000 BSDi 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD: head/sys/dev/twa/tw_osl_freebsd.c 254263 2013-08-12 23:30:01Z scottl $ 30 */ 31 32 /* 33 * AMCC'S 3ware driver for 9000 series storage controllers. 34 * 35 * Author: Vinod Kashyap 36 * Modifications by: Adam Radford 37 * Modifications by: Manjunath Ranganathaiah 38 */ 39 40 41 /* 42 * FreeBSD specific functions not related to CAM, and other 43 * miscellaneous functions. 44 */ 45 46 47 #include <dev/raid/twa/tw_osl_includes.h> 48 #include <dev/raid/twa/tw_cl_fwif.h> 49 #include <dev/raid/twa/tw_cl_ioctl.h> 50 #include <dev/raid/twa/tw_osl_ioctl.h> 51 52 #ifdef TW_OSL_DEBUG 53 TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG; 54 TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG; 55 #endif /* TW_OSL_DEBUG */ 56 57 static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands"); 58 59 60 static d_open_t twa_open; 61 static d_close_t twa_close; 62 static d_ioctl_t twa_ioctl; 63 64 static struct dev_ops twa_ops = { 65 { "twa", 0, D_MPSAFE }, 66 .d_open = twa_open, 67 .d_close = twa_close, 68 .d_ioctl = twa_ioctl, 69 }; 70 71 static devclass_t twa_devclass; 72 73 static int twa_msi_enable = 0; 74 TUNABLE_INT("hw.twa.msi.enable", &twa_msi_enable); 75 76 77 /* 78 * Function name: twa_open 79 * Description: Called when the controller is opened. 80 * Simply marks the controller as open. 81 * 82 * Input: dev -- control device corresponding to the ctlr 83 * flags -- mode of open 84 * fmt -- device type (character/block etc.) 85 * proc -- current process 86 * Output: None 87 * Return value: 0 -- success 88 * non-zero-- failure 89 */ 90 static TW_INT32 91 twa_open(struct dev_open_args *ap) 92 { 93 cdev_t dev = ap->a_head.a_dev; 94 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); 95 96 tw_osli_dbg_dprintf(5, sc, "entered"); 97 sc->open = TW_CL_TRUE; 98 return(0); 99 } 100 101 102 103 /* 104 * Function name: twa_close 105 * Description: Called when the controller is closed. 106 * Simply marks the controller as not open. 107 * 108 * Input: dev -- control device corresponding to the ctlr 109 * flags -- mode of corresponding open 110 * fmt -- device type (character/block etc.) 111 * proc -- current process 112 * Output: None 113 * Return value: 0 -- success 114 * non-zero-- failure 115 */ 116 static TW_INT32 117 twa_close(struct dev_close_args *ap) 118 { 119 cdev_t dev = ap->a_head.a_dev; 120 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); 121 122 tw_osli_dbg_dprintf(5, sc, "entered"); 123 sc->open = TW_CL_FALSE; 124 return(0); 125 } 126 127 128 129 /* 130 * Function name: twa_ioctl 131 * Description: Called when an ioctl is posted to the controller. 132 * Handles any OS Layer specific cmds, passes the rest 133 * on to the Common Layer. 134 * 135 * Input: dev -- control device corresponding to the ctlr 136 * cmd -- ioctl cmd 137 * buf -- ptr to buffer in kernel memory, which is 138 * a copy of the input buffer in user-space 139 * flags -- mode of corresponding open 140 * proc -- current process 141 * Output: buf -- ptr to buffer in kernel memory, which will 142 * be copied to the output buffer in user-space 143 * Return value: 0 -- success 144 * non-zero-- failure 145 */ 146 static TW_INT32 147 twa_ioctl(struct dev_ioctl_args *ap) 148 { 149 cdev_t dev = ap->a_head.a_dev; 150 u_long cmd = ap->a_cmd; 151 caddr_t buf = ap->a_data; 152 struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); 153 TW_INT32 error; 154 155 tw_osli_dbg_dprintf(5, sc, "entered"); 156 157 switch (cmd) { 158 case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH: 159 tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru"); 160 error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf); 161 break; 162 163 case TW_OSL_IOCTL_SCAN_BUS: 164 /* Request CAM for a bus scan. */ 165 tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus"); 166 error = tw_osli_request_bus_scan(sc); 167 break; 168 169 default: 170 tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd); 171 error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf); 172 break; 173 } 174 return(error); 175 } 176 177 178 179 static TW_INT32 twa_probe(device_t dev); 180 static TW_INT32 twa_attach(device_t dev); 181 static TW_INT32 twa_detach(device_t dev); 182 static TW_INT32 twa_shutdown(device_t dev); 183 static TW_VOID twa_pci_intr(TW_VOID *arg); 184 static TW_VOID twa_watchdog(TW_VOID *arg); 185 int twa_setup_intr(struct twa_softc *sc); 186 int twa_teardown_intr(struct twa_softc *sc); 187 188 static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc); 189 static TW_VOID tw_osli_free_resources(struct twa_softc *sc); 190 191 static TW_VOID twa_map_load_data_callback(TW_VOID *arg, 192 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error); 193 static TW_VOID twa_map_load_callback(TW_VOID *arg, 194 bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error); 195 196 197 static device_method_t twa_methods[] = { 198 /* Device interface */ 199 DEVMETHOD(device_probe, twa_probe), 200 DEVMETHOD(device_attach, twa_attach), 201 DEVMETHOD(device_detach, twa_detach), 202 DEVMETHOD(device_shutdown, twa_shutdown), 203 204 DEVMETHOD_END 205 }; 206 207 static driver_t twa_pci_driver = { 208 "twa", 209 twa_methods, 210 sizeof(struct twa_softc) 211 }; 212 213 DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, NULL, NULL); 214 MODULE_DEPEND(twa, cam, 1, 1, 1); 215 MODULE_DEPEND(twa, pci, 1, 1, 1); 216 MODULE_VERSION(twa, 1); 217 218 219 /* 220 * Function name: twa_probe 221 * Description: Called at driver load time. Claims 9000 ctlrs. 222 * 223 * Input: dev -- bus device corresponding to the ctlr 224 * Output: None 225 * Return value: <= 0 -- success 226 * > 0 -- failure 227 */ 228 static TW_INT32 229 twa_probe(device_t dev) 230 { 231 static TW_UINT8 first_ctlr = 1; 232 233 tw_osli_dbg_printf(3, "entered"); 234 235 if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) { 236 device_set_desc(dev, TW_OSLI_DEVICE_NAME); 237 /* Print the driver version only once. */ 238 if (first_ctlr) { 239 kprintf("3ware device driver for 9000 series storage " 240 "controllers, version: %s\n", 241 TW_OSL_DRIVER_VERSION_STRING); 242 first_ctlr = 0; 243 } 244 return(0); 245 } 246 return(ENXIO); 247 } 248 249 int twa_setup_intr(struct twa_softc *sc) 250 { 251 int error = 0; 252 253 if (!(sc->intr_handle) && (sc->irq_res)) { 254 error = bus_setup_intr(sc->bus_dev, sc->irq_res, 255 INTR_MPSAFE, 256 twa_pci_intr, 257 sc, &sc->intr_handle, NULL); 258 } 259 return( error ); 260 } 261 262 263 int twa_teardown_intr(struct twa_softc *sc) 264 { 265 int error = 0; 266 267 if ((sc->intr_handle) && (sc->irq_res)) { 268 error = bus_teardown_intr(sc->bus_dev, 269 sc->irq_res, sc->intr_handle); 270 sc->intr_handle = NULL; 271 } 272 return( error ); 273 } 274 275 276 277 /* 278 * Function name: twa_attach 279 * Description: Allocates pci resources; updates sc; adds a node to the 280 * sysctl tree to expose the driver version; makes calls 281 * (to the Common Layer) to initialize ctlr, and to 282 * attach to CAM. 283 * 284 * Input: dev -- bus device corresponding to the ctlr 285 * Output: None 286 * Return value: 0 -- success 287 * non-zero-- failure 288 */ 289 static TW_INT32 290 twa_attach(device_t dev) 291 { 292 struct twa_softc *sc = device_get_softc(dev); 293 TW_INT32 bar_num; 294 TW_INT32 bar0_offset; 295 TW_INT32 bar_size; 296 TW_INT32 irq_flags; 297 TW_INT32 error; 298 299 sc->ctlr_handle.osl_ctlr_ctxt = sc; 300 301 /* Initialize the softc structure. */ 302 sc->bus_dev = dev; 303 tw_osli_dbg_dprintf(3, sc, "entered"); 304 sc->device_id = pci_get_device(dev); 305 306 /* Initialize the mutexes right here. */ 307 sc->io_lock = &(sc->io_lock_handle); 308 spin_init(sc->io_lock, "twa_iolock"); 309 sc->q_lock = &(sc->q_lock_handle); 310 spin_init(sc->q_lock, "twa_qlock"); 311 sc->sim_lock = &(sc->sim_lock_handle); 312 lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE); 313 314 SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), 315 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 316 OID_AUTO, "driver_version", CTLFLAG_RD, 317 TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version"); 318 319 /* Force the busmaster enable bit on, in case the BIOS forgot. */ 320 pci_enable_busmaster(dev); 321 322 /* Allocate the PCI register window. */ 323 if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM, 324 &bar_num, &bar0_offset, &bar_size))) { 325 tw_osli_printf(sc, "error = %d", 326 TW_CL_SEVERITY_ERROR_STRING, 327 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 328 0x201F, 329 "Can't get PCI BAR info", 330 error); 331 tw_osli_free_resources(sc); 332 return(error); 333 } 334 sc->reg_res_id = PCIR_BARS + bar0_offset; 335 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, 336 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) 337 == NULL) { 338 tw_osli_printf(sc, "error = %d", 339 TW_CL_SEVERITY_ERROR_STRING, 340 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 341 0x2002, 342 "Can't allocate register window", 343 ENXIO); 344 tw_osli_free_resources(sc); 345 return(ENXIO); 346 } 347 sc->bus_tag = rman_get_bustag(sc->reg_res); 348 sc->bus_handle = rman_get_bushandle(sc->reg_res); 349 350 /* Allocate and register our interrupt. */ 351 sc->irq_res_id = 0; 352 sc->irq_type = pci_alloc_1intr(sc->bus_dev, twa_msi_enable, 353 &sc->irq_res_id, &irq_flags); 354 if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ, 355 &(sc->irq_res_id), 0, ~0, 1, 356 irq_flags)) == NULL) { 357 tw_osli_printf(sc, "error = %d", 358 TW_CL_SEVERITY_ERROR_STRING, 359 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 360 0x2003, 361 "Can't allocate interrupt", 362 ENXIO); 363 tw_osli_free_resources(sc); 364 return(ENXIO); 365 } 366 if ((error = twa_setup_intr(sc))) { 367 tw_osli_printf(sc, "error = %d", 368 TW_CL_SEVERITY_ERROR_STRING, 369 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 370 0x2004, 371 "Can't set up interrupt", 372 error); 373 tw_osli_free_resources(sc); 374 return(error); 375 } 376 377 if ((error = tw_osli_alloc_mem(sc))) { 378 tw_osli_printf(sc, "error = %d", 379 TW_CL_SEVERITY_ERROR_STRING, 380 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 381 0x2005, 382 "Memory allocation failure", 383 error); 384 tw_osli_free_resources(sc); 385 return(error); 386 } 387 388 /* Initialize the Common Layer for this controller. */ 389 if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id, 390 TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS, 391 sc->non_dma_mem, sc->dma_mem, 392 sc->dma_mem_phys 393 ))) { 394 tw_osli_printf(sc, "error = %d", 395 TW_CL_SEVERITY_ERROR_STRING, 396 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 397 0x2006, 398 "Failed to initialize Common Layer/controller", 399 error); 400 tw_osli_free_resources(sc); 401 return(error); 402 } 403 404 /* Create the control device. */ 405 sc->ctrl_dev = make_dev(&twa_ops, device_get_unit(sc->bus_dev), 406 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, 407 "twa%d", device_get_unit(sc->bus_dev)); 408 sc->ctrl_dev->si_drv1 = sc; 409 410 if ((error = tw_osli_cam_attach(sc))) { 411 tw_osli_free_resources(sc); 412 tw_osli_printf(sc, "error = %d", 413 TW_CL_SEVERITY_ERROR_STRING, 414 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 415 0x2007, 416 "Failed to initialize CAM", 417 error); 418 return(error); 419 } 420 421 sc->watchdog_index = 0; 422 callout_init_mp(&(sc->watchdog_callout[0])); 423 callout_init_mp(&(sc->watchdog_callout[1])); 424 callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle); 425 426 return(0); 427 } 428 429 430 static TW_VOID 431 twa_watchdog(TW_VOID *arg) 432 { 433 struct tw_cl_ctlr_handle *ctlr_handle = 434 (struct tw_cl_ctlr_handle *)arg; 435 struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt; 436 int i; 437 int i_need_a_reset = 0; 438 int driver_is_active = 0; 439 TW_UINT64 current_time; 440 struct tw_osli_req_context *my_req; 441 442 443 //============================================================================== 444 current_time = (TW_UINT64) (tw_osl_get_local_time()); 445 446 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) { 447 my_req = &(sc->req_ctx_buf[i]); 448 449 if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) && 450 (my_req->deadline) && 451 (my_req->deadline < current_time)) { 452 tw_cl_set_reset_needed(ctlr_handle); 453 #ifdef TW_OSL_DEBUG 454 device_printf((sc)->bus_dev, "Request %d timed out! d = %llu, c = %llu\n", i, my_req->deadline, current_time); 455 #else /* TW_OSL_DEBUG */ 456 device_printf((sc)->bus_dev, "Request %d timed out!\n", i); 457 #endif /* TW_OSL_DEBUG */ 458 break; 459 } 460 } 461 //============================================================================== 462 463 i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle); 464 465 i = (int) ((sc->watchdog_index++) & 1); 466 467 driver_is_active = tw_cl_is_active(ctlr_handle); 468 469 if (i_need_a_reset) { 470 #ifdef TW_OSL_DEBUG 471 device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n"); 472 #endif /* TW_OSL_DEBUG */ 473 callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle); 474 tw_cl_reset_ctlr(ctlr_handle); 475 #ifdef TW_OSL_DEBUG 476 device_printf((sc)->bus_dev, "Watchdog reset completed!\n"); 477 #endif /* TW_OSL_DEBUG */ 478 } else if (driver_is_active) { 479 callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle); 480 } 481 #ifdef TW_OSL_DEBUG 482 if (i_need_a_reset) 483 device_printf((sc)->bus_dev, "i_need_a_reset = %d, " 484 "driver_is_active = %d\n", 485 i_need_a_reset, driver_is_active); 486 #endif /* TW_OSL_DEBUG */ 487 } 488 489 490 /* 491 * Function name: tw_osli_alloc_mem 492 * Description: Allocates memory needed both by CL and OSL. 493 * 494 * Input: sc -- OSL internal controller context 495 * Output: None 496 * Return value: 0 -- success 497 * non-zero-- failure 498 */ 499 static TW_INT32 500 tw_osli_alloc_mem(struct twa_softc *sc) 501 { 502 struct tw_osli_req_context *req; 503 TW_UINT32 max_sg_elements; 504 TW_UINT32 non_dma_mem_size; 505 TW_UINT32 dma_mem_size; 506 TW_INT32 error; 507 TW_INT32 i; 508 509 tw_osli_dbg_dprintf(3, sc, "entered"); 510 511 sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0; 512 sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0; 513 514 max_sg_elements = (sizeof(bus_addr_t) == 8) ? 515 TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS; 516 517 if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags, 518 sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS, 519 &(sc->alignment), &(sc->sg_size_factor), 520 &non_dma_mem_size, &dma_mem_size 521 ))) { 522 tw_osli_printf(sc, "error = %d", 523 TW_CL_SEVERITY_ERROR_STRING, 524 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 525 0x2008, 526 "Can't get Common Layer's memory requirements", 527 error); 528 return(error); 529 } 530 531 sc->non_dma_mem = kmalloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS, 532 M_WAITOK); 533 534 /* Create the parent dma tag. */ 535 if (bus_dma_tag_create(NULL, /* parent */ 536 sc->alignment, /* alignment */ 537 TW_OSLI_DMA_BOUNDARY, /* boundary */ 538 BUS_SPACE_MAXADDR, /* lowaddr */ 539 BUS_SPACE_MAXADDR, /* highaddr */ 540 NULL, NULL, /* filter, filterarg */ 541 TW_CL_MAX_IO_SIZE, /* maxsize */ 542 max_sg_elements, /* nsegments */ 543 TW_CL_MAX_IO_SIZE, /* maxsegsize */ 544 0, /* flags */ 545 &sc->parent_tag /* tag */)) { 546 tw_osli_printf(sc, "error = %d", 547 TW_CL_SEVERITY_ERROR_STRING, 548 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 549 0x200A, 550 "Can't allocate parent DMA tag", 551 ENOMEM); 552 return(ENOMEM); 553 } 554 555 /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). */ 556 if (bus_dma_tag_create(sc->parent_tag, /* parent */ 557 sc->alignment, /* alignment */ 558 0, /* boundary */ 559 BUS_SPACE_MAXADDR, /* lowaddr */ 560 BUS_SPACE_MAXADDR, /* highaddr */ 561 NULL, NULL, /* filter, filterarg */ 562 dma_mem_size, /* maxsize */ 563 1, /* nsegments */ 564 BUS_SPACE_MAXSIZE, /* maxsegsize */ 565 0, /* flags */ 566 &sc->cmd_tag /* tag */)) { 567 tw_osli_printf(sc, "error = %d", 568 TW_CL_SEVERITY_ERROR_STRING, 569 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 570 0x200B, 571 "Can't allocate DMA tag for Common Layer's " 572 "DMA'able memory", 573 ENOMEM); 574 return(ENOMEM); 575 } 576 577 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, 578 BUS_DMA_NOWAIT, &sc->cmd_map)) { 579 /* Try a second time. */ 580 if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, 581 BUS_DMA_NOWAIT, &sc->cmd_map)) { 582 tw_osli_printf(sc, "error = %d", 583 TW_CL_SEVERITY_ERROR_STRING, 584 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 585 0x200C, 586 "Can't allocate DMA'able memory for the" 587 "Common Layer", 588 ENOMEM); 589 return(ENOMEM); 590 } 591 } 592 593 bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem, 594 dma_mem_size, twa_map_load_callback, 595 &sc->dma_mem_phys, 0); 596 597 /* 598 * Create a dma tag for data buffers; size will be the maximum 599 * possible I/O size (128kB). 600 */ 601 if (bus_dma_tag_create(sc->parent_tag, /* parent */ 602 sc->alignment, /* alignment */ 603 0, /* boundary */ 604 BUS_SPACE_MAXADDR, /* lowaddr */ 605 BUS_SPACE_MAXADDR, /* highaddr */ 606 NULL, NULL, /* filter, filterarg */ 607 TW_CL_MAX_IO_SIZE, /* maxsize */ 608 max_sg_elements, /* nsegments */ 609 TW_CL_MAX_IO_SIZE, /* maxsegsize */ 610 BUS_DMA_ALLOCNOW, /* flags */ 611 &sc->dma_tag /* tag */)) { 612 tw_osli_printf(sc, "error = %d", 613 TW_CL_SEVERITY_ERROR_STRING, 614 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 615 0x200F, 616 "Can't allocate DMA tag for data buffers", 617 ENOMEM); 618 return(ENOMEM); 619 } 620 621 /* 622 * Create a dma tag for ioctl data buffers; size will be the maximum 623 * possible I/O size (128kB). 624 */ 625 if (bus_dma_tag_create(sc->parent_tag, /* parent */ 626 sc->alignment, /* alignment */ 627 0, /* boundary */ 628 BUS_SPACE_MAXADDR, /* lowaddr */ 629 BUS_SPACE_MAXADDR, /* highaddr */ 630 NULL, NULL, /* filter, filterarg */ 631 TW_CL_MAX_IO_SIZE, /* maxsize */ 632 max_sg_elements, /* nsegments */ 633 TW_CL_MAX_IO_SIZE, /* maxsegsize */ 634 BUS_DMA_ALLOCNOW, /* flags */ 635 &sc->ioctl_tag /* tag */)) { 636 tw_osli_printf(sc, "error = %d", 637 TW_CL_SEVERITY_ERROR_STRING, 638 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 639 0x2010, 640 "Can't allocate DMA tag for ioctl data buffers", 641 ENOMEM); 642 return(ENOMEM); 643 } 644 645 /* Create just one map for all ioctl request data buffers. */ 646 if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) { 647 tw_osli_printf(sc, "error = %d", 648 TW_CL_SEVERITY_ERROR_STRING, 649 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 650 0x2011, 651 "Can't create ioctl map", 652 ENOMEM); 653 return(ENOMEM); 654 } 655 656 657 /* Initialize request queues. */ 658 tw_osli_req_q_init(sc, TW_OSLI_FREE_Q); 659 tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q); 660 661 sc->req_ctx_buf = kmalloc((sizeof(struct tw_osli_req_context) * 662 TW_OSLI_MAX_NUM_REQUESTS), TW_OSLI_MALLOC_CLASS, 663 M_WAITOK | M_ZERO); 664 for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) { 665 req = &(sc->req_ctx_buf[i]); 666 req->ctlr = sc; 667 if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) { 668 tw_osli_printf(sc, "request # = %d, error = %d", 669 TW_CL_SEVERITY_ERROR_STRING, 670 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 671 0x2013, 672 "Can't create dma map", 673 i, ENOMEM); 674 return(ENOMEM); 675 } 676 677 /* Initialize the ioctl wakeup/ timeout mutex */ 678 req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle); 679 lockinit(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", 0, 0); 680 681 /* Insert request into the free queue. */ 682 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); 683 } 684 685 return(0); 686 } 687 688 689 690 /* 691 * Function name: tw_osli_free_resources 692 * Description: Performs clean-up at the time of going down. 693 * 694 * Input: sc -- ptr to OSL internal ctlr context 695 * Output: None 696 * Return value: None 697 */ 698 static TW_VOID 699 tw_osli_free_resources(struct twa_softc *sc) 700 { 701 struct tw_osli_req_context *req; 702 TW_INT32 error = 0; 703 704 tw_osli_dbg_dprintf(3, sc, "entered"); 705 706 /* Detach from CAM */ 707 tw_osli_cam_detach(sc); 708 709 if (sc->req_ctx_buf) 710 while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) != 711 NULL) { 712 lockuninit(req->ioctl_wake_timeout_lock); 713 714 if ((error = bus_dmamap_destroy(sc->dma_tag, 715 req->dma_map))) 716 tw_osli_dbg_dprintf(1, sc, 717 "dmamap_destroy(dma) returned %d", 718 error); 719 } 720 721 if ((sc->ioctl_tag) && (sc->ioctl_map)) 722 if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map))) 723 tw_osli_dbg_dprintf(1, sc, 724 "dmamap_destroy(ioctl) returned %d", error); 725 726 /* Free all memory allocated so far. */ 727 if (sc->req_ctx_buf) 728 kfree(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS); 729 730 if (sc->non_dma_mem) 731 kfree(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS); 732 733 if (sc->dma_mem) { 734 bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); 735 bus_dmamem_free(sc->cmd_tag, sc->dma_mem, 736 sc->cmd_map); 737 } 738 if (sc->cmd_tag) 739 if ((error = bus_dma_tag_destroy(sc->cmd_tag))) 740 tw_osli_dbg_dprintf(1, sc, 741 "dma_tag_destroy(cmd) returned %d", error); 742 743 if (sc->dma_tag) 744 if ((error = bus_dma_tag_destroy(sc->dma_tag))) 745 tw_osli_dbg_dprintf(1, sc, 746 "dma_tag_destroy(dma) returned %d", error); 747 748 if (sc->ioctl_tag) 749 if ((error = bus_dma_tag_destroy(sc->ioctl_tag))) 750 tw_osli_dbg_dprintf(1, sc, 751 "dma_tag_destroy(ioctl) returned %d", error); 752 753 if (sc->parent_tag) 754 if ((error = bus_dma_tag_destroy(sc->parent_tag))) 755 tw_osli_dbg_dprintf(1, sc, 756 "dma_tag_destroy(parent) returned %d", error); 757 758 759 /* Disconnect the interrupt handler. */ 760 if ((error = twa_teardown_intr(sc))) 761 tw_osli_dbg_dprintf(1, sc, 762 "teardown_intr returned %d", error); 763 764 if (sc->irq_res != NULL) 765 if ((error = bus_release_resource(sc->bus_dev, 766 SYS_RES_IRQ, sc->irq_res_id, sc->irq_res))) 767 tw_osli_dbg_dprintf(1, sc, 768 "release_resource(irq) returned %d", error); 769 770 if (sc->irq_type == PCI_INTR_TYPE_MSI) 771 pci_release_msi(sc->bus_dev); 772 773 /* Release the register window mapping. */ 774 if (sc->reg_res != NULL) 775 if ((error = bus_release_resource(sc->bus_dev, 776 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))) 777 tw_osli_dbg_dprintf(1, sc, 778 "release_resource(io) returned %d", error); 779 780 /* Destroy the control device. */ 781 if (sc->ctrl_dev != NULL) 782 destroy_dev(sc->ctrl_dev); 783 dev_ops_remove_minor(&twa_ops, device_get_unit(sc->bus_dev)); 784 } 785 786 787 788 /* 789 * Function name: twa_detach 790 * Description: Called when the controller is being detached from 791 * the pci bus. 792 * 793 * Input: dev -- bus device corresponding to the ctlr 794 * Output: None 795 * Return value: 0 -- success 796 * non-zero-- failure 797 */ 798 static TW_INT32 799 twa_detach(device_t dev) 800 { 801 struct twa_softc *sc = device_get_softc(dev); 802 TW_INT32 error; 803 804 tw_osli_dbg_dprintf(3, sc, "entered"); 805 806 error = EBUSY; 807 if (sc->open) { 808 tw_osli_printf(sc, "error = %d", 809 TW_CL_SEVERITY_ERROR_STRING, 810 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 811 0x2014, 812 "Device open", 813 error); 814 goto out; 815 } 816 817 /* Shut the controller down. */ 818 if ((error = twa_shutdown(dev))) 819 goto out; 820 821 /* Free all resources associated with this controller. */ 822 tw_osli_free_resources(sc); 823 error = 0; 824 825 out: 826 return(error); 827 } 828 829 830 831 /* 832 * Function name: twa_shutdown 833 * Description: Called at unload/shutdown time. Lets the controller 834 * know that we are going down. 835 * 836 * Input: dev -- bus device corresponding to the ctlr 837 * Output: None 838 * Return value: 0 -- success 839 * non-zero-- failure 840 */ 841 static TW_INT32 842 twa_shutdown(device_t dev) 843 { 844 struct twa_softc *sc = device_get_softc(dev); 845 TW_INT32 error = 0; 846 847 tw_osli_dbg_dprintf(3, sc, "entered"); 848 849 /* Disconnect interrupts. */ 850 error = twa_teardown_intr(sc); 851 852 /* Stop watchdog task. */ 853 callout_stop_sync(&(sc->watchdog_callout[0])); 854 callout_stop_sync(&(sc->watchdog_callout[1])); 855 856 /* Disconnect from the controller. */ 857 if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) { 858 tw_osli_printf(sc, "error = %d", 859 TW_CL_SEVERITY_ERROR_STRING, 860 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 861 0x2015, 862 "Failed to shutdown Common Layer/controller", 863 error); 864 } 865 return(error); 866 } 867 868 869 870 /* 871 * Function name: twa_pci_intr 872 * Description: Interrupt handler. Wrapper for twa_interrupt. 873 * 874 * Input: arg -- ptr to OSL internal ctlr context 875 * Output: None 876 * Return value: None 877 */ 878 static TW_VOID 879 twa_pci_intr(TW_VOID *arg) 880 { 881 struct twa_softc *sc = (struct twa_softc *)arg; 882 883 tw_osli_dbg_dprintf(10, sc, "entered"); 884 tw_cl_interrupt(&(sc->ctlr_handle)); 885 } 886 887 888 /* 889 * Function name: tw_osli_fw_passthru 890 * Description: Builds a fw passthru cmd pkt, and submits it to CL. 891 * 892 * Input: sc -- ptr to OSL internal ctlr context 893 * buf -- ptr to ioctl pkt understood by CL 894 * Output: None 895 * Return value: 0 -- success 896 * non-zero-- failure 897 */ 898 TW_INT32 899 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf) 900 { 901 struct tw_osli_req_context *req; 902 struct tw_osli_ioctl_no_data_buf *user_buf = 903 (struct tw_osli_ioctl_no_data_buf *)buf; 904 TW_TIME end_time; 905 TW_UINT32 timeout = 60; 906 TW_UINT32 data_buf_size_adjusted; 907 struct tw_cl_req_packet *req_pkt; 908 struct tw_cl_passthru_req_packet *pt_req; 909 TW_INT32 error; 910 911 tw_osli_dbg_dprintf(5, sc, "ioctl: passthru"); 912 913 if ((req = tw_osli_get_request(sc)) == NULL) 914 return(EBUSY); 915 916 req->req_handle.osl_req_ctxt = req; 917 req->orig_req = buf; 918 req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU; 919 920 req_pkt = &(req->req_pkt); 921 req_pkt->status = 0; 922 req_pkt->tw_osl_callback = tw_osl_complete_passthru; 923 /* Let the Common Layer retry the request on cmd queue full. */ 924 req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY; 925 926 pt_req = &(req_pkt->gen_req_pkt.pt_req); 927 /* 928 * Make sure that the data buffer sent to firmware is a 929 * 512 byte multiple in size. 930 */ 931 data_buf_size_adjusted = 932 (user_buf->driver_pkt.buffer_length + 933 (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1); 934 if ((req->length = data_buf_size_adjusted)) { 935 req->data = kmalloc(data_buf_size_adjusted, 936 TW_OSLI_MALLOC_CLASS, M_WAITOK); 937 /* Copy the payload. */ 938 if ((error = copyin((TW_VOID *)(user_buf->pdata), 939 req->data, 940 user_buf->driver_pkt.buffer_length)) != 0) { 941 tw_osli_printf(sc, "error = %d", 942 TW_CL_SEVERITY_ERROR_STRING, 943 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 944 0x2017, 945 "Could not copyin fw_passthru data_buf", 946 error); 947 goto fw_passthru_err; 948 } 949 pt_req->sgl_entries = 1; /* will be updated during mapping */ 950 req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN | 951 TW_OSLI_REQ_FLAGS_DATA_OUT); 952 } else 953 pt_req->sgl_entries = 0; /* no payload */ 954 955 pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt)); 956 pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet); 957 958 if ((error = tw_osli_map_request(req))) 959 goto fw_passthru_err; 960 961 end_time = tw_osl_get_local_time() + timeout; 962 while (req->state != TW_OSLI_REQ_STATE_COMPLETE) { 963 lockmgr(req->ioctl_wake_timeout_lock, LK_EXCLUSIVE); 964 req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING; 965 966 error = lksleep(req, req->ioctl_wake_timeout_lock, 0, 967 "twa_passthru", timeout*hz); 968 lockmgr(req->ioctl_wake_timeout_lock, LK_RELEASE); 969 970 if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING)) 971 error = 0; 972 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING; 973 974 if (! error) { 975 if (((error = req->error_code)) || 976 ((error = (req->state != 977 TW_OSLI_REQ_STATE_COMPLETE))) || 978 ((error = req_pkt->status))) 979 goto fw_passthru_err; 980 break; 981 } 982 983 if (req_pkt->status) { 984 error = req_pkt->status; 985 goto fw_passthru_err; 986 } 987 988 if (error == EWOULDBLOCK) { 989 /* Time out! */ 990 if ((!(req->error_code)) && 991 (req->state == TW_OSLI_REQ_STATE_COMPLETE) && 992 (!(req_pkt->status)) ) { 993 #ifdef TW_OSL_DEBUG 994 tw_osli_printf(sc, "request = %p", 995 TW_CL_SEVERITY_ERROR_STRING, 996 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 997 0x7777, 998 "FALSE Passthru timeout!", 999 req); 1000 #endif /* TW_OSL_DEBUG */ 1001 error = 0; /* False error */ 1002 break; 1003 } 1004 if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) { 1005 #ifdef TW_OSL_DEBUG 1006 tw_osli_printf(sc, "request = %p", 1007 TW_CL_SEVERITY_ERROR_STRING, 1008 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1009 0x2018, 1010 "Passthru request timed out!", 1011 req); 1012 #else /* TW_OSL_DEBUG */ 1013 device_printf((sc)->bus_dev, "Passthru request timed out!\n"); 1014 #endif /* TW_OSL_DEBUG */ 1015 tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle)); 1016 } 1017 1018 error = 0; 1019 end_time = tw_osl_get_local_time() + timeout; 1020 continue; 1021 /* 1022 * Don't touch req after a reset. It (and any 1023 * associated data) will be 1024 * unmapped by the callback. 1025 */ 1026 } 1027 /* 1028 * Either the request got completed, or we were woken up by a 1029 * signal. Calculate the new timeout, in case it was the latter. 1030 */ 1031 timeout = (end_time - tw_osl_get_local_time()); 1032 } /* End of while loop */ 1033 1034 /* If there was a payload, copy it back. */ 1035 if ((!error) && (req->length)) 1036 if ((error = copyout(req->data, user_buf->pdata, 1037 user_buf->driver_pkt.buffer_length))) 1038 tw_osli_printf(sc, "error = %d", 1039 TW_CL_SEVERITY_ERROR_STRING, 1040 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1041 0x2019, 1042 "Could not copyout fw_passthru data_buf", 1043 error); 1044 1045 fw_passthru_err: 1046 1047 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET) 1048 error = EBUSY; 1049 1050 user_buf->driver_pkt.os_status = error; 1051 /* Free resources. */ 1052 if (req->data) 1053 kfree(req->data, TW_OSLI_MALLOC_CLASS); 1054 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); 1055 return(error); 1056 } 1057 1058 1059 1060 /* 1061 * Function name: tw_osl_complete_passthru 1062 * Description: Called to complete passthru requests. 1063 * 1064 * Input: req_handle -- ptr to request handle 1065 * Output: None 1066 * Return value: None 1067 */ 1068 TW_VOID 1069 tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle) 1070 { 1071 struct tw_osli_req_context *req = req_handle->osl_req_ctxt; 1072 struct tw_cl_req_packet *req_pkt = 1073 (struct tw_cl_req_packet *)(&req->req_pkt); 1074 struct twa_softc *sc = req->ctlr; 1075 1076 tw_osli_dbg_dprintf(5, sc, "entered"); 1077 1078 if (req->state != TW_OSLI_REQ_STATE_BUSY) { 1079 tw_osli_printf(sc, "request = %p, status = %d", 1080 TW_CL_SEVERITY_ERROR_STRING, 1081 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1082 0x201B, 1083 "Unposted command completed!!", 1084 req, req->state); 1085 } 1086 1087 /* 1088 * Remove request from the busy queue. Just mark it complete. 1089 * There's no need to move it into the complete queue as we are 1090 * going to be done with it right now. 1091 */ 1092 req->state = TW_OSLI_REQ_STATE_COMPLETE; 1093 tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q); 1094 1095 tw_osli_unmap_request(req); 1096 1097 /* 1098 * Don't do a wake up if there was an error even before the request 1099 * was sent down to the Common Layer, and we hadn't gotten an 1100 * EINPROGRESS. The request originator will then be returned an 1101 * error, and he can do the clean-up. 1102 */ 1103 if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS))) 1104 return; 1105 1106 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1107 if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) { 1108 /* Wake up the sleeping command originator. */ 1109 tw_osli_dbg_dprintf(5, sc, 1110 "Waking up originator of request %p", req); 1111 req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING; 1112 wakeup_one(req); 1113 } else { 1114 /* 1115 * If the request completed even before mtx_sleep 1116 * was called, simply return. 1117 */ 1118 if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED) 1119 return; 1120 1121 if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET) 1122 return; 1123 1124 tw_osli_printf(sc, "request = %p", 1125 TW_CL_SEVERITY_ERROR_STRING, 1126 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1127 0x201C, 1128 "Passthru callback called, " 1129 "and caller not sleeping", 1130 req); 1131 } 1132 } else { 1133 tw_osli_printf(sc, "request = %p", 1134 TW_CL_SEVERITY_ERROR_STRING, 1135 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1136 0x201D, 1137 "Passthru callback called for non-passthru request", 1138 req); 1139 } 1140 } 1141 1142 1143 1144 /* 1145 * Function name: tw_osli_get_request 1146 * Description: Gets a request pkt from the free queue. 1147 * 1148 * Input: sc -- ptr to OSL internal ctlr context 1149 * Output: None 1150 * Return value: ptr to request pkt -- success 1151 * NULL -- failure 1152 */ 1153 struct tw_osli_req_context * 1154 tw_osli_get_request(struct twa_softc *sc) 1155 { 1156 struct tw_osli_req_context *req; 1157 1158 tw_osli_dbg_dprintf(4, sc, "entered"); 1159 1160 /* Get a free request packet. */ 1161 req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q); 1162 1163 /* Initialize some fields to their defaults. */ 1164 if (req) { 1165 req->req_handle.osl_req_ctxt = NULL; 1166 req->req_handle.cl_req_ctxt = NULL; 1167 req->req_handle.is_io = 0; 1168 req->data = NULL; 1169 req->length = 0; 1170 req->deadline = 0; 1171 req->real_data = NULL; 1172 req->real_length = 0; 1173 req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */ 1174 req->flags = 0; 1175 req->error_code = 0; 1176 req->orig_req = NULL; 1177 1178 bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet)); 1179 1180 } 1181 return(req); 1182 } 1183 1184 1185 1186 /* 1187 * Function name: twa_map_load_data_callback 1188 * Description: Callback of bus_dmamap_load for the buffer associated 1189 * with data. Updates the cmd pkt (size/sgl_entries 1190 * fields, as applicable) to reflect the number of sg 1191 * elements. 1192 * 1193 * Input: arg -- ptr to OSL internal request context 1194 * segs -- ptr to a list of segment descriptors 1195 * nsegments--# of segments 1196 * error -- 0 if no errors encountered before callback, 1197 * non-zero if errors were encountered 1198 * Output: None 1199 * Return value: None 1200 */ 1201 static TW_VOID 1202 twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs, 1203 TW_INT32 nsegments, TW_INT32 error) 1204 { 1205 struct tw_osli_req_context *req = 1206 (struct tw_osli_req_context *)arg; 1207 struct twa_softc *sc = req->ctlr; 1208 struct tw_cl_req_packet *req_pkt = &(req->req_pkt); 1209 1210 tw_osli_dbg_dprintf(10, sc, "entered"); 1211 1212 if (error == EINVAL) { 1213 req->error_code = error; 1214 return; 1215 } 1216 1217 /* Mark the request as currently being processed. */ 1218 req->state = TW_OSLI_REQ_STATE_BUSY; 1219 /* Move the request into the busy queue. */ 1220 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q); 1221 1222 req->flags |= TW_OSLI_REQ_FLAGS_MAPPED; 1223 1224 if (error == EFBIG) { 1225 req->error_code = error; 1226 goto out; 1227 } 1228 1229 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1230 struct tw_cl_passthru_req_packet *pt_req; 1231 1232 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) 1233 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, 1234 BUS_DMASYNC_PREREAD); 1235 1236 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { 1237 /* 1238 * If we're using an alignment buffer, and we're 1239 * writing data, copy the real data out. 1240 */ 1241 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) 1242 bcopy(req->real_data, req->data, req->real_length); 1243 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, 1244 BUS_DMASYNC_PREWRITE); 1245 } 1246 1247 pt_req = &(req_pkt->gen_req_pkt.pt_req); 1248 pt_req->sg_list = (TW_UINT8 *)segs; 1249 pt_req->sgl_entries += (nsegments - 1); 1250 error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt, 1251 &(req->req_handle)); 1252 } else { 1253 struct tw_cl_scsi_req_packet *scsi_req; 1254 1255 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) 1256 bus_dmamap_sync(sc->dma_tag, req->dma_map, 1257 BUS_DMASYNC_PREREAD); 1258 1259 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { 1260 /* 1261 * If we're using an alignment buffer, and we're 1262 * writing data, copy the real data out. 1263 */ 1264 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) 1265 bcopy(req->real_data, req->data, req->real_length); 1266 bus_dmamap_sync(sc->dma_tag, req->dma_map, 1267 BUS_DMASYNC_PREWRITE); 1268 } 1269 1270 scsi_req = &(req_pkt->gen_req_pkt.scsi_req); 1271 scsi_req->sg_list = (TW_UINT8 *)segs; 1272 scsi_req->sgl_entries += (nsegments - 1); 1273 error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt, 1274 &(req->req_handle)); 1275 } 1276 1277 out: 1278 if (error) { 1279 req->error_code = error; 1280 req_pkt->tw_osl_callback(&(req->req_handle)); 1281 /* 1282 * If the caller had been returned EINPROGRESS, and he has 1283 * registered a callback for handling completion, the callback 1284 * will never get called because we were unable to submit the 1285 * request. So, free up the request right here. 1286 */ 1287 if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS) 1288 tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); 1289 } 1290 } 1291 1292 1293 1294 /* 1295 * Function name: twa_map_load_callback 1296 * Description: Callback of bus_dmamap_load for the buffer associated 1297 * with a cmd pkt. 1298 * 1299 * Input: arg -- ptr to variable to hold phys addr 1300 * segs -- ptr to a list of segment descriptors 1301 * nsegments--# of segments 1302 * error -- 0 if no errors encountered before callback, 1303 * non-zero if errors were encountered 1304 * Output: None 1305 * Return value: None 1306 */ 1307 static TW_VOID 1308 twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs, 1309 TW_INT32 nsegments, TW_INT32 error) 1310 { 1311 *((bus_addr_t *)arg) = segs[0].ds_addr; 1312 } 1313 1314 1315 1316 /* 1317 * Function name: tw_osli_map_request 1318 * Description: Maps a cmd pkt and data associated with it, into 1319 * DMA'able memory. 1320 * 1321 * Input: req -- ptr to request pkt 1322 * Output: None 1323 * Return value: 0 -- success 1324 * non-zero-- failure 1325 */ 1326 TW_INT32 1327 tw_osli_map_request(struct tw_osli_req_context *req) 1328 { 1329 struct twa_softc *sc = req->ctlr; 1330 TW_INT32 error = 0; 1331 1332 tw_osli_dbg_dprintf(10, sc, "entered"); 1333 1334 /* If the command involves data, map that too. */ 1335 if (req->data != NULL) { 1336 /* 1337 * It's sufficient for the data pointer to be 4-byte aligned 1338 * to work with 9000. However, if 4-byte aligned addresses 1339 * are passed to bus_dmamap_load, we can get back sg elements 1340 * that are not 512-byte multiples in size. So, we will let 1341 * only those buffers that are 512-byte aligned to pass 1342 * through, and bounce the rest, so as to make sure that we 1343 * always get back sg elements that are 512-byte multiples 1344 * in size. 1345 */ 1346 if (((vm_offset_t)req->data % sc->sg_size_factor) || 1347 (req->length % sc->sg_size_factor)) { 1348 req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED; 1349 /* Save original data pointer and length. */ 1350 req->real_data = req->data; 1351 req->real_length = req->length; 1352 req->length = (req->length + 1353 (sc->sg_size_factor - 1)) & 1354 ~(sc->sg_size_factor - 1); 1355 req->data = kmalloc(req->length, TW_OSLI_MALLOC_CLASS, 1356 M_NOWAIT); 1357 if (req->data == NULL) { 1358 tw_osli_printf(sc, "error = %d", 1359 TW_CL_SEVERITY_ERROR_STRING, 1360 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1361 0x201E, 1362 "Failed to allocate memory " 1363 "for bounce buffer", 1364 ENOMEM); 1365 /* Restore original data pointer and length. */ 1366 req->data = req->real_data; 1367 req->length = req->real_length; 1368 return(ENOMEM); 1369 } 1370 } 1371 1372 /* 1373 * Map the data buffer into bus space and build the SG list. 1374 */ 1375 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1376 /* Lock against multiple simultaneous ioctl calls. */ 1377 spin_lock(sc->io_lock); 1378 error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map, 1379 req->data, req->length, 1380 twa_map_load_data_callback, req, 1381 BUS_DMA_WAITOK); 1382 spin_unlock(sc->io_lock); 1383 } else { 1384 /* 1385 * There's only one CAM I/O thread running at a time. 1386 * So, there's no need to hold the io_lock. 1387 */ 1388 error = bus_dmamap_load(sc->dma_tag, req->dma_map, 1389 req->data, req->length, 1390 twa_map_load_data_callback, req, 1391 BUS_DMA_WAITOK); 1392 } 1393 1394 if (!error) 1395 error = req->error_code; 1396 else { 1397 if (error == EINPROGRESS) { 1398 /* 1399 * Specifying sc->io_lock as the lockfuncarg 1400 * in ...tag_create should protect the access 1401 * of ...FLAGS_MAPPED from the callback. 1402 */ 1403 spin_lock(sc->io_lock); 1404 if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) 1405 req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS; 1406 tw_osli_disallow_new_requests(sc, &(req->req_handle)); 1407 spin_unlock(sc->io_lock); 1408 error = 0; 1409 } else { 1410 tw_osli_printf(sc, "error = %d", 1411 TW_CL_SEVERITY_ERROR_STRING, 1412 TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 1413 0x9999, 1414 "Failed to map DMA memory " 1415 "for I/O request", 1416 error); 1417 req->flags |= TW_OSLI_REQ_FLAGS_FAILED; 1418 /* Free alignment buffer if it was used. */ 1419 if (req->flags & 1420 TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) { 1421 kfree(req->data, TW_OSLI_MALLOC_CLASS); 1422 /* 1423 * Restore original data pointer 1424 * and length. 1425 */ 1426 req->data = req->real_data; 1427 req->length = req->real_length; 1428 } 1429 } 1430 } 1431 1432 } else { 1433 /* Mark the request as currently being processed. */ 1434 req->state = TW_OSLI_REQ_STATE_BUSY; 1435 /* Move the request into the busy queue. */ 1436 tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q); 1437 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) 1438 error = tw_cl_fw_passthru(&sc->ctlr_handle, 1439 &(req->req_pkt), &(req->req_handle)); 1440 else 1441 error = tw_cl_start_io(&sc->ctlr_handle, 1442 &(req->req_pkt), &(req->req_handle)); 1443 if (error) { 1444 req->error_code = error; 1445 req->req_pkt.tw_osl_callback(&(req->req_handle)); 1446 } 1447 } 1448 return(error); 1449 } 1450 1451 1452 1453 /* 1454 * Function name: tw_osli_unmap_request 1455 * Description: Undoes the mapping done by tw_osli_map_request. 1456 * 1457 * Input: req -- ptr to request pkt 1458 * Output: None 1459 * Return value: None 1460 */ 1461 TW_VOID 1462 tw_osli_unmap_request(struct tw_osli_req_context *req) 1463 { 1464 struct twa_softc *sc = req->ctlr; 1465 1466 tw_osli_dbg_dprintf(10, sc, "entered"); 1467 1468 /* If the command involved data, unmap that too. */ 1469 if (req->data != NULL) { 1470 if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { 1471 /* Lock against multiple simultaneous ioctl calls. */ 1472 spin_lock(sc->io_lock); 1473 1474 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) { 1475 bus_dmamap_sync(sc->ioctl_tag, 1476 sc->ioctl_map, BUS_DMASYNC_POSTREAD); 1477 1478 /* 1479 * If we are using a bounce buffer, and we are 1480 * reading data, copy the real data in. 1481 */ 1482 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) 1483 bcopy(req->data, req->real_data, 1484 req->real_length); 1485 } 1486 1487 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) 1488 bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, 1489 BUS_DMASYNC_POSTWRITE); 1490 1491 bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map); 1492 1493 spin_unlock(sc->io_lock); 1494 } else { 1495 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) { 1496 bus_dmamap_sync(sc->dma_tag, 1497 req->dma_map, BUS_DMASYNC_POSTREAD); 1498 1499 /* 1500 * If we are using a bounce buffer, and we are 1501 * reading data, copy the real data in. 1502 */ 1503 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) 1504 bcopy(req->data, req->real_data, 1505 req->real_length); 1506 } 1507 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) 1508 bus_dmamap_sync(sc->dma_tag, req->dma_map, 1509 BUS_DMASYNC_POSTWRITE); 1510 1511 bus_dmamap_unload(sc->dma_tag, req->dma_map); 1512 } 1513 } 1514 1515 /* Free alignment buffer if it was used. */ 1516 if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) { 1517 kfree(req->data, TW_OSLI_MALLOC_CLASS); 1518 /* Restore original data pointer and length. */ 1519 req->data = req->real_data; 1520 req->length = req->real_length; 1521 } 1522 } 1523 1524 1525 1526 #ifdef TW_OSL_DEBUG 1527 1528 TW_VOID twa_report_stats(TW_VOID); 1529 TW_VOID twa_reset_stats(TW_VOID); 1530 TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc); 1531 TW_VOID twa_print_req_info(struct tw_osli_req_context *req); 1532 1533 1534 /* 1535 * Function name: twa_report_stats 1536 * Description: For being called from ddb. Calls functions that print 1537 * OSL and CL internal stats for the controller. 1538 * 1539 * Input: None 1540 * Output: None 1541 * Return value: None 1542 */ 1543 TW_VOID 1544 twa_report_stats(TW_VOID) 1545 { 1546 struct twa_softc *sc; 1547 TW_INT32 i; 1548 1549 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) { 1550 tw_osli_print_ctlr_stats(sc); 1551 tw_cl_print_ctlr_stats(&sc->ctlr_handle); 1552 } 1553 } 1554 1555 1556 1557 /* 1558 * Function name: tw_osli_print_ctlr_stats 1559 * Description: For being called from ddb. Prints OSL controller stats 1560 * 1561 * Input: sc -- ptr to OSL internal controller context 1562 * Output: None 1563 * Return value: None 1564 */ 1565 TW_VOID 1566 tw_osli_print_ctlr_stats(struct twa_softc *sc) 1567 { 1568 twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc); 1569 twa_printf(sc, "OSLq type current max\n"); 1570 twa_printf(sc, "free %04d %04d\n", 1571 sc->q_stats[TW_OSLI_FREE_Q].cur_len, 1572 sc->q_stats[TW_OSLI_FREE_Q].max_len); 1573 twa_printf(sc, "busy %04d %04d\n", 1574 sc->q_stats[TW_OSLI_BUSY_Q].cur_len, 1575 sc->q_stats[TW_OSLI_BUSY_Q].max_len); 1576 } 1577 1578 1579 1580 /* 1581 * Function name: twa_print_req_info 1582 * Description: For being called from ddb. Calls functions that print 1583 * OSL and CL internal details for the request. 1584 * 1585 * Input: req -- ptr to OSL internal request context 1586 * Output: None 1587 * Return value: None 1588 */ 1589 TW_VOID 1590 twa_print_req_info(struct tw_osli_req_context *req) 1591 { 1592 struct twa_softc *sc = req->ctlr; 1593 1594 twa_printf(sc, "OSL details for request:\n"); 1595 twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n" 1596 "data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n" 1597 "state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n" 1598 "next_req = %p, prev_req = %p, dma_map = %p\n", 1599 req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt, 1600 req->data, req->length, req->real_data, req->real_length, 1601 req->state, req->flags, req->error_code, req->orig_req, 1602 req->link.next, req->link.prev, req->dma_map); 1603 tw_cl_print_req_info(&(req->req_handle)); 1604 } 1605 1606 1607 1608 /* 1609 * Function name: twa_reset_stats 1610 * Description: For being called from ddb. 1611 * Resets some OSL controller stats. 1612 * 1613 * Input: None 1614 * Output: None 1615 * Return value: None 1616 */ 1617 TW_VOID 1618 twa_reset_stats(TW_VOID) 1619 { 1620 struct twa_softc *sc; 1621 TW_INT32 i; 1622 1623 for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) { 1624 sc->q_stats[TW_OSLI_FREE_Q].max_len = 0; 1625 sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0; 1626 tw_cl_reset_stats(&sc->ctlr_handle); 1627 } 1628 } 1629 1630 #endif /* TW_OSL_DEBUG */ 1631