/* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
/* $DragonFly: src/sys/dev/disk/isp/isp_pci.c,v 1.3 2003/08/07 21:16:53 dillon Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include "isp_freebsd.h"

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
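
/*
 * Each of these tables is the platform function vector the common code
 * calls through.  Going by the prototypes above and struct ispmdvec (in
 * ispvar.h), the positional initializers are: ISR reader, register read
 * and write methods, mailbox/queue DMA allocator, per-command DMA setup
 * and teardown, two reset hooks (only the second, isp_pci_reset1, is
 * used here), a register dump routine, an optional firmware image
 * pointer (dv_ispfw, possibly filled in at attach time through
 * isp_get_firmware_p), and a default BIU configuration value.  The
 * Fibre Channel tables below simply leave the trailing members zero.
 */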

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN		0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN	0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN	0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN		0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND	0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ	0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER	0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR	0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
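
/*
 * Note that isp_pci_probe() and the pci_get_devid() checks in
 * isp_pci_attach() both key off (device id << 16) | vendor id, which is
 * exactly how the macros above are composed; e.g. a 1020/1040 matches
 * (0x1020 << 16) | 0x1077 == 0x10201077 == PCI_QLOGIC_ISP1020.
 */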

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	 isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}
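
/*
 * The "isp_*" kernel environment variables consulted during attach are
 * per-unit bitmasks: bit N applies the option to isp unit N, so e.g.
 * isp_disable=0x5 (binary 101) would skip configuring units 0 and 2.
 */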

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	quad_t wwn;
	bus_size_t lim;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			/*
			 * But return '0' to preserve HBA numbering.
			 */
			return (0);
		}
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}
	bzero(pcs, sizeof (struct isp_pcisoftc));

	/*
	 * Figure out which we should try first - memory mapping or
	 * i/o mapping?
	 */
#ifdef	__alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif
	bitmap = 0;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	bzero(isp->isp_param, psize);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
#ifdef	ISP_TARGET_MODE
	isp->isp_role = ISP_ROLE_BOTH;
#else
	isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	isp->isp_dev = dev;


	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;

	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}
	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) getenv_int("isp_debug", &isp_debug);
	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, isp_pci_intr,
	    isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

#ifdef	ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				bzero(FCPARAM(isp)->isp_dump_data, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif

	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
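	/*
	 * Chip bring-up below is a three step state machine done under
	 * the lock: isp_reset() must leave the chip in ISP_RESETSTATE,
	 * isp_init() advances it to ISP_INITSTATE, and isp_attach() to
	 * ISP_RUNSTATE.  A SCSI card that stalls at any step is torn
	 * down; a Fibre Channel card is allowed to defer (e.g. no loop
	 * up yet) and is left attached.
	 */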
	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);

	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}
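/*
 * The 23XX parts present a combined 32-bit RISC-to-host status register:
 * the low 16 bits classify the interrupt and, for mailbox and async event
 * interrupts, the high 16 bits already carry outgoing mailbox 0.  That is
 * why isp_pci_rd_isr_2300() below can hand back *mbox0p without doing a
 * separate (debounced) mailbox read the way isp_pci_rd_isr() does.
 */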
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}
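/*
 * On the 1080/1280/12160 family, the SXP (bank 0 and bank 1) and DMA
 * register blocks are multiplexed behind BIU_CONF1, so the accessors
 * below select the proper bank, do the access, and then restore the
 * saved CONF1 value.  Note that the restore is skipped when the saved
 * value was zero; the code treats that as "no banking was in effect".
 */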
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define	ISP_NSEGS	((MAXPHYS / PAGE_SIZE) + 1)
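/*
 * All control spaces live in one contiguous DMA allocation.  imc() above
 * records the bus addresses in the same order that isp_pci_mbxdma() below
 * assigns the kernel virtual addresses:
 *
 *	request queue	ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) bytes
 *	result queue	ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)) bytes
 *	FC scratch	ISP2100_SCRLEN bytes (Fibre Channel cards only)
 */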
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
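/*
 * A mush_t bundles everything a bus_dmamap_load() callback (dma2, and
 * the target mode variants tdma_mk/tdma_mkfc) needs but cannot otherwise
 * receive through its single void * argument: the softc, the CCB, the
 * queue entry being built, the caller's next-index pointer, the output
 * index used for overflow detection, and an error slot the callback
 * fills in, since the callbacks themselves return void.
 */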
#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel) send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;
#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on; the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
		    cto->ct_seg_count, dm_segs[segcnt].ds_addr,
		    dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
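/*
 * isp_pci_dmasetup() below is the mdvec DMA setup entry point.  It picks
 * the mapping callback (dma2 for initiator I/O, tdma_mk/tdma_mkfc for
 * target mode), drives it through bus_dmamap_load() via a mush_t, and
 * then swizzles the finished entry into the shared request queue with
 * the isp_put_* routine selected by rqs_entry_type.
 */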
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}
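/*
 * isp_pci_dumpregs() briefly pauses the RISC processor (HCCR pause and
 * release) so that the SXP and DMA block registers it prints are stable.
 */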
", ISP_READ(isp, BIU_ICR), 1848 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1849 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1850 1851 1852 if (IS_SCSI(isp)) { 1853 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1854 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1855 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1856 ISP_READ(isp, CDMA_FIFO_STS)); 1857 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 1858 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 1859 ISP_READ(isp, DDMA_FIFO_STS)); 1860 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 1861 ISP_READ(isp, SXP_INTERRUPT), 1862 ISP_READ(isp, SXP_GROSS_ERR), 1863 ISP_READ(isp, SXP_PINS_CTRL)); 1864 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 1865 } 1866 printf(" mbox regs: %x %x %x %x %x\n", 1867 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 1868 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 1869 ISP_READ(isp, OUTMAILBOX4)); 1870 printf(" PCI Status Command/Status=%x\n", 1871 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 1872 } 1873