/* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
/* $DragonFly: src/sys/dev/disk/isp/isp_pci.c,v 1.4 2004/03/15 01:10:43 dillon Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include "isp_freebsd.h"

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

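/*
 * These composite ids are laid out as (device id << 16) | vendor id,
 * the same format that pci_get_devid() and the probe routine's switch
 * expression use.
 */
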
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc		pci_isp;
	device_t		pci_dev;
	struct resource *	pci_reg;
	bus_space_tag_t		pci_st;
	bus_space_handle_t	pci_sh;
	void *			ih;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	bus_dmamap_t		*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	quad_t wwn;
	bus_size_t lim;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			/*
			 * But return '0' to preserve HBA numbering.
			 */
			return (0);
		}
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#ifdef __alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif
	bitmap = 0;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
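	/*
	 * The remaining checks cover the Fibre Channel variants, which use
	 * fcparam rather than sdparam and keep their mailbox registers at a
	 * different offset than the parallel SCSI parts.
	 */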
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_WAITOK | M_ZERO);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
#ifdef ISP_TARGET_MODE
	isp->isp_role = ISP_ROLE_BOTH;
#else
	isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	isp->isp_dev = dev;


	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;

	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) getenv_int("isp_debug", &isp_debug);
	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, isp_pci_intr,
	    isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

#ifdef ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				bzero(FCPARAM(isp)->isp_dump_data, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif

	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);

	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

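/*
 * For the 23XX cards, interrupt status is decoded from the 32 bit
 * RISC to host status register (BIU_R2HSTSLO) rather than from the
 * semaphore and outgoing mailbox registers.
 */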
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)

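/*
 * Allocate and map the control spaces: the request queue, the response
 * queue and, for Fibre Channel cards, the scratch area, plus a DMA map
 * per outstanding command.
 */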
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

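/*
 * mush_t is the state handed to the bus_dma callbacks below (dma2 and,
 * for target mode, tdma_mk/tdma_mkfc); MUSHERR_NOQENTRIES is the private
 * "request queue full" error they pass back through it.
 */
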
#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
		    cto->ct_seg_count, dm_segs[segcnt].ds_addr,
		    dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
			datalen -= dm_segs->ds_len;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}