1 /* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */ 2 /* $DragonFly: src/sys/dev/disk/isp/isp_pci.c,v 1.12 2006/12/22 23:26:16 swildner Exp $ */ 3 /* 4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 5 * FreeBSD Version. 6 * 7 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice immediately at the beginning of the file, without modification, 14 * this list of conditions, and the following disclaimer. 15 * 2. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/module.h> 35 #include <sys/bus.h> 36 #include <sys/rman.h> 37 #include <sys/malloc.h> 38 39 #include <bus/pci/pcireg.h> 40 #include <bus/pci/pcivar.h> 41 42 #include "isp_freebsd.h" 43 44 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int); 45 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t); 46 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 47 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t); 48 static int 49 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 50 static int 51 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 52 static int isp_pci_mbxdma(struct ispsoftc *); 53 static int 54 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 55 static void 56 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 57 58 static void isp_pci_reset1(struct ispsoftc *); 59 static void isp_pci_dumpregs(struct ispsoftc *, const char *); 60 61 static struct ispmdvec mdvec = { 62 isp_pci_rd_isr, 63 isp_pci_rd_reg, 64 isp_pci_wr_reg, 65 isp_pci_mbxdma, 66 isp_pci_dmasetup, 67 isp_pci_dmateardown, 68 NULL, 69 isp_pci_reset1, 70 isp_pci_dumpregs, 71 NULL, 72 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 73 }; 74 75 static struct ispmdvec mdvec_1080 = { 76 isp_pci_rd_isr, 77 isp_pci_rd_reg_1080, 78 isp_pci_wr_reg_1080, 79 isp_pci_mbxdma, 80 isp_pci_dmasetup, 81 isp_pci_dmateardown, 82 NULL, 83 isp_pci_reset1, 84 isp_pci_dumpregs, 85 NULL, 86 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 87 }; 88 89 static struct ispmdvec mdvec_12160 = { 90 isp_pci_rd_isr, 91 isp_pci_rd_reg_1080, 92 isp_pci_wr_reg_1080, 93 isp_pci_mbxdma, 94 isp_pci_dmasetup, 95 isp_pci_dmateardown, 96 NULL, 97 isp_pci_reset1, 98 isp_pci_dumpregs, 99 NULL, 100 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 101 }; 102 103 static struct ispmdvec mdvec_2100 = { 104 isp_pci_rd_isr, 105 isp_pci_rd_reg, 
106 isp_pci_wr_reg, 107 isp_pci_mbxdma, 108 isp_pci_dmasetup, 109 isp_pci_dmateardown, 110 NULL, 111 isp_pci_reset1, 112 isp_pci_dumpregs 113 }; 114 115 static struct ispmdvec mdvec_2200 = { 116 isp_pci_rd_isr, 117 isp_pci_rd_reg, 118 isp_pci_wr_reg, 119 isp_pci_mbxdma, 120 isp_pci_dmasetup, 121 isp_pci_dmateardown, 122 NULL, 123 isp_pci_reset1, 124 isp_pci_dumpregs 125 }; 126 127 static struct ispmdvec mdvec_2300 = { 128 isp_pci_rd_isr_2300, 129 isp_pci_rd_reg, 130 isp_pci_wr_reg, 131 isp_pci_mbxdma, 132 isp_pci_dmasetup, 133 isp_pci_dmateardown, 134 NULL, 135 isp_pci_reset1, 136 isp_pci_dumpregs 137 }; 138 139 #ifndef PCIM_CMD_INVEN 140 #define PCIM_CMD_INVEN 0x10 141 #endif 142 #ifndef PCIM_CMD_BUSMASTEREN 143 #define PCIM_CMD_BUSMASTEREN 0x0004 144 #endif 145 #ifndef PCIM_CMD_PERRESPEN 146 #define PCIM_CMD_PERRESPEN 0x0040 147 #endif 148 #ifndef PCIM_CMD_SEREN 149 #define PCIM_CMD_SEREN 0x0100 150 #endif 151 152 #ifndef PCIR_COMMAND 153 #define PCIR_COMMAND 0x04 154 #endif 155 156 #ifndef PCIR_CACHELNSZ 157 #define PCIR_CACHELNSZ 0x0c 158 #endif 159 160 #ifndef PCIR_LATTIMER 161 #define PCIR_LATTIMER 0x0d 162 #endif 163 164 #ifndef PCIR_ROMADDR 165 #define PCIR_ROMADDR 0x30 166 #endif 167 168 #ifndef PCI_VENDOR_QLOGIC 169 #define PCI_VENDOR_QLOGIC 0x1077 170 #endif 171 172 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 173 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 174 #endif 175 176 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 177 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 178 #endif 179 180 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 181 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 182 #endif 183 184 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 185 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 186 #endif 187 188 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 189 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 190 #endif 191 192 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 193 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 194 #endif 195 196 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 197 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 198 #endif 199 
200 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 201 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 202 #endif 203 204 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 205 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 206 #endif 207 208 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 209 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 210 #endif 211 212 #define PCI_QLOGIC_ISP1020 \ 213 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 214 215 #define PCI_QLOGIC_ISP1080 \ 216 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 217 218 #define PCI_QLOGIC_ISP10160 \ 219 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 220 221 #define PCI_QLOGIC_ISP12160 \ 222 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 223 224 #define PCI_QLOGIC_ISP1240 \ 225 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 226 227 #define PCI_QLOGIC_ISP1280 \ 228 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 229 230 #define PCI_QLOGIC_ISP2100 \ 231 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 232 233 #define PCI_QLOGIC_ISP2200 \ 234 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 235 236 #define PCI_QLOGIC_ISP2300 \ 237 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 238 239 #define PCI_QLOGIC_ISP2312 \ 240 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 241 242 /* 243 * Odd case for some AMI raid cards... We need to *not* attach to this. 
244 */ 245 #define AMI_RAID_SUBVENDOR_ID 0x101e 246 247 #define IO_MAP_REG 0x10 248 #define MEM_MAP_REG 0x14 249 250 #define PCI_DFLT_LTNCY 0x40 251 #define PCI_DFLT_LNSZ 0x10 252 253 static int isp_pci_probe (device_t); 254 static int isp_pci_attach (device_t); 255 256 257 struct isp_pcisoftc { 258 struct ispsoftc pci_isp; 259 device_t pci_dev; 260 struct resource * pci_reg; 261 bus_space_tag_t pci_st; 262 bus_space_handle_t pci_sh; 263 void * ih; 264 int16_t pci_poff[_NREG_BLKS]; 265 bus_dma_tag_t dmat; 266 bus_dmamap_t *dmaps; 267 }; 268 ispfwfunc *isp_get_firmware_p = NULL; 269 270 static device_method_t isp_pci_methods[] = { 271 /* Device interface */ 272 DEVMETHOD(device_probe, isp_pci_probe), 273 DEVMETHOD(device_attach, isp_pci_attach), 274 { 0, 0 } 275 }; 276 static void isp_pci_intr(void *); 277 278 static driver_t isp_pci_driver = { 279 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 280 }; 281 static devclass_t isp_devclass; 282 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 283 MODULE_VERSION(isp, 1); 284 285 static int 286 isp_pci_probe(device_t dev) 287 { 288 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 289 case PCI_QLOGIC_ISP1020: 290 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 291 break; 292 case PCI_QLOGIC_ISP1080: 293 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 294 break; 295 case PCI_QLOGIC_ISP1240: 296 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 297 break; 298 case PCI_QLOGIC_ISP1280: 299 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 300 break; 301 case PCI_QLOGIC_ISP10160: 302 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 303 break; 304 case PCI_QLOGIC_ISP12160: 305 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 306 return (ENXIO); 307 } 308 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 309 break; 310 case PCI_QLOGIC_ISP2100: 311 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 312 break; 313 case 
PCI_QLOGIC_ISP2200: 314 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 315 break; 316 case PCI_QLOGIC_ISP2300: 317 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 318 break; 319 case PCI_QLOGIC_ISP2312: 320 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 321 break; 322 default: 323 return (ENXIO); 324 } 325 if (device_get_unit(dev) == 0 && bootverbose) { 326 kprintf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 327 "Core Version %d.%d\n", 328 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 329 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 330 } 331 /* 332 * XXXX: Here is where we might load the f/w module 333 * XXXX: (or increase a reference count to it). 334 */ 335 return (0); 336 } 337 338 static int 339 isp_pci_attach(device_t dev) 340 { 341 struct resource *regs, *irq; 342 int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug; 343 u_int32_t data, cmd, linesz, psize, basetype; 344 struct isp_pcisoftc *pcs; 345 struct ispsoftc *isp = NULL; 346 struct ispmdvec *mdvp; 347 quad_t wwn; 348 bus_size_t lim; 349 350 /* 351 * Figure out if we're supposed to skip this one. 352 */ 353 unit = device_get_unit(dev); 354 if (kgetenv_int("isp_disable", &bitmap)) { 355 if (bitmap & (1 << unit)) { 356 device_printf(dev, "not configuring\n"); 357 /* 358 * But return '0' to preserve HBA numbering. 359 */ 360 return (0); 361 } 362 } 363 364 pcs = kmalloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_WAITOK | M_ZERO); 365 366 /* 367 * Figure out which we should try first - memory mapping or i/o mapping? 
368 */ 369 m1 = PCIM_CMD_PORTEN; 370 m2 = PCIM_CMD_MEMEN; 371 bitmap = 0; 372 if (kgetenv_int("isp_mem_map", &bitmap)) { 373 if (bitmap & (1 << unit)) { 374 m1 = PCIM_CMD_MEMEN; 375 m2 = PCIM_CMD_PORTEN; 376 } 377 } 378 bitmap = 0; 379 if (kgetenv_int("isp_io_map", &bitmap)) { 380 if (bitmap & (1 << unit)) { 381 m1 = PCIM_CMD_PORTEN; 382 m2 = PCIM_CMD_MEMEN; 383 } 384 } 385 386 linesz = PCI_DFLT_LNSZ; 387 irq = regs = NULL; 388 rgd = rtp = iqd = 0; 389 390 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 391 if (cmd & m1) { 392 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 393 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 394 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 395 } 396 if (regs == NULL && (cmd & m2)) { 397 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 398 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 399 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 400 } 401 if (regs == NULL) { 402 device_printf(dev, "unable to map any ports\n"); 403 goto bad; 404 } 405 if (bootverbose) 406 device_printf(dev, "using %s space register mapping\n", 407 (rgd == IO_MAP_REG)? 
"I/O" : "Memory"); 408 pcs->pci_dev = dev; 409 pcs->pci_reg = regs; 410 pcs->pci_st = rman_get_bustag(regs); 411 pcs->pci_sh = rman_get_bushandle(regs); 412 413 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 414 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 415 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 416 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 417 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 418 mdvp = &mdvec; 419 basetype = ISP_HA_SCSI_UNKNOWN; 420 psize = sizeof (sdparam); 421 lim = BUS_SPACE_MAXSIZE_32BIT; 422 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 423 mdvp = &mdvec; 424 basetype = ISP_HA_SCSI_UNKNOWN; 425 psize = sizeof (sdparam); 426 lim = BUS_SPACE_MAXSIZE_24BIT; 427 } 428 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 429 mdvp = &mdvec_1080; 430 basetype = ISP_HA_SCSI_1080; 431 psize = sizeof (sdparam); 432 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 433 ISP1080_DMA_REGS_OFF; 434 } 435 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 436 mdvp = &mdvec_1080; 437 basetype = ISP_HA_SCSI_1240; 438 psize = 2 * sizeof (sdparam); 439 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 440 ISP1080_DMA_REGS_OFF; 441 } 442 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 443 mdvp = &mdvec_1080; 444 basetype = ISP_HA_SCSI_1280; 445 psize = 2 * sizeof (sdparam); 446 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 447 ISP1080_DMA_REGS_OFF; 448 } 449 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 450 mdvp = &mdvec_12160; 451 basetype = ISP_HA_SCSI_10160; 452 psize = sizeof (sdparam); 453 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 454 ISP1080_DMA_REGS_OFF; 455 } 456 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 457 mdvp = &mdvec_12160; 458 basetype = ISP_HA_SCSI_12160; 459 psize = 2 * sizeof (sdparam); 460 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 461 ISP1080_DMA_REGS_OFF; 462 } 463 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 464 mdvp = &mdvec_2100; 465 basetype = 
ISP_HA_FC_2100; 466 psize = sizeof (fcparam); 467 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 468 PCI_MBOX_REGS2100_OFF; 469 if (pci_get_revid(dev) < 3) { 470 /* 471 * XXX: Need to get the actual revision 472 * XXX: number of the 2100 FB. At any rate, 473 * XXX: lower cache line size for early revision 474 * XXX; boards. 475 */ 476 linesz = 1; 477 } 478 } 479 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 480 mdvp = &mdvec_2200; 481 basetype = ISP_HA_FC_2200; 482 psize = sizeof (fcparam); 483 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 484 PCI_MBOX_REGS2100_OFF; 485 } 486 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 487 mdvp = &mdvec_2300; 488 basetype = ISP_HA_FC_2300; 489 psize = sizeof (fcparam); 490 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 491 PCI_MBOX_REGS2300_OFF; 492 } 493 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) { 494 mdvp = &mdvec_2300; 495 basetype = ISP_HA_FC_2312; 496 psize = sizeof (fcparam); 497 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 498 PCI_MBOX_REGS2300_OFF; 499 } 500 isp = &pcs->pci_isp; 501 isp->isp_param = kmalloc(psize, M_DEVBUF, M_WAITOK | M_ZERO); 502 isp->isp_mdvec = mdvp; 503 isp->isp_type = basetype; 504 isp->isp_revision = pci_get_revid(dev); 505 #ifdef ISP_TARGET_MODE 506 isp->isp_role = ISP_ROLE_BOTH; 507 #else 508 isp->isp_role = ISP_DEFAULT_ROLES; 509 #endif 510 isp->isp_dev = dev; 511 512 513 /* 514 * Try and find firmware for this device. 515 */ 516 517 if (isp_get_firmware_p) { 518 int device = (int) pci_get_device(dev); 519 #ifdef ISP_TARGET_MODE 520 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 521 #else 522 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 523 #endif 524 } 525 526 /* 527 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 528 * are set. 
529 */ 530 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 531 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 532 if (IS_2300(isp)) { /* per QLogic errata */ 533 cmd &= ~PCIM_CMD_INVEN; 534 } 535 if (IS_23XX(isp)) { 536 /* 537 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 538 */ 539 isp->isp_touched = 1; 540 541 } 542 pci_write_config(dev, PCIR_COMMAND, cmd, 1); 543 544 /* 545 * Make sure the Cache Line Size register is set sensibly. 546 */ 547 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 548 if (data != linesz) { 549 data = PCI_DFLT_LNSZ; 550 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 551 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 552 } 553 554 /* 555 * Make sure the Latency Timer is sane. 556 */ 557 data = pci_read_config(dev, PCIR_LATTIMER, 1); 558 if (data < PCI_DFLT_LTNCY) { 559 data = PCI_DFLT_LTNCY; 560 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 561 pci_write_config(dev, PCIR_LATTIMER, data, 1); 562 } 563 564 /* 565 * Make sure we've disabled the ROM. 
566 */ 567 data = pci_read_config(dev, PCIR_ROMADDR, 4); 568 data &= ~1; 569 pci_write_config(dev, PCIR_ROMADDR, data, 4); 570 571 iqd = 0; 572 irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 573 1, RF_ACTIVE | RF_SHAREABLE); 574 if (irq == NULL) { 575 device_printf(dev, "could not allocate interrupt\n"); 576 goto bad; 577 } 578 579 if (kgetenv_int("isp_no_fwload", &bitmap)) { 580 if (bitmap & (1 << unit)) 581 isp->isp_confopts |= ISP_CFG_NORELOAD; 582 } 583 if (kgetenv_int("isp_fwload", &bitmap)) { 584 if (bitmap & (1 << unit)) 585 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 586 } 587 if (kgetenv_int("isp_no_nvram", &bitmap)) { 588 if (bitmap & (1 << unit)) 589 isp->isp_confopts |= ISP_CFG_NONVRAM; 590 } 591 if (kgetenv_int("isp_nvram", &bitmap)) { 592 if (bitmap & (1 << unit)) 593 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 594 } 595 if (kgetenv_int("isp_fcduplex", &bitmap)) { 596 if (bitmap & (1 << unit)) 597 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 598 } 599 if (kgetenv_int("isp_no_fcduplex", &bitmap)) { 600 if (bitmap & (1 << unit)) 601 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 602 } 603 if (kgetenv_int("isp_nport", &bitmap)) { 604 if (bitmap & (1 << unit)) 605 isp->isp_confopts |= ISP_CFG_NPORT; 606 } 607 608 /* 609 * Because the resource_*_value functions can neither return 610 * 64 bit integer values, nor can they be directly coerced 611 * to interpret the right hand side of the assignment as 612 * you want them to interpret it, we have to force WWN 613 * hint replacement to specify WWN strings with a leading 614 * 'w' (e..g w50000000aaaa0001). Sigh. 
615 */ 616 if (kgetenv_quad("isp_portwwn", &wwn)) { 617 isp->isp_osinfo.default_port_wwn = wwn; 618 isp->isp_confopts |= ISP_CFG_OWNWWPN; 619 } 620 if (isp->isp_osinfo.default_port_wwn == 0) { 621 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 622 } 623 624 if (kgetenv_quad("isp_nodewwn", &wwn)) { 625 isp->isp_osinfo.default_node_wwn = wwn; 626 isp->isp_confopts |= ISP_CFG_OWNWWNN; 627 } 628 if (isp->isp_osinfo.default_node_wwn == 0) { 629 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 630 } 631 632 isp_debug = 0; 633 (void) kgetenv_int("isp_debug", &isp_debug); 634 if (bus_setup_intr(dev, irq, 0, isp_pci_intr, 635 isp, &pcs->ih, NULL)) { 636 device_printf(dev, "could not setup interrupt\n"); 637 goto bad; 638 } 639 640 #ifdef ISP_FW_CRASH_DUMP 641 bitmap = 0; 642 if (kgetenv_int("isp_fw_dump_enable", &bitmap)) { 643 if (bitmap & (1 << unit) { 644 size_t amt = 0; 645 if (IS_2200(isp)) { 646 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 647 } else if (IS_23XX(isp)) { 648 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 649 } 650 if (amt) { 651 FCPARAM(isp)->isp_dump_data = 652 kmalloc(amt, M_DEVBUF, M_WAITOK); 653 bzero(FCPARAM(isp)->isp_dump_data, amt); 654 } else { 655 device_printf(dev, 656 "f/w crash dumps not supported for card\n"); 657 } 658 } 659 } 660 #endif 661 662 if (IS_2312(isp)) { 663 isp->isp_port = pci_get_function(dev); 664 } 665 666 /* 667 * Set up logging levels. 668 */ 669 if (isp_debug) { 670 isp->isp_dblev = isp_debug; 671 } else { 672 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 673 } 674 if (bootverbose) 675 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 676 677 /* 678 * Make sure we're in reset state. 
679 */ 680 ISP_LOCK(isp); 681 isp_reset(isp); 682 683 if (isp->isp_state != ISP_RESETSTATE) { 684 ISP_UNLOCK(isp); 685 goto bad; 686 } 687 isp_init(isp); 688 if (isp->isp_state != ISP_INITSTATE) { 689 /* If we're a Fibre Channel Card, we allow deferred attach */ 690 if (IS_SCSI(isp)) { 691 isp_uninit(isp); 692 ISP_UNLOCK(isp); 693 goto bad; 694 } 695 } 696 isp_attach(isp); 697 if (isp->isp_state != ISP_RUNSTATE) { 698 /* If we're a Fibre Channel Card, we allow deferred attach */ 699 if (IS_SCSI(isp)) { 700 isp_uninit(isp); 701 ISP_UNLOCK(isp); 702 goto bad; 703 } 704 } 705 /* 706 * XXXX: Here is where we might unload the f/w module 707 * XXXX: (or decrease the reference count to it). 708 */ 709 ISP_UNLOCK(isp); 710 return (0); 711 712 bad: 713 714 if (pcs && pcs->ih) { 715 (void) bus_teardown_intr(dev, irq, pcs->ih); 716 } 717 718 if (irq) { 719 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 720 } 721 722 723 if (regs) { 724 (void) bus_release_resource(dev, rtp, rgd, regs); 725 } 726 727 if (pcs) { 728 if (pcs->pci_isp.isp_param) 729 kfree(pcs->pci_isp.isp_param, M_DEVBUF); 730 kfree(pcs, M_DEVBUF); 731 } 732 733 /* 734 * XXXX: Here is where we might unload the f/w module 735 * XXXX: (or decrease the reference count to it). 
736 */ 737 return (ENXIO); 738 } 739 740 static void 741 isp_pci_intr(void *arg) 742 { 743 struct ispsoftc *isp = arg; 744 u_int16_t isr, sema, mbox; 745 746 ISP_LOCK(isp); 747 isp->isp_intcnt++; 748 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 749 isp->isp_intbogus++; 750 } else { 751 int iok = isp->isp_osinfo.intsok; 752 isp->isp_osinfo.intsok = 0; 753 isp_intr(isp, isr, sema, mbox); 754 isp->isp_osinfo.intsok = iok; 755 } 756 ISP_UNLOCK(isp); 757 } 758 759 760 #define IspVirt2Off(a, x) \ 761 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 762 _BLK_REG_SHFT] + ((x) & 0xff)) 763 764 #define BXR2(pcs, off) \ 765 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 766 #define BXW2(pcs, off, v) \ 767 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 768 769 770 static INLINE int 771 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 772 { 773 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 774 u_int16_t val0, val1; 775 int i = 0; 776 777 do { 778 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 779 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 780 } while (val0 != val1 && ++i < 1000); 781 if (val0 != val1) { 782 return (1); 783 } 784 *rp = val0; 785 return (0); 786 } 787 788 static int 789 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 790 u_int16_t *semap, u_int16_t *mbp) 791 { 792 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 793 u_int16_t isr, sema; 794 795 if (IS_2100(isp)) { 796 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 797 return (0); 798 } 799 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 800 return (0); 801 } 802 } else { 803 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 804 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 805 } 806 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 807 isr &= INT_PENDING_MASK(isp); 808 sema &= BIU_SEMA_LOCK; 809 if (isr == 0 && sema == 0) { 810 return (0); 811 } 812 *isrp = isr; 813 if ((*semap = sema) != 0) { 814 if (IS_2100(isp)) { 815 if 
(isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 816 return (0); 817 } 818 } else { 819 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 820 } 821 } 822 return (1); 823 } 824 825 static int 826 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 827 u_int16_t *semap, u_int16_t *mbox0p) 828 { 829 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 830 u_int32_t r2hisr; 831 832 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 833 *isrp = 0; 834 return (0); 835 } 836 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 837 IspVirt2Off(pcs, BIU_R2HSTSLO)); 838 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 839 if ((r2hisr & BIU_R2HST_INTR) == 0) { 840 *isrp = 0; 841 return (0); 842 } 843 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 844 case ISPR2HST_ROM_MBX_OK: 845 case ISPR2HST_ROM_MBX_FAIL: 846 case ISPR2HST_MBX_OK: 847 case ISPR2HST_MBX_FAIL: 848 case ISPR2HST_ASYNC_EVENT: 849 *isrp = r2hisr & 0xffff; 850 *mbox0p = (r2hisr >> 16); 851 *semap = 1; 852 return (1); 853 case ISPR2HST_RIO_16: 854 *isrp = r2hisr & 0xffff; 855 *mbox0p = ASYNC_RIO1; 856 *semap = 1; 857 return (1); 858 case ISPR2HST_FPOST: 859 *isrp = r2hisr & 0xffff; 860 *mbox0p = ASYNC_CMD_CMPLT; 861 *semap = 1; 862 return (1); 863 case ISPR2HST_FPOST_CTIO: 864 *isrp = r2hisr & 0xffff; 865 *mbox0p = ASYNC_CTIO_DONE; 866 *semap = 1; 867 return (1); 868 case ISPR2HST_RSPQ_UPDATE: 869 *isrp = r2hisr & 0xffff; 870 *mbox0p = 0; 871 *semap = 0; 872 return (1); 873 default: 874 return (0); 875 } 876 } 877 878 static u_int16_t 879 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 880 { 881 u_int16_t rv; 882 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 883 int oldconf = 0; 884 885 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 886 /* 887 * We will assume that someone has paused the RISC processor. 
888 */ 889 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 890 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 891 oldconf | BIU_PCI_CONF1_SXP); 892 } 893 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 894 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 895 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 896 } 897 return (rv); 898 } 899 900 static void 901 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 902 { 903 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 904 int oldconf = 0; 905 906 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 907 /* 908 * We will assume that someone has paused the RISC processor. 909 */ 910 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 911 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 912 oldconf | BIU_PCI_CONF1_SXP); 913 } 914 BXW2(pcs, IspVirt2Off(isp, regoff), val); 915 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 916 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 917 } 918 } 919 920 static u_int16_t 921 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 922 { 923 u_int16_t rv, oc = 0; 924 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 925 926 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 927 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 928 u_int16_t tc; 929 /* 930 * We will assume that someone has paused the RISC processor. 
931 */ 932 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 933 tc = oc & ~BIU_PCI1080_CONF1_DMA; 934 if (regoff & SXP_BANK1_SELECT) 935 tc |= BIU_PCI1080_CONF1_SXP1; 936 else 937 tc |= BIU_PCI1080_CONF1_SXP0; 938 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 939 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 940 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 941 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 942 oc | BIU_PCI1080_CONF1_DMA); 943 } 944 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 945 if (oc) { 946 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 947 } 948 return (rv); 949 } 950 951 static void 952 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 953 { 954 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 955 int oc = 0; 956 957 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 958 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 959 u_int16_t tc; 960 /* 961 * We will assume that someone has paused the RISC processor. 962 */ 963 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 964 tc = oc & ~BIU_PCI1080_CONF1_DMA; 965 if (regoff & SXP_BANK1_SELECT) 966 tc |= BIU_PCI1080_CONF1_SXP1; 967 else 968 tc |= BIU_PCI1080_CONF1_SXP0; 969 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 970 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 971 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 972 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 973 oc | BIU_PCI1080_CONF1_DMA); 974 } 975 BXW2(pcs, IspVirt2Off(isp, regoff), val); 976 if (oc) { 977 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 978 } 979 } 980 981 982 struct imush { 983 struct ispsoftc *isp; 984 int error; 985 }; 986 987 static void imc(void *, bus_dma_segment_t *, int, int); 988 989 static void 990 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 991 { 992 struct imush *imushp = (struct imush *) arg; 993 if (error) { 994 imushp->error = error; 995 } else { 996 struct ispsoftc *isp =imushp->isp; 997 bus_addr_t addr = segs->ds_addr; 998 999 isp->isp_rquest_dma = addr; 1000 addr += 
ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1001 isp->isp_result_dma = addr; 1002 if (IS_FC(isp)) { 1003 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1004 FCPARAM(isp)->isp_scdma = addr; 1005 } 1006 } 1007 } 1008 1009 /* 1010 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1011 */ 1012 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1013 1014 static int 1015 isp_pci_mbxdma(struct ispsoftc *isp) 1016 { 1017 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1018 caddr_t base; 1019 u_int32_t len; 1020 int i, error, ns; 1021 bus_size_t alim, slim; 1022 struct imush im; 1023 1024 /* 1025 * Already been here? If so, leave... 1026 */ 1027 if (isp->isp_rquest) { 1028 return (0); 1029 } 1030 1031 #ifdef ISP_DAC_SUPPORTED 1032 alim = BUS_SPACE_UNRESTRICTED; 1033 #else 1034 alim = BUS_SPACE_MAXADDR_32BIT; 1035 #endif 1036 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1037 slim = BUS_SPACE_MAXADDR_32BIT; 1038 } else { 1039 slim = BUS_SPACE_MAXADDR_24BIT; 1040 } 1041 1042 ISP_UNLOCK(isp); 1043 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim, 1044 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) { 1045 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1046 ISP_LOCK(isp); 1047 return(1); 1048 } 1049 1050 1051 len = sizeof (XS_T **) * isp->isp_maxcmds; 1052 isp->isp_xflist = (XS_T **) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1053 if (isp->isp_xflist == NULL) { 1054 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1055 ISP_LOCK(isp); 1056 return (1); 1057 } 1058 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1059 pcs->dmaps = (bus_dmamap_t *) kmalloc(len, M_DEVBUF, M_WAITOK); 1060 if (pcs->dmaps == NULL) { 1061 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1062 kfree(isp->isp_xflist, M_DEVBUF); 1063 ISP_LOCK(isp); 1064 return (1); 1065 } 1066 1067 /* 1068 * Allocate and map the request, result queues, plus FC scratch area. 
1069 */ 1070 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1071 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1072 if (IS_FC(isp)) { 1073 len += ISP2100_SCRLEN; 1074 } 1075 1076 ns = (len / PAGE_SIZE) + 1; 1077 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim, 1078 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1079 isp_prt(isp, ISP_LOGERR, 1080 "cannot create a dma tag for control spaces"); 1081 kfree(pcs->dmaps, M_DEVBUF); 1082 kfree(isp->isp_xflist, M_DEVBUF); 1083 ISP_LOCK(isp); 1084 return (1); 1085 } 1086 1087 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1088 &isp->isp_cdmap) != 0) { 1089 isp_prt(isp, ISP_LOGERR, 1090 "cannot allocate %d bytes of CCB memory", len); 1091 bus_dma_tag_destroy(isp->isp_cdmat); 1092 kfree(isp->isp_xflist, M_DEVBUF); 1093 kfree(pcs->dmaps, M_DEVBUF); 1094 ISP_LOCK(isp); 1095 return (1); 1096 } 1097 1098 for (i = 0; i < isp->isp_maxcmds; i++) { 1099 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1100 if (error) { 1101 isp_prt(isp, ISP_LOGERR, 1102 "error %d creating per-cmd DMA maps", error); 1103 while (--i >= 0) { 1104 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1105 } 1106 goto bad; 1107 } 1108 } 1109 1110 im.isp = isp; 1111 im.error = 0; 1112 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1113 if (im.error) { 1114 isp_prt(isp, ISP_LOGERR, 1115 "error %d loading dma map for control areas", im.error); 1116 goto bad; 1117 } 1118 1119 isp->isp_rquest = base; 1120 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1121 isp->isp_result = base; 1122 if (IS_FC(isp)) { 1123 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1124 FCPARAM(isp)->isp_scratch = base; 1125 } 1126 ISP_LOCK(isp); 1127 return (0); 1128 1129 bad: 1130 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1131 bus_dma_tag_destroy(isp->isp_cdmat); 1132 kfree(isp->isp_xflist, M_DEVBUF); 1133 kfree(pcs->dmaps, M_DEVBUF); 1134 ISP_LOCK(isp); 1135 isp->isp_rquest = NULL; 1136 return (1); 1137 } 
1138 1139 typedef struct { 1140 struct ispsoftc *isp; 1141 void *cmd_token; 1142 void *rq; 1143 u_int16_t *nxtip; 1144 u_int16_t optr; 1145 u_int error; 1146 } mush_t; 1147 1148 #define MUSHERR_NOQENTRIES -2 1149 1150 #ifdef ISP_TARGET_MODE 1151 /* 1152 * We need to handle DMA for target mode differently from initiator mode. 1153 * 1154 * DMA mapping and construction and submission of CTIO Request Entries 1155 * and rendevous for completion are very tightly coupled because we start 1156 * out by knowing (per platform) how much data we have to move, but we 1157 * don't know, up front, how many DMA mapping segments will have to be used 1158 * cover that data, so we don't know how many CTIO Request Entries we 1159 * will end up using. Further, for performance reasons we may want to 1160 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1161 * 1162 * The standard vector still goes through isp_pci_dmasetup, but the callback 1163 * for the DMA mapping routines comes here instead with the whole transfer 1164 * mapped and a pointer to a partially filled in already allocated request 1165 * queue entry. We finish the job. 
1166 */ 1167 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1168 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1169 1170 #define STATUS_WITH_DATA 1 1171 1172 static void 1173 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1174 { 1175 mush_t *mp; 1176 struct ccb_scsiio *csio; 1177 struct ispsoftc *isp; 1178 struct isp_pcisoftc *pcs; 1179 bus_dmamap_t *dp; 1180 ct_entry_t *cto, *qe; 1181 u_int8_t scsi_status; 1182 u_int16_t curi, nxti, handle; 1183 u_int32_t sflags; 1184 int32_t resid; 1185 int nth_ctio, nctios, send_status; 1186 1187 mp = (mush_t *) arg; 1188 if (error) { 1189 mp->error = error; 1190 return; 1191 } 1192 1193 isp = mp->isp; 1194 csio = mp->cmd_token; 1195 cto = mp->rq; 1196 curi = isp->isp_reqidx; 1197 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1198 1199 cto->ct_xfrlen = 0; 1200 cto->ct_seg_count = 0; 1201 cto->ct_header.rqs_entry_count = 1; 1202 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1203 1204 if (nseg == 0) { 1205 cto->ct_header.rqs_seqno = 1; 1206 isp_prt(isp, ISP_LOGTDEBUG1, 1207 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1208 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1209 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1210 cto->ct_scsi_status, cto->ct_resid); 1211 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1212 isp_put_ctio(isp, cto, qe); 1213 return; 1214 } 1215 1216 nctios = nseg / ISP_RQDSEG; 1217 if (nseg % ISP_RQDSEG) { 1218 nctios++; 1219 } 1220 1221 /* 1222 * Save syshandle, and potentially any SCSI status, which we'll 1223 * reinsert on the last CTIO we're going to send. 1224 */ 1225 1226 handle = cto->ct_syshandle; 1227 cto->ct_syshandle = 0; 1228 cto->ct_header.rqs_seqno = 0; 1229 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1230 1231 if (send_status) { 1232 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1233 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1234 /* 1235 * Preserve residual. 
1236 */ 1237 resid = cto->ct_resid; 1238 1239 /* 1240 * Save actual SCSI status. 1241 */ 1242 scsi_status = cto->ct_scsi_status; 1243 1244 #ifndef STATUS_WITH_DATA 1245 sflags |= CT_NO_DATA; 1246 /* 1247 * We can't do a status at the same time as a data CTIO, so 1248 * we need to synthesize an extra CTIO at this level. 1249 */ 1250 nctios++; 1251 #endif 1252 } else { 1253 sflags = scsi_status = resid = 0; 1254 } 1255 1256 cto->ct_resid = 0; 1257 cto->ct_scsi_status = 0; 1258 1259 pcs = (struct isp_pcisoftc *)isp; 1260 dp = &pcs->dmaps[isp_handle_index(handle)]; 1261 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1262 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1263 } else { 1264 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1265 } 1266 1267 nxti = *mp->nxtip; 1268 1269 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1270 int seglim; 1271 1272 seglim = nseg; 1273 if (seglim) { 1274 int seg; 1275 1276 if (seglim > ISP_RQDSEG) 1277 seglim = ISP_RQDSEG; 1278 1279 for (seg = 0; seg < seglim; seg++, nseg--) { 1280 /* 1281 * Unlike normal initiator commands, we don't 1282 * do any swizzling here. 1283 */ 1284 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1285 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1286 cto->ct_xfrlen += dm_segs->ds_len; 1287 dm_segs++; 1288 } 1289 cto->ct_seg_count = seg; 1290 } else { 1291 /* 1292 * This case should only happen when we're sending an 1293 * extra CTIO with final status. 1294 */ 1295 if (send_status == 0) { 1296 isp_prt(isp, ISP_LOGWARN, 1297 "tdma_mk ran out of segments"); 1298 mp->error = EINVAL; 1299 return; 1300 } 1301 } 1302 1303 /* 1304 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1305 * ct_tagtype, and ct_timeout have been carried over 1306 * unchanged from what our caller had set. 1307 * 1308 * The dataseg fields and the seg_count fields we just got 1309 * through setting. The data direction we've preserved all 1310 * along and only clear it if we're now sending status. 
1311 */ 1312 1313 if (nth_ctio == nctios - 1) { 1314 /* 1315 * We're the last in a sequence of CTIOs, so mark 1316 * this CTIO and save the handle to the CCB such that 1317 * when this CTIO completes we can free dma resources 1318 * and do whatever else we need to do to finish the 1319 * rest of the command. We *don't* give this to the 1320 * firmware to work on- the caller will do that. 1321 */ 1322 1323 cto->ct_syshandle = handle; 1324 cto->ct_header.rqs_seqno = 1; 1325 1326 if (send_status) { 1327 cto->ct_scsi_status = scsi_status; 1328 cto->ct_flags |= sflags; 1329 cto->ct_resid = resid; 1330 } 1331 if (send_status) { 1332 isp_prt(isp, ISP_LOGTDEBUG1, 1333 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1334 "scsi status %x resid %d", 1335 cto->ct_fwhandle, csio->ccb_h.target_lun, 1336 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1337 cto->ct_scsi_status, cto->ct_resid); 1338 } else { 1339 isp_prt(isp, ISP_LOGTDEBUG1, 1340 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1341 cto->ct_fwhandle, csio->ccb_h.target_lun, 1342 cto->ct_iid, cto->ct_tag_val, 1343 cto->ct_flags); 1344 } 1345 isp_put_ctio(isp, cto, qe); 1346 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1347 if (nctios > 1) { 1348 MEMORYBARRIER(isp, SYNC_REQUEST, 1349 curi, QENTRY_LEN); 1350 } 1351 } else { 1352 ct_entry_t *oqe = qe; 1353 1354 /* 1355 * Make sure syshandle fields are clean 1356 */ 1357 cto->ct_syshandle = 0; 1358 cto->ct_header.rqs_seqno = 0; 1359 1360 isp_prt(isp, ISP_LOGTDEBUG1, 1361 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1362 cto->ct_fwhandle, csio->ccb_h.target_lun, 1363 cto->ct_iid, cto->ct_flags); 1364 1365 /* 1366 * Get a new CTIO 1367 */ 1368 qe = (ct_entry_t *) 1369 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1370 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1371 if (nxti == mp->optr) { 1372 isp_prt(isp, ISP_LOGTDEBUG0, 1373 "Queue Overflow in tdma_mk"); 1374 mp->error = MUSHERR_NOQENTRIES; 1375 return; 1376 } 1377 1378 /* 1379 * Now that we're done with the old CTIO, 1380 * flush it 
out to the request queue. 1381 */ 1382 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1383 isp_put_ctio(isp, cto, oqe); 1384 if (nth_ctio != 0) { 1385 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1386 QENTRY_LEN); 1387 } 1388 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1389 1390 /* 1391 * Reset some fields in the CTIO so we can reuse 1392 * for the next one we'll flush to the request 1393 * queue. 1394 */ 1395 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1396 cto->ct_header.rqs_entry_count = 1; 1397 cto->ct_header.rqs_flags = 0; 1398 cto->ct_status = 0; 1399 cto->ct_scsi_status = 0; 1400 cto->ct_xfrlen = 0; 1401 cto->ct_resid = 0; 1402 cto->ct_seg_count = 0; 1403 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1404 } 1405 } 1406 *mp->nxtip = nxti; 1407 } 1408 1409 /* 1410 * We don't have to do multiple CTIOs here. Instead, we can just do 1411 * continuation segments as needed. This greatly simplifies the code 1412 * improves performance. 1413 */ 1414 1415 static void 1416 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1417 { 1418 mush_t *mp; 1419 struct ccb_scsiio *csio; 1420 struct ispsoftc *isp; 1421 ct2_entry_t *cto, *qe; 1422 u_int16_t curi, nxti; 1423 int segcnt; 1424 1425 mp = (mush_t *) arg; 1426 if (error) { 1427 mp->error = error; 1428 return; 1429 } 1430 1431 isp = mp->isp; 1432 csio = mp->cmd_token; 1433 cto = mp->rq; 1434 1435 curi = isp->isp_reqidx; 1436 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1437 1438 if (nseg == 0) { 1439 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1440 isp_prt(isp, ISP_LOGWARN, 1441 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1442 "set (0x%x)", cto->ct_flags); 1443 mp->error = EINVAL; 1444 return; 1445 } 1446 /* 1447 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1448 * flags to NO DATA and clear relative offset flags. 1449 * We preserve the ct_resid and the response area. 
1450 */ 1451 cto->ct_header.rqs_seqno = 1; 1452 cto->ct_seg_count = 0; 1453 cto->ct_reloff = 0; 1454 isp_prt(isp, ISP_LOGTDEBUG1, 1455 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1456 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1457 cto->ct_iid, cto->ct_flags, cto->ct_status, 1458 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1459 isp_put_ctio2(isp, cto, qe); 1460 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1461 return; 1462 } 1463 1464 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1465 isp_prt(isp, ISP_LOGERR, 1466 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1467 "(0x%x)", cto->ct_flags); 1468 mp->error = EINVAL; 1469 return; 1470 } 1471 1472 1473 nxti = *mp->nxtip; 1474 1475 /* 1476 * Set up the CTIO2 data segments. 1477 */ 1478 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1479 cto->ct_seg_count++, segcnt++) { 1480 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1481 dm_segs[segcnt].ds_addr; 1482 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1483 dm_segs[segcnt].ds_len; 1484 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1485 isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d", 1486 cto->ct_seg_count, dm_segs[segcnt].ds_addr, 1487 dm_segs[segcnt].ds_len); 1488 } 1489 1490 while (segcnt < nseg) { 1491 u_int16_t curip; 1492 int seg; 1493 ispcontreq_t local, *crq = &local, *qep; 1494 1495 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1496 curip = nxti; 1497 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1498 if (nxti == mp->optr) { 1499 ISP_UNLOCK(isp); 1500 isp_prt(isp, ISP_LOGTDEBUG0, 1501 "tdma_mkfc: request queue overflow"); 1502 mp->error = MUSHERR_NOQENTRIES; 1503 return; 1504 } 1505 cto->ct_header.rqs_entry_count++; 1506 MEMZERO((void *)crq, sizeof (*crq)); 1507 crq->req_header.rqs_entry_count = 1; 1508 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1509 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1510 segcnt++, seg++) { 1511 
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	/* publish the updated queue index back to the caller */
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

/*
 * bus_dma callback for initiator-mode commands.
 *
 * Fills the data segments of the partially built request entry
 * (ispreq_t or, for FC, ispreqt2_t), spilling extra segments into
 * RQSTYPE_DATASEG continuation entries.  Also performs the PREREAD/
 * PREWRITE sync on the command's DMA map.  Errors are reported
 * through mp->error.
 */
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	/* sync the data buffers before the hardware touches them */
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		/* extended (>12 byte CDB) requests carry no inline segments */
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	/* fill inline segments of the primary request entry */
	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	/* spill remaining segments into continuation entries */
	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
rq->req_header.rqs_entry_count++; 1645 MEMZERO((void *)crq, sizeof (*crq)); 1646 crq->req_header.rqs_entry_count = 1; 1647 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1648 1649 seglim = 0; 1650 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1651 crq->req_dataseg[seglim].ds_base = 1652 dm_segs->ds_addr; 1653 crq->req_dataseg[seglim].ds_count = 1654 dm_segs->ds_len; 1655 rq->req_seg_count++; 1656 dm_segs++; 1657 seglim++; 1658 datalen -= dm_segs->ds_len; 1659 } 1660 isp_put_cont_req(isp, crq, cqe); 1661 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1662 } 1663 *mp->nxtip = nxti; 1664 } 1665 1666 static int 1667 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1668 u_int16_t *nxtip, u_int16_t optr) 1669 { 1670 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1671 ispreq_t *qep; 1672 bus_dmamap_t *dp = NULL; 1673 mush_t mush, *mp; 1674 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1675 1676 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 1677 #ifdef ISP_TARGET_MODE 1678 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1679 if (IS_FC(isp)) { 1680 eptr = tdma_mkfc; 1681 } else { 1682 eptr = tdma_mk; 1683 } 1684 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1685 (csio->dxfer_len == 0)) { 1686 mp = &mush; 1687 mp->isp = isp; 1688 mp->cmd_token = csio; 1689 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1690 mp->nxtip = nxtip; 1691 mp->optr = optr; 1692 mp->error = 0; 1693 (*eptr)(mp, NULL, 0, 0); 1694 goto mbxsync; 1695 } 1696 } else 1697 #endif 1698 eptr = dma2; 1699 1700 1701 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1702 (csio->dxfer_len == 0)) { 1703 rq->req_seg_count = 1; 1704 goto mbxsync; 1705 } 1706 1707 /* 1708 * Do a virtual grapevine step to collect info for 1709 * the callback dma allocation that we have to use... 
1710 */ 1711 mp = &mush; 1712 mp->isp = isp; 1713 mp->cmd_token = csio; 1714 mp->rq = rq; 1715 mp->nxtip = nxtip; 1716 mp->optr = optr; 1717 mp->error = 0; 1718 1719 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1720 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1721 int error; 1722 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1723 crit_enter(); 1724 error = bus_dmamap_load(pcs->dmat, *dp, 1725 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1726 if (error == EINPROGRESS) { 1727 bus_dmamap_unload(pcs->dmat, *dp); 1728 mp->error = EINVAL; 1729 isp_prt(isp, ISP_LOGERR, 1730 "deferred dma allocation not supported"); 1731 } else if (error && mp->error == 0) { 1732 #ifdef DIAGNOSTIC 1733 isp_prt(isp, ISP_LOGERR, 1734 "error %d in dma mapping code", error); 1735 #endif 1736 mp->error = error; 1737 } 1738 crit_exit(); 1739 } else { 1740 /* Pointer to physical buffer */ 1741 struct bus_dma_segment seg; 1742 seg.ds_addr = (bus_addr_t)csio->data_ptr; 1743 seg.ds_len = csio->dxfer_len; 1744 (*eptr)(mp, &seg, 1, 0); 1745 } 1746 } else { 1747 struct bus_dma_segment *segs; 1748 1749 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1750 isp_prt(isp, ISP_LOGERR, 1751 "Physical segment pointers unsupported"); 1752 mp->error = EINVAL; 1753 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1754 isp_prt(isp, ISP_LOGERR, 1755 "Virtual segment addresses unsupported"); 1756 mp->error = EINVAL; 1757 } else { 1758 /* Just use the segments provided */ 1759 segs = (struct bus_dma_segment *) csio->data_ptr; 1760 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1761 } 1762 } 1763 if (mp->error) { 1764 int retval = CMD_COMPLETE; 1765 if (mp->error == MUSHERR_NOQENTRIES) { 1766 retval = CMD_EAGAIN; 1767 } else if (mp->error == EFBIG) { 1768 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1769 } else if (mp->error == EINVAL) { 1770 XS_SETERR(csio, CAM_REQ_INVALID); 1771 } else { 1772 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1773 } 1774 return (retval); 1775 } 1776 mbxsync: 1777 switch 
	    (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

/*
 * Undo DMA state for a completed command: POSTREAD/POSTWRITE sync
 * according to transfer direction, then unload the per-command map.
 */
static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


/*
 * Post-reset fixup: disable the adapter BIOS and re-enable interrupts.
 */
static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

/*
 * Diagnostic register dump, prefixed with an optional message.
 * For SCSI chips the RISC is paused around the DMA/SXP reads.
 */
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		kprintf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		kprintf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		kprintf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		kprintf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	kprintf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	kprintf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		/* pause the RISC so DMA/SXP registers read consistently */
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		kprintf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		kprintf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		kprintf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	kprintf("    mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	kprintf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}