/* $FreeBSD: src/sys/dev/isp/isp_pci.c,v 1.78.2.4 2002/10/11 18:50:53 mjacob Exp $ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "isp_freebsd.h"

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};
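/*
 * Each ispmdvec above is the per-chip dispatch vector handed to the
 * common isp core: ISR read, register read/write, mailbox/queue DMA
 * setup, per-command DMA map and teardown, reset and register-dump
 * hooks, a firmware image pointer (dv_ispfw, possibly filled in at
 * attach time), and BIU configuration bits for the SCSI variants.
 */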
#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
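/*
 * Composite IDs: product code in the upper 16 bits, vendor ID in the
 * lower 16, matching the ((device << 16) | vendor) value built by
 * isp_pci_probe() from PCI config space.
 */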
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, NULL, NULL);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		kprintf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	quad_t wwn;
	bus_size_t lim;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	unit = device_get_unit(dev);
	if (kgetenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			device_printf(dev, "not configuring\n");
			/*
			 * But return '0' to preserve HBA numbering.
			 */
			return (0);
		}
	}
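	/*
	 * Most of the isp_* knobs consulted during attach are integer
	 * kernel environment tunables read with kgetenv_int() and
	 * interpreted as per-unit bitmaps (isp_disable above, isp_mem_map,
	 * isp_no_fwload, isp_no_nvram, and friends below). As a
	 * hypothetical example, setting isp_disable=0x4 in the loader
	 * environment skips unit 2 while preserving HBA numbering.
	 * isp_debug and the WWN hints are read as plain values instead.
	 */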
	pcs = kmalloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
	bitmap = 0;
	if (kgetenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_MEMEN;
			m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (kgetenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			m1 = PCIM_CMD_PORTEN;
			m2 = PCIM_CMD_MEMEN;
		}
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = kmalloc(psize, M_DEVBUF, M_WAITOK | M_ZERO);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
#ifdef	ISP_TARGET_MODE
	isp->isp_role = ISP_ROLE_BOTH;
#else
	isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	isp->isp_dev = dev;


	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;

	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}
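	/*
	 * Bit 0 of the PCI expansion ROM base address register is the
	 * ROM enable bit; clearing it below keeps the adapter BIOS from
	 * being decoded.
	 */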
	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (kgetenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (kgetenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (kgetenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (kgetenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (kgetenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (kgetenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) kgetenv_int("isp_debug", &isp_debug);
	if (bus_setup_intr(dev, irq, 0, isp_pci_intr,
	    isp, &pcs->ih, NULL)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

#ifdef	ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (kgetenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    kmalloc(amt, M_DEVBUF, M_WAITOK);
				bzero(FCPARAM(isp)->isp_dump_data, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif

	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
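	/*
	 * Bring-up proper: isp_reset() must leave the chip in
	 * ISP_RESETSTATE, isp_init() in ISP_INITSTATE, and isp_attach()
	 * in ISP_RUNSTATE. SCSI cards that fall short are torn down
	 * here; Fibre Channel cards are left attached because they may
	 * finish coming up later.
	 */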
	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);

	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			ISP_UNLOCK(isp);
			goto bad;
		}
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			kfree(pcs->pci_isp.isp_param, M_DEVBUF);
		kfree(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}
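/*
 * The 2300/2312 chips have a 32-bit RISC-to-host status register; the
 * handler below reads it once and synthesizes the isr/sema/mailbox0
 * triple that the common interrupt code expects from the older parts.
 */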
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}
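/*
 * On the 1080-family chips the SXP (bank 0 and bank 1) and DMA register
 * blocks sit behind select bits in BIU_CONF1, so the accessors below
 * save the old configuration, switch the appropriate bank in, perform
 * the access, and then restore the saved value.
 */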
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
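/*
 * isp_pci_mbxdma() below allocates one contiguous chunk of bus-visible
 * control space; imc() above then carves the single loaded segment into
 * the request queue, the result queue and, for Fibre Channel cards, the
 * scratch area, in that order.
 */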
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
#else
	alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return(1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) kmalloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) kmalloc(len, M_DEVBUF, M_WAITOK);

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		kfree(pcs->dmaps, M_DEVBUF);
		kfree(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void *)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		kfree(isp->isp_xflist, M_DEVBUF);
		kfree(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	kfree(isp->isp_xflist, M_DEVBUF);
	kfree(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
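/*
 * The mush_t above is the context cookie passed through bus_dmamap_load()
 * to the dma2/tdma_mk/tdma_mkfc callbacks: the command token (CCB), the
 * request queue entry being built, the in/out queue indices, and an error
 * slot that the caller inspects once the callback has run.
 */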
#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d",
		    cto->ct_seg_count, dm_segs[segcnt].ds_addr,
		    dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			ISP_UNLOCK(isp);
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]%x:%u",
			    cto->ct_header.rqs_entry_count-1, seg,
			    dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}
	/*
	 * Now do final twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
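	/*
	 * The first seglim segments land directly in the request entry
	 * (ISP_RQDSEG_T2 for Fibre Channel, ISP_RQDSEG for parallel
	 * SCSI); anything beyond that spills into RQSTYPE_DATASEG
	 * continuation entries of up to ISP_CDSEG segments each in the
	 * second loop below.
	 */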
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			datalen -= dm_segs->ds_len;
			rq->req_seg_count++;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}
	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			crit_enter();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			crit_exit();
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}
biu_sema=%x ", ISP_READ(isp, BIU_ICR), 1816 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1817 kprintf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1818 1819 1820 if (IS_SCSI(isp)) { 1821 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1822 kprintf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1823 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1824 ISP_READ(isp, CDMA_FIFO_STS)); 1825 kprintf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 1826 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 1827 ISP_READ(isp, DDMA_FIFO_STS)); 1828 kprintf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 1829 ISP_READ(isp, SXP_INTERRUPT), 1830 ISP_READ(isp, SXP_GROSS_ERR), 1831 ISP_READ(isp, SXP_PINS_CTRL)); 1832 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 1833 } 1834 kprintf(" mbox regs: %x %x %x %x %x\n", 1835 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 1836 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 1837 ISP_READ(isp, OUTMAILBOX4)); 1838 kprintf(" PCI Status Command/Status=%x\n", 1839 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 1840 } 1841