/*	$OpenBSD: adv.c,v 1.12 2002/03/14 01:26:53 millert Exp $	*/
/*	$NetBSD: adv.c,v 1.6 1998/10/28 20:39:45 dante Exp $	*/

/*
 * Generic driver for the Advanced Systems Inc. Narrow SCSI controllers
 *
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Author: Baldassare Dante Profeta <dante@mclink.it>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <uvm/uvm_extern.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/adv.h>
#include <dev/ic/advlib.h>

#ifndef DDB
#define	Debugger()	panic("should call debugger here (adv.c)")
#endif /* ! DDB */


/* #define ASC_DEBUG */

/******************************************************************************/


static void adv_enqueue(ASC_SOFTC *, struct scsi_xfer *, int);
static struct scsi_xfer *adv_dequeue(ASC_SOFTC *);

static int adv_alloc_ccbs(ASC_SOFTC *);
static int adv_create_ccbs(ASC_SOFTC *, ADV_CCB *, int);
static void adv_free_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_reset_ccb(ADV_CCB *);
static int adv_init_ccb(ASC_SOFTC *, ADV_CCB *);
static ADV_CCB *adv_get_ccb(ASC_SOFTC *, int);
static void adv_queue_ccb(ASC_SOFTC *, ADV_CCB *);
static void adv_start_ccbs(ASC_SOFTC *);

static u_int8_t *adv_alloc_overrunbuf(char *dvname, bus_dma_tag_t);

static int adv_scsi_cmd(struct scsi_xfer *);
static void advminphys(struct buf *);
static void adv_narrow_isr_callback(ASC_SOFTC *, ASC_QDONE_INFO *);

static int adv_poll(ASC_SOFTC *, struct scsi_xfer *, int);
static void adv_timeout(void *);
static void adv_watchdog(void *);


/******************************************************************************/


struct cfdriver adv_cd = {
	NULL, "adv", DV_DULL
};


struct scsi_adapter adv_switch =
{
	adv_scsi_cmd,		/* called to start/enqueue a SCSI command */
	advminphys,		/* to limit the transfer to max device can do */
	0,			/* IT SEEMS IT IS NOT USED YET */
	0,			/* as above... */
};


/* the below structure is so we have a default dev struct for our link struct */
struct scsi_device adv_dev =
{
	NULL,			/* Use default error handler */
	NULL,			/* have a queue, served by this */
	NULL,			/* have no async handler */
	NULL,			/* Use default 'done' routine */
};


#define ADV_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
#define ADV_WATCH_TIMEOUT	1000	/* time to wait for watchdog (mSec) */


/******************************************************************************/
/*                          scsi_xfer queue routines                          */
/******************************************************************************/


/*
 * Insert a scsi_xfer into the software queue.  We overload xs->free_list
 * to avoid having to allocate additional resources (since we're used
 * only during resource shortages anyhow).
 */
static void
adv_enqueue(sc, xs, infront)
	ASC_SOFTC *sc;
	struct scsi_xfer *xs;
	int infront;
{

	if (infront || sc->sc_queue.lh_first == NULL) {
		if (sc->sc_queue.lh_first == NULL)
			sc->sc_queuelast = xs;
		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
		return;
	}
	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
	sc->sc_queuelast = xs;
}


/*
 * Pull a scsi_xfer off the front of the software queue.
 */
static struct scsi_xfer *
adv_dequeue(sc)
	ASC_SOFTC *sc;
{
	struct scsi_xfer *xs;

	xs = sc->sc_queue.lh_first;
	LIST_REMOVE(xs, free_list);

	if (sc->sc_queue.lh_first == NULL)
		sc->sc_queuelast = NULL;

	return (xs);
}


/******************************************************************************/
/*                          Control Blocks routines                           */
/******************************************************************************/


static int
adv_alloc_ccbs(sc)
	ASC_SOFTC *sc;
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control blocks.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adv_control),
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control structures,"
		    " error = %d\n", sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adv_control), (caddr_t *) &sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control structures, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	/*
	 * Create and load the DMA map used for the control blocks.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adv_control),
	    1, sizeof(struct adv_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		printf("%s: unable to create control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adv_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	return (0);
}


/*
 * Create a set of ccbs and add them to the free list.  Called once
 * by adv_init().  We return the number of CCBs successfully created.
 */
static int
adv_create_ccbs(sc, ccbstore, count)
	ASC_SOFTC *sc;
	ADV_CCB *ccbstore;
	int count;
{
	ADV_CCB *ccb;
	int i, error;

	bzero(ccbstore, sizeof(ADV_CCB) * count);
	for (i = 0; i < count; i++) {
		ccb = &ccbstore[i];
		if ((error = adv_init_ccb(sc, ccb)) != 0) {
			printf("%s: unable to initialize ccb, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			return (i);
		}
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
	}

	return (i);
}


/*
 * A ccb is put onto the free list.
 */
static void
adv_free_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int s;

	s = splbio();

	adv_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);

	/*
	 * If there were none, wake anybody waiting for one to come free,
	 * starting with queued entries.
	 */
	if (ccb->chain.tqe_next == 0)
		wakeup(&sc->sc_free_ccb);

	splx(s);
}


static void
adv_reset_ccb(ccb)
	ADV_CCB *ccb;
{

	ccb->flags = 0;
}


static int
adv_init_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{
	int error;

	/*
	 * Create the DMA map for this CCB.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ASC_MAX_SG_LIST, (ASC_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		printf("%s: unable to create DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return (error);
	}
	adv_reset_ccb(ccb);
	return (0);
}


/*
 * Get a free ccb
 *
 * If there are none, see if we can allocate a new one
 */
static ADV_CCB *
adv_get_ccb(sc, flags)
	ASC_SOFTC *sc;
	int flags;
{
	ADV_CCB *ccb = 0;
	int s;

	s = splbio();

	/*
	 * If we can and have to, sleep waiting for one to come free
	 * but only if we can't allocate a new one.
	 */
	for (;;) {
		ccb = sc->sc_free_ccb.tqh_first;
		if (ccb) {
			TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
			break;
		}
		if ((flags & SCSI_NOSLEEP) != 0)
			goto out;

		tsleep(&sc->sc_free_ccb, PRIBIO, "advccb", 0);
	}

	ccb->flags |= CCB_ALLOC;

out:
	splx(s);
	return (ccb);
}


/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 */
static void
adv_queue_ccb(sc, ccb)
	ASC_SOFTC *sc;
	ADV_CCB *ccb;
{

	timeout_set(&ccb->xs->stimeout, adv_timeout, ccb);
	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	adv_start_ccbs(sc);
}


static void
adv_start_ccbs(sc)
	ASC_SOFTC *sc;
{
	ADV_CCB *ccb;
	struct scsi_xfer *xs;

	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		xs = ccb->xs;
		if (ccb->flags & CCB_WATCHDOG)
			timeout_del(&xs->stimeout);

		if (AscExeScsiQueue(sc, &ccb->scsiq) == ASC_BUSY) {
			ccb->flags |= CCB_WATCHDOG;
			timeout_set(&xs->stimeout, adv_watchdog, ccb);
			timeout_add(&xs->stimeout,
			    (ADV_WATCH_TIMEOUT * hz) / 1000);
			break;
		}
		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);

		if ((ccb->xs->flags & SCSI_POLL) == 0) {
			timeout_set(&xs->stimeout, adv_timeout, ccb);
			timeout_add(&xs->stimeout, (ccb->timeout * hz) / 1000);
		}
	}
}


/******************************************************************************/
/*                     DMA-able memory allocation routines                    */
/******************************************************************************/


/*
 * Allocate DMA-able memory for the overrun buffer.
 * This memory can be safely shared among all the AdvanSys boards.
 */
u_int8_t *
adv_alloc_overrunbuf(dvname, dmat)
	char *dvname;
	bus_dma_tag_t dmat;
{
	static u_int8_t *overrunbuf = NULL;

	bus_dmamap_t ovrbuf_dmamap;
	bus_dma_segment_t seg;
	int rseg, error;


	/*
	 * If an overrun buffer has already been allocated, don't allocate
	 * another one; return the address of the existing buffer instead.
	 */
	if (overrunbuf)
		return (overrunbuf);


	if ((error = bus_dmamem_alloc(dmat, ASC_OVERRUN_BSIZE,
	    NBPG, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate overrun buffer, error = %d\n",
		    dvname, error);
		return (0);
	}
	if ((error = bus_dmamem_map(dmat, &seg, rseg, ASC_OVERRUN_BSIZE,
	    (caddr_t *) &overrunbuf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map overrun buffer, error = %d\n",
		    dvname, error);

		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_create(dmat, ASC_OVERRUN_BSIZE, 1,
	    ASC_OVERRUN_BSIZE, 0, BUS_DMA_NOWAIT, &ovrbuf_dmamap)) != 0) {
		printf("%s: unable to create overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	if ((error = bus_dmamap_load(dmat, ovrbuf_dmamap, overrunbuf,
	    ASC_OVERRUN_BSIZE, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load overrun buffer DMA map,"
		    " error = %d\n", dvname, error);

		bus_dmamap_destroy(dmat, ovrbuf_dmamap);
		bus_dmamem_unmap(dmat, overrunbuf, ASC_OVERRUN_BSIZE);
		bus_dmamem_free(dmat, &seg, 1);
		return (0);
	}
	return (overrunbuf);
}


/******************************************************************************/
/*                        SCSI layer interfacing routines                     */
/******************************************************************************/


int
adv_init(sc)
	ASC_SOFTC *sc;
{
	int warn;

	if (!AscFindSignature(sc->sc_iot, sc->sc_ioh))
		panic("adv_init: adv_find_signature failed");

	/*
	 * Read the board configuration
	 */
	AscInitASC_SOFTC(sc);
	warn = AscInitFromEEP(sc);
	if (warn) {
		printf("%s -get: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case -1:
			printf("Chip is not halted\n");
			break;

		case -2:
			printf("Couldn't get MicroCode Start"
			    " address\n");
			break;

		case ASC_WARN_IO_PORT_ROTATE:
			printf("I/O port address modified\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		case ASC_WARN_EEPROM_CHKSUM:
			printf("EEPROM checksum error\n");
			break;

		case ASC_WARN_IRQ_MODIFIED:
			printf("IRQ modified\n");
			break;

		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	if (sc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT)
		sc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT;

	/*
	 * Modify the board configuration
	 */
	warn = AscInitFromASC_SOFTC(sc);
	if (warn) {
		printf("%s -set: ", sc->sc_dev.dv_xname);
		switch (warn) {
		case ASC_WARN_CMD_QNG_CONFLICT:
			printf("tag queuing enabled w/o disconnects\n");
			break;

		case ASC_WARN_AUTO_CONFIG:
			printf("I/O port increment switch enabled\n");
			break;

		default:
			printf("unknown warning %d\n", warn);
		}
	}
	sc->isr_callback = (ulong) adv_narrow_isr_callback;

	if (!(sc->overrun_buf = adv_alloc_overrunbuf(sc->sc_dev.dv_xname,
	    sc->sc_dmat))) {
		return (1);
	}

	return (0);
}


void
adv_attach(sc)
	ASC_SOFTC *sc;
{
	int i, error;

	/*
	 * Initialize board RISC chip and enable interrupts.
	 */
	switch (AscInitDriver(sc)) {
	case 0:
		/* AllOK */
		break;

	case 1:
		panic("%s: bad signature", sc->sc_dev.dv_xname);
		break;

	case 2:
		panic("%s: unable to load MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	case 3:
		panic("%s: unable to initialize MicroCode",
		    sc->sc_dev.dv_xname);
		break;

	default:
		panic("%s: unable to initialize board RISC chip",
		    sc->sc_dev.dv_xname);
	}


	/*
	 * fill in the prototype scsi_link.
	 */
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = sc->chip_scsi_id;
	sc->sc_link.adapter = &adv_switch;
	sc->sc_link.device = &adv_dev;
	sc->sc_link.openings = 4;
	sc->sc_link.adapter_buswidth = 7;


	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	LIST_INIT(&sc->sc_queue);


	/*
	 * Allocate the Control Blocks.
	 */
	error = adv_alloc_ccbs(sc);
	if (error)
		return;	/* (error) */

	/*
	 * Create and initialize the Control Blocks.
	 */
	i = adv_create_ccbs(sc, sc->sc_control->ccbs, ADV_MAX_CCB);
	if (i == 0) {
		printf("%s: unable to create control blocks\n",
		    sc->sc_dev.dv_xname);
		return;	/* (ENOMEM) */
	} else if (i != ADV_MAX_CCB) {
		printf("%s: WARNING: only %d of %d control blocks created\n",
		    sc->sc_dev.dv_xname, i, ADV_MAX_CCB);
	}
	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
}


static void
advminphys(bp)
	struct buf *bp;
{

	if (bp->b_bcount > ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE))
		bp->b_bcount = ((ASC_MAX_SG_LIST - 1) * PAGE_SIZE);
	minphys(bp);
}


/*
 * start a scsi operation given the command and the data address.  Also needs
 * the unit, target and lu.
 */
static int
adv_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb;
	int s, flags, error, nsegs;
	int fromqueue = 1, dontqueue = 0;


	s = splbio();		/* protect the queue */

	/*
	 * If we're running the queue from adv_done(), we've been
	 * called with the first queue entry as our argument.
	 */
	if (xs == sc->sc_queue.lh_first) {
		xs = adv_dequeue(sc);
		fromqueue = 1;
	} else {

		/* Polled requests can't be queued for later. */
		dontqueue = xs->flags & SCSI_POLL;

		/*
		 * If there are jobs in the queue, run them first.
		 */
		if (sc->sc_queue.lh_first != NULL) {
			/*
			 * If we can't queue, we have to abort, since
			 * we have to preserve order.
			 */
			if (dontqueue) {
				splx(s);
				xs->error = XS_DRIVER_STUFFUP;
				return (TRY_AGAIN_LATER);
			}
			/*
			 * Swap with the first queue entry.
			 */
			adv_enqueue(sc, xs, 0);
			xs = adv_dequeue(sc);
			fromqueue = 1;
		}
	}


	/*
	 * get a ccb to use. If the transfer
	 * is from a buf (possibly from interrupt time)
	 * then we can't allow it to sleep
	 */

	flags = xs->flags;
	if ((ccb = adv_get_ccb(sc, flags)) == NULL) {
		/*
		 * If we can't queue, we lose.
		 */
		if (dontqueue) {
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			return (TRY_AGAIN_LATER);
		}
		/*
		 * Stuff ourselves into the queue, in front
		 * if we came off in the first place.
		 */
		adv_enqueue(sc, xs, fromqueue);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}
	splx(s);		/* done playing with the queue */

	ccb->xs = xs;
	ccb->timeout = xs->timeout;

	/*
	 * Build up the request
	 */
	memset(&ccb->scsiq, 0, sizeof(ASC_SCSI_Q));

	ccb->scsiq.q2.ccb_ptr = (ulong) ccb;

	ccb->scsiq.cdbptr = &xs->cmd->opcode;
	ccb->scsiq.q2.cdb_len = xs->cmdlen;
	ccb->scsiq.q1.target_id = ASC_TID_TO_TARGET_ID(sc_link->target);
	ccb->scsiq.q1.target_lun = sc_link->lun;
	ccb->scsiq.q2.target_ix = ASC_TIDLUN_TO_IX(sc_link->target,
	    sc_link->lun);
	ccb->scsiq.q1.sense_addr = sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADV_CCB_OFF(ccb) + offsetof(struct adv_ccb, scsi_sense);
	ccb->scsiq.q1.sense_len = sizeof(struct scsi_sense_data);

	/*
	 * If there are any outstanding requests for the current target,
	 * then every 255th request send an ORDERED request.  This heuristic
	 * tries to retain the benefit of request sorting while preventing
	 * request starvation.  255 is the max number of tags or pending
	 * commands a device may have outstanding.
	 */
	sc->reqcnt[sc_link->target]++;
	if ((sc->reqcnt[sc_link->target] > 0) &&
	    (sc->reqcnt[sc_link->target] % 255) == 0) {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_ORDERED;
	} else {
		ccb->scsiq.q2.tag_code = M2_QTAG_MSG_SIMPLE;
	}


	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		if (flags & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    (flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		}

		if (error) {
			if (error == EFBIG) {
				printf("%s: adv_scsi_cmd, more than %d dma"
				    " segments\n",
				    sc->sc_dev.dv_xname, ASC_MAX_SG_LIST);
			} else {
				printf("%s: adv_scsi_cmd, error %d loading"
				    " dma map\n",
				    sc->sc_dev.dv_xname, error);
			}

			xs->error = XS_DRIVER_STUFFUP;
			adv_free_ccb(sc, ccb);
			return (COMPLETE);
		}
		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
		    0, ccb->dmamap_xfer->dm_mapsize,
		    ((flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE));


		memset(&ccb->sghead, 0, sizeof(ASC_SG_HEAD));

		for (nsegs = 0; nsegs < ccb->dmamap_xfer->dm_nsegs; nsegs++) {

			ccb->sghead.sg_list[nsegs].addr =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_addr;
			ccb->sghead.sg_list[nsegs].bytes =
			    ccb->dmamap_xfer->dm_segs[nsegs].ds_len;
		}

		ccb->sghead.entry_cnt = ccb->scsiq.q1.sg_queue_cnt =
		    ccb->dmamap_xfer->dm_nsegs;

		ccb->scsiq.q1.cntl |= ASC_QC_SG_HEAD;
		ccb->scsiq.sg_head = &ccb->sghead;
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		ccb->scsiq.q1.data_addr = 0;
		ccb->scsiq.q1.data_cnt = 0;
	}

#ifdef ASC_DEBUG
	printf("id = %d, lun = %d, cmd = %d, ccb = 0x%lX \n",
	    sc_link->target,
	    sc_link->lun, xs->cmd->opcode,
	    (unsigned long)ccb);
#endif
	s = splbio();
	adv_queue_ccb(sc, ccb);
	splx(s);

	/*
	 * Usually return SUCCESSFULLY QUEUED
	 */
	if ((flags & SCSI_POLL) == 0)
		return (SUCCESSFULLY_QUEUED);

	/*
	 * If we can't use interrupts, poll on completion
	 */
	if (adv_poll(sc, xs, ccb->timeout)) {
		adv_timeout(ccb);
		if (adv_poll(sc, xs, ccb->timeout))
			adv_timeout(ccb);
	}
	return (COMPLETE);
}


int
adv_intr(arg)
	void *arg;
{
	ASC_SOFTC *sc = arg;
	struct scsi_xfer *xs;

#ifdef ASC_DEBUG
	int int_pend = FALSE;

	if (ASC_IS_INT_PENDING(sc->sc_iot, sc->sc_ioh)) {
		int_pend = TRUE;
		printf("ISR - ");
	}
#endif
	AscISR(sc);
#ifdef ASC_DEBUG
	if (int_pend)
		printf("\n");
#endif

	/*
	 * If there are queue entries in the software queue, try to
	 * run the first one.  We should be more or less guaranteed
	 * to succeed, since we just freed a CCB.
	 *
	 * NOTE: adv_scsi_cmd() relies on our calling it with
	 * the first entry in the queue.
	 */
	if ((xs = sc->sc_queue.lh_first) != NULL)
		(void) adv_scsi_cmd(xs);

	return (1);
}


/*
 * Poll a particular unit, looking for a particular xs
 */
static int
adv_poll(sc, xs, count)
	ASC_SOFTC *sc;
	struct scsi_xfer *xs;
	int count;
{

	/* timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		adv_intr(sc);
		if (xs->flags & ITSDONE)
			return (0);
		delay(1000);	/* only happens in boot so ok */
		count--;
	}
	return (1);
}


static void
adv_timeout(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	int s;

	sc_print_addr(sc_link);
	printf("timed out");

	s = splbio();

	/*
	 * If it has been through before, then a previous abort has failed,
	 * don't try abort again, reset the bus instead.
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN. Resetting Bus\n");
		/* Let's try resetting the bus! */
		if (AscResetBus(sc) == ASC_ERROR) {
			ccb->timeout = sc->scsi_reset_wait;
			adv_queue_ccb(sc, ccb);
		}
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		AscAbortCCB(sc, (u_int32_t) ccb);
		ccb->xs->error = XS_TIMEOUT;
		ccb->timeout = ADV_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		adv_queue_ccb(sc, ccb);
	}

	splx(s);
}


static void
adv_watchdog(arg)
	void *arg;
{
	ADV_CCB *ccb = arg;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_link *sc_link = xs->sc_link;
	ASC_SOFTC *sc = sc_link->adapter_softc;
	int s;

	s = splbio();

	ccb->flags &= ~CCB_WATCHDOG;
	adv_start_ccbs(sc);

	splx(s);
}


/******************************************************************************/
/*                  NARROW and WIDE boards Interrupt callbacks                */
/******************************************************************************/


/*
 * adv_narrow_isr_callback() - Second Level Interrupt Handler called by AscISR()
 *
 * Interrupt callback function for the Narrow SCSI Asc Library.
 */
static void
adv_narrow_isr_callback(sc, qdonep)
	ASC_SOFTC *sc;
	ASC_QDONE_INFO *qdonep;
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADV_CCB *ccb = (ADV_CCB *) qdonep->d2.ccb_ptr;
	struct scsi_xfer *xs = ccb->xs;
	struct scsi_sense_data *s1, *s2;


#ifdef ASC_DEBUG
	printf(" - ccb=0x%lx, id=%d, lun=%d, cmd=%d, ",
	    (unsigned long)ccb,
	    xs->sc_link->target,
	    xs->sc_link->lun, xs->cmd->opcode);
#endif
	timeout_del(&xs->stimeout);

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer,
		    0, ccb->dmamap_xfer->dm_mapsize,
		    ((xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}
	if ((ccb->flags & CCB_ALLOC) == 0) {
		printf("%s: exiting ccb not allocated!\n", sc->sc_dev.dv_xname);
		Debugger();
		return;
	}
	/*
	 * 'qdonep' contains the command's ending status.
	 */
#ifdef ASC_DEBUG
	printf("d_s=%d, h_s=%d", qdonep->d3.done_stat, qdonep->d3.host_stat);
#endif
	switch (qdonep->d3.done_stat) {
	case ASC_QD_NO_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			xs->error = XS_NOERROR;
			xs->resid = 0;
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		/*
		 * If an INQUIRY command completed successfully, then call
		 * the AscInquiryHandling() function to patch bugged boards.
		 */
		if ((xs->cmd->opcode == SCSICMD_Inquiry) &&
		    (xs->sc_link->lun == 0) &&
		    (xs->datalen - qdonep->remain_bytes) >= 8) {
			AscInquiryHandling(sc,
			    xs->sc_link->target & 0x7,
			    (ASC_SCSI_INQUIRY *) xs->data);
		}
		break;

	case ASC_QD_WITH_ERROR:
		switch (qdonep->d3.host_stat) {
		case ASC_QHSTA_NO_ERROR:
			if (qdonep->d3.scsi_stat == SS_CHK_CONDITION) {
				s1 = &ccb->scsi_sense;
				s2 = &xs->sense;
				*s2 = *s1;
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_DRIVER_STUFFUP;
			}
			break;

		default:
			/* QHSTA error occurred */
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case ASC_QD_ABORTED_BY_HOST:
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}


	adv_free_ccb(sc, ccb);
	xs->flags |= ITSDONE;
	scsi_done(xs);
}