1 /* $NetBSD: iha.c,v 1.20 2002/12/08 01:09:34 jmc Exp $ */ 2 3 /*- 4 * Device driver for the INI-9XXXU/UW or INIC-940/950 PCI SCSI Controller. 5 * 6 * Written for 386bsd and FreeBSD by 7 * Winston Hung <winstonh@initio.com> 8 * 9 * Copyright (c) 1997-1999 Initio Corp. 10 * Copyright (c) 2000, 2001 Ken Westerback 11 * Copyright (c) 2001, 2002 Izumi Tsutsui 12 * All rights reserved. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer, 19 * without modification, immediately at the beginning of the file. 20 * 2. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 
34 */ 35 36 /* 37 * Ported to NetBSD by Izumi Tsutsui <tsutsui@ceres.dti.ne.jp> from OpenBSD: 38 * $OpenBSD: iha.c,v 1.3 2001/02/20 00:47:33 krw Exp $ 39 */ 40 41 #include <sys/cdefs.h> 42 __KERNEL_RCSID(0, "$NetBSD: iha.c,v 1.20 2002/12/08 01:09:34 jmc Exp $"); 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/kernel.h> 47 #include <sys/buf.h> 48 #include <sys/device.h> 49 #include <sys/malloc.h> 50 51 #include <uvm/uvm_extern.h> 52 53 #include <machine/bus.h> 54 #include <machine/intr.h> 55 56 #include <dev/scsipi/scsi_all.h> 57 #include <dev/scsipi/scsipi_all.h> 58 #include <dev/scsipi/scsiconf.h> 59 #include <dev/scsipi/scsi_message.h> 60 61 #include <dev/ic/ihareg.h> 62 #include <dev/ic/ihavar.h> 63 64 /* 65 * SCSI Rate Table, indexed by FLAG_SCSI_RATE field of 66 * tcs flags. 67 */ 68 static const u_int8_t iha_rate_tbl[] = { 69 /* fast 20 */ 70 /* nanosecond divide by 4 */ 71 12, /* 50ns, 20M */ 72 18, /* 75ns, 13.3M */ 73 25, /* 100ns, 10M */ 74 31, /* 125ns, 8M */ 75 37, /* 150ns, 6.6M */ 76 43, /* 175ns, 5.7M */ 77 50, /* 200ns, 5M */ 78 62 /* 250ns, 4M */ 79 }; 80 #define IHA_MAX_PERIOD 62 81 82 #ifdef notused 83 static u_int16_t eeprom_default[EEPROM_SIZE] = { 84 /* -- Header ------------------------------------ */ 85 /* signature */ 86 EEP_SIGNATURE, 87 /* size, revision */ 88 EEP_WORD(EEPROM_SIZE * 2, 0x01), 89 /* -- Host Adapter Structure -------------------- */ 90 /* model */ 91 0x0095, 92 /* model info, number of channel */ 93 EEP_WORD(0x00, 1), 94 /* BIOS config */ 95 EEP_BIOSCFG_DEFAULT, 96 /* host adapter config */ 97 0, 98 99 /* -- eeprom_adapter[0] ------------------------------- */ 100 /* ID, adapter config 1 */ 101 EEP_WORD(7, CFG_DEFAULT), 102 /* adapter config 2, number of targets */ 103 EEP_WORD(0x00, 8), 104 /* target flags */ 105 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 106 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 107 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 108 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 109 
EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 110 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 111 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 112 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 113 114 /* -- eeprom_adapter[1] ------------------------------- */ 115 /* ID, adapter config 1 */ 116 EEP_WORD(7, CFG_DEFAULT), 117 /* adapter config 2, number of targets */ 118 EEP_WORD(0x00, 8), 119 /* target flags */ 120 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 121 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 122 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 123 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 124 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 125 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 126 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 127 EEP_WORD(FLAG_DEFAULT, FLAG_DEFAULT), 128 /* reserved[5] */ 129 0, 0, 0, 0, 0, 130 /* checksum */ 131 0 132 }; 133 #endif 134 135 static void iha_append_free_scb(struct iha_softc *, struct iha_scb *); 136 static void iha_append_done_scb(struct iha_softc *, struct iha_scb *, u_int8_t); 137 static __inline struct iha_scb *iha_pop_done_scb(struct iha_softc *); 138 139 static struct iha_scb *iha_find_pend_scb(struct iha_softc *); 140 static __inline void iha_append_pend_scb(struct iha_softc *, struct iha_scb *); 141 static __inline void iha_push_pend_scb(struct iha_softc *, struct iha_scb *); 142 static __inline void iha_del_pend_scb(struct iha_softc *, struct iha_scb *); 143 static __inline void iha_mark_busy_scb(struct iha_scb *); 144 145 static __inline void iha_set_ssig(struct iha_softc *, u_int8_t, u_int8_t); 146 147 static int iha_alloc_sglist(struct iha_softc *); 148 149 static void iha_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t, 150 void *); 151 static void iha_update_xfer_mode(struct iha_softc *, int); 152 153 static void iha_reset_scsi_bus(struct iha_softc *); 154 static void iha_reset_chip(struct iha_softc *); 155 static void iha_reset_dma(struct iha_softc *); 156 static void iha_reset_tcs(struct tcs *, u_int8_t); 157 158 static void iha_main(struct iha_softc *); 159 
static void iha_scsi(struct iha_softc *); 160 static void iha_select(struct iha_softc *, struct iha_scb *, u_int8_t); 161 static int iha_wait(struct iha_softc *, u_int8_t); 162 163 static void iha_exec_scb(struct iha_softc *, struct iha_scb *); 164 static void iha_done_scb(struct iha_softc *, struct iha_scb *); 165 static int iha_push_sense_request(struct iha_softc *, struct iha_scb *); 166 167 static void iha_timeout(void *); 168 static void iha_abort_xs(struct iha_softc *, struct scsipi_xfer *, u_int8_t); 169 static u_int8_t iha_data_over_run(struct iha_scb *); 170 171 static int iha_next_state(struct iha_softc *); 172 static int iha_state_1(struct iha_softc *); 173 static int iha_state_2(struct iha_softc *); 174 static int iha_state_3(struct iha_softc *); 175 static int iha_state_4(struct iha_softc *); 176 static int iha_state_5(struct iha_softc *); 177 static int iha_state_6(struct iha_softc *); 178 static int iha_state_8(struct iha_softc *); 179 180 static int iha_xfer_data(struct iha_softc *, struct iha_scb *, int); 181 static int iha_xpad_in(struct iha_softc *); 182 static int iha_xpad_out(struct iha_softc *); 183 184 static int iha_status_msg(struct iha_softc *); 185 static void iha_busfree(struct iha_softc *); 186 static int iha_resel(struct iha_softc *); 187 188 static int iha_msgin(struct iha_softc *); 189 static int iha_msgin_extended(struct iha_softc *); 190 static int iha_msgin_sdtr(struct iha_softc *); 191 static int iha_msgin_ignore_wid_resid(struct iha_softc *); 192 193 static int iha_msgout(struct iha_softc *, u_int8_t); 194 static void iha_msgout_abort(struct iha_softc *, u_int8_t); 195 static int iha_msgout_reject(struct iha_softc *); 196 static int iha_msgout_extended(struct iha_softc *); 197 static int iha_msgout_wdtr(struct iha_softc *); 198 static int iha_msgout_sdtr(struct iha_softc *); 199 200 static void iha_wide_done(struct iha_softc *); 201 static void iha_sync_done(struct iha_softc *); 202 203 static void iha_bad_seq(struct iha_softc 
*); 204 205 static void iha_read_eeprom(struct iha_softc *, struct iha_eeprom *); 206 static int iha_se2_rd_all(struct iha_softc *, u_int16_t *); 207 static void iha_se2_instr(struct iha_softc *, int); 208 static u_int16_t iha_se2_rd(struct iha_softc *, int); 209 #ifdef notused 210 static void iha_se2_update_all(struct iha_softc *); 211 static void iha_se2_wr(struct iha_softc *, int, u_int16_t); 212 #endif 213 214 /* 215 * iha_append_free_scb - append the supplied SCB to the tail of the 216 * sc_freescb queue after clearing and resetting 217 * everything possible. 218 */ 219 static void 220 iha_append_free_scb(sc, scb) 221 struct iha_softc *sc; 222 struct iha_scb *scb; 223 { 224 int s; 225 226 s = splbio(); 227 228 if (scb == sc->sc_actscb) 229 sc->sc_actscb = NULL; 230 231 scb->status = STATUS_QUEUED; 232 scb->ha_stat = HOST_OK; 233 scb->ta_stat = SCSI_OK; 234 235 scb->nextstat = 0; 236 scb->scb_tagmsg = 0; 237 238 scb->xs = NULL; 239 scb->tcs = NULL; 240 241 /* 242 * scb_tagid, sg_addr, sglist 243 * SCB_SensePtr are set at initialization 244 * and never change 245 */ 246 247 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain); 248 249 splx(s); 250 } 251 252 static void 253 iha_append_done_scb(sc, scb, hastat) 254 struct iha_softc *sc; 255 struct iha_scb *scb; 256 u_int8_t hastat; 257 { 258 struct tcs *tcs; 259 int s; 260 261 s = splbio(); 262 263 if (scb->xs != NULL) 264 callout_stop(&scb->xs->xs_callout); 265 266 if (scb == sc->sc_actscb) 267 sc->sc_actscb = NULL; 268 269 tcs = scb->tcs; 270 271 if (scb->scb_tagmsg != 0) { 272 if (tcs->tagcnt) 273 tcs->tagcnt--; 274 } else if (tcs->ntagscb == scb) 275 tcs->ntagscb = NULL; 276 277 scb->status = STATUS_QUEUED; 278 scb->ha_stat = hastat; 279 280 TAILQ_INSERT_TAIL(&sc->sc_donescb, scb, chain); 281 282 splx(s); 283 } 284 285 static __inline struct iha_scb * 286 iha_pop_done_scb(sc) 287 struct iha_softc *sc; 288 { 289 struct iha_scb *scb; 290 int s; 291 292 s = splbio(); 293 294 scb = TAILQ_FIRST(&sc->sc_donescb); 295 296 
if (scb != NULL) { 297 scb->status = STATUS_RENT; 298 TAILQ_REMOVE(&sc->sc_donescb, scb, chain); 299 } 300 301 splx(s); 302 303 return (scb); 304 } 305 306 /* 307 * iha_find_pend_scb - scan the pending queue for a SCB that can be 308 * processed immediately. Return NULL if none found 309 * and a pointer to the SCB if one is found. If there 310 * is an active SCB, return NULL! 311 */ 312 static struct iha_scb * 313 iha_find_pend_scb(sc) 314 struct iha_softc *sc; 315 { 316 struct iha_scb *scb; 317 struct tcs *tcs; 318 int s; 319 320 s = splbio(); 321 322 if (sc->sc_actscb != NULL) 323 scb = NULL; 324 325 else 326 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) { 327 if ((scb->xs->xs_control & XS_CTL_RESET) != 0) 328 /* ALWAYS willing to reset a device */ 329 break; 330 331 tcs = scb->tcs; 332 333 if ((scb->scb_tagmsg) != 0) { 334 /* 335 * A Tagged I/O. OK to start If no 336 * non-tagged I/O is active on the same 337 * target 338 */ 339 if (tcs->ntagscb == NULL) 340 break; 341 342 } else if (scb->cmd[0] == REQUEST_SENSE) { 343 /* 344 * OK to do a non-tagged request sense 345 * even if a non-tagged I/O has been 346 * started, 'cuz we don't allow any 347 * disconnect during a request sense op 348 */ 349 break; 350 351 } else if (tcs->tagcnt == 0) { 352 /* 353 * No tagged I/O active on this target, 354 * ok to start a non-tagged one if one 355 * is not already active 356 */ 357 if (tcs->ntagscb == NULL) 358 break; 359 } 360 } 361 362 splx(s); 363 364 return (scb); 365 } 366 367 static __inline void 368 iha_append_pend_scb(sc, scb) 369 struct iha_softc *sc; 370 struct iha_scb *scb; 371 { 372 /* ASSUMPTION: only called within a splbio()/splx() pair */ 373 374 if (scb == sc->sc_actscb) 375 sc->sc_actscb = NULL; 376 377 scb->status = STATUS_QUEUED; 378 379 TAILQ_INSERT_TAIL(&sc->sc_pendscb, scb, chain); 380 } 381 382 static __inline void 383 iha_push_pend_scb(sc, scb) 384 struct iha_softc *sc; 385 struct iha_scb *scb; 386 { 387 int s; 388 389 s = splbio(); 390 391 if (scb == 
sc->sc_actscb) 392 sc->sc_actscb = NULL; 393 394 scb->status = STATUS_QUEUED; 395 396 TAILQ_INSERT_HEAD(&sc->sc_pendscb, scb, chain); 397 398 splx(s); 399 } 400 401 /* 402 * iha_del_pend_scb - remove scb from sc_pendscb 403 */ 404 static __inline void 405 iha_del_pend_scb(sc, scb) 406 struct iha_softc *sc; 407 struct iha_scb *scb; 408 { 409 int s; 410 411 s = splbio(); 412 413 TAILQ_REMOVE(&sc->sc_pendscb, scb, chain); 414 415 splx(s); 416 } 417 418 static __inline void 419 iha_mark_busy_scb(scb) 420 struct iha_scb *scb; 421 { 422 int s; 423 424 s = splbio(); 425 426 scb->status = STATUS_BUSY; 427 428 if (scb->scb_tagmsg == 0) 429 scb->tcs->ntagscb = scb; 430 else 431 scb->tcs->tagcnt++; 432 433 splx(s); 434 } 435 436 /* 437 * iha_set_ssig - read the current scsi signal mask, then write a new 438 * one which turns off/on the specified signals. 439 */ 440 static __inline void 441 iha_set_ssig(sc, offsigs, onsigs) 442 struct iha_softc *sc; 443 u_int8_t offsigs, onsigs; 444 { 445 bus_space_tag_t iot = sc->sc_iot; 446 bus_space_handle_t ioh = sc->sc_ioh; 447 u_int8_t currsigs; 448 449 currsigs = bus_space_read_1(iot, ioh, TUL_SSIGI); 450 bus_space_write_1(iot, ioh, TUL_SSIGO, (currsigs & ~offsigs) | onsigs); 451 } 452 453 /* 454 * iha_intr - the interrupt service routine for the iha driver 455 */ 456 int 457 iha_intr(arg) 458 void *arg; 459 { 460 bus_space_tag_t iot; 461 bus_space_handle_t ioh; 462 struct iha_softc *sc; 463 int s; 464 465 sc = (struct iha_softc *)arg; 466 iot = sc->sc_iot; 467 ioh = sc->sc_ioh; 468 469 if ((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0) 470 return (0); 471 472 s = splbio(); /* XXX - Or are interrupts off when ISR's are called? */ 473 474 if (sc->sc_semaph != SEMAPH_IN_MAIN) { 475 /* XXX - need these inside a splbio()/splx()? 
*/ 476 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL); 477 sc->sc_semaph = SEMAPH_IN_MAIN; 478 479 iha_main(sc); 480 481 sc->sc_semaph = ~SEMAPH_IN_MAIN; 482 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP)); 483 } 484 485 splx(s); 486 487 return (1); 488 } 489 490 void 491 iha_attach(sc) 492 struct iha_softc *sc; 493 { 494 bus_space_tag_t iot = sc->sc_iot; 495 bus_space_handle_t ioh = sc->sc_ioh; 496 struct iha_scb *scb; 497 struct iha_eeprom eeprom; 498 struct eeprom_adapter *conf; 499 int i, error, reg; 500 501 iha_read_eeprom(sc, &eeprom); 502 503 conf = &eeprom.adapter[0]; 504 505 /* 506 * fill in the rest of the iha_softc fields 507 */ 508 sc->sc_id = CFG_ID(conf->config1); 509 sc->sc_semaph = ~SEMAPH_IN_MAIN; 510 sc->sc_status0 = 0; 511 sc->sc_actscb = NULL; 512 513 TAILQ_INIT(&sc->sc_freescb); 514 TAILQ_INIT(&sc->sc_pendscb); 515 TAILQ_INIT(&sc->sc_donescb); 516 error = iha_alloc_sglist(sc); 517 if (error != 0) { 518 printf(": cannot allocate sglist\n"); 519 return; 520 } 521 522 sc->sc_scb = malloc(sizeof(struct iha_scb) * IHA_MAX_SCB, 523 M_DEVBUF, M_NOWAIT|M_ZERO); 524 if (sc->sc_scb == NULL) { 525 printf(": cannot allocate SCB\n"); 526 return; 527 } 528 529 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) { 530 scb->scb_tagid = i; 531 scb->sgoffset = IHA_SG_SIZE * i; 532 scb->sglist = sc->sc_sglist + IHA_MAX_SG_ENTRIES * i; 533 scb->sg_addr = 534 sc->sc_dmamap->dm_segs[0].ds_addr + scb->sgoffset; 535 536 error = bus_dmamap_create(sc->sc_dmat, 537 MAXPHYS, IHA_MAX_SG_ENTRIES, MAXPHYS, 0, 538 BUS_DMA_NOWAIT, &scb->dmap); 539 540 if (error != 0) { 541 printf(": couldn't create SCB DMA map, error = %d\n", 542 error); 543 return; 544 } 545 TAILQ_INSERT_TAIL(&sc->sc_freescb, scb, chain); 546 } 547 548 /* Mask all the interrupts */ 549 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL); 550 551 /* Stop any I/O and reset the scsi module */ 552 iha_reset_dma(sc); 553 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSMOD); 554 555 /* Program HBA's SCSI 
ID */ 556 bus_space_write_1(iot, ioh, TUL_SID, sc->sc_id << 4); 557 558 /* 559 * Configure the channel as requested by the NVRAM settings read 560 * by iha_read_eeprom() above. 561 */ 562 563 sc->sc_sconf1 = SCONFIG0DEFAULT; 564 if ((conf->config1 & CFG_EN_PAR) != 0) 565 sc->sc_sconf1 |= SPCHK; 566 bus_space_write_1(iot, ioh, TUL_SCONFIG0, sc->sc_sconf1); 567 568 /* set selection time out 250 ms */ 569 bus_space_write_1(iot, ioh, TUL_STIMO, STIMO_250MS); 570 571 /* Enable desired SCSI termination configuration read from eeprom */ 572 reg = 0; 573 if (conf->config1 & CFG_ACT_TERM1) 574 reg |= ENTMW; 575 if (conf->config1 & CFG_ACT_TERM2) 576 reg |= ENTM; 577 bus_space_write_1(iot, ioh, TUL_DCTRL0, reg); 578 579 reg = bus_space_read_1(iot, ioh, TUL_GCTRL1) & ~ATDEN; 580 if (conf->config1 & CFG_AUTO_TERM) 581 reg |= ATDEN; 582 bus_space_write_1(iot, ioh, TUL_GCTRL1, reg); 583 584 for (i = 0; i < IHA_MAX_TARGETS / 2; i++) { 585 sc->sc_tcs[i * 2 ].flags = EEP_LBYTE(conf->tflags[i]); 586 sc->sc_tcs[i * 2 + 1].flags = EEP_HBYTE(conf->tflags[i]); 587 iha_reset_tcs(&sc->sc_tcs[i * 2 ], sc->sc_sconf1); 588 iha_reset_tcs(&sc->sc_tcs[i * 2 + 1], sc->sc_sconf1); 589 } 590 591 iha_reset_chip(sc); 592 bus_space_write_1(iot, ioh, TUL_SIEN, ALL_INTERRUPTS); 593 594 /* 595 * fill in the adapter. 596 */ 597 sc->sc_adapter.adapt_dev = &sc->sc_dev; 598 sc->sc_adapter.adapt_nchannels = 1; 599 sc->sc_adapter.adapt_openings = IHA_MAX_SCB; 600 sc->sc_adapter.adapt_max_periph = IHA_MAX_SCB; 601 sc->sc_adapter.adapt_ioctl = NULL; 602 sc->sc_adapter.adapt_minphys = minphys; 603 sc->sc_adapter.adapt_request = iha_scsipi_request; 604 605 /* 606 * fill in the channel. 
607 */ 608 sc->sc_channel.chan_adapter = &sc->sc_adapter; 609 sc->sc_channel.chan_bustype = &scsi_bustype; 610 sc->sc_channel.chan_channel = 0; 611 sc->sc_channel.chan_ntargets = CFG_TARGET(conf->config2); 612 sc->sc_channel.chan_nluns = 8; 613 sc->sc_channel.chan_id = sc->sc_id; 614 615 /* 616 * Now try to attach all the sub devices. 617 */ 618 config_found(&sc->sc_dev, &sc->sc_channel, scsiprint); 619 } 620 621 /* 622 * iha_alloc_sglist - allocate and map sglist for SCB's 623 */ 624 static int 625 iha_alloc_sglist(sc) 626 struct iha_softc *sc; 627 { 628 bus_dma_segment_t seg; 629 int error, rseg; 630 631 /* 632 * Allocate dma-safe memory for the SCB's sglist 633 */ 634 if ((error = bus_dmamem_alloc(sc->sc_dmat, 635 IHA_SG_SIZE * IHA_MAX_SCB, 636 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 637 printf(": unable to allocate sglist, error = %d\n", error); 638 return (error); 639 } 640 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 641 IHA_SG_SIZE * IHA_MAX_SCB, (caddr_t *)&sc->sc_sglist, 642 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 643 printf(": unable to map sglist, error = %d\n", error); 644 return (error); 645 } 646 647 /* 648 * Create and load the DMA map used for the SCBs 649 */ 650 if ((error = bus_dmamap_create(sc->sc_dmat, 651 IHA_SG_SIZE * IHA_MAX_SCB, 1, IHA_SG_SIZE * IHA_MAX_SCB, 652 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { 653 printf(": unable to create control DMA map, error = %d\n", 654 error); 655 return (error); 656 } 657 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, 658 sc->sc_sglist, IHA_SG_SIZE * IHA_MAX_SCB, 659 NULL, BUS_DMA_NOWAIT)) != 0) { 660 printf(": unable to load control DMA map, error = %d\n", error); 661 return (error); 662 } 663 664 memset(sc->sc_sglist, 0, IHA_SG_SIZE * IHA_MAX_SCB); 665 666 return (0); 667 } 668 669 void 670 iha_scsipi_request(chan, req, arg) 671 struct scsipi_channel *chan; 672 scsipi_adapter_req_t req; 673 void *arg; 674 { 675 struct scsipi_xfer *xs; 676 struct scsipi_periph 
*periph; 677 struct iha_scb *scb; 678 struct iha_softc *sc; 679 int error, s; 680 681 sc = (struct iha_softc *)chan->chan_adapter->adapt_dev; 682 683 switch (req) { 684 case ADAPTER_REQ_RUN_XFER: 685 xs = arg; 686 periph = xs->xs_periph; 687 688 if (xs->cmdlen > sizeof(struct scsi_generic) || 689 periph->periph_target >= IHA_MAX_TARGETS) { 690 xs->error = XS_DRIVER_STUFFUP; 691 return; 692 } 693 694 s = splbio(); 695 scb = TAILQ_FIRST(&sc->sc_freescb); 696 if (scb != NULL) { 697 scb->status = STATUS_RENT; 698 TAILQ_REMOVE(&sc->sc_freescb, scb, chain); 699 } 700 #ifdef DIAGNOSTIC 701 else { 702 scsipi_printaddr(periph); 703 printf("unable to allocate scb\n"); 704 panic("iha_scsipi_request"); 705 } 706 #endif 707 splx(s); 708 709 scb->target = periph->periph_target; 710 scb->lun = periph->periph_lun; 711 scb->tcs = &sc->sc_tcs[scb->target]; 712 scb->scb_id = MSG_IDENTIFY(periph->periph_lun, 713 (xs->xs_control & XS_CTL_REQSENSE) == 0); 714 715 scb->xs = xs; 716 scb->cmdlen = xs->cmdlen; 717 memcpy(&scb->cmd, xs->cmd, xs->cmdlen); 718 scb->buflen = xs->datalen; 719 scb->flags = 0; 720 if (xs->xs_control & XS_CTL_DATA_OUT) 721 scb->flags |= FLAG_DATAOUT; 722 if (xs->xs_control & XS_CTL_DATA_IN) 723 scb->flags |= FLAG_DATAIN; 724 725 if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) { 726 error = bus_dmamap_load(sc->sc_dmat, scb->dmap, 727 xs->data, scb->buflen, NULL, 728 ((xs->xs_control & XS_CTL_NOSLEEP) ? 729 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | 730 BUS_DMA_STREAMING | 731 ((scb->flags & FLAG_DATAIN) ? 732 BUS_DMA_READ : BUS_DMA_WRITE)); 733 734 if (error) { 735 printf("%s: error %d loading dma map\n", 736 sc->sc_dev.dv_xname, error); 737 iha_append_free_scb(sc, scb); 738 xs->error = XS_DRIVER_STUFFUP; 739 scsipi_done(xs); 740 return; 741 } 742 bus_dmamap_sync(sc->sc_dmat, scb->dmap, 743 0, scb->dmap->dm_mapsize, 744 (scb->flags & FLAG_DATAIN) ? 
745 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 746 } 747 748 iha_exec_scb(sc, scb); 749 return; 750 751 case ADAPTER_REQ_GROW_RESOURCES: 752 return; /* XXX */ 753 754 case ADAPTER_REQ_SET_XFER_MODE: 755 { 756 struct tcs *tcs; 757 struct scsipi_xfer_mode *xm = arg; 758 759 tcs = &sc->sc_tcs[xm->xm_target]; 760 761 if ((xm->xm_mode & PERIPH_CAP_WIDE16) != 0 && 762 (tcs->flags & FLAG_NO_WIDE) == 0) 763 tcs->flags &= ~(FLAG_WIDE_DONE|FLAG_SYNC_DONE); 764 765 if ((xm->xm_mode & PERIPH_CAP_SYNC) != 0 && 766 (tcs->flags & FLAG_NO_SYNC) == 0) 767 tcs->flags &= ~FLAG_SYNC_DONE; 768 769 /* 770 * If we're not going to negotiate, send the 771 * notification now, since it won't happen later. 772 */ 773 if ((tcs->flags & (FLAG_WIDE_DONE|FLAG_SYNC_DONE)) == 774 (FLAG_WIDE_DONE|FLAG_SYNC_DONE)) 775 iha_update_xfer_mode(sc, xm->xm_target); 776 777 return; 778 } 779 } 780 } 781 782 void 783 iha_update_xfer_mode(sc, target) 784 struct iha_softc *sc; 785 int target; 786 { 787 struct tcs *tcs = &sc->sc_tcs[target]; 788 struct scsipi_xfer_mode xm; 789 790 xm.xm_target = target; 791 xm.xm_mode = 0; 792 xm.xm_period = 0; 793 xm.xm_offset = 0; 794 795 if (tcs->syncm & PERIOD_WIDE_SCSI) 796 xm.xm_mode |= PERIPH_CAP_WIDE16; 797 798 if (tcs->period) { 799 xm.xm_mode |= PERIPH_CAP_SYNC; 800 xm.xm_period = tcs->period; 801 xm.xm_offset = tcs->offset; 802 } 803 804 scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, &xm); 805 } 806 807 static void 808 iha_reset_scsi_bus(sc) 809 struct iha_softc *sc; 810 { 811 struct iha_scb *scb; 812 struct tcs *tcs; 813 int i, s; 814 815 s = splbio(); 816 817 iha_reset_dma(sc); 818 819 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) 820 switch (scb->status) { 821 case STATUS_BUSY: 822 iha_append_done_scb(sc, scb, HOST_SCSI_RST); 823 break; 824 825 case STATUS_SELECT: 826 iha_push_pend_scb(sc, scb); 827 break; 828 829 default: 830 break; 831 } 832 833 for (i = 0, tcs = sc->sc_tcs; i < IHA_MAX_TARGETS; i++, tcs++) 834 iha_reset_tcs(tcs, 
	    sc->sc_sconf1);

	splx(s);
}

/*
 * iha_reset_chip - hard-reset the tulip SCSI core, spin until the
 *		    chip reports the reset complete, drop all scsi
 *		    signals, and clear any latched interrupt.
 *
 * NOTE(review): the prototype above declares this static but the
 * definition omits the keyword; linkage is still internal via the
 * earlier declaration.
 */
void
iha_reset_chip(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/* reset tulip chip */

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSCSI);

	/* busy-wait for reset-done; no timeout, presumably the chip
	 * always asserts SRSTD promptly -- TODO confirm */
	do {
		sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT);
	} while ((sc->sc_sistat & SRSTD) == 0);

	/* deassert all scsi output signals */
	iha_set_ssig(sc, 0, 0);

	bus_space_read_1(iot, ioh, TUL_SISTAT); /* Clear any active interrupt*/
}

/*
 * iha_reset_dma - abort any active DMA xfer, reset tulip FIFO.
 */
static void
iha_reset_dma(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
		/* if DMA xfer is pending, abort DMA xfer */
		bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
		/* wait Abort DMA xfer done (unbounded busy-wait) */
		while ((bus_space_read_1(iot, ioh, TUL_ISTUS0) & DABT) == 0)
			;
	}

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
}

/*
 * iha_reset_tcs - reset the target control structure pointed
 *		   to by tcs to default values.  tcs flags
 *		   only has the negotiation done bits reset as
 *		   the other bits are fixed at initialization.
 */
static void
iha_reset_tcs(tcs, config0)
	struct tcs *tcs;
	u_int8_t config0;
{

	/* force fresh sync/wide negotiation on next contact */
	tcs->flags &= ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE);
	tcs->period = 0;
	tcs->offset = 0;
	tcs->tagcnt = 0;
	tcs->ntagscb = NULL;
	tcs->syncm = 0;
	tcs->sconfig0 = config0;
}

/*
 * iha_main - process the active SCB, taking one off pending and making it
 *	      active if necessary, and any done SCB's created as
 *	      a result until there are no interrupts pending and no pending
 *	      SCB's that can be started.
906 */ 907 static void 908 iha_main(sc) 909 struct iha_softc *sc; 910 { 911 bus_space_tag_t iot = sc->sc_iot; 912 bus_space_handle_t ioh =sc->sc_ioh; 913 struct iha_scb *scb; 914 915 for (;;) { 916 iha_scsi(sc); 917 918 while ((scb = iha_pop_done_scb(sc)) != NULL) 919 iha_done_scb(sc, scb); 920 921 /* 922 * If there are no interrupts pending, or we can't start 923 * a pending sc, break out of the for(;;). Otherwise 924 * continue the good work with another call to 925 * iha_scsi(). 926 */ 927 if (((bus_space_read_1(iot, ioh, TUL_STAT0) & INTPD) == 0) 928 && (iha_find_pend_scb(sc) == NULL)) 929 break; 930 } 931 } 932 933 /* 934 * iha_scsi - service any outstanding interrupts. If there are none, try to 935 * start another SCB currently in the pending queue. 936 */ 937 static void 938 iha_scsi(sc) 939 struct iha_softc *sc; 940 { 941 bus_space_tag_t iot = sc->sc_iot; 942 bus_space_handle_t ioh = sc->sc_ioh; 943 struct iha_scb *scb; 944 struct tcs *tcs; 945 u_int8_t stat; 946 947 /* service pending interrupts asap */ 948 949 stat = bus_space_read_1(iot, ioh, TUL_STAT0); 950 if ((stat & INTPD) != 0) { 951 sc->sc_status0 = stat; 952 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1); 953 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT); 954 955 sc->sc_phase = sc->sc_status0 & PH_MASK; 956 957 if ((sc->sc_sistat & SRSTD) != 0) { 958 iha_reset_scsi_bus(sc); 959 return; 960 } 961 962 if ((sc->sc_sistat & RSELED) != 0) { 963 iha_resel(sc); 964 return; 965 } 966 967 if ((sc->sc_sistat & (STIMEO | DISCD)) != 0) { 968 iha_busfree(sc); 969 return; 970 } 971 972 if ((sc->sc_sistat & (SCMDN | SBSRV)) != 0) { 973 iha_next_state(sc); 974 return; 975 } 976 977 if ((sc->sc_sistat & SELED) != 0) 978 iha_set_ssig(sc, 0, 0); 979 } 980 981 /* 982 * There were no interrupts pending which required action elsewhere, so 983 * see if it is possible to start the selection phase on a pending SCB 984 */ 985 if ((scb = iha_find_pend_scb(sc)) == NULL) 986 return; 987 988 tcs = scb->tcs; 
989 990 /* program HBA's SCSI ID & target SCSI ID */ 991 bus_space_write_1(iot, ioh, TUL_SID, (sc->sc_id << 4) | scb->target); 992 993 if ((scb->xs->xs_control & XS_CTL_RESET) == 0) { 994 bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm); 995 996 if ((tcs->flags & FLAG_NO_NEG_SYNC) == 0 || 997 (tcs->flags & FLAG_NO_NEG_WIDE) == 0) 998 iha_select(sc, scb, SELATNSTOP); 999 1000 else if (scb->scb_tagmsg != 0) 1001 iha_select(sc, scb, SEL_ATN3); 1002 1003 else 1004 iha_select(sc, scb, SEL_ATN); 1005 1006 } else { 1007 iha_select(sc, scb, SELATNSTOP); 1008 scb->nextstat = 8; 1009 } 1010 1011 if ((scb->xs->xs_control & XS_CTL_POLL) != 0) { 1012 int timeout; 1013 for (timeout = scb->xs->timeout; timeout > 0; timeout--) { 1014 if (iha_wait(sc, NO_OP) == -1) 1015 break; 1016 if (iha_next_state(sc) == -1) 1017 break; 1018 delay(1000); /* Only happens in boot, so it's ok */ 1019 } 1020 1021 /* 1022 * Since done queue processing not done until AFTER this 1023 * function returns, scb is on the done queue, not 1024 * the free queue at this point and still has valid data 1025 * 1026 * Conversely, xs->error has not been set yet 1027 */ 1028 if (timeout == 0) 1029 iha_timeout(scb); 1030 } 1031 } 1032 1033 static void 1034 iha_select(sc, scb, select_type) 1035 struct iha_softc *sc; 1036 struct iha_scb *scb; 1037 u_int8_t select_type; 1038 { 1039 bus_space_tag_t iot = sc->sc_iot; 1040 bus_space_handle_t ioh = sc->sc_ioh; 1041 1042 switch (select_type) { 1043 case SEL_ATN: 1044 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id); 1045 bus_space_write_multi_1(iot, ioh, TUL_SFIFO, 1046 scb->cmd, scb->cmdlen); 1047 1048 scb->nextstat = 2; 1049 break; 1050 1051 case SELATNSTOP: 1052 scb->nextstat = 1; 1053 break; 1054 1055 case SEL_ATN3: 1056 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id); 1057 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagmsg); 1058 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_tagid); 1059 1060 bus_space_write_multi_1(iot, ioh, TUL_SFIFO, scb->cmd, 1061 
scb->cmdlen); 1062 1063 scb->nextstat = 2; 1064 break; 1065 1066 default: 1067 printf("[debug] iha_select() - unknown select type = 0x%02x\n", 1068 select_type); 1069 return; 1070 } 1071 1072 iha_del_pend_scb(sc, scb); 1073 scb->status = STATUS_SELECT; 1074 1075 sc->sc_actscb = scb; 1076 1077 bus_space_write_1(iot, ioh, TUL_SCMD, select_type); 1078 } 1079 1080 /* 1081 * iha_wait - wait for an interrupt to service or a SCSI bus phase change 1082 * after writing the supplied command to the tulip chip. If 1083 * the command is NO_OP, skip the command writing. 1084 */ 1085 static int 1086 iha_wait(sc, cmd) 1087 struct iha_softc *sc; 1088 u_int8_t cmd; 1089 { 1090 bus_space_tag_t iot = sc->sc_iot; 1091 bus_space_handle_t ioh = sc->sc_ioh; 1092 1093 if (cmd != NO_OP) 1094 bus_space_write_1(iot, ioh, TUL_SCMD, cmd); 1095 1096 /* 1097 * Have to do this here, in addition to in iha_isr, because 1098 * interrupts might be turned off when we get here. 1099 */ 1100 do { 1101 sc->sc_status0 = bus_space_read_1(iot, ioh, TUL_STAT0); 1102 } while ((sc->sc_status0 & INTPD) == 0); 1103 1104 sc->sc_status1 = bus_space_read_1(iot, ioh, TUL_STAT1); 1105 sc->sc_sistat = bus_space_read_1(iot, ioh, TUL_SISTAT); 1106 1107 sc->sc_phase = sc->sc_status0 & PH_MASK; 1108 1109 if ((sc->sc_sistat & SRSTD) != 0) { 1110 /* SCSI bus reset interrupt */ 1111 iha_reset_scsi_bus(sc); 1112 return (-1); 1113 } 1114 1115 if ((sc->sc_sistat & RSELED) != 0) 1116 /* Reselection interrupt */ 1117 return (iha_resel(sc)); 1118 1119 if ((sc->sc_sistat & STIMEO) != 0) { 1120 /* selected/reselected timeout interrupt */ 1121 iha_busfree(sc); 1122 return (-1); 1123 } 1124 1125 if ((sc->sc_sistat & DISCD) != 0) { 1126 /* BUS disconnection interrupt */ 1127 if ((sc->sc_flags & FLAG_EXPECT_DONE_DISC) != 0) { 1128 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1129 bus_space_write_1(iot, ioh, TUL_SCONFIG0, 1130 SCONFIG0DEFAULT); 1131 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL); 1132 iha_append_done_scb(sc, 
sc->sc_actscb, HOST_OK); 1133 sc->sc_flags &= ~FLAG_EXPECT_DONE_DISC; 1134 1135 } else if ((sc->sc_flags & FLAG_EXPECT_DISC) != 0) { 1136 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1137 bus_space_write_1(iot, ioh, TUL_SCONFIG0, 1138 SCONFIG0DEFAULT); 1139 bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL); 1140 sc->sc_actscb = NULL; 1141 sc->sc_flags &= ~FLAG_EXPECT_DISC; 1142 1143 } else 1144 iha_busfree(sc); 1145 1146 return (-1); 1147 } 1148 1149 return (sc->sc_phase); 1150 } 1151 1152 static void 1153 iha_exec_scb(sc, scb) 1154 struct iha_softc *sc; 1155 struct iha_scb *scb; 1156 { 1157 bus_space_tag_t iot; 1158 bus_space_handle_t ioh; 1159 bus_dmamap_t dm; 1160 struct scsipi_xfer *xs = scb->xs; 1161 int nseg, s; 1162 1163 dm = scb->dmap; 1164 nseg = dm->dm_nsegs; 1165 1166 if (nseg > 1) { 1167 struct iha_sg_element *sg = scb->sglist; 1168 int i; 1169 1170 for (i = 0; i < nseg; i++) { 1171 sg[i].sg_len = htole32(dm->dm_segs[i].ds_len); 1172 sg[i].sg_addr = htole32(dm->dm_segs[i].ds_addr); 1173 } 1174 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1175 scb->sgoffset, IHA_SG_SIZE, 1176 BUS_DMASYNC_PREWRITE); 1177 1178 scb->flags |= FLAG_SG; 1179 scb->sg_size = scb->sg_max = nseg; 1180 scb->sg_index = 0; 1181 1182 scb->bufaddr = scb->sg_addr; 1183 } else 1184 scb->bufaddr = dm->dm_segs[0].ds_addr; 1185 1186 if ((xs->xs_control & XS_CTL_POLL) == 0) { 1187 int timeout = mstohz(xs->timeout); 1188 if (timeout == 0) 1189 timeout = 1; 1190 callout_reset(&xs->xs_callout, timeout, iha_timeout, scb); 1191 } 1192 1193 s = splbio(); 1194 1195 if (((scb->xs->xs_control & XS_RESET) != 0) || 1196 (scb->cmd[0] == REQUEST_SENSE)) 1197 iha_push_pend_scb(sc, scb); /* Insert SCB at head of Pend */ 1198 else 1199 iha_append_pend_scb(sc, scb); /* Append SCB to tail of Pend */ 1200 1201 /* 1202 * Run through iha_main() to ensure something is active, if 1203 * only this new SCB. 
1204 */ 1205 if (sc->sc_semaph != SEMAPH_IN_MAIN) { 1206 iot = sc->sc_iot; 1207 ioh = sc->sc_ioh; 1208 1209 bus_space_write_1(iot, ioh, TUL_IMSK, MASK_ALL); 1210 sc->sc_semaph = SEMAPH_IN_MAIN;; 1211 1212 splx(s); 1213 iha_main(sc); 1214 s = splbio(); 1215 1216 sc->sc_semaph = ~SEMAPH_IN_MAIN;; 1217 bus_space_write_1(iot, ioh, TUL_IMSK, (MASK_ALL & ~MSCMP)); 1218 } 1219 1220 splx(s); 1221 } 1222 1223 /* 1224 * iha_done_scb - We have a scb which has been processed by the 1225 * adaptor, now we look to see how the operation went. 1226 */ 1227 static void 1228 iha_done_scb(sc, scb) 1229 struct iha_softc *sc; 1230 struct iha_scb *scb; 1231 { 1232 struct scsipi_xfer *xs = scb->xs; 1233 1234 if (xs != NULL) { 1235 /* Cancel the timeout. */ 1236 callout_stop(&xs->xs_callout); 1237 1238 if (scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) { 1239 bus_dmamap_sync(sc->sc_dmat, scb->dmap, 1240 0, scb->dmap->dm_mapsize, 1241 (scb->flags & FLAG_DATAIN) ? 1242 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1243 bus_dmamap_unload(sc->sc_dmat, scb->dmap); 1244 } 1245 1246 xs->status = scb->ta_stat; 1247 1248 switch (scb->ha_stat) { 1249 case HOST_OK: 1250 switch (scb->ta_stat) { 1251 case SCSI_OK: 1252 case SCSI_CONDITION_MET: 1253 case SCSI_INTERM: 1254 case SCSI_INTERM_COND_MET: 1255 xs->resid = scb->buflen; 1256 xs->error = XS_NOERROR; 1257 if ((scb->flags & FLAG_RSENS) != 0) 1258 xs->error = XS_SENSE; 1259 break; 1260 1261 case SCSI_RESV_CONFLICT: 1262 case SCSI_BUSY: 1263 case SCSI_QUEUE_FULL: 1264 xs->error = XS_BUSY; 1265 break; 1266 1267 case SCSI_TERMINATED: 1268 case SCSI_ACA_ACTIVE: 1269 case SCSI_CHECK: 1270 scb->tcs->flags &= 1271 ~(FLAG_SYNC_DONE | FLAG_WIDE_DONE); 1272 1273 if ((scb->flags & FLAG_RSENS) != 0 || 1274 iha_push_sense_request(sc, scb) != 0) { 1275 scb->flags &= ~FLAG_RSENS; 1276 printf("%s: request sense failed\n", 1277 sc->sc_dev.dv_xname); 1278 xs->error = XS_DRIVER_STUFFUP; 1279 break; 1280 } 1281 1282 xs->error = XS_SENSE; 1283 return; 1284 1285 default: 
1286 xs->error = XS_DRIVER_STUFFUP; 1287 break; 1288 } 1289 break; 1290 1291 case HOST_SEL_TOUT: 1292 xs->error = XS_SELTIMEOUT; 1293 break; 1294 1295 case HOST_SCSI_RST: 1296 case HOST_DEV_RST: 1297 xs->error = XS_RESET; 1298 break; 1299 1300 case HOST_SPERR: 1301 printf("%s: SCSI Parity error detected\n", 1302 sc->sc_dev.dv_xname); 1303 xs->error = XS_DRIVER_STUFFUP; 1304 break; 1305 1306 case HOST_TIMED_OUT: 1307 xs->error = XS_TIMEOUT; 1308 break; 1309 1310 case HOST_DO_DU: 1311 case HOST_BAD_PHAS: 1312 default: 1313 xs->error = XS_DRIVER_STUFFUP; 1314 break; 1315 } 1316 1317 scsipi_done(xs); 1318 } 1319 1320 iha_append_free_scb(sc, scb); 1321 } 1322 1323 /* 1324 * iha_push_sense_request - obtain auto sense data by pushing the 1325 * SCB needing it back onto the pending 1326 * queue with a REQUEST_SENSE CDB. 1327 */ 1328 static int 1329 iha_push_sense_request(sc, scb) 1330 struct iha_softc *sc; 1331 struct iha_scb *scb; 1332 { 1333 struct scsipi_xfer *xs = scb->xs; 1334 struct scsipi_periph *periph = xs->xs_periph; 1335 struct scsipi_sense *ss = (struct scsipi_sense *)scb->cmd; 1336 int lun = periph->periph_lun; 1337 int err; 1338 1339 ss->opcode = REQUEST_SENSE; 1340 ss->byte2 = lun << SCSI_CMD_LUN_SHIFT; 1341 ss->unused[0] = ss->unused[1] = 0; 1342 ss->length = sizeof(struct scsipi_sense_data); 1343 ss->control = 0; 1344 1345 scb->flags = FLAG_RSENS | FLAG_DATAIN; 1346 1347 scb->scb_id &= ~MSG_IDENTIFY_DISCFLAG; 1348 1349 scb->scb_tagmsg = 0; 1350 scb->ta_stat = SCSI_OK; 1351 1352 scb->cmdlen = sizeof(struct scsipi_sense); 1353 scb->buflen = ss->length; 1354 1355 err = bus_dmamap_load(sc->sc_dmat, scb->dmap, 1356 &xs->sense.scsi_sense, scb->buflen, NULL, 1357 BUS_DMA_READ|BUS_DMA_NOWAIT); 1358 if (err != 0) { 1359 printf("iha_push_sense_request: cannot bus_dmamap_load()\n"); 1360 xs->error = XS_DRIVER_STUFFUP; 1361 return 1; 1362 } 1363 bus_dmamap_sync(sc->sc_dmat, scb->dmap, 1364 0, scb->buflen, BUS_DMASYNC_PREREAD); 1365 1366 /* XXX What about queued 
command? */ 1367 iha_exec_scb(sc, scb); 1368 1369 return 0; 1370 } 1371 1372 static void 1373 iha_timeout(arg) 1374 void *arg; 1375 { 1376 struct iha_scb *scb = (struct iha_scb *)arg; 1377 struct scsipi_xfer *xs = scb->xs; 1378 struct scsipi_periph *periph = xs->xs_periph; 1379 struct iha_softc *sc; 1380 1381 sc = (void *)periph->periph_channel->chan_adapter->adapt_dev; 1382 1383 if (xs == NULL) 1384 printf("[debug] iha_timeout called with xs == NULL\n"); 1385 1386 else { 1387 scsipi_printaddr(periph); 1388 printf("SCSI OpCode 0x%02x timed out\n", xs->cmd->opcode); 1389 1390 iha_abort_xs(sc, xs, HOST_TIMED_OUT); 1391 } 1392 } 1393 1394 /* 1395 * iha_abort_xs - find the SCB associated with the supplied xs and 1396 * stop all processing on it, moving it to the done 1397 * queue with the supplied host status value. 1398 */ 1399 static void 1400 iha_abort_xs(sc, xs, hastat) 1401 struct iha_softc *sc; 1402 struct scsipi_xfer *xs; 1403 u_int8_t hastat; 1404 { 1405 struct iha_scb *scb; 1406 int i, s; 1407 1408 s = splbio(); 1409 1410 /* Check the pending queue for the SCB pointing to xs */ 1411 1412 TAILQ_FOREACH(scb, &sc->sc_pendscb, chain) 1413 if (scb->xs == xs) { 1414 iha_del_pend_scb(sc, scb); 1415 iha_append_done_scb(sc, scb, hastat); 1416 splx(s); 1417 return; 1418 } 1419 1420 /* 1421 * If that didn't work, check all BUSY/SELECTING SCB's for one 1422 * pointing to xs 1423 */ 1424 1425 for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++) 1426 switch (scb->status) { 1427 case STATUS_BUSY: 1428 case STATUS_SELECT: 1429 if (scb->xs == xs) { 1430 iha_append_done_scb(sc, scb, hastat); 1431 splx(s); 1432 return; 1433 } 1434 break; 1435 default: 1436 break; 1437 } 1438 1439 splx(s); 1440 } 1441 1442 /* 1443 * iha_data_over_run - return HOST_OK for all SCSI opcodes where BufLen 1444 * is an 'Allocation Length'. All other SCSI opcodes 1445 * get HOST_DO_DU as they SHOULD have xferred all the 1446 * data requested. 
1447 * 1448 * The list of opcodes using 'Allocation Length' was 1449 * found by scanning all the SCSI-3 T10 drafts. See 1450 * www.t10.org for the curious with a .pdf reader. 1451 */ 1452 static u_int8_t 1453 iha_data_over_run(scb) 1454 struct iha_scb *scb; 1455 { 1456 switch (scb->cmd[0]) { 1457 case 0x03: /* Request Sense SPC-2 */ 1458 case 0x12: /* Inquiry SPC-2 */ 1459 case 0x1a: /* Mode Sense (6 byte version) SPC-2 */ 1460 case 0x1c: /* Receive Diagnostic Results SPC-2 */ 1461 case 0x23: /* Read Format Capacities MMC-2 */ 1462 case 0x29: /* Read Generation SBC */ 1463 case 0x34: /* Read Position SSC-2 */ 1464 case 0x37: /* Read Defect Data SBC */ 1465 case 0x3c: /* Read Buffer SPC-2 */ 1466 case 0x42: /* Read Sub Channel MMC-2 */ 1467 case 0x43: /* Read TOC/PMA/ATIP MMC */ 1468 1469 /* XXX - 2 with same opcode of 0x44? */ 1470 case 0x44: /* Read Header/Read Density Suprt MMC/SSC*/ 1471 1472 case 0x46: /* Get Configuration MMC-2 */ 1473 case 0x4a: /* Get Event/Status Notification MMC-2 */ 1474 case 0x4d: /* Log Sense SPC-2 */ 1475 case 0x51: /* Read Disc Information MMC */ 1476 case 0x52: /* Read Track Information MMC */ 1477 case 0x59: /* Read Master CUE MMC */ 1478 case 0x5a: /* Mode Sense (10 byte version) SPC-2 */ 1479 case 0x5c: /* Read Buffer Capacity MMC */ 1480 case 0x5e: /* Persistant Reserve In SPC-2 */ 1481 case 0x84: /* Receive Copy Results SPC-2 */ 1482 case 0xa0: /* Report LUNs SPC-2 */ 1483 case 0xa3: /* Various Report requests SBC-2/SCC-2*/ 1484 case 0xa4: /* Report Key MMC-2 */ 1485 case 0xad: /* Read DVD Structure MMC-2 */ 1486 case 0xb4: /* Read Element Status (Attached) SMC */ 1487 case 0xb5: /* Request Volume Element Address SMC */ 1488 case 0xb7: /* Read Defect Data (12 byte ver.) SBC */ 1489 case 0xb8: /* Read Element Status (Independ.) 
SMC */ 1490 case 0xba: /* Report Redundancy SCC-2 */ 1491 case 0xbd: /* Mechanism Status MMC */ 1492 case 0xbe: /* Report Basic Redundancy SCC-2 */ 1493 1494 return (HOST_OK); 1495 break; 1496 1497 default: 1498 return (HOST_DO_DU); 1499 break; 1500 } 1501 } 1502 1503 /* 1504 * iha_next_state - prcess the current SCB as requested in it's 1505 * nextstat member. 1506 */ 1507 static int 1508 iha_next_state(sc) 1509 struct iha_softc *sc; 1510 { 1511 1512 if (sc->sc_actscb == NULL) 1513 return (-1); 1514 1515 switch (sc->sc_actscb->nextstat) { 1516 case 1: 1517 if (iha_state_1(sc) == 3) 1518 goto state_3; 1519 break; 1520 1521 case 2: 1522 switch (iha_state_2(sc)) { 1523 case 3: 1524 goto state_3; 1525 case 4: 1526 goto state_4; 1527 default: 1528 break; 1529 } 1530 break; 1531 1532 case 3: 1533 state_3: 1534 if (iha_state_3(sc) == 4) 1535 goto state_4; 1536 break; 1537 1538 case 4: 1539 state_4: 1540 switch (iha_state_4(sc)) { 1541 case 0: 1542 return (0); 1543 case 6: 1544 goto state_6; 1545 default: 1546 break; 1547 } 1548 break; 1549 1550 case 5: 1551 switch (iha_state_5(sc)) { 1552 case 4: 1553 goto state_4; 1554 case 6: 1555 goto state_6; 1556 default: 1557 break; 1558 } 1559 break; 1560 1561 case 6: 1562 state_6: 1563 iha_state_6(sc); 1564 break; 1565 1566 case 8: 1567 iha_state_8(sc); 1568 break; 1569 1570 default: 1571 #ifdef IHA_DEBUG_STATE 1572 printf("[debug] -unknown state: %i-\n", 1573 sc->sc_actscb->nextstat); 1574 #endif 1575 iha_bad_seq(sc); 1576 break; 1577 } 1578 1579 return (-1); 1580 } 1581 1582 /* 1583 * iha_state_1 - selection is complete after a SELATNSTOP. If the target 1584 * has put the bus into MSG_OUT phase start wide/sync 1585 * negotiation. Otherwise clear the FIFO and go to state 3, 1586 * which will send the SCSI CDB to the target. 
1587 */ 1588 static int 1589 iha_state_1(sc) 1590 struct iha_softc *sc; 1591 { 1592 bus_space_tag_t iot = sc->sc_iot; 1593 bus_space_handle_t ioh = sc->sc_ioh; 1594 struct iha_scb *scb = sc->sc_actscb; 1595 struct tcs *tcs; 1596 int flags; 1597 1598 iha_mark_busy_scb(scb); 1599 1600 tcs = scb->tcs; 1601 1602 bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0); 1603 1604 /* 1605 * If we are in PHASE_MSG_OUT, send 1606 * a) IDENT message (with tags if appropriate) 1607 * b) WDTR if the target is configured to negotiate wide xfers 1608 * ** OR ** 1609 * c) SDTR if the target is configured to negotiate sync xfers 1610 * but not wide ones 1611 * 1612 * If we are NOT, then the target is not asking for anything but 1613 * the data/command, so go straight to state 3. 1614 */ 1615 if (sc->sc_phase == PHASE_MSG_OUT) { 1616 bus_space_write_1(iot, ioh, TUL_SCTRL1, (ESBUSIN | EHRSL)); 1617 bus_space_write_1(iot, ioh, TUL_SFIFO, scb->scb_id); 1618 1619 if (scb->scb_tagmsg != 0) { 1620 bus_space_write_1(iot, ioh, TUL_SFIFO, 1621 scb->scb_tagmsg); 1622 bus_space_write_1(iot, ioh, TUL_SFIFO, 1623 scb->scb_tagid); 1624 } 1625 1626 flags = tcs->flags; 1627 if ((flags & FLAG_NO_NEG_WIDE) == 0) { 1628 if (iha_msgout_wdtr(sc) == -1) 1629 return (-1); 1630 } else if ((flags & FLAG_NO_NEG_SYNC) == 0) { 1631 if (iha_msgout_sdtr(sc) == -1) 1632 return (-1); 1633 } 1634 1635 } else { 1636 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1637 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0); 1638 } 1639 1640 return (3); 1641 } 1642 1643 /* 1644 * iha_state_2 - selection is complete after a SEL_ATN or SEL_ATN3. If the SCSI 1645 * CDB has already been send, go to state 4 to start the data 1646 * xfer. Otherwise reset the FIFO and go to state 3, sending 1647 * the SCSI CDB. 
1648 */ 1649 static int 1650 iha_state_2(sc) 1651 struct iha_softc *sc; 1652 { 1653 bus_space_tag_t iot = sc->sc_iot; 1654 bus_space_handle_t ioh = sc->sc_ioh; 1655 struct iha_scb *scb = sc->sc_actscb; 1656 1657 iha_mark_busy_scb(scb); 1658 1659 bus_space_write_1(iot, ioh, TUL_SCONFIG0, scb->tcs->sconfig0); 1660 1661 if ((sc->sc_status1 & CPDNE) != 0) 1662 return (4); 1663 1664 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 1665 1666 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0); 1667 1668 return (3); 1669 } 1670 1671 /* 1672 * iha_state_3 - send the SCSI CDB to the target, processing any status 1673 * or other messages received until that is done or 1674 * abandoned. 1675 */ 1676 static int 1677 iha_state_3(sc) 1678 struct iha_softc *sc; 1679 { 1680 bus_space_tag_t iot = sc->sc_iot; 1681 bus_space_handle_t ioh = sc->sc_ioh; 1682 struct iha_scb *scb = sc->sc_actscb; 1683 int flags; 1684 1685 for (;;) { 1686 switch (sc->sc_phase) { 1687 case PHASE_CMD_OUT: 1688 bus_space_write_multi_1(iot, ioh, TUL_SFIFO, 1689 scb->cmd, scb->cmdlen); 1690 if (iha_wait(sc, XF_FIFO_OUT) == -1) 1691 return (-1); 1692 else if (sc->sc_phase == PHASE_CMD_OUT) { 1693 iha_bad_seq(sc); 1694 return (-1); 1695 } else 1696 return (4); 1697 1698 case PHASE_MSG_IN: 1699 scb->nextstat = 3; 1700 if (iha_msgin(sc) == -1) 1701 return (-1); 1702 break; 1703 1704 case PHASE_STATUS_IN: 1705 if (iha_status_msg(sc) == -1) 1706 return (-1); 1707 break; 1708 1709 case PHASE_MSG_OUT: 1710 flags = scb->tcs->flags; 1711 if ((flags & FLAG_NO_NEG_SYNC) != 0) { 1712 if (iha_msgout(sc, MSG_NOOP) == -1) 1713 return (-1); 1714 } else if (iha_msgout_sdtr(sc) == -1) 1715 return (-1); 1716 break; 1717 1718 default: 1719 printf("[debug] -s3- bad phase = %d\n", sc->sc_phase); 1720 iha_bad_seq(sc); 1721 return (-1); 1722 } 1723 } 1724 } 1725 1726 /* 1727 * iha_state_4 - start a data xfer. Handle any bus state 1728 * transitions until PHASE_DATA_IN/_OUT 1729 * or the attempt is abandoned. 
 *               If there is no data to xfer, go to state 6 and finish
 *               processing the current SCB.
 */
static int
iha_state_4(sc)
	struct iha_softc *sc;
{
	struct iha_scb *scb = sc->sc_actscb;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) ==
	    (FLAG_DATAIN | FLAG_DATAOUT))
		return (6); /* Both dir flags set => NO xfer was requested */

	for (;;) {
		if (scb->buflen == 0)
			return (6);

		switch (sc->sc_phase) {
		case PHASE_STATUS_IN:
			/* status before all data moved: possible over/underrun */
			if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
				scb->ha_stat = iha_data_over_run(scb);
			if ((iha_status_msg(sc)) == -1)
				return (-1);
			break;

		case PHASE_MSG_IN:
			scb->nextstat = 4;
			if (iha_msgin(sc) == -1)
				return (-1);
			break;

		case PHASE_MSG_OUT:
			if ((sc->sc_status0 & SPERR) != 0) {
				/* parity error: give up on the data xfer */
				scb->buflen = 0;
				scb->ha_stat = HOST_SPERR;
				if (iha_msgout(sc, MSG_INITIATOR_DET_ERR) == -1)
					return (-1);
				else
					return (6);
			} else {
				if (iha_msgout(sc, MSG_NOOP) == -1)
					return (-1);
			}
			break;

		case PHASE_DATA_IN:
			return (iha_xfer_data(sc, scb, FLAG_DATAIN));

		case PHASE_DATA_OUT:
			return (iha_xfer_data(sc, scb, FLAG_DATAOUT));

		default:
			iha_bad_seq(sc);
			return (-1);
		}
	}
}

/*
 * iha_state_5 - handle the partial or final completion of the current
 *               data xfer. If DMA is still active stop it. If there is
 *               more data to xfer, go to state 4 and start the xfer.
 *               If not go to state 6 and finish the SCB.
 */
static int
iha_state_5(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scb *scb = sc->sc_actscb;
	struct iha_sg_element *sg;
	u_int32_t cnt;
	u_int8_t period, stat;
	long xcnt;	/* cannot use unsigned!! see code: if (xcnt < 0) */
	int i;

	/* bytes the chip did NOT transfer */
	cnt = bus_space_read_4(iot, ioh, TUL_STCNT0) & TCNT;

	/*
	 * Stop any pending DMA activity and check for parity error.
	 */

	if ((bus_space_read_1(iot, ioh, TUL_DCMD) & XDIR) != 0) {
		/* Input Operation */
		if ((sc->sc_status0 & SPERR) != 0)
			scb->ha_stat = HOST_SPERR;

		if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
			/* stop the DMA xfer and wait until it has halted */
			bus_space_write_1(iot, ioh, TUL_DCTRL0,
			    bus_space_read_1(iot, ioh, TUL_DCTRL0) | SXSTP);
			while (bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND)
				;
		}

	} else {
		/* Output Operation */
		if ((sc->sc_status1 & SXCMP) == 0) {
			/* count bytes still sitting in the FIFO */
			period = scb->tcs->syncm;
			if ((period & PERIOD_WIDE_SCSI) != 0)
				cnt += (bus_space_read_1(iot, ioh,
				    TUL_SFIFOCNT) & FIFOC) * 2;
			else
				cnt += bus_space_read_1(iot, ioh,
				    TUL_SFIFOCNT) & FIFOC;
		}

		if ((bus_space_read_1(iot, ioh, TUL_ISTUS1) & XPEND) != 0) {
			/* abort the DMA xfer and wait for it to take */
			bus_space_write_1(iot, ioh, TUL_DCMD, ABTXFR);
			do
				stat = bus_space_read_1(iot, ioh, TUL_ISTUS0);
			while ((stat & DABT) == 0);
		}

		if ((cnt == 1) && (sc->sc_phase == PHASE_DATA_OUT)) {
			/* push the final odd byte out by hand */
			if (iha_wait(sc, XF_FIFO_OUT) == -1)
				return (-1);
			cnt = 0;

		} else if ((sc->sc_status1 & SXCMP) == 0)
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
	}

	if (cnt == 0) {
		scb->buflen = 0;
		return (6);
	}

	/* Update active data pointer and restart the I/O at the new point */

	xcnt = scb->buflen - cnt;	/* xcnt == bytes xferred */
	scb->buflen = cnt;		/* cnt  == bytes left    */

	if ((scb->flags & FLAG_SG) != 0) {
		/* walk the S/G list to find the segment the xfer stopped in */
		sg = &scb->sglist[scb->sg_index];
		for (i = scb->sg_index; i < scb->sg_max; sg++, i++) {
			xcnt -= le32toh(sg->sg_len);
			if (xcnt < 0) {
				/* adjust this segment past the xferred bytes */
				xcnt += le32toh(sg->sg_len);

				sg->sg_addr =
				    htole32(le32toh(sg->sg_addr) + xcnt);
				sg->sg_len =
				    htole32(le32toh(sg->sg_len) - xcnt);
				bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
				    scb->sgoffset, IHA_SG_SIZE,
				    BUS_DMASYNC_PREWRITE);

				scb->bufaddr += (i - scb->sg_index) *
				    sizeof(struct iha_sg_element);
				scb->sg_size = scb->sg_max - i;
				scb->sg_index = i;

				return (4);
			}
		}
		return (6);

	} else
		scb->bufaddr += xcnt;

	return (4);
}

/*
 * iha_state_6 - finish off the active scb (may require several
 *               iterations if PHASE_MSG_IN) and return -1 to indicate
 *               the bus is free.
 */
static int
iha_state_6(sc)
	struct iha_softc *sc;
{

	for (;;) {
		switch (sc->sc_phase) {
		case PHASE_STATUS_IN:
			if (iha_status_msg(sc) == -1)
				return (-1);
			break;

		case PHASE_MSG_IN:
			sc->sc_actscb->nextstat = 6;
			if ((iha_msgin(sc)) == -1)
				return (-1);
			break;

		case PHASE_MSG_OUT:
			if ((iha_msgout(sc, MSG_NOOP)) == -1)
				return (-1);
			break;

		case PHASE_DATA_IN:
			if (iha_xpad_in(sc) == -1)
				return (-1);
			break;

		case PHASE_DATA_OUT:
			if (iha_xpad_out(sc) == -1)
				return (-1);
			break;

		default:
			iha_bad_seq(sc);
			return (-1);
		}
	}
}

/*
 * iha_state_8 - reset the active device and all busy SCBs using it
 */
static int
iha_state_8(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scb *scb;
	int i;
	u_int8_t tar;

	if (sc->sc_phase == PHASE_MSG_OUT) {
		bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_BUS_DEV_RESET);

		scb = sc->sc_actscb;

		/* This SCB finished correctly -- resetting the device */
		iha_append_done_scb(sc, scb, HOST_OK);

		iha_reset_tcs(scb->tcs, sc->sc_sconf1);

		tar = scb->target;
		for (i = 0, scb = sc->sc_scb; i < IHA_MAX_SCB; i++, scb++)
			if (scb->target == tar)
				switch (scb->status) {
				case STATUS_BUSY:
					iha_append_done_scb(sc,
					    scb, HOST_DEV_RST);
					break;

				case STATUS_SELECT:
					/* not started yet: back to pending */
					iha_push_pend_scb(sc, scb);
					break;

				default:
					break;
				}

		sc->sc_flags |= FLAG_EXPECT_DISC;

		if (iha_wait(sc, XF_FIFO_OUT) == -1)
			return (-1);
	}

	iha_bad_seq(sc);
	return (-1);
}

/*
 * iha_xfer_data - initiate the DMA xfer of the data
 */
static int
iha_xfer_data(sc, scb, direction)
	struct iha_softc *sc;
	struct iha_scb *scb;
	int direction;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t xferlen;
	u_int8_t xfercmd;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != direction)
		return (6); /* wrong direction, abandon I/O */

	bus_space_write_4(iot, ioh, TUL_STCNT0, scb->buflen);

	xfercmd = STRXFR;
	if (direction == FLAG_DATAIN)
		xfercmd |= XDIR;

	if (scb->flags & FLAG_SG) {
		/* DMA engine walks the S/G list itself */
		xferlen = scb->sg_size * sizeof(struct iha_sg_element);
		xfercmd |= SGXFR;
	} else
		xferlen = scb->buflen;

	bus_space_write_4(iot, ioh, TUL_DXC, xferlen);
	bus_space_write_4(iot, ioh, TUL_DXPA, scb->bufaddr);
	bus_space_write_1(iot, ioh, TUL_DCMD, xfercmd);

	bus_space_write_1(iot, ioh, TUL_SCMD,
	    (direction == FLAG_DATAIN) ? XF_DMA_IN : XF_DMA_OUT);

	scb->nextstat = 5;

	return (0);
}

/*
 * iha_xpad_in - pad an unexpected DATA IN phase, reading and
 *               discarding one byte (two if wide) at a time until
 *               the phase changes.
 */
static int
iha_xpad_in(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scb *scb = sc->sc_actscb;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
		scb->ha_stat = HOST_DO_DU;

	for (;;) {
		if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
			bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
		else
			bus_space_write_4(iot, ioh, TUL_STCNT0, 1);

		switch (iha_wait(sc, XF_FIFO_IN)) {
		case -1:
			return (-1);

		case PHASE_DATA_IN:
			/* discard the padded byte */
			bus_space_read_1(iot, ioh, TUL_SFIFO);
			break;

		default:
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			return (6);
		}
	}
}

/*
 * iha_xpad_out - pad an unexpected DATA OUT phase, writing zero
 *                bytes one (two if wide) at a time until the phase
 *                changes.
 */
static int
iha_xpad_out(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scb *scb = sc->sc_actscb;

	if ((scb->flags & (FLAG_DATAIN | FLAG_DATAOUT)) != 0)
		scb->ha_stat = HOST_DO_DU;

	for (;;) {
		if ((scb->tcs->syncm & PERIOD_WIDE_SCSI) != 0)
			bus_space_write_4(iot, ioh, TUL_STCNT0, 2);
		else
			bus_space_write_4(iot, ioh, TUL_STCNT0, 1);

		bus_space_write_1(iot, ioh, TUL_SFIFO, 0);

		switch (iha_wait(sc, XF_FIFO_OUT)) {
		case -1:
			return (-1);

		case PHASE_DATA_OUT:
			break;

		default:
			/* Disable wide CPU to allow read 16 bits */
			bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			return (6);
		}
	}
}

/*
 * iha_status_msg - read the target's status byte and the message
 *                  that follows, handling parity errors and the
 *                  command complete / linked command messages.
 */
static int
iha_status_msg(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scb *scb;
	u_int8_t msg;
	int phase;

	if ((phase = iha_wait(sc, CMD_COMP)) == -1)
		return (-1);

	scb = sc->sc_actscb;

	/* CMD_COMP leaves the status byte in the FIFO */
	scb->ta_stat = bus_space_read_1(iot, ioh, TUL_SFIFO);

	if (phase == PHASE_MSG_OUT) {
		if ((sc->sc_status0 & SPERR) == 0)
			bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_NOOP);
		else
			bus_space_write_1(iot, ioh, TUL_SFIFO,
			    MSG_PARITY_ERROR);

		return (iha_wait(sc, XF_FIFO_OUT));

	} else if (phase == PHASE_MSG_IN) {
		msg = bus_space_read_1(iot, ioh, TUL_SFIFO);

		if ((sc->sc_status0 & SPERR) != 0)
			/* ask the target to resend the message */
			switch (iha_wait(sc, MSG_ACCEPT)) {
			case -1:
				return (-1);
			case PHASE_MSG_OUT:
				bus_space_write_1(iot, ioh, TUL_SFIFO,
				    MSG_PARITY_ERROR);
				return (iha_wait(sc, XF_FIFO_OUT));
			default:
				iha_bad_seq(sc);
				return (-1);
			}

		if (msg == MSG_CMDCOMPLETE) {
			if ((scb->ta_stat &
			    (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM) {
				iha_bad_seq(sc);
				return (-1);
			}
			sc->sc_flags |= FLAG_EXPECT_DONE_DISC;
			bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
			return (iha_wait(sc, MSG_ACCEPT));
		}

		if ((msg == MSG_LINK_CMD_COMPLETE)
		    || (msg == MSG_LINK_CMD_COMPLETEF)) {
			if ((scb->ta_stat &
			    (SCSI_INTERM | SCSI_BUSY)) == SCSI_INTERM)
				return (iha_wait(sc, MSG_ACCEPT));
		}
	}

	iha_bad_seq(sc);
	return (-1);
}

/*
 * iha_busfree - SCSI bus free detected as a result of a TIMEOUT or
 *               DISCONNECT interrupt. Reset the tulip FIFO and
 *               SCONFIG0 and enable hardware reselect. Move any active
 *               SCB to sc_donescb list. Return an appropriate host status
 *               if an I/O was active.
 */
static void
iha_busfree(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scb *scb;

	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
	bus_space_write_1(iot, ioh, TUL_SCONFIG0, SCONFIG0DEFAULT);
	bus_space_write_1(iot, ioh, TUL_SCTRL1, EHRSL);

	scb = sc->sc_actscb;

	if (scb != NULL) {
		if (scb->status == STATUS_SELECT)
			/* selection timeout */
			iha_append_done_scb(sc, scb, HOST_SEL_TOUT);
		else
			/* Unexpected bus free */
			iha_append_done_scb(sc, scb, HOST_BAD_PHAS);
	}
}

/*
 * iha_resel - handle a detected SCSI bus reselection request.
 */
static int
iha_resel(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct iha_scb *scb;
	struct tcs *tcs;
	u_int8_t tag, target, lun, msg, abortmsg;

	if (sc->sc_actscb != NULL) {
		/* the interrupted selection goes back on the pending queue */
		if ((sc->sc_actscb->status == STATUS_SELECT))
			iha_push_pend_scb(sc, sc->sc_actscb);
		sc->sc_actscb = NULL;
	}

	target = bus_space_read_1(iot, ioh, TUL_SBID);
	lun = bus_space_read_1(iot, ioh, TUL_SALVC) & IHA_MSG_IDENTIFY_LUNMASK;

	tcs = &sc->sc_tcs[target];

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
	bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);

	abortmsg = MSG_ABORT; /* until a valid tag has been obtained */

	if (tcs->ntagscb != NULL)
		/* There is a non-tagged I/O active on the target */
		scb = tcs->ntagscb;

	else {
		/*
		 * Since there is no active non-tagged operation
		 * read the tag type, the tag itself, and find
		 * the appropriate scb by indexing sc_scb with
		 * the tag.
2232 */ 2233 2234 switch (iha_wait(sc, MSG_ACCEPT)) { 2235 case -1: 2236 return (-1); 2237 case PHASE_MSG_IN: 2238 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 2239 if ((iha_wait(sc, XF_FIFO_IN)) == -1) 2240 return (-1); 2241 break; 2242 default: 2243 goto abort; 2244 } 2245 2246 msg = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag Msg */ 2247 2248 if ((msg < MSG_SIMPLE_Q_TAG) || (msg > MSG_ORDERED_Q_TAG)) 2249 goto abort; 2250 2251 switch (iha_wait(sc, MSG_ACCEPT)) { 2252 case -1: 2253 return (-1); 2254 case PHASE_MSG_IN: 2255 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 2256 if ((iha_wait(sc, XF_FIFO_IN)) == -1) 2257 return (-1); 2258 break; 2259 default: 2260 goto abort; 2261 } 2262 2263 tag = bus_space_read_1(iot, ioh, TUL_SFIFO); /* Read Tag ID */ 2264 scb = &sc->sc_scb[tag]; 2265 2266 abortmsg = MSG_ABORT_TAG; /* Now that we have valdid tag! */ 2267 } 2268 2269 if ((scb->target != target) 2270 || (scb->lun != lun) 2271 || (scb->status != STATUS_BUSY)) { 2272 abort: 2273 iha_msgout_abort(sc, abortmsg); 2274 return (-1); 2275 } 2276 2277 sc->sc_actscb = scb; 2278 2279 if (iha_wait(sc, MSG_ACCEPT) == -1) 2280 return (-1); 2281 2282 return (iha_next_state(sc)); 2283 } 2284 2285 static int 2286 iha_msgin(sc) 2287 struct iha_softc *sc; 2288 { 2289 bus_space_tag_t iot = sc->sc_iot; 2290 bus_space_handle_t ioh = sc->sc_ioh; 2291 int flags; 2292 int phase; 2293 u_int8_t msg; 2294 2295 for (;;) { 2296 if ((bus_space_read_1(iot, ioh, TUL_SFIFOCNT) & FIFOC) > 0) 2297 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 2298 2299 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 2300 2301 phase = iha_wait(sc, XF_FIFO_IN); 2302 msg = bus_space_read_1(iot, ioh, TUL_SFIFO); 2303 2304 switch (msg) { 2305 case MSG_DISCONNECT: 2306 sc->sc_flags |= FLAG_EXPECT_DISC; 2307 if (iha_wait(sc, MSG_ACCEPT) != -1) 2308 iha_bad_seq(sc); 2309 phase = -1; 2310 break; 2311 case MSG_SAVEDATAPOINTER: 2312 case MSG_RESTOREPOINTERS: 2313 case MSG_NOOP: 2314 phase = iha_wait(sc, MSG_ACCEPT); 2315 break; 
2316 case MSG_MESSAGE_REJECT: 2317 /* XXX - need to clear FIFO like other 'Clear ATN'?*/ 2318 iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0); 2319 flags = sc->sc_actscb->tcs->flags; 2320 if ((flags & FLAG_NO_NEG_SYNC) == 0) 2321 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2322 phase = iha_wait(sc, MSG_ACCEPT); 2323 break; 2324 case MSG_EXTENDED: 2325 phase = iha_msgin_extended(sc); 2326 break; 2327 case MSG_IGN_WIDE_RESIDUE: 2328 phase = iha_msgin_ignore_wid_resid(sc); 2329 break; 2330 case MSG_CMDCOMPLETE: 2331 sc->sc_flags |= FLAG_EXPECT_DONE_DISC; 2332 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 2333 phase = iha_wait(sc, MSG_ACCEPT); 2334 if (phase != -1) { 2335 iha_bad_seq(sc); 2336 return (-1); 2337 } 2338 break; 2339 default: 2340 printf("[debug] iha_msgin: bad msg type: %d\n", msg); 2341 phase = iha_msgout_reject(sc); 2342 break; 2343 } 2344 2345 if (phase != PHASE_MSG_IN) 2346 return (phase); 2347 } 2348 /* NOTREACHED */ 2349 } 2350 2351 static int 2352 iha_msgin_extended(sc) 2353 struct iha_softc *sc; 2354 { 2355 bus_space_tag_t iot = sc->sc_iot; 2356 bus_space_handle_t ioh = sc->sc_ioh; 2357 int flags, i, phase, msglen, msgcode; 2358 2359 /* 2360 * XXX - can we just stop reading and reject, or do we have to 2361 * read all input, discarding the excess, and then reject 2362 */ 2363 for (i = 0; i < IHA_MAX_EXTENDED_MSG; i++) { 2364 phase = iha_wait(sc, MSG_ACCEPT); 2365 2366 if (phase != PHASE_MSG_IN) 2367 return (phase); 2368 2369 bus_space_write_4(iot, ioh, TUL_STCNT0, 1); 2370 2371 if (iha_wait(sc, XF_FIFO_IN) == -1) 2372 return (-1); 2373 2374 sc->sc_msg[i] = bus_space_read_1(iot, ioh, TUL_SFIFO); 2375 2376 if (sc->sc_msg[0] == i) 2377 break; 2378 } 2379 2380 msglen = sc->sc_msg[0]; 2381 msgcode = sc->sc_msg[1]; 2382 2383 if ((msglen == MSG_EXT_SDTR_LEN) && (msgcode == MSG_EXT_SDTR)) { 2384 if (iha_msgin_sdtr(sc) == 0) { 2385 iha_sync_done(sc); 2386 return (iha_wait(sc, MSG_ACCEPT)); 2387 } 2388 2389 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2390 2391 phase 
= iha_wait(sc, MSG_ACCEPT); 2392 if (phase != PHASE_MSG_OUT) 2393 return (phase); 2394 2395 /* Clear FIFO for important message - final SYNC offer */ 2396 bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO); 2397 2398 iha_sync_done(sc); /* This is our final offer */ 2399 2400 } else if ((msglen == MSG_EXT_WDTR_LEN) && (msgcode == MSG_EXT_WDTR)) { 2401 2402 flags = sc->sc_actscb->tcs->flags; 2403 2404 if ((flags & FLAG_NO_WIDE) != 0) 2405 /* Offer 8bit xfers only */ 2406 sc->sc_msg[2] = MSG_EXT_WDTR_BUS_8_BIT; 2407 2408 else if (sc->sc_msg[2] > MSG_EXT_WDTR_BUS_32_BIT) 2409 /* BAD MSG */ 2410 return (iha_msgout_reject(sc)); 2411 2412 else if (sc->sc_msg[2] == MSG_EXT_WDTR_BUS_32_BIT) 2413 /* Offer 16bit instead */ 2414 sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT; 2415 2416 else { 2417 iha_wide_done(sc); 2418 if ((flags & FLAG_NO_NEG_SYNC) == 0) 2419 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2420 return (iha_wait(sc, MSG_ACCEPT)); 2421 } 2422 2423 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2424 2425 phase = iha_wait(sc, MSG_ACCEPT); 2426 if (phase != PHASE_MSG_OUT) 2427 return (phase); 2428 } else 2429 return (iha_msgout_reject(sc)); 2430 2431 return (iha_msgout_extended(sc)); 2432 } 2433 2434 /* 2435 * iha_msgin_sdtr - check SDTR msg in sc_msg. If the offer is 2436 * acceptable leave sc_msg as is and return 0. 2437 * If the negotiation must continue, modify sc_msg 2438 * as needed and return 1. Else return 0. 2439 */ 2440 static int 2441 iha_msgin_sdtr(sc) 2442 struct iha_softc *sc; 2443 { 2444 int flags; 2445 int newoffer; 2446 u_int8_t default_period; 2447 2448 flags = sc->sc_actscb->tcs->flags; 2449 2450 default_period = iha_rate_tbl[flags & FLAG_SCSI_RATE]; 2451 2452 if (sc->sc_msg[3] == 0) 2453 /* target offered async only. Accept it. 
*/ 2454 return (0); 2455 2456 newoffer = 0; 2457 2458 if ((flags & FLAG_NO_SYNC) != 0) { 2459 sc->sc_msg[3] = 0; 2460 newoffer = 1; 2461 } 2462 2463 if (sc->sc_msg[3] > IHA_MAX_OFFSET) { 2464 sc->sc_msg[3] = IHA_MAX_OFFSET; 2465 newoffer = 1; 2466 } 2467 2468 if (sc->sc_msg[2] < default_period) { 2469 sc->sc_msg[2] = default_period; 2470 newoffer = 1; 2471 } 2472 2473 if (sc->sc_msg[2] > IHA_MAX_PERIOD) { 2474 /* Use async */ 2475 sc->sc_msg[3] = 0; 2476 newoffer = 1; 2477 } 2478 2479 return (newoffer); 2480 } 2481 2482 static int 2483 iha_msgin_ignore_wid_resid(sc) 2484 struct iha_softc *sc; 2485 { 2486 bus_space_tag_t iot = sc->sc_iot; 2487 bus_space_handle_t ioh = sc->sc_ioh; 2488 int phase; 2489 2490 phase = iha_wait(sc, MSG_ACCEPT); 2491 2492 if (phase == PHASE_MSG_IN) { 2493 phase = iha_wait(sc, XF_FIFO_IN); 2494 2495 if (phase != -1) { 2496 bus_space_write_1(iot, ioh, TUL_SFIFO, 0); 2497 bus_space_read_1(iot, ioh, TUL_SFIFO); 2498 bus_space_read_1(iot, ioh, TUL_SFIFO); 2499 2500 phase = iha_wait(sc, MSG_ACCEPT); 2501 } 2502 } 2503 2504 return (phase); 2505 } 2506 2507 static int 2508 iha_msgout(sc, msg) 2509 struct iha_softc *sc; 2510 u_int8_t msg; 2511 { 2512 2513 bus_space_write_1(sc->sc_iot, sc->sc_ioh, TUL_SFIFO, msg); 2514 2515 return (iha_wait(sc, XF_FIFO_OUT)); 2516 } 2517 2518 static void 2519 iha_msgout_abort(sc, aborttype) 2520 struct iha_softc *sc; 2521 u_int8_t aborttype; 2522 { 2523 2524 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2525 2526 switch (iha_wait(sc, MSG_ACCEPT)) { 2527 case -1: 2528 break; 2529 2530 case PHASE_MSG_OUT: 2531 sc->sc_flags |= FLAG_EXPECT_DISC; 2532 if (iha_msgout(sc, aborttype) != -1) 2533 iha_bad_seq(sc); 2534 break; 2535 2536 default: 2537 iha_bad_seq(sc); 2538 break; 2539 } 2540 } 2541 2542 static int 2543 iha_msgout_reject(sc) 2544 struct iha_softc *sc; 2545 { 2546 2547 iha_set_ssig(sc, REQ | BSY | SEL, ATN); 2548 2549 if (iha_wait(sc, MSG_ACCEPT) == PHASE_MSG_OUT) 2550 return (iha_msgout(sc, MSG_MESSAGE_REJECT)); 2551 

	return (-1);
}

/*
 * iha_msgout_extended - send the extended message currently staged in
 *	sc_msg[]: MSG_EXTENDED followed by sc_msg[0]+1 bytes (the length
 *	byte itself plus that many payload bytes).  Afterwards the FIFO
 *	is reset and ATN is dropped.  Returns the next phase, or -1.
 */
static int
iha_msgout_extended(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int phase;

	bus_space_write_1(iot, ioh, TUL_SFIFO, MSG_EXTENDED);

	bus_space_write_multi_1(iot, ioh, TUL_SFIFO,
	    sc->sc_msg, sc->sc_msg[0] + 1);

	phase = iha_wait(sc, XF_FIFO_OUT);

	/* Flush any unsent bytes and deassert ATN. */
	bus_space_write_1(iot, ioh, TUL_SCTRL0, RSFIFO);
	iha_set_ssig(sc, REQ | BSY | SEL | ATN, 0);

	return (phase);
}

/*
 * iha_msgout_wdtr - stage and send a WDTR (wide negotiation) message
 *	offering 16-bit transfers, marking wide negotiation as done for
 *	the active target.
 */
static int
iha_msgout_wdtr(sc)
	struct iha_softc *sc;
{

	sc->sc_actscb->tcs->flags |= FLAG_WIDE_DONE;

	sc->sc_msg[0] = MSG_EXT_WDTR_LEN;
	sc->sc_msg[1] = MSG_EXT_WDTR;
	sc->sc_msg[2] = MSG_EXT_WDTR_BUS_16_BIT;

	return (iha_msgout_extended(sc));
}

/*
 * iha_msgout_sdtr - stage and send an SDTR (sync negotiation) message
 *	offering the period from iha_rate_tbl[] selected by the target's
 *	configured rate bits, and the maximum REQ/ACK offset.
 */
static int
iha_msgout_sdtr(sc)
	struct iha_softc *sc;
{
	struct tcs *tcs = sc->sc_actscb->tcs;

	tcs->flags |= FLAG_SYNC_DONE;

	sc->sc_msg[0] = MSG_EXT_SDTR_LEN;
	sc->sc_msg[1] = MSG_EXT_SDTR;
	sc->sc_msg[2] = iha_rate_tbl[tcs->flags & FLAG_SCSI_RATE];
	sc->sc_msg[3] = IHA_MAX_OFFSET;	/* REQ/ACK */

	return (iha_msgout_extended(sc));
}

/*
 * iha_wide_done - wide negotiation with the active target completed.
 *	Reset all sync parameters (clearing FLAG_SYNC_DONE presumably
 *	forces sync to be renegotiated afterwards), record the wide bit
 *	if the target granted a non-zero width, and program the chip's
 *	configuration and sync registers to match.
 */
static void
iha_wide_done(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tcs *tcs = sc->sc_actscb->tcs;

	tcs->syncm = 0;
	tcs->period = 0;
	tcs->offset = 0;

	/* sc_msg[2] holds the width granted by the target; 0 == narrow. */
	if (sc->sc_msg[2] != 0)
		tcs->syncm |= PERIOD_WIDE_SCSI;

	tcs->sconfig0 &= ~ALTPD;
	tcs->flags &= ~FLAG_SYNC_DONE;
	tcs->flags |= FLAG_WIDE_DONE;

	iha_update_xfer_mode(sc, sc->sc_actscb->target);

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
	bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
}

/*
 * iha_sync_done - sync negotiation with the active target completed;
 *	record the agreed period/offset and program the chip.
 */
static void
iha_sync_done(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tcs *tcs = sc->sc_actscb->tcs;
	int i;

	/* sc_msg[2]/sc_msg[3] are the SDTR period and offset agreed on. */
	tcs->period = sc->sc_msg[2];
	tcs->offset = sc->sc_msg[3];
	if (tcs->offset != 0) {
		tcs->syncm |= tcs->offset;

		/* pick the highest possible rate */
		for (i = 0; i < sizeof(iha_rate_tbl); i++)
			if (iha_rate_tbl[i] >= tcs->period)
				break;

		/* Rate index lives in the upper nibble of TUL_SYNCM. */
		tcs->syncm |= (i << 4);
		tcs->sconfig0 |= ALTPD;
	}

	tcs->flags |= FLAG_SYNC_DONE;

	iha_update_xfer_mode(sc, sc->sc_actscb->target);

	bus_space_write_1(iot, ioh, TUL_SCONFIG0, tcs->sconfig0);
	bus_space_write_1(iot, ioh, TUL_SYNCM, tcs->syncm);
}

/*
 * iha_bad_seq - a SCSI bus phase was encountered out of the
 *		 correct/expected sequence. Reset the SCSI bus.
 */
static void
iha_bad_seq(sc)
	struct iha_softc *sc;
{
	struct iha_scb *scb = sc->sc_actscb;

	/* Fail the active command before resetting the bus and chip. */
	if (scb != NULL)
		iha_append_done_scb(sc, scb, HOST_BAD_PHAS);

	iha_reset_scsi_bus(sc);
	iha_reset_chip(sc);
}

/*
 * iha_read_eeprom - read Serial EEPROM value & set to defaults
 *		     if required. XXX - Writing does NOT work!
 */
static void
iha_read_eeprom(sc, eeprom)
	struct iha_softc *sc;
	struct iha_eeprom *eeprom;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t *buf = (u_int16_t *)eeprom;
	u_int8_t gctrl;

	/* Enable EEProm programming */
	gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) | EEPRG;
	bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);

	/* Read EEProm; panic if the signature/checksum do not verify. */
	if (iha_se2_rd_all(sc, buf) == 0)
		panic("%s: cannot read EEPROM", sc->sc_dev.dv_xname);

	/* Disable EEProm programming */
	gctrl = bus_space_read_1(iot, ioh, TUL_GCTRL0) & ~EEPRG;
	bus_space_write_1(iot, ioh, TUL_GCTRL0, gctrl);
}

#ifdef notused
/*
 * iha_se2_update_all - Update SCSI H/A configuration parameters from
 *			serial EEPROM Setup default pattern. Only
 *			change those values different from the values
 *			in iha_eeprom.
 */
static void
iha_se2_update_all(sc)
	struct iha_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t *np;
	u_int32_t chksum;
	int i;

	/* Enable erase/write state of EEPROM */
	iha_se2_instr(sc, ENABLE_ERASE);
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();

	np = (u_int16_t *)&eeprom_default;

	/* Write all words but the last, accumulating the checksum. */
	for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
		iha_se2_wr(sc, i, *np);
		chksum += *np++;
	}

	/* Word 31 holds the 16-bit checksum of the preceding words. */
	chksum &= 0x0000ffff;
	iha_se2_wr(sc, 31, chksum);

	/* Disable erase/write state of EEPROM */
	iha_se2_instr(sc, 0);
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();
}

/*
 * iha_se2_wr - write the given 16 bit value into the Serial EEPROM
 *		at the specified offset
 */
static void
iha_se2_wr(sc, addr, writeword)
	struct iha_softc *sc;
	int addr;
	u_int16_t writeword;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, bit;

	/* send 'WRITE' Instruction == address | WRITE bit */
	iha_se2_instr(sc, addr | WRITE);

	/* Clock out the 16 data bits, most significant bit first. */
	for (i = 16; i > 0; i--) {
		if (writeword & (1 << (i - 1)))
			bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRDO);
		else
			bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
	}

	/* Pulse chip select to start the EEPROM's internal write cycle. */
	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
	EEP_WAIT();

	/* Clock until the data-in line reads high (ready/busy poll). */
	for (;;) {
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI;
		EEP_WAIT();
		if (bit != 0)
			break;	/* write complete */
	}

	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);
}
#endif

/*
 * iha_se2_rd - read & return the 16 bit value at the specified
 *		offset in the Serial E2PROM
 *
 */
static u_int16_t
iha_se2_rd(sc, addr)
	struct iha_softc *sc;
	int addr;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, bit;
	u_int16_t readword;

	/* Send 'READ' instruction == address | READ bit */
	iha_se2_instr(sc, addr | READ);

	/* Clock in the 16 data bits, most significant bit first. */
	readword = 0;
	for (i = 16; i > 0; i--) {
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS | NVRCK);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
		EEP_WAIT();
		/* sample data after the following edge of clock */
		bit = bus_space_read_1(iot, ioh, TUL_NVRAM) & NVRDI ?
	    1 : 0;
		EEP_WAIT();

		readword |= bit << (i - 1);
	}

	/* Deselect the EEPROM. */
	bus_space_write_1(iot, ioh, TUL_NVRAM, 0);

	return (readword);
}

/*
 * iha_se2_rd_all - Read SCSI H/A config parameters from serial EEPROM
 *	into buf, returning non-zero iff the stored signature and
 *	checksum both verify.
 */
static int
iha_se2_rd_all(sc, buf)
	struct iha_softc *sc;
	u_int16_t *buf;
{
	struct iha_eeprom *eeprom = (struct iha_eeprom *)buf;
	u_int32_t chksum;
	int i;

	/* Read all words but the last, accumulating their checksum. */
	for (i = 0, chksum = 0; i < EEPROM_SIZE - 1; i++) {
		*buf = iha_se2_rd(sc, i);
		chksum += *buf++;
	}
	*buf = iha_se2_rd(sc, 31); /* read checksum from EEPROM */

	chksum &= 0x0000ffff; /* lower 16 bits */

	return (eeprom->signature == EEP_SIGNATURE) &&
	    (eeprom->checksum == chksum);
}

/*
 * iha_se2_instr - write an octet to serial E2PROM one bit at a time
 *	(start bit first, then the 8 instruction bits, MSB first; each
 *	bit is latched by pulsing the serial clock line).
 */
static void
iha_se2_instr(sc, instr)
	struct iha_softc *sc;
	int instr;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int b, i;

	b = NVRCS | NVRDO;	/* Write the start bit (== 1) */

	bus_space_write_1(iot, ioh, TUL_NVRAM, b);
	EEP_WAIT();
	bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
	EEP_WAIT();

	for (i = 8; i > 0; i--) {
		if (instr & (1 << (i - 1)))
			b = NVRCS | NVRDO;	/* Write a 1 bit */
		else
			b = NVRCS;		/* Write a 0 bit */

		bus_space_write_1(iot, ioh, TUL_NVRAM, b);
		EEP_WAIT();
		bus_space_write_1(iot, ioh, TUL_NVRAM, b | NVRCK);
		EEP_WAIT();
	}

	/* Leave chip select asserted with clock and data low. */
	bus_space_write_1(iot, ioh, TUL_NVRAM, NVRCS);
}