/*	$OpenBSD: siop_common.c,v 1.35 2012/02/24 06:19:00 guenther Exp $ */
/*	$NetBSD: siop_common.c,v 1.37 2005/02/27 00:27:02 perry Exp $	*/

/*
 * Copyright (c) 2000, 2002 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_message.h>
#include <scsi/scsiconf.h>

#define SIOP_NEEDS_PERIOD_TABLES
#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>
#include <dev/ic/siopvar.h>

#undef DEBUG
#undef DEBUG_DR
#undef DEBUG_NEG

int
siop_common_attach(sc)
	struct siop_common_softc *sc;
{
	int error, i;
	bus_dma_segment_t seg;
	int rseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	if ((sc->features & SF_CHIP_RAM) == 0) {
		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: unable to allocate script DMA memory, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    (caddr_t *)&sc->sc_script,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			printf("%s: unable to map script DMA memory, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
		if (error) {
			printf("%s: unable to create script DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: unable to load script DMA map, "
			    "error = %d\n", sc->sc_dev.dv_xname, error);
			return error;
		}
		sc->sc_scriptaddr =
		    sc->sc_scriptdma->dm_segs[0].ds_addr;
		sc->ram_size = PAGE_SIZE;
	}

	/*
	 * sc->sc_link is the template for all device sc_link's
	 * for devices attached to this adapter.
	 * It is passed to the upper layers in config_found().
	 */
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_buswidth =
	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
	sc->sc_link.adapter_target =
	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
	if (sc->sc_link.adapter_target == 0 ||
	    sc->sc_link.adapter_target >=
	    sc->sc_link.adapter_buswidth)
		sc->sc_link.adapter_target = SIOP_DEFAULT_TARGET;

	for (i = 0; i < 16; i++)
		sc->targets[i] = NULL;

	/* find min/max sync period for this chip */
	sc->st_maxsync = 0;
	sc->dt_maxsync = 0;
	sc->st_minsync = 255;
	sc->dt_minsync = 255;
	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
		if (sc->clock_period != scf_period[i].clock)
			continue;
		if (sc->st_maxsync < scf_period[i].period)
			sc->st_maxsync = scf_period[i].period;
		if (sc->st_minsync > scf_period[i].period)
			sc->st_minsync = scf_period[i].period;
	}
	if (sc->st_maxsync == 255 || sc->st_minsync == 0)
		panic("siop: can't find my sync parameters");
	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
		if (sc->clock_period != dt_scf_period[i].clock)
			continue;
		if (sc->dt_maxsync < dt_scf_period[i].period)
			sc->dt_maxsync = dt_scf_period[i].period;
		if (sc->dt_minsync > dt_scf_period[i].period)
			sc->dt_minsync = dt_scf_period[i].period;
	}
	if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
		panic("siop: can't find my sync parameters");
	return 0;
}

void
siop_common_reset(sc)
	struct siop_common_softc *sc;
{
	u_int32_t stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_link.adapter_target | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_link.adapter_target);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ?
	    DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
	if (sc->features & SF_CHIP_AAIP)
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_AIPCNTL1, AIPCNTL1_DIS);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}
	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
	    STEST4_MODE_MASK;

	/*
	 * initialise the RAM. Without this we may get scsi gross errors on
	 * the 1010
	 */
	if (sc->features & SF_CHIP_RAM)
		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
		    0, 0, sc->ram_size / 4);
	sc->sc_reset(sc);
}

/* prepare tables before sending a cmd */
void
siop_setuptables(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsi_xfer *xs = siop_cmd->xs;
	int target = xs->sc_link->target;
	int lun = xs->sc_link->lun;
	int msgoffset = 1;
	int *targ_flags = &sc->targets[target]->flags;
	int quirks;

	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (siop_cmd->status == CMDST_SENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (it generates false SCSI gross
		 * errors). The FreeBSD sym driver has comments about it but
		 * their workaround (disable SCSI gross error reporting)
		 * doesn't work with my adapter. So disable disconnect when
		 * not wide.
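		 * (We do that by clearing the "disconnect allowed" bit in
		 * the IDENTIFY message sent below.)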
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
	if (sc->targets[target]->status == TARST_ASYNC) {
		*targ_flags &= TARF_DT; /* save TARF_DT because we don't set it here */
		quirks = xs->sc_link->quirks;

		if ((quirks & SDEV_NOTAGS) == 0)
			*targ_flags |= TARF_TAG;
		if (((quirks & SDEV_NOWIDE) == 0) &&
		    (sc->features & SF_BUS_WIDE))
			*targ_flags |= TARF_WIDE;
		if ((quirks & SDEV_NOSYNC) == 0)
			*targ_flags |= TARF_SYNC;

		if ((sc->features & SF_CHIP_GEBUG) &&
		    (*targ_flags & TARF_WIDE) == 0)
			/*
			 * 1010 workaround: can't do disconnect if not wide,
			 * so can't do tag
			 */
			*targ_flags &= ~TARF_TAG;

		/* Safe to call siop_add_dev() multiple times */
		siop_add_dev((struct siop_softc *)sc, target, lun);

		if ((*targ_flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (*targ_flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (*targ_flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	} else if (sc->targets[target]->status == TARST_OK &&
	    (*targ_flags & TARF_TAG) &&
	    siop_cmd->status != CMDST_SENSE) {
		siop_cmd->flags |= CMDFL_TAG;
	}
	siop_cmd->siop_tables->status =
	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */

	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) ||
	    siop_cmd->status == CMDST_SENSE) {
		bzero(siop_cmd->siop_tables->data,
		    sizeof(siop_cmd->siop_tables->data));
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    siop_htoc32(sc,
			    siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    siop_htoc32(sc,
			    siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}

int
siop_wdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->sc_link->target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
			/* FALLTHROUGH */
		default:
			/*
			 * hmm, we got more than what we can handle, shouldn't
			 * happen.
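			 * (the target requested a width we did not offer)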
			 * Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
			    tables->msg_in[3]);
			tables->t_msgout.count = siop_htoc32(sc, 1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async, but
		 * don't schedule a sync neg; the target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}

int
siop_ppr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->sc_link->target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;
	int sync, offset, options, scf = 0;
	int i;

#ifdef DEBUG_NEG
	printf("%s: answer on ppr negotiation:", sc->sc_dev.dv_xname);
	for (i = 0; i < 8; i++)
		printf(" 0x%x", tables->msg_in[i]);
	printf("\n");
#endif

	if (siop_target->status == TARST_PPR_NEG) {
		/* we initiated PPR negotiation */
		sync = tables->msg_in[3];
		offset = tables->msg_in[5];
		options = tables->msg_in[7];
		if (options != MSG_EXT_PPR_PROT_DT) {
			/* shouldn't happen */
			printf("%s: ppr negotiation for target %d: "
			    "no DT option\n", sc->sc_dev.dv_xname, target);
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}

		if (offset > sc->maxoff || sync < sc->dt_minsync ||
		    sync > sc->dt_maxsync) {
			printf("%s: ppr negotiation for target %d: "
			    "offset (%d) or sync (%d) out of range\n",
			    sc->sc_dev.dv_xname, target, offset, sync);
			/* should not happen */
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		} else {
			for (i = 0; i <
			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
			    i++) {
				if (sc->clock_period != dt_scf_period[i].clock)
					continue;
				if (dt_scf_period[i].period == sync) {
					/* ok, found it. we now are sync.
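					 * Remember the clock scale factor
					 * (scf); it is programmed into SCNTL3
					 * once the agreement is complete.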
					 */
					siop_target->offset = offset;
					siop_target->period = sync;
					scf = dt_scf_period[i].scf;
					siop_target->flags |= TARF_ISDT;
				}
			}
			if ((siop_target->flags & TARF_ISDT) == 0) {
				printf("%s: ppr negotiation for target %d: "
				    "sync (%d) incompatible with adapter\n",
				    sc->sc_dev.dv_xname, target, sync);
				/*
				 * we didn't find it in our table, do async,
				 * send reject msg, start SDTR/WDTR neg
				 */
				siop_target->status = TARST_ASYNC;
				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
				siop_target->offset = 0;
				siop_target->period = 0;
				goto reject;
			}
		}
		if (tables->msg_in[6] != 1) {
			printf("%s: ppr negotiation for target %d: "
			    "transfer width (%d) incompatible with dt\n",
			    sc->sc_dev.dv_xname, target, tables->msg_in[6]);
			/* DT mode can only be done with wide transfers */
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}
		siop_target->flags |= TARF_ISWIDE;
		sc->targets[target]->id |= (SCNTL3_EWS << 24);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id |=
		    (siop_target->offset & SXFER_MO_MASK) << 8;
		sc->targets[target]->id &= ~0xff;
		sc->targets[target]->id |= SCNTL4_U3EN;
		siop_target->status = TARST_OK;
		siop_update_xfer_mode(sc, target);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
		    (sc->targets[target]->id >> 8) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
		    sc->targets[target]->id & 0xff);
		return SIOP_NEG_ACK;
	} else {
		/* target initiated PPR negotiation, shouldn't happen */
		printf("%s: rejecting invalid PPR negotiation from "
		    "target %d\n", sc->sc_dev.dv_xname, target);
reject:
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
}

int
siop_sdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->sc_link->target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync.
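				 * The scf divider, sync offset and (for
				 * Ultra) the SCNTL3_ULTRA bit are folded
				 * into the cached id word below.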
				 */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	tables->id = siop_htoc32(sc, sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}

void
siop_sdtr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
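	/* transfer period factor, then REQ/ACK offset */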
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
}

void
siop_wdtr_msg(siop_cmd, offset, wide)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int wide;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
}

void
siop_ppr_msg(siop_cmd, offset, ssync, soff)
	struct siop_common_cmd *siop_cmd;
	int offset;
	int ssync, soff;
{
	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_PROT_DT;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
}

void
siop_minphys(struct buf *bp, struct scsi_link *sl)
{
	if (bp->b_bcount > SIOP_MAXFER)
		bp->b_bcount = SIOP_MAXFER;

	minphys(bp);
}

void
siop_ma(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table with partial xfer */

	/*
	 * compute how much of the current table didn't get handled when
	 * a phase mismatch occurs
	 */
	if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
	    == 0)
		return; /* no valid data transfer */

	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		printf("%s: bad offset in siop_ma (%d)\n",
		    sc->sc_dev.dv_xname, offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->flags & SCSI_DATA_OUT) {
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account for stale data in the FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
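		/* (the stale bytes were already added back into dbc above) */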
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	siop_cmd->flags |= CMDFL_RESID;
	siop_cmd->resid = dbc;
}

void
siop_sdp(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;

	if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
	    == 0)
		return; /* no data pointers to save */

	/*
	 * offset == SIOP_NSG may be a valid condition if we get a Save data
	 * pointer when the xfer is done. Just ignore the Save data pointer
	 * in this case
	 */
	if (offset == SIOP_NSG)
		return;
#ifdef DIAGNOSTIC
	if (offset > SIOP_NSG) {
		sc_print_addr(siop_cmd->xs->sc_link);
		printf("offset %d > %d\n", offset, SIOP_NSG);
		panic("siop_sdp: offset");
	}
#endif
	/*
	 * Save data pointer. We do this by adjusting the tables to point
	 * at the beginning of the data not yet transferred.
	 * offset points to the first table with untransferred data.
	 */

	/*
	 * before doing that we decrease resid by the amount of data which
	 * has already been transferred.
	 */
	siop_update_resid(siop_cmd, offset);

	/*
	 * First let's see if we have a resid from a phase mismatch. If so,
	 * we have to adjust the table at offset to remove transferred data.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		siop_cmd->flags &= ~CMDFL_RESID;
		table = &siop_cmd->siop_tables->data[offset];
		/* "cut" already transferred data from this table */
		table->addr =
		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
		table->count = siop_htoc32(sc, siop_cmd->resid);
	}

	/*
	 * now we can remove entries which have been transferred.
	 * We just move the entries with data left to the beginning of the
	 * tables
	 */
	bcopy(&siop_cmd->siop_tables->data[offset],
	    &siop_cmd->siop_tables->data[0],
	    (SIOP_NSG - offset) * sizeof(scr_table_t));
}

void
siop_update_resid(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;
	int i;

	if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
	    == 0)
		return; /* no data to transfer */

	/*
	 * update resid. First account for the table entries which have
	 * been fully completed.
	 */
	for (i = 0; i < offset; i++)
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
	/*
	 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
	 * partial transfer. If not, offset points to the entry following
	 * the last full transfer.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		table = &siop_cmd->siop_tables->data[offset];
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
	}
}

int
siop_iwr(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int offset;
	scr_table_t *table; /* table with IWR */
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	/* handle ignore wide residue messages */

	/* if target isn't wide, reject */
	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
	/* get index of current command in table */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	/*
	 * if the current table did complete, we're now pointing at the
	 * next one. Go back one if we didn't see a phase mismatch.
	 */
	if ((siop_cmd->flags & CMDFL_RESID) == 0)
		offset--;
	table = &siop_cmd->siop_tables->data[offset];

	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
		if (siop_ctoh32(sc, table->count) & 1) {
			/* we really got the number of bytes we expected */
			return SIOP_NEG_ACK;
		} else {
			/*
			 * now we really had a short xfer, by one byte.
			 * handle it just as if we had a phase mismatch
			 * (there is a resid of one for this table).
			 * Update scratcha1 to reflect the fact that
			 * this xfer isn't complete.
			 */
			siop_cmd->flags |= CMDFL_RESID;
			siop_cmd->resid = 1;
			bus_space_write_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SCRATCHA + 1, offset);
			return SIOP_NEG_ACK;
		}
	} else {
		/*
		 * we already have a short xfer for this table; it's
		 * just one byte less than we thought it was
		 */
		siop_cmd->resid--;
		return SIOP_NEG_ACK;
	}
}

void
siop_clearfifo(sc)
	struct siop_common_softc *sc;
{
	int timeout = 0;
	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
	printf("DMA fifo not empty!\n");
#endif
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
	    ctest3 | CTEST3_CLF);
	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
	    CTEST3_CLF) != 0) {
		delay(1);
		if (++timeout > 1000) {
			printf("clear fifo failed\n");
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST3) & ~CTEST3_CLF);
			return;
		}
	}
}

int
siop_modechange(sc)
	struct siop_common_softc *sc;
{
	int retry;
	int sist0, sist1, stest2;
	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			printf("%s: invalid SCSI mode 0x%x\n",
			    sc->sc_dev.dv_xname, sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    sc->sc_dev.dv_xname);
	return 0;
}

void
siop_resetbus(sc)
	struct siop_common_softc *sc;
{
	int scntl1;
	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
	    scntl1 | SCNTL1_RST);
	/* minimum 25 us, more time won't hurt */
	delay(100);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

void
siop_update_xfer_mode(sc, target)
	struct siop_common_softc *sc;
	int target;
{
	struct siop_common_target *siop_target;

	siop_target = sc->targets[target];

	printf("%s: target %d now using %s%s%d bit ",
	    sc->sc_dev.dv_xname, target,
	    (siop_target->flags & TARF_TAG) ? "tagged " : "",
	    (siop_target->flags & TARF_ISDT) ? "DT " : "",
	    (siop_target->flags & TARF_ISWIDE) ? 16 : 8);

	if (siop_target->offset == 0)
		printf("async ");
	else {
		switch (siop_target->period) {
		case 9: /* 12.5 ns cycle */
			printf("80.0");
			break;
		case 10: /* 25 ns cycle */
			printf("40.0");
			break;
		case 12: /* 48 ns cycle */
			printf("20.0");
			break;
		case 18: /* 72 ns cycle */
			printf("13.3");
			break;
		case 25: /* 100 ns cycle */
			printf("10.0");
			break;
		case 37: /* 148 ns cycle */
			printf("6.67");
			break;
		case 50: /* 200 ns cycle */
			printf("5.0");
			break;
		case 75: /* 300 ns cycle */
			printf("3.33");
			break;
		default:
			printf("??");
			break;
		}
		printf(" MHz %d REQ/ACK offset ", siop_target->offset);
	}

	printf("xfers\n");

	if ((sc->features & SF_CHIP_GEBUG) &&
	    (siop_target->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 workaround: can't do disconnect if not wide,
		 * so can't do tag
		 */
		siop_target->flags &= ~TARF_TAG;
}