1 /* $NetBSD: siop_common.c,v 1.53 2010/11/13 13:52:02 uebayasi Exp $ */ 2 3 /* 4 * Copyright (c) 2000, 2002 Manuel Bouyer. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.53 2010/11/13 13:52:02 uebayasi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>

#include <machine/endian.h>
#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>

#include "opt_siop.h"

#undef DEBUG
#undef DEBUG_DR
#undef DEBUG_NEG

/*
 * Common attach code shared by the siop/esiop front-ends:
 * - if the chip has no on-board script RAM, allocate/map/load one
 *   DMA-safe page for the script;
 * - initialize the scsipi adapter and channel structures;
 * - compute the min/max sync periods supported by this chip's clock
 *   from the scf_period/dt_scf_period tables.
 * Returns 0 on success or the errno from a failing bus_dma(9) call.
 */
int
siop_common_attach(struct siop_common_softc *sc)
{
	int error, i;
	bus_dma_segment_t seg;
	int rseg;

	/*
	 * Allocate DMA-safe memory for the script and map it.
	 */
	if ((sc->features & SF_CHIP_RAM) == 0) {
		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to allocate script DMA memory, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
		    (void **)&sc->sc_script,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to map script DMA memory, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create script DMA map, "
			    "error = %d\n", error);
			return error;
		}
		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "unable to load script DMA map, "
			    "error = %d\n", error);
			return error;
		}
		/* NOTE(review): error paths above leak the earlier
		 * allocations/maps; harmless at attach time but worth
		 * confirming against upstream intent. */
		sc->sc_scriptaddr =
		    sc->sc_scriptdma->dm_segs[0].ds_addr;
		sc->ram_size = PAGE_SIZE;
	}

	sc->sc_adapt.adapt_dev = sc->sc_dev;
	sc->sc_adapt.adapt_nchannels = 1;
	sc->sc_adapt.adapt_openings = 0;
	sc->sc_adapt.adapt_ioctl = siop_ioctl;
	sc->sc_adapt.adapt_minphys = minphys;

	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
	sc->sc_chan.chan_adapter = &sc->sc_adapt;
	sc->sc_chan.chan_bustype = &scsi_bustype;
	sc->sc_chan.chan_channel = 0;
	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
	sc->sc_chan.chan_ntargets =
	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
	sc->sc_chan.chan_nluns = 8;
	/* our own SCSI id comes from the chip (set by BIOS/firmware) */
	sc->sc_chan.chan_id =
	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
	if (sc->sc_chan.chan_id == 0 ||
	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;

	for (i = 0; i < 16; i++)
		sc->targets[i] = NULL;

	/* find min/max sync period for this chip */
	sc->st_maxsync = 0;
	sc->dt_maxsync = 0;
	sc->st_minsync = 255;
	sc->dt_minsync = 255;
	for (i = 0; i < __arraycount(scf_period); i++) {
		if (sc->clock_period != scf_period[i].clock)
			continue;
		if (sc->st_maxsync < scf_period[i].period)
			sc->st_maxsync = scf_period[i].period;
		if (sc->st_minsync > scf_period[i].period)
			sc->st_minsync = scf_period[i].period;
	}
	/*
	 * NOTE(review): if no table entry matched, st_maxsync stays 0 and
	 * st_minsync stays 255, which this condition does NOT catch — the
	 * check looks inverted; confirm against upstream before changing.
	 */
	if (sc->st_maxsync == 255 || sc->st_minsync == 0)
		panic("siop: can't find my sync parameters");
	for (i = 0; i < __arraycount(dt_scf_period); i++) {
		if (sc->clock_period != dt_scf_period[i].clock)
			continue;
		if (sc->dt_maxsync < dt_scf_period[i].period)
			sc->dt_maxsync = dt_scf_period[i].period;
		if (sc->dt_minsync > dt_scf_period[i].period)
			sc->dt_minsync = dt_scf_period[i].period;
	}
	/* NOTE(review): same apparently-inverted check as above */
	if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
		panic("siop: can't find my sync parameters");
	return 0;
}

/*
 * Reset the chip and program the initial register state: arbitration,
 * interrupt enables, selection timeout, our SCSI id/response id, clock
 * doubler/quadrupler setup, FIFO/LED/Ultra3 feature bits, and (on chips
 * with on-board RAM) clearing of the script RAM.  Finishes by calling
 * the bus-front-end supplied sc->sc_reset() hook.
 */
void
siop_common_reset(struct siop_common_softc *sc)
{
	u_int32_t stest1, stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	/* selection timeout */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_chan.chan_id | SCID_RRE);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_chan.chan_id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
	if (sc->features & SF_CHIP_AAIP)
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_AIPCNTL1, AIPCNTL1_DIS);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}

	if (sc->features & SF_CHIP_USEPCIC) {
		/*
		 * NOTE(review): 4-byte read of the 1-byte STEST1 register
		 * (the matching write below uses bus_space_write_1) —
		 * confirm this is intended for the chips using this flag.
		 */
		stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1);
		stest1 |= STEST1_SCLK;
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1);
	}

	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}
	/* remember the current bus mode (SE/LVD/HVD) reported by the chip */
	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
	    STEST4_MODE_MASK;

	/*
	 * initialise the RAM. Without this we may get scsi gross errors on
	 * the 1010
	 */
	if (sc->features & SF_CHIP_RAM)
		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
		    0, 0, sc->ram_size / 4);
	sc->sc_reset(sc);
}

/*
 * Prepare the per-command script tables before sending a command:
 * build the identify (and optional tag) message, queue a PPR/WDTR/SDTR
 * negotiation message if the target is still async, mark the status
 * byte invalid, and fill in the command and data DMA pointers.
 */
/* prepare tables before sending a cmd */
void
siop_setuptables(struct siop_common_cmd *siop_cmd)
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->xs_periph->periph_target;
	int lun = xs->xs_periph->periph_lun;
	int msgoffset = 1;

	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (xs->xs_control & XS_CTL_REQSENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (generate false SCSI gross error).
		 * The FreeBSD sym driver has comments about it but their
		 * workaround (disable SCSI gross error reporting) doesn't
		 * work with my adapter. So disable disconnect when not
		 * wide.
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
		 * different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
	if (sc->targets[target]->status == TARST_ASYNC) {
		/* target still async: append the appropriate negotiation */
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			/* SDTR is limited to an offset of 31 */
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
	siop_cmd->siop_tables->status =
	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */

	siop_cmd->siop_tables->cmd.count =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    siop_htoc32(sc,
				siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    siop_htoc32(sc,
				siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}

/*
 * Handle an incoming WDTR (wide negotiation) message, both as the
 * answer to a negotiation we started and as a target-initiated one.
 * Updates the cached SCNTL3 bits in targets[target]->id and the chip
 * register.  Returns SIOP_NEG_MSGOUT if a reply message was queued
 * (reject, follow-up SDTR, or our WDTR answer), SIOP_NEG_ACK otherwise.
 */
int
siop_wdtr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
		/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", device_xname(sc->sc_dev),
			    target,
			    tables->msg_in[3]);
			tables->t_msgout.count = siop_htoc32(sc, 1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = siop_htoc32(sc, sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}

/*
 * Handle an incoming PPR (parallel protocol request) message.  We only
 * accept answers to a PPR we initiated, with the DT option, sync/offset
 * within the chip's range and wide transfers; everything else falls
 * back to async and queues a MESSAGE REJECT (a rejected PPR restarts
 * negotiation via SDTR/WDTR by setting TARST_ASYNC).  On success the
 * cached SCNTL3/SXFER/SCNTL4 bits in targets[target]->id and the chip
 * registers are updated.  Returns SIOP_NEG_ACK or SIOP_NEG_MSGOUT.
 */
int
siop_ppr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;
	int sync, offset, options, scf = 0;
	int i;

#ifdef DEBUG_NEG
	printf("%s: answer on ppr negotiation:", device_xname(sc->sc_dev));
	for (i = 0; i < 8; i++)
		printf(" 0x%x", tables->msg_in[i]);
	printf("\n");
#endif

	if (siop_target->status == TARST_PPR_NEG) {
		/* we initiated PPR negotiation */
		sync = tables->msg_in[3];
		offset = tables->msg_in[5];
		options = tables->msg_in[7];
		if (options != MSG_EXT_PPR_DT) {
			/* should't happen */
			printf("%s: ppr negotiation for target %d: "
			    "no DT option\n", device_xname(sc->sc_dev), target);
			siop_target->status = TARST_ASYNC;
			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		}

		if (offset > sc->maxoff || sync < sc->dt_minsync ||
		    sync > sc->dt_maxsync) {
			printf("%s: ppr negotiation for target %d: "
			    "offset (%d) or sync (%d) out of range\n",
			    device_xname(sc->sc_dev), target, offset, sync);
			/* should not happen */
			siop_target->offset = 0;
			siop_target->period = 0;
			goto reject;
		} else {
			for (i = 0; i < __arraycount(dt_scf_period); i++) {
				if (sc->clock_period != dt_scf_period[i].clock)
					continue;
				if (dt_scf_period[i].period == sync) {
					/* ok, found it. we now are sync. */
					siop_target->offset = offset;
					siop_target->period = sync;
					scf = dt_scf_period[i].scf;
					siop_target->flags |= TARF_ISDT;
				}
			}
			if ((siop_target->flags & TARF_ISDT) == 0) {
				printf("%s: ppr negotiation for target %d: "
				    "sync (%d) incompatible with adapter\n",
				    device_xname(sc->sc_dev), target, sync);
				/*
				 * we didn't find it in our table, do async
				 * send reject msg, start SDTR/WDTR neg
				 */
				siop_target->status = TARST_ASYNC;
				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
				siop_target->offset = 0;
				siop_target->period = 0;
				goto reject;
			}
		}
		if (tables->msg_in[6] != 1) {
			printf("%s: ppr negotiation for target %d: "
			    "transfer width (%d) incompatible with dt\n",
			    device_xname(sc->sc_dev),
			    target, tables->msg_in[6]);
			/* DT mode can only be done with wide transfers */
			siop_target->status = TARST_ASYNC;
			goto reject;
		}
		/*
		 * id layout: byte 3 = SCNTL3 (EWS/SCF), byte 1 = SXFER
		 * (offset), byte 0 = SCNTL4.
		 */
		siop_target->flags |= TARF_ISWIDE;
		sc->targets[target]->id |= (SCNTL3_EWS << 24);
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id |=
		    (siop_target->offset & SXFER_MO_MASK) << 8;
		sc->targets[target]->id &= ~0xff;
		sc->targets[target]->id |= SCNTL4_U3EN;
		siop_target->status = TARST_OK;
		siop_update_xfer_mode(sc, target);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
		    (sc->targets[target]->id >> 8) & 0xff);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
		    sc->targets[target]->id & 0xff);
		return SIOP_NEG_ACK;
	} else {
		/* target initiated PPR negotiation, shouldn't happen */
		printf("%s: rejecting invalid PPR negotiation from "
		    "target %d\n", device_xname(sc->sc_dev), target);
reject:
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
}

/*
 * Handle an incoming SDTR (sync negotiation) message, both as an
 * answer to a negotiation we started and as a target-initiated one.
 * Looks up the requested period in scf_period, updates the cached
 * SCNTL3/SXFER bits in targets[target]->id and the chip registers,
 * falling back to async on anything out of range.  Returns
 * SIOP_NEG_MSGOUT if a reply (reject or SDTR answer) was queued,
 * SIOP_NEG_ACK otherwise.
 */
int
siop_sdtr_neg(struct siop_common_cmd *siop_cmd)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < __arraycount(scf_period); i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count = siop_htoc32(sc, 1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		/* clamp the target's request to what we can do */
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < __arraycount(scf_period); i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	tables->id = siop_htoc32(sc, sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}

/*
 * Build an extended SDTR message at byte 'offset' of msg_out and set
 * the message-out byte count accordingly.
 */
void
siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
{

	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
}

/*
 * Build an extended WDTR message at byte 'offset' of msg_out and set
 * the message-out byte count accordingly.
 */
void
siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide)
{

	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
}

/*
 * Build an extended PPR message (wide + DT option) at byte 'offset' of
 * msg_out and set the message-out byte count accordingly.
 */
void
siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
{

	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
	siop_cmd->siop_tables->t_msgout.count =
	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
}

/* adapt_minphys hook: just defer to the MI minphys() */
void
siop_minphys(struct buf *bp)
{

	minphys(bp);
}

/*
 * scsipi ioctl hook.  Only SCBUSIORESET is supported; other commands
 * return ENOTTY.
 */
int
siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
    int flag, struct proc *p)
{
	struct siop_common_softc *sc;

	sc = device_private(chan->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIORESET:
		/*
		 * abort the script. This will trigger an interrupt, which will
		 * trigger a bus reset.
		 * We can't safely trigger the reset here as we can't access
		 * the required register while the script is running.
		 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
		return (0);
	default:
		return (ENOTTY);
	}
}

/*
 * Phase-mismatch handler: compute how many bytes of the current data
 * table were NOT transferred (from DBC, plus data still sitting in the
 * DMA/SCSI FIFOs for a data-out), record it in siop_cmd->resid and set
 * CMDFL_RESID.  The current table index comes from SCRATCHA1, which
 * the script maintains.
 */
void
siop_ma(struct siop_common_cmd *siop_cmd)
{
	int offset, dbc, sstat;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table; /* table with partial xfer */

	/*
	 * compute how much of the current table didn't get handled when
	 * a phase mismatch occurs
	 */
	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no valid data transfer */

	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	if (offset >= SIOP_NSG) {
		/* NOTE(review): message says siop_sdp but we are in
		 * siop_ma — likely a copy/paste in the diagnostic text */
		aprint_error_dev(sc->sc_dev, "bad offset in siop_sdp (%d)\n",
		    offset);
		return;
	}
	table = &siop_cmd->siop_tables->data[offset];
#ifdef DEBUG_DR
	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
	    table->count, table->addr);
#endif
	/* DBC holds the 24-bit residual byte count of the move */
	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
		if (sc->features & SF_CHIP_DFBC) {
			dbc +=
			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
		} else {
			/* need to account stale data in FIFO */
			int dfifo =
			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
			if (sc->features & SF_CHIP_FIFO) {
				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
			} else {
				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
			}
		}
		/* bytes latched in the SCSI output registers count too */
		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
		if (sstat & SSTAT0_OLF)
			dbc++;
		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
			dbc++;
		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SSTAT2);
			if (sstat & SSTAT2_OLF1)
				dbc++;
			if ((sstat & SSTAT2_ORF1) &&
			    (sc->features & SF_CHIP_DFBC) == 0)
				dbc++;
		}
		/* clear the FIFO */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
		    CTEST3_CLF);
	}
	siop_cmd->flags |= CMDFL_RESID;
	siop_cmd->resid = dbc;
}

/*
 * Handle a SCSI "save data pointer": update the resid accounting, trim
 * any partially transferred table (from a previous phase mismatch) and
 * shift the remaining tables to the front of the array.  'offset' is
 * the index of the first table with untransferred data.
 */
void
siop_sdp(struct siop_common_cmd *siop_cmd, int offset)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */

	/*
	 * offset == SIOP_NSG may be a valid condition if we get a Save data
	 * pointer when the xfer is done. Just ignore the Save data pointer
	 * in this case
	 */
	if (offset == SIOP_NSG)
		return;
#ifdef DIAGNOSTIC
	if (offset > SIOP_NSG) {
		scsipi_printaddr(siop_cmd->xs->xs_periph);
		printf(": offset %d > %d\n", offset, SIOP_NSG);
		panic("siop_sdp: offset");
	}
#endif
	/*
	 * Save data pointer. We do this by adjusting the tables to point
	 * at the beginning of the data not yet transferred.
	 * offset points to the first table with untransferred data.
	 */

	/*
	 * before doing that we decrease resid from the amount of data which
	 * has been transferred.
	 */
	siop_update_resid(siop_cmd, offset);

	/*
	 * First let see if we have a resid from a phase mismatch. If so,
	 * we have to adjust the table at offset to remove transferred data.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		siop_cmd->flags &= ~CMDFL_RESID;
		table = &siop_cmd->siop_tables->data[offset];
		/* "cut" already transferred data from this table */
		table->addr =
		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
		table->count = siop_htoc32(sc, siop_cmd->resid);
	}

	/*
	 * now we can remove entries which have been transferred.
	 * We just move the entries with data left at the beginning of the
	 * tables
	 */
	memmove(&siop_cmd->siop_tables->data[0],
	    &siop_cmd->siop_tables->data[offset],
	    (SIOP_NSG - offset) * sizeof(scr_table_t));
}

/*
 * Subtract the bytes actually transferred from xs->resid: all tables
 * before 'offset' completed fully; if CMDFL_RESID is set, the table at
 * 'offset' transferred all but siop_cmd->resid bytes.
 */
void
siop_update_resid(struct siop_common_cmd *siop_cmd, int offset)
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	scr_table_t *table;
	int i;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data to transfer */

	/*
	 * update resid. First account for the table entries which have
	 * been fully completed.
	 */
	for (i = 0; i < offset; i++)
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
	/*
	 * if CMDFL_RESID is set, the last table (pointed by offset) is a
	 * partial transfer. If not, offset points to the entry following
	 * the last full transfer.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		table = &siop_cmd->siop_tables->data[offset];
		siop_cmd->xs->resid -=
		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
	}
}

/*
 * Handle an IGNORE WIDE RESIDUE message: reject it for non-wide
 * targets, otherwise account for the one extra byte either by creating
 * a one-byte resid on the just-finished table (rewinding SCRATCHA1) or
 * by shrinking an existing phase-mismatch resid.  Returns SIOP_NEG_ACK
 * or SIOP_NEG_MSGOUT (for the reject).
 */
int
siop_iwr(struct siop_common_cmd *siop_cmd)
{
	int offset;
	scr_table_t *table; /* table with IWR */
	struct siop_common_softc *sc = siop_cmd->siop_sc;

	/* handle ignore wide residue messages */

	/* if target isn't wide, reject */
	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
	/* get index of current command in table */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	/*
	 * if the current table did complete, we're now pointing at the
	 * next one. Go back one if we didn't see a phase mismatch.
	 * NOTE(review): offset can in principle become -1 here if
	 * SCRATCHA1 was 0 — presumably cannot happen when an IWR is
	 * received; confirm.
	 */
	if ((siop_cmd->flags & CMDFL_RESID) == 0)
		offset--;
	table = &siop_cmd->siop_tables->data[offset];

	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
		if (siop_ctoh32(sc, table->count) & 1) {
			/* we really got the number of bytes we expected */
			return SIOP_NEG_ACK;
		} else {
			/*
			 * now we really had a short xfer, by one byte.
			 * handle it just as if we had a phase mismatch
			 * (there is a resid of one for this table).
			 * Update scratcha1 to reflect the fact that
			 * this xfer isn't complete.
			 */
			siop_cmd->flags |= CMDFL_RESID;
			siop_cmd->resid = 1;
			bus_space_write_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SCRATCHA + 1, offset);
			return SIOP_NEG_ACK;
		}
	} else {
		/*
		 * we already have a short xfer for this table; it's
		 * just one byte less than we thought it was
		 */
		siop_cmd->resid--;
		return SIOP_NEG_ACK;
	}
}

/*
 * Flush the chip's DMA FIFO via CTEST3_CLF, polling until the bit
 * clears (gives up with a console message after ~1ms).
 */
void
siop_clearfifo(struct siop_common_softc *sc)
{
	int timeout = 0;
	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);

#ifdef DEBUG_INTR
	printf("DMA fifo not empty !\n");
#endif
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
	    ctest3 | CTEST3_CLF);
	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
	    CTEST3_CLF) != 0) {
		delay(1);
		if (++timeout > 1000) {
			printf("clear fifo failed\n");
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_CTEST3) & ~CTEST3_CLF);
			return;
		}
	}
}

/*
 * Handle a bus mode change (SBMC) interrupt: wait for DIFFSENSE to
 * stabilise, then reprogram STEST2 for the new SE/LVD/differential
 * mode.  Returns 1 on success, 0 on an invalid mode or timeout.
 */
int
siop_modechange(struct siop_common_softc *sc)
{
	int retry;
	int sist0, sist1, stest2;

	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    device_xname(sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    device_xname(sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    device_xname(sc->sc_dev));
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			aprint_error_dev(sc->sc_dev, "invalid SCSI mode 0x%x\n",
			    sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    device_xname(sc->sc_dev));
	return 0;
}

/*
 * Assert SCSI RST on the bus for ~100us (spec minimum is 25us), then
 * restore SCNTL1.
 */
void
siop_resetbus(struct siop_common_softc *sc)
{
	int scntl1;

	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
	    scntl1 | SCNTL1_RST);
	/* minimum 25 us, more time won't hurt */
	delay(100);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
}

/*
 * Report the negotiated transfer mode (wide/sync/tagged queueing) for
 * 'target' to the scsipi layer via an ASYNC_EVENT_XFER_MODE event.
 */
void
siop_update_xfer_mode(struct siop_common_softc *sc, int target)
{
	struct siop_common_target *siop_target = sc->targets[target];
	struct scsipi_xfer_mode xm;

	xm.xm_target = target;
	xm.xm_mode = 0;
	xm.xm_period = 0;
	xm.xm_offset = 0;

	if (siop_target->flags & TARF_ISWIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;
	if (siop_target->period) {
		xm.xm_period = siop_target->period;
		xm.xm_offset = siop_target->offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}
	if (siop_target->flags & TARF_TAG) {
	/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
		    (sc->targets[target]->flags & TARF_ISWIDE))
			xm.xm_mode |= PERIPH_CAP_TQING;
	}

	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
}