1 /* $OpenBSD: siop_common.c,v 1.46 2024/09/01 03:08:56 jsg Exp $ */
2 /* $NetBSD: siop_common.c,v 1.37 2005/02/27 00:27:02 perry Exp $ */
3
4 /*
5 * Copyright (c) 2000, 2002 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 */
28
29 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/device.h>
34 #include <sys/malloc.h>
35 #include <sys/buf.h>
36 #include <sys/kernel.h>
37 #include <sys/scsiio.h>
38 #include <sys/endian.h>
39
40 #include <machine/bus.h>
41
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_message.h>
44 #include <scsi/scsiconf.h>
45
46 #define SIOP_NEEDS_PERIOD_TABLES
47 #include <dev/ic/siopreg.h>
48 #include <dev/ic/siopvar_common.h>
49 #include <dev/ic/siopvar.h>
50
51 #undef DEBUG
52 #undef DEBUG_DR
53 #undef DEBUG_NEG
54
55 int
56 siop_common_attach(struct siop_common_softc *sc)
57 {
58 int error, i, buswidth;
59 bus_dma_segment_t seg;
60 int rseg;
61
62 /*
63 * Allocate DMA-safe memory for the script and map it.
64 */
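	/*
	 * Chips flagged SF_CHIP_RAM have on-board script RAM and run the
	 * SCRIPTS program from there, so the host-memory allocation below
	 * is only needed when that feature is absent.
	 */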
65 if ((sc->features & SF_CHIP_RAM) == 0) {
66 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
67 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
68 if (error) {
69 printf("%s: unable to allocate script DMA memory, "
70 "error = %d\n", sc->sc_dev.dv_xname, error);
71 return error;
72 }
73 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
74 (caddr_t *)&sc->sc_script,
75 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
76 if (error) {
77 printf("%s: unable to map script DMA memory, "
78 "error = %d\n", sc->sc_dev.dv_xname, error);
79 return error;
80 }
81 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
82 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
83 if (error) {
84 printf("%s: unable to create script DMA map, "
85 "error = %d\n", sc->sc_dev.dv_xname, error);
86 return error;
87 }
88 error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
89 sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
90 if (error) {
91 printf("%s: unable to load script DMA map, "
92 "error = %d\n", sc->sc_dev.dv_xname, error);
93 return error;
94 }
95 sc->sc_scriptaddr =
96 sc->sc_scriptdma->dm_segs[0].ds_addr;
97 sc->ram_size = PAGE_SIZE;
98 }
99
100 /*
101 * sc->sc_link is the template for all device sc_link's
102 * for devices attached to this adapter. It is passed to
103 * the upper layers in config_found().
104 */
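	/*
	 * Use whatever host ID is currently in SCID (typically left there
	 * by the adapter firmware/BIOS); fall back to the default if it is
	 * 0 or out of range for this bus width.
	 */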
105 buswidth = (sc->features & SF_BUS_WIDE) ? 16 : 8;
106 sc->sc_id = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
107 if (sc->sc_id == 0 || sc->sc_id >= buswidth)
108 sc->sc_id = SIOP_DEFAULT_TARGET;
109
110 for (i = 0; i < 16; i++)
111 sc->targets[i] = NULL;
112
113 /* find min/max sync period for this chip */
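	/*
	 * scf_period[]/dt_scf_period[] (made visible by
	 * SIOP_NEEDS_PERIOD_TABLES above) list, for each supported chip
	 * clock, the SCSI transfer period factors the chip can handle and
	 * the matching SCNTL3 SCF divider.  The loops below record the
	 * smallest (fastest) and largest factors available at our
	 * clock_period; they serve as bounds during SDTR/PPR negotiation.
	 */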
114 sc->st_maxsync = 0;
115 sc->dt_maxsync = 0;
116 sc->st_minsync = 255;
117 sc->dt_minsync = 255;
118 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
119 if (sc->clock_period != scf_period[i].clock)
120 continue;
121 if (sc->st_maxsync < scf_period[i].period)
122 sc->st_maxsync = scf_period[i].period;
123 if (sc->st_minsync > scf_period[i].period)
124 sc->st_minsync = scf_period[i].period;
125 }
126 if (sc->st_maxsync == 255 || sc->st_minsync == 0)
127 panic("siop: can't find my sync parameters");
128 for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
129 if (sc->clock_period != dt_scf_period[i].clock)
130 continue;
131 if (sc->dt_maxsync < dt_scf_period[i].period)
132 sc->dt_maxsync = dt_scf_period[i].period;
133 if (sc->dt_minsync > dt_scf_period[i].period)
134 sc->dt_minsync = dt_scf_period[i].period;
135 }
136 if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
137 panic("siop: can't find my sync parameters");
138 return 0;
139 }
140
141 void
142 siop_common_reset(struct siop_common_softc *sc)
143 {
144 u_int32_t stest3;
145
146 /* reset the chip */
147 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
148 delay(1000);
149 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
150
151 /* init registers */
152 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
153 SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
154 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
155 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
156 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
157 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
158 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
159 0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
160 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
161 0xff & ~(SIEN1_HTH | SIEN1_GEN));
162 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
163 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
164 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
165 (0xb << STIME0_SEL_SHIFT));
166 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
167 sc->sc_id | SCID_RRE);
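	/* respond to selection/reselection only as our own ID */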
168 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
169 1 << sc->sc_id);
170 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
171 (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
172 if (sc->features & SF_CHIP_AAIP)
173 bus_space_write_1(sc->sc_rt, sc->sc_rh,
174 SIOP_AIPCNTL1, AIPCNTL1_DIS);
175
176 /* enable clock doubler or quadrupler if appropriate */
177 if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
178 stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
179 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
180 STEST1_DBLEN);
181 if (sc->features & SF_CHIP_QUAD) {
182 /* wait for the PLL to lock */
183 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
184 SIOP_STEST4) & STEST4_LOCK) == 0)
185 delay(10);
186 } else {
187 /* data sheet says 20us - more won't hurt */
188 delay(100);
189 }
190 /* halt scsi clock, select doubler/quad, restart clock */
191 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
192 stest3 | STEST3_HSC);
193 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
194 STEST1_DBLEN | STEST1_DBLSEL);
195 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
196 } else {
197 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
198 }
199 if (sc->features & SF_CHIP_FIFO)
200 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
201 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
202 CTEST5_DFS);
203 if (sc->features & SF_CHIP_LED0) {
204 /* Set GPIO0 as output if software LED control is required */
205 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
206 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
207 }
208 if (sc->features & SF_BUS_ULTRA3) {
209 /* reset SCNTL4 */
210 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
211 }
212 sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
213 STEST4_MODE_MASK;
214
215 /*
216 * initialise the RAM. Without this we may get scsi gross errors on
217 * the 1010
218 */
219 if (sc->features & SF_CHIP_RAM)
220 bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
221 0, 0, sc->ram_size / 4);
222 sc->sc_reset(sc);
223 }
224
225 /* prepare tables before sending a cmd */
226 void
227 siop_setuptables(struct siop_common_cmd *siop_cmd)
228 {
229 int i;
230 struct siop_common_softc *sc = siop_cmd->siop_sc;
231 struct scsi_xfer *xs = siop_cmd->xs;
232 int target = xs->sc_link->target;
233 int lun = xs->sc_link->lun;
234 int msgoffset = 1;
235 int *targ_flags = &sc->targets[target]->flags;
236 int quirks;
237
238 siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
239 memset(siop_cmd->siop_tables->msg_out, 0,
240 sizeof(siop_cmd->siop_tables->msg_out));
241 /* request sense doesn't disconnect */
242 if (siop_cmd->status == CMDST_SENSE)
243 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
244 else if ((sc->features & SF_CHIP_GEBUG) &&
245 (sc->targets[target]->flags & TARF_ISWIDE) == 0)
246 /*
247 * 1010 bug: it seems that the 1010 has problems with reselect
248 * when not in wide mode (it generates false SCSI gross errors).
249 * The FreeBSD sym driver has comments about it but their
250 * workaround (disable SCSI gross error reporting) doesn't
251 * work with my adapter. So disable disconnect when not
252 * wide.
253 */
254 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
255 else
256 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
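	/*
	 * In all three cases above msg_out[0] is an IDENTIFY message; the
	 * second argument of MSG_IDENTIFY() sets the DiscPriv bit, i.e.
	 * whether the target may disconnect, which is why the request
	 * sense path and the 1010 workaround pass 0.
	 */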
257 siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
258 if (sc->targets[target]->status == TARST_ASYNC) {
259 *targ_flags &= TARF_DT; /* Save TARF_DT 'cuz we don't set it here */
260 quirks = xs->sc_link->quirks;
261
262 if ((quirks & SDEV_NOTAGS) == 0)
263 *targ_flags |= TARF_TAG;
264 if (((quirks & SDEV_NOWIDE) == 0) &&
265 (sc->features & SF_BUS_WIDE))
266 *targ_flags |= TARF_WIDE;
267 if ((quirks & SDEV_NOSYNC) == 0)
268 *targ_flags |= TARF_SYNC;
269
270 if ((sc->features & SF_CHIP_GEBUG) &&
271 (*targ_flags & TARF_WIDE) == 0)
272 /*
273 * 1010 workaround: can't do disconnect if not wide,
274 * so can't do tag
275 */
276 *targ_flags &= ~TARF_TAG;
277
278 /* Safe to call siop_add_dev() multiple times */
279 siop_add_dev((struct siop_softc *)sc, target, lun);
280
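		/*
		 * Pick the first negotiation to start for this target:
		 * PPR for DT transfers (only possible on an LVD bus),
		 * else WDTR if wide is allowed, else SDTR if sync is
		 * allowed; otherwise stay async/narrow and mark the
		 * target ready as-is.
		 */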
281 if ((*targ_flags & TARF_DT) &&
282 (sc->mode == STEST4_MODE_LVD)) {
283 sc->targets[target]->status = TARST_PPR_NEG;
284 siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
285 sc->maxoff);
286 } else if (*targ_flags & TARF_WIDE) {
287 sc->targets[target]->status = TARST_WIDE_NEG;
288 siop_wdtr_msg(siop_cmd, msgoffset,
289 MSG_EXT_WDTR_BUS_16_BIT);
290 } else if (*targ_flags & TARF_SYNC) {
291 sc->targets[target]->status = TARST_SYNC_NEG;
292 siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
293 (sc->maxoff > 31) ? 31 : sc->maxoff);
294 } else {
295 sc->targets[target]->status = TARST_OK;
296 siop_update_xfer_mode(sc, target);
297 }
298 } else if (sc->targets[target]->status == TARST_OK &&
299 (*targ_flags & TARF_TAG) &&
300 siop_cmd->status != CMDST_SENSE) {
301 siop_cmd->flags |= CMDFL_TAG;
302 }
303 siop_cmd->siop_tables->status =
304 siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */
305
306 if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) ||
307 siop_cmd->status == CMDST_SENSE) {
308 bzero(siop_cmd->siop_tables->data,
309 sizeof(siop_cmd->siop_tables->data));
310 for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
311 siop_cmd->siop_tables->data[i].count =
312 siop_htoc32(sc,
313 siop_cmd->dmamap_data->dm_segs[i].ds_len);
314 siop_cmd->siop_tables->data[i].addr =
315 siop_htoc32(sc,
316 siop_cmd->dmamap_data->dm_segs[i].ds_addr);
317 }
318 }
319 }
320
321 int
322 siop_wdtr_neg(struct siop_common_cmd *siop_cmd)
323 {
324 struct siop_common_softc *sc = siop_cmd->siop_sc;
325 struct siop_common_target *siop_target = siop_cmd->siop_target;
326 int target = siop_cmd->xs->sc_link->target;
327 struct siop_common_xfer *tables = siop_cmd->siop_tables;
328
329 if (siop_target->status == TARST_WIDE_NEG) {
330 /* we initiated wide negotiation */
331 switch (tables->msg_in[3]) {
332 case MSG_EXT_WDTR_BUS_8_BIT:
333 siop_target->flags &= ~TARF_ISWIDE;
334 sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
335 break;
336 case MSG_EXT_WDTR_BUS_16_BIT:
337 if (siop_target->flags & TARF_WIDE) {
338 siop_target->flags |= TARF_ISWIDE;
339 sc->targets[target]->id |= (SCNTL3_EWS << 24);
340 break;
341 }
342 /* FALLTHROUGH */
343 default:
344 /*
345 * hmm, we got a width we can't handle; this shouldn't
346 * happen. Reject it and stay async.
347 */
348 siop_target->flags &= ~TARF_ISWIDE;
349 siop_target->status = TARST_OK;
350 siop_target->offset = siop_target->period = 0;
351 siop_update_xfer_mode(sc, target);
352 printf("%s: rejecting invalid wide negotiation from "
353 "target %d (%d)\n", sc->sc_dev.dv_xname, target,
354 tables->msg_in[3]);
355 tables->t_msgout.count = siop_htoc32(sc, 1);
356 tables->msg_out[0] = MSG_MESSAGE_REJECT;
357 return SIOP_NEG_MSGOUT;
358 }
359 tables->id = siop_htoc32(sc, sc->targets[target]->id);
360 bus_space_write_1(sc->sc_rt, sc->sc_rh,
361 SIOP_SCNTL3,
362 (sc->targets[target]->id >> 24) & 0xff);
363 /* we now need to do sync */
364 if (siop_target->flags & TARF_SYNC) {
365 siop_target->status = TARST_SYNC_NEG;
366 siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
367 (sc->maxoff > 31) ? 31 : sc->maxoff);
368 return SIOP_NEG_MSGOUT;
369 } else {
370 siop_target->status = TARST_OK;
371 siop_update_xfer_mode(sc, target);
372 return SIOP_NEG_ACK;
373 }
374 } else {
375 /* target initiated wide negotiation */
376 if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
377 && (siop_target->flags & TARF_WIDE)) {
378 siop_target->flags |= TARF_ISWIDE;
379 sc->targets[target]->id |= SCNTL3_EWS << 24;
380 } else {
381 siop_target->flags &= ~TARF_ISWIDE;
382 sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
383 }
384 tables->id = siop_htoc32(sc, sc->targets[target]->id);
385 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
386 (sc->targets[target]->id >> 24) & 0xff);
387 /*
388 * we just reset the wide parameters, so fall back to async,
389 * but don't schedule a sync negotiation; the target should initiate it
390 */
391 siop_target->status = TARST_OK;
392 siop_target->offset = siop_target->period = 0;
393 siop_update_xfer_mode(sc, target);
394 siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
395 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
396 return SIOP_NEG_MSGOUT;
397 }
398 }
399
400 int
401 siop_ppr_neg(struct siop_common_cmd *siop_cmd)
402 {
403 struct siop_common_softc *sc = siop_cmd->siop_sc;
404 struct siop_common_target *siop_target = siop_cmd->siop_target;
405 int target = siop_cmd->xs->sc_link->target;
406 struct siop_common_xfer *tables = siop_cmd->siop_tables;
407 int sync, offset, options, scf = 0;
408 int i;
409
410 #ifdef DEBUG_NEG
411 printf("%s: answer on ppr negotiation:", sc->sc_dev.dv_xname);
412 for (i = 0; i < 8; i++)
413 printf(" 0x%x", tables->msg_in[i]);
414 printf("\n");
415 #endif
416
417 if (siop_target->status == TARST_PPR_NEG) {
418 /* we initiated PPR negotiation */
419 sync = tables->msg_in[3];
420 offset = tables->msg_in[5];
421 options = tables->msg_in[7];
422 if (options != MSG_EXT_PPR_PROT_DT) {
423 /* shouldn't happen */
424 printf("%s: ppr negotiation for target %d: "
425 "no DT option\n", sc->sc_dev.dv_xname, target);
426 siop_target->status = TARST_ASYNC;
427 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
428 siop_target->offset = 0;
429 siop_target->period = 0;
430 goto reject;
431 }
432
433 if (offset > sc->maxoff || sync < sc->dt_minsync ||
434 sync > sc->dt_maxsync) {
435 printf("%s: ppr negotiation for target %d: "
436 "offset (%d) or sync (%d) out of range\n",
437 sc->sc_dev.dv_xname, target, offset, sync);
438 /* should not happen */
439 siop_target->status = TARST_ASYNC;
440 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
441 siop_target->offset = 0;
442 siop_target->period = 0;
443 goto reject;
444 } else {
445 for (i = 0; i <
446 sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
447 i++) {
448 if (sc->clock_period != dt_scf_period[i].clock)
449 continue;
450 if (dt_scf_period[i].period == sync) {
451 /* ok, found it. we are now in sync. */
452 siop_target->offset = offset;
453 siop_target->period = sync;
454 scf = dt_scf_period[i].scf;
455 siop_target->flags |= TARF_ISDT;
456 }
457 }
458 if ((siop_target->flags & TARF_ISDT) == 0) {
459 printf("%s: ppr negotiation for target %d: "
460 "sync (%d) incompatible with adapter\n",
461 sc->sc_dev.dv_xname, target, sync);
462 /*
463 * we didn't find it in our table: go async,
464 * send a reject msg and restart SDTR/WDTR negotiation
465 */
466 siop_target->status = TARST_ASYNC;
467 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
468 siop_target->offset = 0;
469 siop_target->period = 0;
470 goto reject;
471 }
472 }
473 if (tables->msg_in[6] != 1) {
474 printf("%s: ppr negotiation for target %d: "
475 "transfer width (%d) incompatible with dt\n",
476 sc->sc_dev.dv_xname, target, tables->msg_in[6]);
477 /* DT mode can only be done with wide transfers */
478 siop_target->status = TARST_ASYNC;
479 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
480 siop_target->offset = 0;
481 siop_target->period = 0;
482 goto reject;
483 }
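		/*
		 * sc->targets[]->id packs the per-target register values:
		 * SCNTL3 in bits 31-24, SXFER in bits 15-8 and SCNTL4 in
		 * bits 7-0, as the register writes below show.  Update all
		 * three for wide, DT clocking and the negotiated offset.
		 */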
484 siop_target->flags |= TARF_ISWIDE;
485 sc->targets[target]->id |= (SCNTL3_EWS << 24);
486 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
487 sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
488 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
489 sc->targets[target]->id |=
490 (siop_target->offset & SXFER_MO_MASK) << 8;
491 sc->targets[target]->id &= ~0xff;
492 sc->targets[target]->id |= SCNTL4_U3EN;
493 siop_target->status = TARST_OK;
494 siop_update_xfer_mode(sc, target);
495 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
496 (sc->targets[target]->id >> 24) & 0xff);
497 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
498 (sc->targets[target]->id >> 8) & 0xff);
499 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
500 sc->targets[target]->id & 0xff);
501 return SIOP_NEG_ACK;
502 } else {
503 /* target initiated PPR negotiation, shouldn't happen */
504 printf("%s: rejecting invalid PPR negotiation from "
505 "target %d\n", sc->sc_dev.dv_xname, target);
506 reject:
507 tables->t_msgout.count = siop_htoc32(sc, 1);
508 tables->msg_out[0] = MSG_MESSAGE_REJECT;
509 return SIOP_NEG_MSGOUT;
510 }
511 }
512
513 int
514 siop_sdtr_neg(struct siop_common_cmd *siop_cmd)
515 {
516 struct siop_common_softc *sc = siop_cmd->siop_sc;
517 struct siop_common_target *siop_target = siop_cmd->siop_target;
518 int target = siop_cmd->xs->sc_link->target;
519 int sync, maxoffset, offset, i;
520 int send_msgout = 0;
521 struct siop_common_xfer *tables = siop_cmd->siop_tables;
522
523 /* limit to Ultra/2 parameters, need PPR for Ultra/3 */
524 maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;
525
526 sync = tables->msg_in[3];
527 offset = tables->msg_in[4];
528
529 if (siop_target->status == TARST_SYNC_NEG) {
530 /* we initiated sync negotiation */
531 siop_target->status = TARST_OK;
532 #ifdef DEBUG
533 printf("sdtr: sync %d offset %d\n", sync, offset);
534 #endif
535 if (offset > maxoffset || sync < sc->st_minsync ||
536 sync > sc->st_maxsync)
537 goto reject;
538 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
539 i++) {
540 if (sc->clock_period != scf_period[i].clock)
541 continue;
542 if (scf_period[i].period == sync) {
543 /* ok, found it. we are now in sync. */
544 siop_target->offset = offset;
545 siop_target->period = sync;
546 sc->targets[target]->id &=
547 ~(SCNTL3_SCF_MASK << 24);
548 sc->targets[target]->id |= scf_period[i].scf
549 << (24 + SCNTL3_SCF_SHIFT);
550 if (sync < 25 && /* Ultra */
551 (sc->features & SF_BUS_ULTRA3) == 0)
552 sc->targets[target]->id |=
553 SCNTL3_ULTRA << 24;
554 else
555 sc->targets[target]->id &=
556 ~(SCNTL3_ULTRA << 24);
557 sc->targets[target]->id &=
558 ~(SXFER_MO_MASK << 8);
559 sc->targets[target]->id |=
560 (offset & SXFER_MO_MASK) << 8;
561 sc->targets[target]->id &= ~0xff; /* scntl4 */
562 goto end;
563 }
564 }
565 /*
566 * we didn't find it in our table, do async and send reject
567 * msg
568 */
569 reject:
570 send_msgout = 1;
571 tables->t_msgout.count = siop_htoc32(sc, 1);
572 tables->msg_out[0] = MSG_MESSAGE_REJECT;
573 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
574 sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
575 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
576 sc->targets[target]->id &= ~0xff; /* scntl4 */
577 siop_target->offset = siop_target->period = 0;
578 } else { /* target initiated sync neg */
579 #ifdef DEBUG
580 printf("sdtr (target): sync %d offset %d\n", sync, offset);
581 #endif
582 if (offset == 0 || sync > sc->st_maxsync) { /* async */
583 goto async;
584 }
585 if (offset > maxoffset)
586 offset = maxoffset;
587 if (sync < sc->st_minsync)
588 sync = sc->st_minsync;
589 /* look for sync period */
590 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
591 i++) {
592 if (sc->clock_period != scf_period[i].clock)
593 continue;
594 if (scf_period[i].period == sync) {
595 /* ok, found it. we are now in sync. */
596 siop_target->offset = offset;
597 siop_target->period = sync;
598 sc->targets[target]->id &=
599 ~(SCNTL3_SCF_MASK << 24);
600 sc->targets[target]->id |= scf_period[i].scf
601 << (24 + SCNTL3_SCF_SHIFT);
602 if (sync < 25 && /* Ultra */
603 (sc->features & SF_BUS_ULTRA3) == 0)
604 sc->targets[target]->id |=
605 SCNTL3_ULTRA << 24;
606 else
607 sc->targets[target]->id &=
608 ~(SCNTL3_ULTRA << 24);
609 sc->targets[target]->id &=
610 ~(SXFER_MO_MASK << 8);
611 sc->targets[target]->id |=
612 (offset & SXFER_MO_MASK) << 8;
613 sc->targets[target]->id &= ~0xff; /* scntl4 */
614 siop_sdtr_msg(siop_cmd, 0, sync, offset);
615 send_msgout = 1;
616 goto end;
617 }
618 }
619 async:
620 siop_target->offset = siop_target->period = 0;
621 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
622 sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
623 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
624 sc->targets[target]->id &= ~0xff; /* scntl4 */
625 siop_sdtr_msg(siop_cmd, 0, 0, 0);
626 send_msgout = 1;
627 }
628 end:
629 if (siop_target->status == TARST_OK)
630 siop_update_xfer_mode(sc, target);
631 #ifdef DEBUG
632 printf("id now 0x%x\n", sc->targets[target]->id);
633 #endif
634 tables->id = siop_htoc32(sc, sc->targets[target]->id);
635 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
636 (sc->targets[target]->id >> 24) & 0xff);
637 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
638 (sc->targets[target]->id >> 8) & 0xff);
639 if (send_msgout) {
640 return SIOP_NEG_MSGOUT;
641 } else {
642 return SIOP_NEG_ACK;
643 }
644 }
645
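/*
 * The three helpers below build standard SCSI extended messages in
 * msg_out[] starting at 'offset' and set t_msgout.count to cover the
 * whole message.  The layout is EXTENDED (0x01), length, message code,
 * arguments.  As an illustration (byte values as defined in
 * scsi_message.h), siop_sdtr_msg(cmd, 1, 12, 16) leaves
 * msg_out[1..5] = 0x01 0x03 0x01 0x0c 0x10 and sets t_msgout.count to 6.
 */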
646 void
647 siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
648 {
649 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
650 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
651 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
652 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
653 siop_cmd->siop_tables->msg_out[offset + 4] = soff;
654 siop_cmd->siop_tables->t_msgout.count =
655 siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
656 }
657
658 void
659 siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide)
660 {
661 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
662 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
663 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
664 siop_cmd->siop_tables->msg_out[offset + 3] = wide;
665 siop_cmd->siop_tables->t_msgout.count =
666 siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
667 }
668
669 void
670 siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
671 {
672 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
673 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
674 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
675 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
676 siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
677 siop_cmd->siop_tables->msg_out[offset + 5] = soff;
678 siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
679 siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_PROT_DT;
680 siop_cmd->siop_tables->t_msgout.count =
681 siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
682 }
683
684 void
685 siop_ma(struct siop_common_cmd *siop_cmd)
686 {
687 int offset, dbc, sstat;
688 struct siop_common_softc *sc = siop_cmd->siop_sc;
689 scr_table_t *table; /* table with partial xfer */
690
691 /*
692 * compute how much of the current table didn't get handled when
693 * a phase mismatch occurs
694 */
695 if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
696 == 0)
697 return; /* no valid data transfer */
698
699 offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
700 if (offset >= SIOP_NSG) {
701 printf("%s: bad offset in siop_sdp (%d)\n",
702 sc->sc_dev.dv_xname, offset);
703 return;
704 }
705 table = &siop_cmd->siop_tables->data[offset];
706 #ifdef DEBUG_DR
707 printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
708 table->count, table->addr);
709 #endif
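	/*
	 * DBC holds the 24-bit count of bytes not yet moved by the
	 * interrupted block move.  For writes we must also add back bytes
	 * still sitting in the chip's DMA and SCSI FIFOs, since those
	 * never made it onto the bus.
	 */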
710 dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
711 if (siop_cmd->xs->flags & SCSI_DATA_OUT) {
712 if (sc->features & SF_CHIP_DFBC) {
713 dbc +=
714 bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
715 } else {
716 /* need to account for stale data in the FIFO */
717 int dfifo =
718 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
719 if (sc->features & SF_CHIP_FIFO) {
720 dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
721 SIOP_CTEST5) & CTEST5_BOMASK) << 8;
722 dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
723 } else {
724 dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
725 }
726 }
727 sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
728 if (sstat & SSTAT0_OLF)
729 dbc++;
730 if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
731 dbc++;
732 if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
733 sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
734 SIOP_SSTAT2);
735 if (sstat & SSTAT2_OLF1)
736 dbc++;
737 if ((sstat & SSTAT2_ORF1) &&
738 (sc->features & SF_CHIP_DFBC) == 0)
739 dbc++;
740 }
741 /* clear the FIFO */
742 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
743 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
744 CTEST3_CLF);
745 }
746 siop_cmd->flags |= CMDFL_RESID;
747 siop_cmd->resid = dbc;
748 }
749
750 void
751 siop_sdp(struct siop_common_cmd *siop_cmd, int offset)
752 {
753 struct siop_common_softc *sc = siop_cmd->siop_sc;
754 scr_table_t *table;
755
756 if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))== 0)
757 return; /* no data pointers to save */
758
759 /*
760 * offset == SIOP_NSG may be a valid condition if we get a Save data
761 * pointer when the xfer is done. Just ignore the Save data pointer
762 * in this case
763 */
764 if (offset == SIOP_NSG)
765 return;
766 #ifdef DIAGNOSTIC
767 if (offset > SIOP_NSG) {
768 sc_print_addr(siop_cmd->xs->sc_link);
769 printf("offset %d > %d\n", offset, SIOP_NSG);
770 panic("siop_sdp: offset");
771 }
772 #endif
773 /*
774 * Save data pointer. We do this by adjusting the tables to point
775 * at the beginning of the data not yet transferred.
776 * offset points to the first table with untransferred data.
777 */
778
779 /*
780 * before doing that, we decrease resid by the amount of data which
781 * has already been transferred.
782 */
783 siop_update_resid(siop_cmd, offset);
784
785 /*
786 * First let's see if we have a resid from a phase mismatch. If so,
787 * we have to adjust the table at offset to remove transferred data.
788 */
789 if (siop_cmd->flags & CMDFL_RESID) {
790 siop_cmd->flags &= ~CMDFL_RESID;
791 table = &siop_cmd->siop_tables->data[offset];
792 /* "cut" already transferred data from this table */
793 table->addr =
794 siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
795 siop_ctoh32(sc, table->count) - siop_cmd->resid);
796 table->count = siop_htoc32(sc, siop_cmd->resid);
797 }
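	/*
	 * For example, with resid == 100 the table at 'offset' gets its
	 * addr advanced by (count - 100) and its count set to 100, so a
	 * restarted transfer resumes at the first untransferred byte.
	 */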
798
799 /*
800 * now we can remove entries which have been transferred.
801 * We just move the entries with data left at the beginning of the
802 * tables
803 */
804 bcopy(&siop_cmd->siop_tables->data[offset],
805 &siop_cmd->siop_tables->data[0],
806 (SIOP_NSG - offset) * sizeof(scr_table_t));
807 }
808
809 void
810 siop_update_resid(struct siop_common_cmd *siop_cmd, int offset)
811 {
812 struct siop_common_softc *sc = siop_cmd->siop_sc;
813 scr_table_t *table;
814 int i;
815
816 if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
817 == 0)
818 return; /* no data to transfer */
819
820 /*
821 * update resid. First account for the table entries which have
822 * been fully completed.
823 */
824 for (i = 0; i < offset; i++)
825 siop_cmd->xs->resid -=
826 siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
827 /*
828 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
829 * partial transfer. If not, offset points to the entry following
830 * the last full transfer.
831 */
832 if (siop_cmd->flags & CMDFL_RESID) {
833 table = &siop_cmd->siop_tables->data[offset];
834 siop_cmd->xs->resid -=
835 siop_ctoh32(sc, table->count) - siop_cmd->resid;
836 }
837 }
838
839 int
840 siop_iwr(struct siop_common_cmd *siop_cmd)
841 {
842 int offset;
843 scr_table_t *table; /* table with IWR */
844 struct siop_common_softc *sc = siop_cmd->siop_sc;
845 /* handle ignore wide residue messages */
846
847 /* if target isn't wide, reject */
848 if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
849 siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
850 siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
851 return SIOP_NEG_MSGOUT;
852 }
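	/*
	 * IGNORE WIDE RESIDUE means the last data phase carried one byte
	 * less than the wide (2-byte) transfer implied; find the table
	 * that byte belongs to and adjust the residual accordingly.
	 */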
853 /* get index of current command in table */
854 offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
855 /*
856 * if the current table did complete, we're now pointing at the
857 * next one. Go back one if we didn't see a phase mismatch.
858 */
859 if ((siop_cmd->flags & CMDFL_RESID) == 0)
860 offset--;
861 table = &siop_cmd->siop_tables->data[offset];
862
863 if ((siop_cmd->flags & CMDFL_RESID) == 0) {
864 if (siop_ctoh32(sc, table->count) & 1) {
865 /* we really got the number of bytes we expected */
866 return SIOP_NEG_ACK;
867 } else {
868 /*
869 * now we really had a short xfer, by one byte.
870 * handle it just as if we had a phase mismatch
871 * (there is a resid of one for this table).
872 * Update scratcha1 to reflect the fact that
873 * this xfer isn't complete.
874 */
875 siop_cmd->flags |= CMDFL_RESID;
876 siop_cmd->resid = 1;
877 bus_space_write_1(sc->sc_rt, sc->sc_rh,
878 SIOP_SCRATCHA + 1, offset);
879 return SIOP_NEG_ACK;
880 }
881 } else {
882 /*
883 * we already have a short xfer for this table; it's
884 * just one byte less than we thought it was
885 */
886 siop_cmd->resid--;
887 return SIOP_NEG_ACK;
888 }
889 }
890
891 void
892 siop_clearfifo(struct siop_common_softc *sc)
893 {
894 int timeout = 0;
895 int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
896
897 #ifdef DEBUG_INTR
898 printf("DMA fifo not empty !\n");
899 #endif
900 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
901 ctest3 | CTEST3_CLF);
902 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
903 CTEST3_CLF) != 0) {
904 delay(1);
905 if (++timeout > 1000) {
906 printf("clear fifo failed\n");
907 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
908 bus_space_read_1(sc->sc_rt, sc->sc_rh,
909 SIOP_CTEST3) & ~CTEST3_CLF);
910 return;
911 }
912 }
913 }
914
915 int
916 siop_modechange(struct siop_common_softc *sc)
917 {
918 int retry;
919 int sist0, sist1, stest2;
920 for (retry = 0; retry < 5; retry++) {
921 /*
922 * datasheet says to wait 100ms and re-read SIST1,
923 * to check that DIFFSENSE is stable.
924 * We may delay() 5 times for 100ms at interrupt time;
925 * hopefully this will not happen often.
926 */
927 delay(100000);
928 sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
929 sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
930 if (sist1 & SIEN1_SBMC)
931 continue; /* we got an irq again */
932 sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
933 STEST4_MODE_MASK;
934 stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
935 switch(sc->mode) {
936 case STEST4_MODE_DIF:
937 printf("%s: switching to differential mode\n",
938 sc->sc_dev.dv_xname);
939 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
940 stest2 | STEST2_DIF);
941 break;
942 case STEST4_MODE_SE:
943 printf("%s: switching to single-ended mode\n",
944 sc->sc_dev.dv_xname);
945 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
946 stest2 & ~STEST2_DIF);
947 break;
948 case STEST4_MODE_LVD:
949 printf("%s: switching to LVD mode\n",
950 sc->sc_dev.dv_xname);
951 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
952 stest2 & ~STEST2_DIF);
953 break;
954 default:
955 printf("%s: invalid SCSI mode 0x%x\n",
956 sc->sc_dev.dv_xname, sc->mode);
957 return 0;
958 }
959 return 1;
960 }
961 printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
962 sc->sc_dev.dv_xname);
963 return 0;
964 }
965
966 void
967 siop_resetbus(struct siop_common_softc *sc)
968 {
969 int scntl1;
970 scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
971 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
972 scntl1 | SCNTL1_RST);
973 /* minimum 25 us, more time won't hurt */
974 delay(100);
975 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
976 }
977
978 void
979 siop_update_xfer_mode(struct siop_common_softc *sc, int target)
980 {
981 struct siop_common_target *siop_target;
982
983 siop_target = sc->targets[target];
984
985 printf("%s: target %d now using %s%s%d bit ",
986 sc->sc_dev.dv_xname, target,
987 (siop_target->flags & TARF_TAG) ? "tagged " : "",
988 (siop_target->flags & TARF_ISDT) ? "DT " : "",
989 (siop_target->flags & TARF_ISWIDE) ? 16 : 8);
990
991 if (siop_target->offset == 0)
992 printf("async ");
993 else {
994 switch (siop_target->period) {
995 case 9: /* 12.5ns cycle */
996 printf("80.0");
997 break;
998 case 10: /* 25 ns cycle */
999 printf("40.0");
1000 break;
1001 case 12: /* 48 ns cycle */
1002 printf("20.0");
1003 break;
1004 case 18: /* 72 ns cycle */
1005 printf("13.3");
1006 break;
1007 case 25: /* 100 ns cycle */
1008 printf("10.0");
1009 break;
1010 case 37: /* 118 ns cycle */
1011 printf("6.67");
1012 break;
1013 case 50: /* 200 ns cycle */
1014 printf("5.0");
1015 break;
1016 case 75: /* 300 ns cycle */
1017 printf("3.33");
1018 break;
1019 default:
1020 printf("??");
1021 break;
1022 }
1023 printf(" MHz %d REQ/ACK offset ", siop_target->offset);
1024 }
1025
1026 printf("xfers\n");
1027
1028 if ((sc->features & SF_CHIP_GEBUG) &&
1029 (siop_target->flags & TARF_ISWIDE) == 0)
1030 /* 1010 workaround: can't do disconnect if not wide, so can't do tag */
1031 siop_target->flags &= ~TARF_TAG;
1032 }
1033