/*	$OpenBSD: twe.c,v 1.67 2022/04/16 19:19:59 naddy Exp $	*/

/*
 * Copyright (c) 2000-2002 Michael Shalayeff.  All rights reserved.
 *
 * The SCSI emulation layer is derived from gdt(4) driver,
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* #define TWE_DEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kthread.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#include <dev/ic/twereg.h>
#include <dev/ic/twevar.h>

#ifdef TWE_DEBUG
#define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
#define	TWE_D_CMD	0x0001
#define	TWE_D_INTR	0x0002
#define	TWE_D_MISC	0x0004
#define	TWE_D_DMA	0x0008
#define	TWE_D_AEN	0x0010
int twe_debug = 0;
#else
#define	TWE_DPRINTF(m,a)	/* m, a */
#endif

struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};

void twe_scsi_cmd(struct scsi_xfer *);

const struct scsi_adapter twe_switch = {
	twe_scsi_cmd, NULL, NULL, NULL, NULL
};

void *twe_get_ccb(void *);
void twe_put_ccb(void *, void *);
void twe_dispose(struct twe_softc *sc);
int twe_cmd(struct twe_ccb *ccb, int flags, int wait);
int twe_start(struct twe_ccb *ccb, int wait);
int twe_complete(struct twe_ccb *ccb);
int twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
void twe_thread_create(void *v);
void twe_thread(void *v);
void twe_aen(void *, void *);

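/*
 * scsi iopool glue: hand out a free ccb, or NULL if the free list is empty.
 */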
void *
twe_get_ccb(void *xsc)
{
	struct twe_softc *sc = xsc;
	struct twe_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
	if (ccb != NULL)
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}

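/*
 * scsi iopool glue: mark the ccb free and return it to the free list.
 */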
void
twe_put_ccb(void *xsc, void *xccb)
{
	struct twe_softc *sc = xsc;
	struct twe_ccb *ccb = xccb;

	ccb->ccb_state = TWE_CCB_FREE;
	mtx_enter(&sc->sc_ccb_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}

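/*
 * Tear down the dma maps and command memory allocated in twe_attach().
 */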
void
twe_dispose(struct twe_softc *sc)
{
	register struct twe_ccb *ccb;
	if (sc->sc_cmdmap != NULL) {
		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
		/* traverse the ccbs and destroy the maps */
		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
			if (ccb->ccb_dmamap)
				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
	}
	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
}

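/*
 * Allocate and map command memory, build the ccb free list, reset the
 * card and drain its AEN queue, then probe the logical units and attach
 * the scsibus on top of them.
 */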
int
twe_attach(struct twe_softc *sc)
{
	struct scsibus_attach_args saa;
	/* this includes a buffer for drive config req, and a capacity req */
	u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
	struct twe_param *pb = (void *)
	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
	struct twe_ccb *ccb;
	struct twe_cmd *cmd;
	u_int32_t status;
	int error, i, retry, nunits, nseg;
	const char *errstr;
	twe_lock_t lock;
	paddr_t pa;

	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot allocate commands (%d)\n", error);
		return (1);
	}

	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot map commands (%d)\n", error);
		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
		return (1);
	}

	error = bus_dmamap_create(sc->dmat,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
	if (error) {
		printf(": cannot create ccb cmd dmamap (%d)\n", error);
		twe_dispose(sc);
		return (1);
	}
	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf(": cannot load command dma map (%d)\n", error);
		twe_dispose(sc);
		return (1);
	}

	TAILQ_INIT(&sc->sc_ccb2q);
	TAILQ_INIT(&sc->sc_ccbq);
	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_done_ccb);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, twe_get_ccb, twe_put_ccb);

	scsi_ioh_set(&sc->sc_aen, &sc->sc_iopool, twe_aen, sc);

	pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
	    sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
	for (cmd = (struct twe_cmd *)sc->sc_cmds + TWE_MAXCMDS - 1;
	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {

		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
		ccb = &sc->sc_ccbs[cmd->cmd_index];
		error = bus_dmamap_create(sc->dmat,
		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf(": cannot create ccb dmamap (%d)\n", error);
			twe_dispose(sc);
			return (1);
		}
		ccb->ccb_sc = sc;
		ccb->ccb_cmd = cmd;
		ccb->ccb_cmdpa = pa;
		ccb->ccb_state = TWE_CCB_FREE;
		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	}

	for (errstr = NULL, retry = 3; retry--; ) {
		int veseen_srst;
		u_int16_t aen;

		if (errstr)
			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));

		for (i = 350000; i--; DELAY(100)) {
			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			if (status & TWE_STAT_CPURDY)
				break;
		}

		if (!(status & TWE_STAT_CPURDY)) {
			errstr = ": card CPU is not ready\n";
			continue;
		}

		/* soft reset, disable ints */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_SRST |
		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
		    TWE_CTRL_MINT);

		for (i = 350000; i--; DELAY(100)) {
			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			if (status & TWE_STAT_ATTNI)
				break;
		}

		if (!(status & TWE_STAT_ATTNI)) {
			errstr = ": cannot get card's attention\n";
			continue;
		}

		/* drain aen queue */
		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {

			ccb = scsi_io_get(&sc->sc_iopool, 0);
			if (ccb == NULL) {
				errstr = ": out of ccbs\n";
				break;
			}

			ccb->ccb_xs = NULL;
			ccb->ccb_data = pb;
			ccb->ccb_length = TWE_SECTOR_SIZE;
			ccb->ccb_state = TWE_CCB_READY;
			cmd = ccb->ccb_cmd;
			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
			cmd->cmd_op = TWE_CMD_GPARAM;
			cmd->cmd_param.count = 1;

			pb->table_id = TWE_PARAM_AEN;
			pb->param_id = 2;
			pb->param_size = 2;

			error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
			scsi_io_put(&sc->sc_iopool, ccb);
			if (error) {
				errstr = ": error draining attention queue\n";
				break;
			}

			aen = *(u_int16_t *)pb->data;
			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
			if (aen == TWE_AEN_SRST)
				veseen_srst++;
		}

		if (!veseen_srst) {
			errstr = ": we don't get it\n";
			continue;
		}

		if (status & TWE_STAT_CPUERR) {
			errstr = ": card CPU error detected\n";
			continue;
		}

		if (status & TWE_STAT_PCIPAR) {
			errstr = ": PCI parity error detected\n";
			continue;
		}

		if (status & TWE_STAT_QUEUEE) {
			errstr = ": queuing error detected\n";
			continue;
		}

		if (status & TWE_STAT_PCIABR) {
			errstr = ": PCI abort\n";
			continue;
		}

		while (!(status & TWE_STAT_RQE)) {
			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		}

		break;
	}

	if (retry < 0) {
		printf("%s", errstr);
		twe_dispose(sc);
		return 1;
	}

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		printf(": out of ccbs\n");
		twe_dispose(sc);
		return 1;
	}

	ccb->ccb_xs = NULL;
	ccb->ccb_data = pb;
	ccb->ccb_length = TWE_SECTOR_SIZE;
	ccb->ccb_state = TWE_CCB_READY;
	cmd = ccb->ccb_cmd;
	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
	cmd->cmd_op = TWE_CMD_GPARAM;
	cmd->cmd_param.count = 1;

	pb->table_id = TWE_PARAM_UC;
	pb->param_id = TWE_PARAM_UC;
	pb->param_size = TWE_MAX_UNITS;

	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
	scsi_io_put(&sc->sc_iopool, ccb);
	if (error) {
		printf(": failed to fetch unit parameters\n");
		twe_dispose(sc);
		return 1;
	}

	/* we are assuming last read status was good */
	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));

	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
		if (pb->data[i] == 0)
			continue;

		ccb = scsi_io_get(&sc->sc_iopool, 0);
		if (ccb == NULL) {
			printf(": out of ccbs\n");
			twe_dispose(sc);
			return 1;
		}

		ccb->ccb_xs = NULL;
		ccb->ccb_data = cap;
		ccb->ccb_length = TWE_SECTOR_SIZE;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
		cmd->cmd_op = TWE_CMD_GPARAM;
		cmd->cmd_param.count = 1;

		cap->table_id = TWE_PARAM_UI + i;
		cap->param_id = 4;
		cap->param_size = 4;	/* 4 bytes */

		lock = TWE_LOCK(sc);
		error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
		TWE_UNLOCK(sc, lock);
		scsi_io_put(&sc->sc_iopool, ccb);
		if (error) {
			printf("%s: error fetching capacity for unit %d\n",
			    sc->sc_dev.dv_xname, i);
			continue;
		}

		nunits++;
		sc->sc_hdr[i].hd_present = 1;
		sc->sc_hdr[i].hd_devtype = 0;
		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d\n",
		    i, sc->sc_hdr[i].hd_size));
	}

	if (!nunits)
		nunits++;

	/* TODO: fetch & print cache params? */

	saa.saa_adapter_softc = sc;
	saa.saa_adapter = &twe_switch;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = TWE_MAX_UNITS;
	saa.saa_luns = 8;
	saa.saa_openings = TWE_MAXCMDS / nunits;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	config_found(&sc->sc_dev, &saa, scsiprint);

	kthread_create_deferred(twe_thread_create, sc);

	return (0);
}

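/*
 * Deferred from twe_attach(): start the worker thread and, once that
 * succeeds, acknowledge pending status and enable interrupts.
 */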
void
twe_thread_create(void *v)
{
	struct twe_softc *sc = v;

	if (kthread_create(twe_thread, sc, &sc->sc_thread,
	    sc->sc_dev.dv_xname)) {
		/* TODO disable twe */
		printf("%s: failed to create kernel thread, disabled\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/*
	 * ack everything before enabling; this cannot be done in one
	 * operation since the clear bits do not seem to be processed
	 * when enable is specified in the same write.
	 */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/* enable interrupts */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);
}

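/*
 * Worker thread: completes ccbs queued by the interrupt handler and
 * feeds prequeued commands to the card whenever its queue has room.
 */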
void
twe_thread(void *v)
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	twe_lock_t lock;
	u_int32_t status;
	int err;

	for (;;) {
		lock = TWE_LOCK(sc);

		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
			if ((err = twe_done(sc, ccb)))
				printf("%s: done failed (%d)\n",
				    sc->sc_dev.dv_xname, err);
		}

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_ECMDI);

		TWE_UNLOCK(sc, lock);
		sc->sc_thread_on = 1;
		tsleep_nsec(sc, PWAIT, "twespank", INFSLP);
	}
}

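/*
 * Set up the dma transfer for a command (bouncing unaligned buffers),
 * fill in the scatter/gather list and hand the ccb to twe_start();
 * if "wait" is set, poll for completion via twe_complete().
 */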
int
twe_cmd(struct twe_ccb *ccb, int flags, int wait)
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			return (ENOMEM);
		}
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero(sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%lx[%lx] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}

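/*
 * Queue a command: either prequeue it for the worker thread (async)
 * or write it to the command queue register directly (polled).
 */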
int
twe_start(struct twe_ccb *ccb, int wait)
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct twe_cmd *cmd = ccb->ccb_cmd;
	u_int32_t status;
	int i;

	cmd->cmd_op = htole16(cmd->cmd_op);

	if (!wait) {

		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_PREQUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
		wakeup(sc);
		return 0;
	}

	for (i = 1000; i--; DELAY(10)) {

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		if (!(status & TWE_STAT_CQF))
			break;
		TWE_DPRINTF(TWE_D_CMD, ("twe_start stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
	}

	if (!(status & TWE_STAT_CQF)) {
		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
		    ccb->ccb_cmdpa);

		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_QUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
		return 0;

	} else {

		printf("%s: twe_start(%d) timed out\n",
		    sc->sc_dev.dv_xname, cmd->cmd_index);

		return EPERM;
	}
}

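/*
 * Poll the ready queue until the given ccb completes or the timeout
 * expires; other ccbs that finish in the meantime are completed too.
 */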
int
twe_complete(struct twe_ccb *ccb)
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	int i;

	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);

		/* TWE_DPRINTF(TWE_D_CMD, ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		while (!(status & TWE_STAT_RQE)) {
			struct twe_ccb *ccb1;
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
			ccb1->ccb_state = TWE_CCB_DONE;
			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}

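/*
 * Post-process a completed ccb: sync and unload its dma map, copy back
 * bounced data and finish the scsi transfer, if any.
 */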
int
twe_done(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap;
	twe_lock_t lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));

	if (ccb->ccb_state != TWE_CCB_DONE) {
		printf("%s: undone ccb %d ready\n",
		    sc->sc_dev.dv_xname, cmd->cmd_index);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		if (xs->cmd.opcode != PREVENT_ALLOW &&
		    xs->cmd.opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK(sc);

	if (xs) {
		xs->resid = 0;
		scsi_done(xs);
	}
	TWE_UNLOCK(sc, lock);

	return 0;
}

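/*
 * SCSI emulation entry point: serve INQUIRY, READ CAPACITY and friends
 * from the unit table, and translate reads/writes into twe commands.
 */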
void
twe_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->bus->sb_adapter_softc;
	struct twe_ccb *ccb = xs->io;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_10 *rw10;
	int error, op, flags, wait;
	twe_lock_t lock;


	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd.opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd.opcode,
		    target));
		break;

	case REQUEST_SENSE:
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = SSD_ERRCODE_CURRENT;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		scsi_copy_internal_data(xs, &sd, sizeof(sd));
		break;

	case INQUIRY:
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = SCSI_REV_2;
		inq.response_format = SID_SCSI2_RESPONSE;
		inq.additional_length = SID_SCSI2_ALEN;
		strlcpy(inq.vendor, "3WARE ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive #%02d",
		    target);
		strlcpy(inq.revision, " ", sizeof inq.revision);
		scsi_copy_internal_data(xs, &inq, sizeof(inq));
		break;

	case READ_CAPACITY:
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		scsi_copy_internal_data(xs, &rcd, sizeof(rcd));
		break;

	case PREVENT_ALLOW:
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		scsi_done(xs);
		return;

	case READ_COMMAND:
	case READ_10:
	case WRITE_COMMAND:
	case WRITE_10:
	case SYNCHRONIZE_CACHE:
		lock = TWE_LOCK(sc);

		flags = 0;
		if (xs->cmd.opcode == SYNCHRONIZE_CACHE) {
			blockno = blockcnt = 0;
		} else {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)&xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rw10 = (struct scsi_rw_10 *)&xs->cmd;
				blockno = _4btol(rw10->addr);
				blockcnt = _2btol(rw10->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd.opcode == WRITE_10 &&
				    rw10->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				TWE_UNLOCK(sc, lock);
				return;
			}
		}

		switch (xs->cmd.opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_10:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_10:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0);	/* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);
		wait = xs->flags & SCSI_POLL;
		if (!sc->sc_thread_on)
			wait |= SCSI_POLL;

		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {

			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
		}

		TWE_UNLOCK(sc, lock);
		return;

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unsupported scsi command %#x tgt %d ",
		    xs->cmd.opcode, target));
		xs->error = XS_DRIVER_STUFFUP;
	}

	scsi_done(xs);
}

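/*
 * Interrupt handler: move completed ccbs to the done queue for the
 * worker thread and schedule AEN retrieval on attention interrupts.
 */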
int
twe_intr(void *v)
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	u_int32_t status;
	int rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	if (status & TWE_STAT_RDYI) {

		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading the ready queue
			 * returns all the status bits in each ready word;
			 * it is unclear whether it is legal to use those
			 * for status and avoid the extra read below.
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
			ccb->ccb_state = TWE_CCB_DONE;
			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	if (status & TWE_STAT_CMDI) {
		rv++;
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_MCMDI);
	}

	if (rv)
		wakeup(sc);

	if (status & TWE_STAT_ATTNI) {
		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		scsi_ioh_add(&sc->sc_aen);
	}

	return rv;
}

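/*
 * Fetch one AEN from the controller (scheduled from twe_intr()) and
 * reschedule until the attention queue reads empty.
 */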
void
twe_aen(void *cookie, void *io)
{
	struct twe_softc *sc = cookie;
	struct twe_ccb *ccb = io;
	struct twe_cmd *cmd = ccb->ccb_cmd;

	u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
	struct twe_param *pb = (void *) (((u_long)param_buf +
	    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
	u_int16_t aen;

	twe_lock_t lock;
	int error;

	ccb->ccb_xs = NULL;
	ccb->ccb_data = pb;
	ccb->ccb_length = TWE_SECTOR_SIZE;
	ccb->ccb_state = TWE_CCB_READY;
	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
	cmd->cmd_op = TWE_CMD_GPARAM;
	cmd->cmd_flags = 0;
	cmd->cmd_param.count = 1;

	pb->table_id = TWE_PARAM_AEN;
	pb->param_id = 2;
	pb->param_size = 2;

	lock = TWE_LOCK(sc);
	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
	TWE_UNLOCK(sc, lock);
	scsi_io_put(&sc->sc_iopool, ccb);

	if (error) {
		printf("%s: error draining attention queue\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	aen = *(u_int16_t *)pb->data;
	if (aen != TWE_AEN_QEMPTY)
		scsi_ioh_add(&sc->sc_aen);
}