xref: /openbsd/sys/dev/ic/twe.c (revision 905646f0)
1 /*	$OpenBSD: twe.c,v 1.64 2020/10/15 00:01:24 krw Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2002 Michael Shalayeff.  All rights reserved.
5  *
6  * The SCSI emulation layer is derived from gdt(4) driver,
7  * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
22  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* #define	TWE_DEBUG */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/buf.h>
36 #include <sys/device.h>
37 #include <sys/malloc.h>
38 #include <sys/kthread.h>
39 
40 #include <machine/bus.h>
41 
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_disk.h>
44 #include <scsi/scsiconf.h>
45 
46 #include <dev/ic/twereg.h>
47 #include <dev/ic/twevar.h>
48 
49 #ifdef TWE_DEBUG
50 #define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
51 #define	TWE_D_CMD	0x0001
52 #define	TWE_D_INTR	0x0002
53 #define	TWE_D_MISC	0x0004
54 #define	TWE_D_DMA	0x0008
55 #define	TWE_D_AEN	0x0010
56 int twe_debug = 0;
57 #else
58 #define	TWE_DPRINTF(m,a)	/* m, a */
59 #endif
60 
/* Autoconf glue: device class descriptor for twe(4). */
struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};
64 
65 void	twe_scsi_cmd(struct scsi_xfer *);
66 
/* SCSI midlayer adapter entry points; only the cmd hook is implemented. */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, NULL, NULL, NULL, NULL
};
70 
71 void *twe_get_ccb(void *);
72 void twe_put_ccb(void *, void *);
73 void twe_dispose(struct twe_softc *sc);
74 int  twe_cmd(struct twe_ccb *ccb, int flags, int wait);
75 int  twe_start(struct twe_ccb *ccb, int wait);
76 int  twe_complete(struct twe_ccb *ccb);
77 int  twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
78 void twe_thread_create(void *v);
79 void twe_thread(void *v);
80 void twe_aen(void *, void *);
81 
82 void *
83 twe_get_ccb(void *xsc)
84 {
85 	struct twe_softc *sc = xsc;
86 	struct twe_ccb *ccb;
87 
88 	mtx_enter(&sc->sc_ccb_mtx);
89 	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
90 	if (ccb != NULL)
91 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
92 	mtx_leave(&sc->sc_ccb_mtx);
93 
94 	return (ccb);
95 }
96 
97 void
98 twe_put_ccb(void *xsc, void *xccb)
99 {
100 	struct twe_softc *sc = xsc;
101 	struct twe_ccb *ccb = xccb;
102 
103 	ccb->ccb_state = TWE_CCB_FREE;
104 	mtx_enter(&sc->sc_ccb_mtx);
105 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
106 	mtx_leave(&sc->sc_ccb_mtx);
107 }
108 
109 void
110 twe_dispose(sc)
111 	struct twe_softc *sc;
112 {
113 	register struct twe_ccb *ccb;
114 	if (sc->sc_cmdmap != NULL) {
115 		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
116 		/* traverse the ccbs and destroy the maps */
117 		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
118 			if (ccb->ccb_dmamap)
119 				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
120 	}
121 	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
122 	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
123 	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
124 }
125 
126 int
127 twe_attach(sc)
128 	struct twe_softc *sc;
129 {
130 	struct scsibus_attach_args saa;
131 	/* this includes a buffer for drive config req, and a capacity req */
132 	u_int8_t	param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
133 	struct twe_param *pb = (void *)
134 	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
135 	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
136 	struct twe_ccb	*ccb;
137 	struct twe_cmd	*cmd;
138 	u_int32_t	status;
139 	int		error, i, retry, nunits, nseg;
140 	const char	*errstr;
141 	twe_lock_t	lock;
142 	paddr_t		pa;
143 
144 	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
145 	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
146 	if (error) {
147 		printf(": cannot allocate commands (%d)\n", error);
148 		return (1);
149 	}
150 
151 	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
152 	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
153 	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
154 	if (error) {
155 		printf(": cannot map commands (%d)\n", error);
156 		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
157 		return (1);
158 	}
159 
160 	error = bus_dmamap_create(sc->dmat,
161 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
162 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
163 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
164 	if (error) {
165 		printf(": cannot create ccb cmd dmamap (%d)\n", error);
166 		twe_dispose(sc);
167 		return (1);
168 	}
169 	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
170 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
171 	if (error) {
172 		printf(": cannot load command dma map (%d)\n", error);
173 		twe_dispose(sc);
174 		return (1);
175 	}
176 
177 	TAILQ_INIT(&sc->sc_ccb2q);
178 	TAILQ_INIT(&sc->sc_ccbq);
179 	TAILQ_INIT(&sc->sc_free_ccb);
180 	TAILQ_INIT(&sc->sc_done_ccb);
181 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
182 	scsi_iopool_init(&sc->sc_iopool, sc, twe_get_ccb, twe_put_ccb);
183 
184 	scsi_ioh_set(&sc->sc_aen, &sc->sc_iopool, twe_aen, sc);
185 
186 	pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
187 	    sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
188 	for (cmd = (struct twe_cmd *)sc->sc_cmds + TWE_MAXCMDS - 1;
189 	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {
190 
191 		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
192 		ccb = &sc->sc_ccbs[cmd->cmd_index];
193 		error = bus_dmamap_create(sc->dmat,
194 		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
195 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
196 		if (error) {
197 			printf(": cannot create ccb dmamap (%d)\n", error);
198 			twe_dispose(sc);
199 			return (1);
200 		}
201 		ccb->ccb_sc = sc;
202 		ccb->ccb_cmd = cmd;
203 		ccb->ccb_cmdpa = pa;
204 		ccb->ccb_state = TWE_CCB_FREE;
205 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
206 	}
207 
208 	for (errstr = NULL, retry = 3; retry--; ) {
209 		int		veseen_srst;
210 		u_int16_t	aen;
211 
212 		if (errstr)
213 			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));
214 
215 		for (i = 350000; i--; DELAY(100)) {
216 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
217 			if (status & TWE_STAT_CPURDY)
218 				break;
219 		}
220 
221 		if (!(status & TWE_STAT_CPURDY)) {
222 			errstr = ": card CPU is not ready\n";
223 			continue;
224 		}
225 
226 		/* soft reset, disable ints */
227 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
228 		    TWE_CTRL_SRST |
229 		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
230 		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
231 		    TWE_CTRL_MINT);
232 
233 		for (i = 350000; i--; DELAY(100)) {
234 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
235 			if (status & TWE_STAT_ATTNI)
236 				break;
237 		}
238 
239 		if (!(status & TWE_STAT_ATTNI)) {
240 			errstr = ": cannot get card's attention\n";
241 			continue;
242 		}
243 
244 		/* drain aen queue */
245 		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {
246 
247 			ccb = scsi_io_get(&sc->sc_iopool, 0);
248 			if (ccb == NULL) {
249 				errstr = ": out of ccbs\n";
250 				break;
251 			}
252 
253 			ccb->ccb_xs = NULL;
254 			ccb->ccb_data = pb;
255 			ccb->ccb_length = TWE_SECTOR_SIZE;
256 			ccb->ccb_state = TWE_CCB_READY;
257 			cmd = ccb->ccb_cmd;
258 			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
259 			cmd->cmd_op = TWE_CMD_GPARAM;
260 			cmd->cmd_param.count = 1;
261 
262 			pb->table_id = TWE_PARAM_AEN;
263 			pb->param_id = 2;
264 			pb->param_size = 2;
265 
266 			error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
267 			scsi_io_put(&sc->sc_iopool, ccb);
268 			if (error) {
269 				errstr = ": error draining attention queue\n";
270 				break;
271 			}
272 
273 			aen = *(u_int16_t *)pb->data;
274 			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
275 			if (aen == TWE_AEN_SRST)
276 				veseen_srst++;
277 		}
278 
279 		if (!veseen_srst) {
280 			errstr = ": we don't get it\n";
281 			continue;
282 		}
283 
284 		if (status & TWE_STAT_CPUERR) {
285 			errstr = ": card CPU error detected\n";
286 			continue;
287 		}
288 
289 		if (status & TWE_STAT_PCIPAR) {
290 			errstr = ": PCI parity error detected\n";
291 			continue;
292 		}
293 
294 		if (status & TWE_STAT_QUEUEE ) {
295 			errstr = ": queuing error detected\n";
296 			continue;
297 		}
298 
299 		if (status & TWE_STAT_PCIABR) {
300 			errstr = ": PCI abort\n";
301 			continue;
302 		}
303 
304 		while (!(status & TWE_STAT_RQE)) {
305 			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
306 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
307 		}
308 
309 		break;
310 	}
311 
312 	if (retry < 0) {
313 		printf("%s", errstr);
314 		twe_dispose(sc);
315 		return 1;
316 	}
317 
318 	ccb = scsi_io_get(&sc->sc_iopool, 0);
319 	if (ccb == NULL) {
320 		printf(": out of ccbs\n");
321 		twe_dispose(sc);
322 		return 1;
323 	}
324 
325 	ccb->ccb_xs = NULL;
326 	ccb->ccb_data = pb;
327 	ccb->ccb_length = TWE_SECTOR_SIZE;
328 	ccb->ccb_state = TWE_CCB_READY;
329 	cmd = ccb->ccb_cmd;
330 	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
331 	cmd->cmd_op = TWE_CMD_GPARAM;
332 	cmd->cmd_param.count = 1;
333 
334 	pb->table_id = TWE_PARAM_UC;
335 	pb->param_id = TWE_PARAM_UC;
336 	pb->param_size = TWE_MAX_UNITS;
337 
338 	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
339 	scsi_io_put(&sc->sc_iopool, ccb);
340 	if (error) {
341 		printf(": failed to fetch unit parameters\n");
342 		twe_dispose(sc);
343 		return 1;
344 	}
345 
346 	/* we are assuming last read status was good */
347 	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));
348 
349 	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
350 		if (pb->data[i] == 0)
351 			continue;
352 
353 		ccb = scsi_io_get(&sc->sc_iopool, 0);
354 		if (ccb == NULL) {
355 			printf(": out of ccbs\n");
356 			twe_dispose(sc);
357 			return 1;
358 		}
359 
360 		ccb->ccb_xs = NULL;
361 		ccb->ccb_data = cap;
362 		ccb->ccb_length = TWE_SECTOR_SIZE;
363 		ccb->ccb_state = TWE_CCB_READY;
364 		cmd = ccb->ccb_cmd;
365 		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
366 		cmd->cmd_op = TWE_CMD_GPARAM;
367 		cmd->cmd_param.count = 1;
368 
369 		cap->table_id = TWE_PARAM_UI + i;
370 		cap->param_id = 4;
371 		cap->param_size = 4;	/* 4 bytes */
372 
373 		lock = TWE_LOCK(sc);
374 		twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
375 		TWE_UNLOCK(sc, lock);
376 		scsi_io_put(&sc->sc_iopool, ccb);
377 		if (error) {
378 			printf("%s: error fetching capacity for unit %d\n",
379 			    sc->sc_dev.dv_xname, i);
380 			continue;
381 		}
382 
383 		nunits++;
384 		sc->sc_hdr[i].hd_present = 1;
385 		sc->sc_hdr[i].hd_devtype = 0;
386 		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
387 		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d\n",
388 		    i, sc->sc_hdr[i].hd_size));
389 	}
390 
391 	if (!nunits)
392 		nunits++;
393 
394 	/* TODO: fetch & print cache params? */
395 
396 	saa.saa_adapter_softc = sc;
397 	saa.saa_adapter = &twe_switch;
398 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
399 	saa.saa_adapter_buswidth = TWE_MAX_UNITS;
400 	saa.saa_luns = 8;
401 	saa.saa_openings = TWE_MAXCMDS / nunits;
402 	saa.saa_pool = &sc->sc_iopool;
403 	saa.saa_quirks = saa.saa_flags = 0;
404 	saa.saa_wwpn = saa.saa_wwnn = 0;
405 
406 	config_found(&sc->sc_dev, &saa, scsiprint);
407 
408 	kthread_create_deferred(twe_thread_create, sc);
409 
410 	return (0);
411 }
412 
/*
 * Deferred from twe_attach(): create the driver's worker kernel
 * thread, then ack pending interrupt sources and enable interrupts.
 * If thread creation fails the controller is left with interrupts
 * disabled.
 */
void
twe_thread_create(void *v)
{
	struct twe_softc *sc = v;

	if (kthread_create(twe_thread, sc, &sc->sc_thread,
	    sc->sc_dev.dv_xname)) {
		/* TODO disable twe */
		printf("%s: failed to create kernel thread, disabled\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/*
	 * ack all before enable, cannot be done in one
	 * operation as it seems clear is not processed
	 * if enable is specified.
	 */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/* enable interrupts */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);
}
442 
443 void
444 twe_thread(v)
445 	void *v;
446 {
447 	struct twe_softc *sc = v;
448 	struct twe_ccb *ccb;
449 	twe_lock_t lock;
450 	u_int32_t status;
451 	int err;
452 
453 	for (;;) {
454 		lock = TWE_LOCK(sc);
455 
456 		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
457 			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
458 			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
459 			if ((err = twe_done(sc, ccb)))
460 				printf("%s: done failed (%d)\n",
461 				    sc->sc_dev.dv_xname, err);
462 		}
463 
464 		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
465 		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
466 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
467 		while (!(status & TWE_STAT_CQF) &&
468 		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {
469 
470 			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
471 			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);
472 
473 			ccb->ccb_state = TWE_CCB_QUEUED;
474 			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
475 			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
476 			    ccb->ccb_cmdpa);
477 
478 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
479 			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
480 			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
481 		}
482 
483 		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
484 			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
485 			    TWE_CTRL_ECMDI);
486 
487 		TWE_UNLOCK(sc, lock);
488 		sc->sc_thread_on = 1;
489 		tsleep_nsec(sc, PWAIT, "twespank", INFSLP);
490 	}
491 }
492 
493 int
494 twe_cmd(ccb, flags, wait)
495 	struct twe_ccb *ccb;
496 	int flags, wait;
497 {
498 	struct twe_softc *sc = ccb->ccb_sc;
499 	bus_dmamap_t dmap;
500 	struct twe_cmd *cmd;
501 	struct twe_segs *sgp;
502 	int error, i;
503 
504 	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
505 		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
506 		ccb->ccb_realdata = ccb->ccb_data;
507 
508 		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
509 		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
510 		    BUS_DMA_NOWAIT);
511 		if (error) {
512 			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
513 			return (ENOMEM);
514 		}
515 
516 		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
517 		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
518 		if (error) {
519 			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
520 			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
521 			return (ENOMEM);
522 		}
523 		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
524 	} else
525 		ccb->ccb_realdata = NULL;
526 
527 	dmap = ccb->ccb_dmamap;
528 	cmd = ccb->ccb_cmd;
529 	cmd->cmd_status = 0;
530 
531 	if (ccb->ccb_data) {
532 		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
533 		    ccb->ccb_length, NULL, flags);
534 		if (error) {
535 			if (error == EFBIG)
536 				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
537 			else
538 				printf("error %d loading dma map\n", error);
539 
540 			if (ccb->ccb_realdata) {
541 				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
542 				    ccb->ccb_length);
543 				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
544 				    ccb->ccb_2nseg);
545 			}
546 			return error;
547 		}
548 		/* load addresses into command */
549 		switch (cmd->cmd_op) {
550 		case TWE_CMD_GPARAM:
551 		case TWE_CMD_SPARAM:
552 			sgp = cmd->cmd_param.segs;
553 			break;
554 		case TWE_CMD_READ:
555 		case TWE_CMD_WRITE:
556 			sgp = cmd->cmd_io.segs;
557 			break;
558 		default:
559 			/* no data transfer */
560 			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
561 			    cmd->cmd_op));
562 			sgp = NULL;
563 			break;
564 		}
565 		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
566 		if (sgp) {
567 			/*
568 			 * we know that size is in the upper byte,
569 			 * and we do not worry about overflow
570 			 */
571 			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
572 			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
573 			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
574 				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
575 				sgp->twes_len  = htole32(dmap->dm_segs[i].ds_len);
576 				TWE_DPRINTF(TWE_D_DMA, ("%lx[%lx] ",
577 				    dmap->dm_segs[i].ds_addr,
578 				    dmap->dm_segs[i].ds_len));
579 			}
580 		}
581 		TWE_DPRINTF(TWE_D_DMA, ("> "));
582 		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
583 		    BUS_DMASYNC_PREWRITE);
584 	}
585 	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
586 	    BUS_DMASYNC_PREWRITE);
587 
588 	if ((error = twe_start(ccb, wait))) {
589 		bus_dmamap_unload(sc->dmat, dmap);
590 		if (ccb->ccb_realdata) {
591 			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
592 			    ccb->ccb_length);
593 			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
594 			    ccb->ccb_2nseg);
595 		}
596 		return (error);
597 	}
598 
599 	return wait? twe_complete(ccb) : 0;
600 }
601 
602 int
603 twe_start(ccb, wait)
604 	struct twe_ccb *ccb;
605 	int wait;
606 {
607 	struct twe_softc*sc = ccb->ccb_sc;
608 	struct twe_cmd	*cmd = ccb->ccb_cmd;
609 	u_int32_t	status;
610 	int i;
611 
612 	cmd->cmd_op = htole16(cmd->cmd_op);
613 
614 	if (!wait) {
615 
616 		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
617 		ccb->ccb_state = TWE_CCB_PREQUEUED;
618 		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
619 		wakeup(sc);
620 		return 0;
621 	}
622 
623 	for (i = 1000; i--; DELAY(10)) {
624 
625 		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
626 		if (!(status & TWE_STAT_CQF))
627 			break;
628 		TWE_DPRINTF(TWE_D_CMD,  ("twe_start stat=%b ",
629 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
630 	}
631 
632 	if (!(status & TWE_STAT_CQF)) {
633 		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
634 		    ccb->ccb_cmdpa);
635 
636 		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
637 		ccb->ccb_state = TWE_CCB_QUEUED;
638 		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
639 		return 0;
640 
641 	} else {
642 
643 		printf("%s: twe_start(%d) timed out\n",
644 		    sc->sc_dev.dv_xname, cmd->cmd_index);
645 
646 		return EPERM;
647 	}
648 }
649 
650 int
651 twe_complete(ccb)
652 	struct twe_ccb *ccb;
653 {
654 	struct twe_softc *sc = ccb->ccb_sc;
655 	struct scsi_xfer *xs = ccb->ccb_xs;
656 	int i;
657 
658 	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
659 		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
660 
661 		/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
662 		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
663 
664 		while (!(status & TWE_STAT_RQE)) {
665 			struct twe_ccb *ccb1;
666 			u_int32_t ready;
667 
668 			ready = bus_space_read_4(sc->iot, sc->ioh,
669 			    TWE_READYQUEUE);
670 
671 			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));
672 
673 			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
674 			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
675 			ccb1->ccb_state = TWE_CCB_DONE;
676 			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
677 				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
678 				return 0;
679 			}
680 
681 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
682 			/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
683 			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
684 		}
685 	}
686 
687 	return 1;
688 }
689 
690 int
691 twe_done(sc, ccb)
692 	struct twe_softc *sc;
693 	struct twe_ccb *ccb;
694 {
695 	struct twe_cmd *cmd = ccb->ccb_cmd;
696 	struct scsi_xfer *xs = ccb->ccb_xs;
697 	bus_dmamap_t	dmap;
698 	twe_lock_t	lock;
699 
700 	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));
701 
702 	if (ccb->ccb_state != TWE_CCB_DONE) {
703 		printf("%s: undone ccb %d ready\n",
704 		     sc->sc_dev.dv_xname, cmd->cmd_index);
705 		return 1;
706 	}
707 
708 	dmap = ccb->ccb_dmamap;
709 	if (xs) {
710 		if (xs->cmd.opcode != PREVENT_ALLOW &&
711 		    xs->cmd.opcode != SYNCHRONIZE_CACHE) {
712 			bus_dmamap_sync(sc->dmat, dmap, 0,
713 			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
714 			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
715 			bus_dmamap_unload(sc->dmat, dmap);
716 		}
717 	} else {
718 		switch (letoh16(cmd->cmd_op)) {
719 		case TWE_CMD_GPARAM:
720 		case TWE_CMD_READ:
721 			bus_dmamap_sync(sc->dmat, dmap, 0,
722 			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
723 			bus_dmamap_unload(sc->dmat, dmap);
724 			break;
725 		case TWE_CMD_SPARAM:
726 		case TWE_CMD_WRITE:
727 			bus_dmamap_sync(sc->dmat, dmap, 0,
728 			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
729 			bus_dmamap_unload(sc->dmat, dmap);
730 			break;
731 		default:
732 			/* no data */
733 			break;
734 		}
735 	}
736 
737 	if (ccb->ccb_realdata) {
738 		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
739 		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
740 		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
741 	}
742 
743 	lock = TWE_LOCK(sc);
744 
745 	if (xs) {
746 		xs->resid = 0;
747 		scsi_done(xs);
748 	}
749 	TWE_UNLOCK(sc, lock);
750 
751 	return 0;
752 }
753 
754 void
755 twe_scsi_cmd(xs)
756 	struct scsi_xfer *xs;
757 {
758 	struct scsi_link *link = xs->sc_link;
759 	struct twe_softc *sc = link->bus->sb_adapter_softc;
760 	struct twe_ccb *ccb = xs->io;
761 	struct twe_cmd *cmd;
762 	struct scsi_inquiry_data inq;
763 	struct scsi_sense_data sd;
764 	struct scsi_read_cap_data rcd;
765 	u_int8_t target = link->target;
766 	u_int32_t blockno, blockcnt;
767 	struct scsi_rw *rw;
768 	struct scsi_rw_10 *rw10;
769 	int error, op, flags, wait;
770 	twe_lock_t lock;
771 
772 
773 	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
774 	    link->lun != 0) {
775 		xs->error = XS_DRIVER_STUFFUP;
776 		scsi_done(xs);
777 		return;
778 	}
779 
780 	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));
781 
782 	xs->error = XS_NOERROR;
783 
784 	switch (xs->cmd.opcode) {
785 	case TEST_UNIT_READY:
786 	case START_STOP:
787 #if 0
788 	case VERIFY:
789 #endif
790 		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd.opcode,
791 		    target));
792 		break;
793 
794 	case REQUEST_SENSE:
795 		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
796 		bzero(&sd, sizeof sd);
797 		sd.error_code = SSD_ERRCODE_CURRENT;
798 		sd.segment = 0;
799 		sd.flags = SKEY_NO_SENSE;
800 		*(u_int32_t*)sd.info = htole32(0);
801 		sd.extra_len = 0;
802 		scsi_copy_internal_data(xs, &sd, sizeof(sd));
803 		break;
804 
805 	case INQUIRY:
806 		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
807 		    sc->sc_hdr[target].hd_devtype));
808 		bzero(&inq, sizeof inq);
809 		inq.device =
810 		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
811 		inq.dev_qual2 =
812 		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
813 		inq.version = SCSI_REV_2;
814 		inq.response_format = SID_SCSI2_RESPONSE;
815 		inq.additional_length = SID_SCSI2_ALEN;
816 		strlcpy(inq.vendor, "3WARE  ", sizeof inq.vendor);
817 		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
818 		    target);
819 		strlcpy(inq.revision, "   ", sizeof inq.revision);
820 		scsi_copy_internal_data(xs, &inq, sizeof(inq));
821 		break;
822 
823 	case READ_CAPACITY:
824 		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
825 		bzero(&rcd, sizeof rcd);
826 		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
827 		_lto4b(TWE_SECTOR_SIZE, rcd.length);
828 		scsi_copy_internal_data(xs, &rcd, sizeof(rcd));
829 		break;
830 
831 	case PREVENT_ALLOW:
832 		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
833 		scsi_done(xs);
834 		return;
835 
836 	case READ_COMMAND:
837 	case READ_10:
838 	case WRITE_COMMAND:
839 	case WRITE_10:
840 	case SYNCHRONIZE_CACHE:
841 		lock = TWE_LOCK(sc);
842 
843 		flags = 0;
844 		if (xs->cmd.opcode == SYNCHRONIZE_CACHE) {
845 			blockno = blockcnt = 0;
846 		} else {
847 			/* A read or write operation. */
848 			if (xs->cmdlen == 6) {
849 				rw = (struct scsi_rw *)&xs->cmd;
850 				blockno = _3btol(rw->addr) &
851 				    (SRW_TOPADDR << 16 | 0xffff);
852 				blockcnt = rw->length ? rw->length : 0x100;
853 			} else {
854 				rw10 = (struct scsi_rw_10 *)&xs->cmd;
855 				blockno = _4btol(rw10->addr);
856 				blockcnt = _2btol(rw10->length);
857 				/* reflect DPO & FUA flags */
858 				if (xs->cmd.opcode == WRITE_10 &&
859 				    rw10->byte2 & 0x18)
860 					flags = TWE_FLAGS_CACHEDISABLE;
861 			}
862 			if (blockno >= sc->sc_hdr[target].hd_size ||
863 			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
864 				printf("%s: out of bounds %u-%u >= %u\n",
865 				    sc->sc_dev.dv_xname, blockno, blockcnt,
866 				    sc->sc_hdr[target].hd_size);
867 				xs->error = XS_DRIVER_STUFFUP;
868 				scsi_done(xs);
869 				TWE_UNLOCK(sc, lock);
870 				return;
871 			}
872 		}
873 
874 		switch (xs->cmd.opcode) {
875 		case READ_COMMAND:	op = TWE_CMD_READ;	break;
876 		case READ_10:		op = TWE_CMD_READ;	break;
877 		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
878 		case WRITE_10:		op = TWE_CMD_WRITE;	break;
879 		default:		op = TWE_CMD_NOP;	break;
880 		}
881 
882 		ccb->ccb_xs = xs;
883 		ccb->ccb_data = xs->data;
884 		ccb->ccb_length = xs->datalen;
885 		ccb->ccb_state = TWE_CCB_READY;
886 		cmd = ccb->ccb_cmd;
887 		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
888 		cmd->cmd_op = op;
889 		cmd->cmd_flags = flags;
890 		cmd->cmd_io.count = htole16(blockcnt);
891 		cmd->cmd_io.lba = htole32(blockno);
892 		wait = xs->flags & SCSI_POLL;
893 		if (!sc->sc_thread_on)
894 			wait |= SCSI_POLL;
895 
896 		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
897 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {
898 
899 			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
900 			xs->error = XS_DRIVER_STUFFUP;
901 			scsi_done(xs);
902 		}
903 
904 		TWE_UNLOCK(sc, lock);
905 		return;
906 
907 	default:
908 		TWE_DPRINTF(TWE_D_CMD, ("unsupported scsi command %#x tgt %d ",
909 		    xs->cmd.opcode, target));
910 		xs->error = XS_DRIVER_STUFFUP;
911 	}
912 
913 	scsi_done(xs);
914 }
915 
916 int
917 twe_intr(v)
918 	void *v;
919 {
920 	struct twe_softc *sc = v;
921 	struct twe_ccb	*ccb;
922 	u_int32_t	status;
923 	int		rv = 0;
924 
925 	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
926 	TWE_DPRINTF(TWE_D_INTR,  ("twe_intr stat=%b ",
927 	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
928 #if 0
929 	if (status & TWE_STAT_HOSTI) {
930 
931 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
932 		    TWE_CTRL_CHOSTI);
933 	}
934 #endif
935 
936 	if (status & TWE_STAT_RDYI) {
937 
938 		while (!(status & TWE_STAT_RQE)) {
939 
940 			u_int32_t ready;
941 
942 			/*
943 			 * it seems that reading ready queue
944 			 * we get all the status bits in each ready word.
945 			 * i wonder if it's legal to use those for
946 			 * status and avoid extra read below
947 			 */
948 			ready = bus_space_read_4(sc->iot, sc->ioh,
949 			    TWE_READYQUEUE);
950 
951 			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
952 			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
953 			ccb->ccb_state = TWE_CCB_DONE;
954 			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
955 			rv++;
956 
957 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
958 			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
959 			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
960 		}
961 	}
962 
963 	if (status & TWE_STAT_CMDI) {
964 		rv++;
965 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
966 		    TWE_CTRL_MCMDI);
967 	}
968 
969 	if (rv)
970 		wakeup(sc);
971 
972 	if (status & TWE_STAT_ATTNI) {
973 		/*
974 		 * we know no attentions of interest right now.
975 		 * one of those would be mirror degradation i think.
976 		 * or, what else exists in there?
977 		 * maybe 3ware can answer that?
978 		 */
979 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
980 		    TWE_CTRL_CATTNI);
981 
982 		scsi_ioh_add(&sc->sc_aen);
983 	}
984 
985 	return rv;
986 }
987 
988 void
989 twe_aen(void *cookie, void *io)
990 {
991 	struct twe_softc *sc = cookie;
992 	struct twe_ccb *ccb = io;
993 	struct twe_cmd *cmd = ccb->ccb_cmd;
994 
995 	u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
996 	struct twe_param *pb = (void *) (((u_long)param_buf +
997 	    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
998 	u_int16_t aen;
999 
1000 	twe_lock_t lock;
1001 	int error;
1002 
1003 	ccb->ccb_xs = NULL;
1004 	ccb->ccb_data = pb;
1005 	ccb->ccb_length = TWE_SECTOR_SIZE;
1006 	ccb->ccb_state = TWE_CCB_READY;
1007 	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
1008 	cmd->cmd_op = TWE_CMD_GPARAM;
1009 	cmd->cmd_flags = 0;
1010 	cmd->cmd_param.count = 1;
1011 
1012 	pb->table_id = TWE_PARAM_AEN;
1013 	pb->param_id = 2;
1014 	pb->param_size = 2;
1015 
1016 	lock = TWE_LOCK(sc);
1017 	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
1018 	TWE_UNLOCK(sc, lock);
1019 	scsi_io_put(&sc->sc_iopool, ccb);
1020 
1021 	if (error) {
1022 		printf("%s: error draining attention queue\n",
1023 		    sc->sc_dev.dv_xname);
1024 		return;
1025 	}
1026 
1027 	aen = *(u_int16_t *)pb->data;
1028 	if (aen != TWE_AEN_QEMPTY)
1029 		scsi_ioh_add(&sc->sc_aen);
1030 }
1031