xref: /openbsd/sys/dev/ic/twe.c (revision 91f110e0)
1 /*	$OpenBSD: twe.c,v 1.43 2013/11/18 23:32:23 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2002 Michael Shalayeff.  All rights reserved.
5  *
6  * The SCSI emulation layer is derived from gdt(4) driver,
7  * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
22  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* #define	TWE_DEBUG */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/buf.h>
36 #include <sys/device.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/kthread.h>
41 
42 #include <machine/bus.h>
43 
44 #include <scsi/scsi_all.h>
45 #include <scsi/scsi_disk.h>
46 #include <scsi/scsiconf.h>
47 
48 #include <dev/ic/twereg.h>
49 #include <dev/ic/twevar.h>
50 
51 #ifdef TWE_DEBUG
52 #define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
53 #define	TWE_D_CMD	0x0001
54 #define	TWE_D_INTR	0x0002
55 #define	TWE_D_MISC	0x0004
56 #define	TWE_D_DMA	0x0008
57 #define	TWE_D_AEN	0x0010
58 int twe_debug = 0;
59 #else
60 #define	TWE_DPRINTF(m,a)	/* m, a */
61 #endif
62 
/* autoconf glue */
struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};

void	twe_scsi_cmd(struct scsi_xfer *);

/* entry points handed to the scsi midlayer */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, tweminphys, 0, 0,
};

/* internal prototypes */
void *twe_get_ccb(void *);		/* scsi_iopool get backend */
void twe_put_ccb(void *, void *);	/* scsi_iopool put backend */
void twe_dispose(struct twe_softc *sc);	/* release all DMA resources */
int  twe_cmd(struct twe_ccb *ccb, int flags, int wait);
int  twe_start(struct twe_ccb *ccb, int wait);
int  twe_complete(struct twe_ccb *ccb);
int  twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
void twe_copy_internal_data(struct scsi_xfer *xs, void *v, size_t size);
void twe_thread_create(void *v);
void twe_thread(void *v);
void twe_aen(void *, void *);
85 void *
86 twe_get_ccb(void *xsc)
87 {
88 	struct twe_softc *sc = xsc;
89 	struct twe_ccb *ccb;
90 
91 	mtx_enter(&sc->sc_ccb_mtx);
92 	ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
93 	if (ccb != NULL)
94 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
95 	mtx_leave(&sc->sc_ccb_mtx);
96 
97 	return (ccb);
98 }
99 
100 void
101 twe_put_ccb(void *xsc, void *xccb)
102 {
103 	struct twe_softc *sc = xsc;
104 	struct twe_ccb *ccb = xccb;
105 
106 	ccb->ccb_state = TWE_CCB_FREE;
107 	mtx_enter(&sc->sc_ccb_mtx);
108 	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
109 	mtx_leave(&sc->sc_ccb_mtx);
110 }
111 
112 void
113 twe_dispose(sc)
114 	struct twe_softc *sc;
115 {
116 	register struct twe_ccb *ccb;
117 	if (sc->sc_cmdmap != NULL) {
118 		bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
119 		/* traverse the ccbs and destroy the maps */
120 		for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
121 			if (ccb->ccb_dmamap)
122 				bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
123 	}
124 	bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
125 	    sizeof(struct twe_cmd) * TWE_MAXCMDS);
126 	bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
127 }
128 
129 int
130 twe_attach(sc)
131 	struct twe_softc *sc;
132 {
133 	struct scsibus_attach_args saa;
134 	/* this includes a buffer for drive config req, and a capacity req */
135 	u_int8_t	param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
136 	struct twe_param *pb = (void *)
137 	    (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
138 	struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
139 	struct twe_ccb	*ccb;
140 	struct twe_cmd	*cmd;
141 	u_int32_t	status;
142 	int		error, i, retry, nunits, nseg;
143 	const char	*errstr;
144 	twe_lock_t	lock;
145 	paddr_t		pa;
146 
147 	error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
148 	    PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
149 	if (error) {
150 		printf(": cannot allocate commands (%d)\n", error);
151 		return (1);
152 	}
153 
154 	error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
155 	    sizeof(struct twe_cmd) * TWE_MAXCMDS,
156 	    (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
157 	if (error) {
158 		printf(": cannot map commands (%d)\n", error);
159 		bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
160 		return (1);
161 	}
162 
163 	error = bus_dmamap_create(sc->dmat,
164 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
165 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
166 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
167 	if (error) {
168 		printf(": cannot create ccb cmd dmamap (%d)\n", error);
169 		twe_dispose(sc);
170 		return (1);
171 	}
172 	error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
173 	    sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
174 	if (error) {
175 		printf(": cannot load command dma map (%d)\n", error);
176 		twe_dispose(sc);
177 		return (1);
178 	}
179 
180 	TAILQ_INIT(&sc->sc_ccb2q);
181 	TAILQ_INIT(&sc->sc_ccbq);
182 	TAILQ_INIT(&sc->sc_free_ccb);
183 	TAILQ_INIT(&sc->sc_done_ccb);
184 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
185 	scsi_iopool_init(&sc->sc_iopool, sc, twe_get_ccb, twe_put_ccb);
186 
187 	scsi_ioh_set(&sc->sc_aen, &sc->sc_iopool, twe_aen, sc);
188 
189 	pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
190 	    sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
191 	for (cmd = (struct twe_cmd *)sc->sc_cmds + TWE_MAXCMDS - 1;
192 	     cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {
193 
194 		cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
195 		ccb = &sc->sc_ccbs[cmd->cmd_index];
196 		error = bus_dmamap_create(sc->dmat,
197 		    TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
198 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
199 		if (error) {
200 			printf(": cannot create ccb dmamap (%d)\n", error);
201 			twe_dispose(sc);
202 			return (1);
203 		}
204 		ccb->ccb_sc = sc;
205 		ccb->ccb_cmd = cmd;
206 		ccb->ccb_cmdpa = pa;
207 		ccb->ccb_state = TWE_CCB_FREE;
208 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
209 	}
210 
211 	for (errstr = NULL, retry = 3; retry--; ) {
212 		int		veseen_srst;
213 		u_int16_t	aen;
214 
215 		if (errstr)
216 			TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));
217 
218 		for (i = 350000; i--; DELAY(100)) {
219 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
220 			if (status & TWE_STAT_CPURDY)
221 				break;
222 		}
223 
224 		if (!(status & TWE_STAT_CPURDY)) {
225 			errstr = ": card CPU is not ready\n";
226 			continue;
227 		}
228 
229 		/* soft reset, disable ints */
230 		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
231 		    TWE_CTRL_SRST |
232 		    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
233 		    TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
234 		    TWE_CTRL_MINT);
235 
236 		for (i = 350000; i--; DELAY(100)) {
237 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
238 			if (status & TWE_STAT_ATTNI)
239 				break;
240 		}
241 
242 		if (!(status & TWE_STAT_ATTNI)) {
243 			errstr = ": cannot get card's attention\n";
244 			continue;
245 		}
246 
247 		/* drain aen queue */
248 		for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {
249 
250 			ccb = scsi_io_get(&sc->sc_iopool, 0);
251 			if (ccb == NULL) {
252 				errstr = ": out of ccbs\n";
253 				break;
254 			}
255 
256 			ccb->ccb_xs = NULL;
257 			ccb->ccb_data = pb;
258 			ccb->ccb_length = TWE_SECTOR_SIZE;
259 			ccb->ccb_state = TWE_CCB_READY;
260 			cmd = ccb->ccb_cmd;
261 			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
262 			cmd->cmd_op = TWE_CMD_GPARAM;
263 			cmd->cmd_param.count = 1;
264 
265 			pb->table_id = TWE_PARAM_AEN;
266 			pb->param_id = 2;
267 			pb->param_size = 2;
268 
269 			error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
270 			scsi_io_put(&sc->sc_iopool, ccb);
271 			if (error) {
272 				errstr = ": error draining attention queue\n";
273 				break;
274 			}
275 
276 			aen = *(u_int16_t *)pb->data;
277 			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
278 			if (aen == TWE_AEN_SRST)
279 				veseen_srst++;
280 		}
281 
282 		if (!veseen_srst) {
283 			errstr = ": we don't get it\n";
284 			continue;
285 		}
286 
287 		if (status & TWE_STAT_CPUERR) {
288 			errstr = ": card CPU error detected\n";
289 			continue;
290 		}
291 
292 		if (status & TWE_STAT_PCIPAR) {
293 			errstr = ": PCI parity error detected\n";
294 			continue;
295 		}
296 
297 		if (status & TWE_STAT_QUEUEE ) {
298 			errstr = ": queuing error detected\n";
299 			continue;
300 		}
301 
302 		if (status & TWE_STAT_PCIABR) {
303 			errstr = ": PCI abort\n";
304 			continue;
305 		}
306 
307 		while (!(status & TWE_STAT_RQE)) {
308 			bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
309 			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
310 		}
311 
312 		break;
313 	}
314 
315 	if (retry < 0) {
316 		printf("%s", errstr);
317 		twe_dispose(sc);
318 		return 1;
319 	}
320 
321 	ccb = scsi_io_get(&sc->sc_iopool, 0);
322 	if (ccb == NULL) {
323 		printf(": out of ccbs\n");
324 		twe_dispose(sc);
325 		return 1;
326 	}
327 
328 	ccb->ccb_xs = NULL;
329 	ccb->ccb_data = pb;
330 	ccb->ccb_length = TWE_SECTOR_SIZE;
331 	ccb->ccb_state = TWE_CCB_READY;
332 	cmd = ccb->ccb_cmd;
333 	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
334 	cmd->cmd_op = TWE_CMD_GPARAM;
335 	cmd->cmd_param.count = 1;
336 
337 	pb->table_id = TWE_PARAM_UC;
338 	pb->param_id = TWE_PARAM_UC;
339 	pb->param_size = TWE_MAX_UNITS;
340 
341 	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
342 	scsi_io_put(&sc->sc_iopool, ccb);
343 	if (error) {
344 		printf(": failed to fetch unit parameters\n");
345 		twe_dispose(sc);
346 		return 1;
347 	}
348 
349 	/* we are assuming last read status was good */
350 	printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));
351 
352 	for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
353 		if (pb->data[i] == 0)
354 			continue;
355 
356 		ccb = scsi_io_get(&sc->sc_iopool, 0);
357 		if (ccb == NULL) {
358 			printf(": out of ccbs\n");
359 			twe_dispose(sc);
360 			return 1;
361 		}
362 
363 		ccb->ccb_xs = NULL;
364 		ccb->ccb_data = cap;
365 		ccb->ccb_length = TWE_SECTOR_SIZE;
366 		ccb->ccb_state = TWE_CCB_READY;
367 		cmd = ccb->ccb_cmd;
368 		cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
369 		cmd->cmd_op = TWE_CMD_GPARAM;
370 		cmd->cmd_param.count = 1;
371 
372 		cap->table_id = TWE_PARAM_UI + i;
373 		cap->param_id = 4;
374 		cap->param_size = 4;	/* 4 bytes */
375 
376 		lock = TWE_LOCK(sc);
377 		twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
378 		TWE_UNLOCK(sc, lock);
379 		scsi_io_put(&sc->sc_iopool, ccb);
380 		if (error) {
381 			printf("%s: error fetching capacity for unit %d\n",
382 			    sc->sc_dev.dv_xname, i);
383 			continue;
384 		}
385 
386 		nunits++;
387 		sc->sc_hdr[i].hd_present = 1;
388 		sc->sc_hdr[i].hd_devtype = 0;
389 		sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
390 		TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d\n",
391 		    i, sc->sc_hdr[i].hd_size));
392 	}
393 
394 	if (!nunits)
395 		nunits++;
396 
397 	/* TODO: fetch & print cache params? */
398 
399 	sc->sc_link.adapter_softc = sc;
400 	sc->sc_link.adapter = &twe_switch;
401 	sc->sc_link.adapter_target = TWE_MAX_UNITS;
402 	sc->sc_link.openings = TWE_MAXCMDS / nunits;
403 	sc->sc_link.adapter_buswidth = TWE_MAX_UNITS;
404 	sc->sc_link.pool = &sc->sc_iopool;
405 
406 	bzero(&saa, sizeof(saa));
407 	saa.saa_sc_link = &sc->sc_link;
408 
409 	config_found(&sc->sc_dev, &saa, scsiprint);
410 
411 	kthread_create_deferred(twe_thread_create, sc);
412 
413 	return (0);
414 }
415 
/*
 * twe_thread_create: deferred kthread creation callback.
 *
 * Spawns the per-controller worker thread (twe_thread) and then
 * enables controller interrupts.  If the thread cannot be created
 * the controller is left with interrupts disabled (effectively
 * inoperative) and a diagnostic is printed.
 */
void
twe_thread_create(void *v)
{
	struct twe_softc *sc = v;

	if (kthread_create(twe_thread, sc, &sc->sc_thread,
	    sc->sc_dev.dv_xname)) {
		/* TODO disable twe */
		printf("%s: failed to create kernel thread, disabled\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/*
	 * ack all before enable, cannot be done in one
	 * operation as it seems clear is not processed
	 * if enable is specified.
	 */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/* enable interrupts */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);
}
445 
/*
 * twe_thread: per-controller worker thread, runs forever at splbio.
 *
 * Each iteration (under the controller lock):
 *  - completes every ccb the interrupt handler placed on sc_done_ccb;
 *  - moves prequeued commands from sc_ccb2q onto the hardware command
 *    queue (and sc_ccbq) for as long as the command queue is not full;
 *  - if commands remain prequeued, unmasks the "command queue empty"
 *    interrupt so we are woken to push the rest.
 * Then sleeps until twe_intr()/twe_start() calls wakeup(sc).
 */
void
twe_thread(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	twe_lock_t lock;
	u_int32_t status;
	int err;

	splbio();
	for (;;) {
		lock = TWE_LOCK(sc);

		/* finish everything the interrupt handler collected */
		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
			if ((err = twe_done(sc, ccb)))
				printf("%s: done failed (%d)\n",
				    sc->sc_dev.dv_xname, err);
		}

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		/* feed prequeued commands while the hw queue has room */
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		/* hw queue filled up: ask for an interrupt when it drains */
		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_ECMDI);

		TWE_UNLOCK(sc, lock);
		sc->sc_thread_on = 1;
		tsleep(sc, PWAIT, "twespank", 0);
	}
}
496 
/*
 * twe_cmd: set up DMA for a ccb and submit it to the controller.
 *
 * `flags' are the BUS_DMA_* wait flags for the map load; if `wait'
 * is non-zero the command is polled to completion (twe_complete()),
 * otherwise it is handed to the worker thread via twe_start().
 *
 * The hardware requires TWE_ALIGN-aligned data buffers: an unaligned
 * ccb_data is bounced through a freshly allocated aligned buffer,
 * with the caller's original pointer stashed in ccb_realdata so
 * twe_done() can copy the result back and release the bounce buffer.
 *
 * Returns 0 on success or an errno-style value.
 */
int
twe_cmd(ccb, flags, wait)
	struct twe_ccb *ccb;
	int flags, wait;
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		/* allocate an aligned bounce buffer */
		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			return (ENOMEM);
		}
		/* pre-fill the bounce buffer (needed for writes) */
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			/* release the bounce buffer before bailing out */
			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			/* fill the scatter/gather list (little-endian) */
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len  = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
	/* make the command packet itself visible to the device */
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}
605 
/*
 * twe_start: hand a prepared ccb to the controller.
 *
 * Converts cmd_op to little-endian (it was built in host order).
 * Without `wait' the ccb is only prequeued on sc_ccb2q and the
 * worker thread is woken to push it.  With `wait' we poll (up to
 * ~10ms) for room in the hardware command queue and write the
 * command's bus address directly.
 *
 * Returns 0 on success, EPERM on command-queue timeout (historical
 * choice of errno; callers only test for non-zero).
 */
int
twe_start(ccb, wait)
	struct twe_ccb *ccb;
	int wait;
{
	struct twe_softc*sc = ccb->ccb_sc;
	struct twe_cmd	*cmd = ccb->ccb_cmd;
	u_int32_t	status;
	int i;

	cmd->cmd_op = htole16(cmd->cmd_op);

	if (!wait) {

		TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_PREQUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
		/* let the worker thread feed it to the hardware */
		wakeup(sc);
		return 0;
	}

	/* poll until the command queue has room */
	for (i = 1000; i--; DELAY(10)) {

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		if (!(status & TWE_STAT_CQF))
			break;
		TWE_DPRINTF(TWE_D_CMD,  ("twe_start stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
	}

	if (!(status & TWE_STAT_CQF)) {
		bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
		    ccb->ccb_cmdpa);

		TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
		ccb->ccb_state = TWE_CCB_QUEUED;
		TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
		return 0;

	} else {

		printf("%s: twe_start(%d) timed out\n",
		    sc->sc_dev.dv_xname, cmd->cmd_index);

		return EPERM;
	}
}
653 
/*
 * twe_complete: poll for completion of a queued ccb.
 *
 * Spins on the controller status, draining the ready queue and
 * completing every ccb that comes off it via twe_done() — other
 * commands may finish while we wait.  Succeeds (returns 0) as soon
 * as our own ccb completes cleanly; returns 1 on timeout.  The
 * timeout is derived from the xfer's timeout (ms) or 35s when the
 * ccb has no associated scsi_xfer.
 */
int
twe_complete(ccb)
	struct twe_ccb *ccb;
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	int i;

	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);

		/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		/* drain the ready queue while it is not empty */
		while (!(status & TWE_STAT_RQE)) {
			struct twe_ccb *ccb1;
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			/* ready word identifies the completed command */
			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
			ccb1->ccb_state = TWE_CCB_DONE;
			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD,  ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}
693 
/*
 * twe_done: finish processing of a completed ccb.
 *
 * Syncs and unloads the data map (direction chosen from the scsi
 * xfer flags, or from the command opcode for internal commands),
 * copies bounce-buffer results back to the caller's buffer and
 * frees the bounce buffer, then calls scsi_done() for midlayer
 * commands.  Returns 0 on success, 1 if the ccb was not actually
 * in the DONE state (controller handed us a bogus ready entry).
 */
int
twe_done(sc, ccb)
	struct twe_softc *sc;
	struct twe_ccb *ccb;
{
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t	dmap;
	twe_lock_t	lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));

	if (ccb->ccb_state != TWE_CCB_DONE) {
		printf("%s: undone ccb %d ready\n",
		     sc->sc_dev.dv_xname, cmd->cmd_index);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		/* PREVENT_ALLOW/SYNCHRONIZE_CACHE carry no data */
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		/* internal command: direction follows the opcode */
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	/* copy bounce-buffer contents back and release it */
	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK(sc);

	if (xs) {
		xs->resid = 0;
		scsi_done(xs);
	}
	TWE_UNLOCK(sc, lock);

	return 0;
}
757 
758 void
759 tweminphys(struct buf *bp, struct scsi_link *sl)
760 {
761 	if (bp->b_bcount > TWE_MAXFER)
762 		bp->b_bcount = TWE_MAXFER;
763 	minphys(bp);
764 }
765 
766 void
767 twe_copy_internal_data(xs, v, size)
768 	struct scsi_xfer *xs;
769 	void *v;
770 	size_t size;
771 {
772 	size_t copy_cnt;
773 
774 	TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));
775 
776 	if (!xs->datalen)
777 		printf("uio move is not yet supported\n");
778 	else {
779 		copy_cnt = MIN(size, xs->datalen);
780 		bcopy(v, xs->data, copy_cnt);
781 	}
782 }
783 
/*
 * twe_scsi_cmd: scsi midlayer entry point.
 *
 * Emulates a direct-access SCSI device per logical unit: INQUIRY,
 * REQUEST SENSE, READ CAPACITY and friends are answered from
 * sc_hdr[] without touching the hardware, while READ/WRITE/
 * SYNCHRONIZE CACHE are translated into controller commands and
 * issued through twe_cmd().  Commands complete via scsi_done().
 */
void
twe_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->adapter_softc;
	struct twe_ccb *ccb = xs->io;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, op, flags, wait;
	twe_lock_t lock;


	/* only LUN 0 of configured, present units is addressable */
	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		/* always succeed: units visible here are ready */
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		/* fabricate a "no sense" response */
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = SSD_ERRCODE_CURRENT;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		twe_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		/* fabricate inquiry data from the unit header */
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strlcpy(inq.vendor, "3WARE  ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
		    target);
		strlcpy(inq.revision, "   ", sizeof inq.revision);
		twe_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case READ_CAPACITY:
		/* answer from the capacity cached at attach time */
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		twe_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		/* no-op: media cannot be ejected */
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		scsi_done(xs);
		return;

	case READ_COMMAND:
	case READ_BIG:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case SYNCHRONIZE_CACHE:
		lock = TWE_LOCK(sc);

		flags = 0;
		if (xs->cmd->opcode == SYNCHRONIZE_CACHE) {
			blockno = blockcnt = 0;
		} else {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			/* reject transfers past the end of the unit */
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				TWE_UNLOCK(sc, lock);
				return;
			}
		}

		/* translate the SCSI opcode into a controller opcode */
		switch (xs->cmd->opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_BIG:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);
		wait = xs->flags & SCSI_POLL;
		/* before the worker thread exists we must poll */
		if (!sc->sc_thread_on)
			wait |= SCSI_POLL;

		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {

			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
		}

		TWE_UNLOCK(sc, lock);
		return;

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unsupported scsi command %#x tgt %d ",
		    xs->cmd->opcode, target));
		xs->error = XS_DRIVER_STUFFUP;
	}

	scsi_done(xs);
}
945 
/*
 * twe_intr: hardware interrupt handler.
 *
 * Drains the ready queue, moving completed ccbs to sc_done_ccb for
 * the worker thread to finish; acknowledges the "command queue
 * empty" interrupt; wakes the worker thread if anything happened;
 * and on an attention interrupt schedules twe_aen() to drain the
 * AEN queue.  Returns non-zero if the interrupt was ours.
 */
int
twe_intr(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb	*ccb;
	u_int32_t	status;
	int		rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR,  ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	if (status & TWE_STAT_RDYI) {

		/* pull completions until the ready queue is empty */
		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading ready queue
			 * we get all the status bits in each ready word.
			 * i wonder if it's legal to use those for
			 * status and avoid extra read below
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			/* defer completion processing to the thread */
			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
			ccb->ccb_state = TWE_CCB_DONE;
			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	/* command queue drained: mask the interrupt again */
	if (status & TWE_STAT_CMDI) {
		rv++;
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_MCMDI);
	}

	if (rv)
		wakeup(sc);

	if (status & TWE_STAT_ATTNI) {
		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		scsi_ioh_add(&sc->sc_aen);
	}

	return rv;
}
1017 
/*
 * twe_aen: scsi_ioh callback to read one AEN (asynchronous event
 * notification) from the controller.
 *
 * Issues a GPARAM for the AEN table and, if the queue was not yet
 * empty, re-schedules itself to fetch the next entry.  The event
 * code itself is read but not otherwise acted upon.
 *
 * NOTE(review): the parameter buffer lives on the kernel stack and
 * is DMAed to (via twe_cmd's alignment bounce when unaligned) —
 * presumably safe here because twe_cmd() is called with wait=1 so
 * the frame outlives the transfer; confirm before changing.
 */
void
twe_aen(void *cookie, void *io)
{
	struct twe_softc *sc = cookie;
	struct twe_ccb *ccb = io;
	struct twe_cmd *cmd = ccb->ccb_cmd;

	u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
	struct twe_param *pb = (void *) (((u_long)param_buf +
	    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
	u_int16_t aen;

	twe_lock_t lock;
	int error;

	ccb->ccb_xs = NULL;
	ccb->ccb_data = pb;
	ccb->ccb_length = TWE_SECTOR_SIZE;
	ccb->ccb_state = TWE_CCB_READY;
	cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
	cmd->cmd_op = TWE_CMD_GPARAM;
	cmd->cmd_flags = 0;
	cmd->cmd_param.count = 1;

	pb->table_id = TWE_PARAM_AEN;
	pb->param_id = 2;
	pb->param_size = 2;

	lock = TWE_LOCK(sc);
	error = twe_cmd(ccb, BUS_DMA_NOWAIT, 1);
	TWE_UNLOCK(sc, lock);
	scsi_io_put(&sc->sc_iopool, ccb);

	if (error) {
		printf("%s: error draining attention queue\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* more events pending: come back for the next one */
	aen = *(u_int16_t *)pb->data;
	if (aen != TWE_AEN_QEMPTY)
		scsi_ioh_add(&sc->sc_aen);
}
1061