1 /*	$NetBSD: bha.c,v 1.49 2002/04/05 18:27:51 bouyer Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Originally written by Julian Elischer (julian@tfs.com)
42  * for TRW Financial Systems for use under the MACH(2.5) operating system.
43  *
44  * TRW Financial Systems, in accordance with their agreement with Carnegie
45  * Mellon University, makes this software available to CMU to distribute
46  * or use in any manner that they see fit as long as this message is kept with
47  * the software. For this reason TFS also grants any other persons or
48  * organisations permission to use or modify this software.
49  *
50  * TFS supplies this software to be publicly redistributed
51  * on the understanding that TFS is not responsible for the correct
52  * functioning of this software in any circumstances.
53  */
54 
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(0, "$NetBSD: bha.c,v 1.49 2002/04/05 18:27:51 bouyer Exp $");
57 
58 #include "opt_ddb.h"
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/callout.h>
63 #include <sys/kernel.h>
64 #include <sys/errno.h>
65 #include <sys/ioctl.h>
66 #include <sys/device.h>
67 #include <sys/malloc.h>
68 #include <sys/buf.h>
69 #include <sys/proc.h>
70 #include <sys/user.h>
71 
72 #include <uvm/uvm_extern.h>
73 
74 #include <machine/bus.h>
75 #include <machine/intr.h>
76 
77 #include <dev/scsipi/scsi_all.h>
78 #include <dev/scsipi/scsipi_all.h>
79 #include <dev/scsipi/scsiconf.h>
80 
81 #include <dev/ic/bhareg.h>
82 #include <dev/ic/bhavar.h>
83 
84 #ifndef DDB
85 #define Debugger() panic("should call debugger here (bha.c)")
86 #endif /* ! DDB */
87 
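/*
 * One fewer than BHA_NSEG pages: even a maximally misaligned buffer
 * of this size (which can touch one extra page) still fits in the
 * BHA_NSEG-entry hardware scatter/gather list.
 */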
88 #define	BHA_MAXXFER	((BHA_NSEG - 1) << PGSHIFT)
89 
90 #ifdef BHADEBUG
91 int     bha_debug = 0;
92 #endif /* BHADEBUG */
93 
94 static int bha_cmd __P((bus_space_tag_t, bus_space_handle_t, char *, int,
95 	    u_char *, int, u_char *));
96 
97 static void bha_scsipi_request __P((struct scsipi_channel *,
98 	    scsipi_adapter_req_t, void *));
99 static void bha_minphys __P((struct buf *));
100 
101 static void bha_get_xfer_mode __P((struct bha_softc *,
102 	    struct scsipi_xfer_mode *));
103 
104 static void bha_done __P((struct bha_softc *, struct bha_ccb *));
105 int bha_poll __P((struct bha_softc *, struct scsipi_xfer *, int));
106 static void bha_timeout __P((void *arg));
107 
108 static int bha_init __P((struct bha_softc *));
109 
110 static int bha_create_mailbox __P((struct bha_softc *));
111 static void bha_collect_mbo __P((struct bha_softc *));
112 
113 static void bha_queue_ccb __P((struct bha_softc *, struct bha_ccb *));
114 static void bha_start_ccbs __P((struct bha_softc *));
115 static void bha_finish_ccbs __P((struct bha_softc *));
116 
117 struct bha_ccb *bha_ccb_phys_kv __P((struct bha_softc *, bus_addr_t));
118 void	bha_create_ccbs __P((struct bha_softc *, int));
119 int	bha_init_ccb __P((struct bha_softc *, struct bha_ccb *));
120 struct bha_ccb *bha_get_ccb __P((struct bha_softc *));
121 void	bha_free_ccb __P((struct bha_softc *, struct bha_ccb *));
122 
123 #define BHA_RESET_TIMEOUT	2000	/* time to wait for reset (mSec) */
124 #define	BHA_ABORT_TIMEOUT	2000	/* time to wait for abort (mSec) */
125 
126 /*
127  * Number of CCBs in an allocation group; must be computed at run-time.
128  */
129 int	bha_ccbs_per_group;
130 
131 __inline struct bha_mbx_out *bha_nextmbo __P((struct bha_softc *,
132 	struct bha_mbx_out *));
133 __inline struct bha_mbx_in *bha_nextmbi __P((struct bha_softc *,
134 	struct bha_mbx_in *));
135 
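/*
 * The out-going and in-coming mailbox arrays are used as circular
 * rings; these helpers return the next slot, wrapping back to slot 0
 * after sc_mbox_count entries.
 */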
136 __inline struct bha_mbx_out *
137 bha_nextmbo(sc, mbo)
138 	struct bha_softc *sc;
139 	struct bha_mbx_out *mbo;
140 {
141 
142 	if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
143 		return (&sc->sc_mbo[0]);
144 	return (mbo + 1);
145 }
146 
147 __inline struct bha_mbx_in *
148 bha_nextmbi(sc, mbi)
149 	struct bha_softc *sc;
150 	struct bha_mbx_in *mbi;
151 {
152 	if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
153 		return (&sc->sc_mbi[0]);
154 	return (mbi + 1);
155 }
156 
157 /*
158  * bha_attach:
159  *
160  *	Finish attaching a Buslogic controller, and configure children.
161  */
162 void
163 bha_attach(sc)
164 	struct bha_softc *sc;
165 {
166 	struct scsipi_adapter *adapt = &sc->sc_adapter;
167 	struct scsipi_channel *chan = &sc->sc_channel;
168 	int initial_ccbs;
169 
170 	/*
171 	 * Initialize the number of CCBs per group.
172 	 */
173 	if (bha_ccbs_per_group == 0)
174 		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;
175 
176 	initial_ccbs = bha_info(sc);
177 	if (initial_ccbs == 0) {
178 		printf("%s: unable to get adapter info\n",
179 		    sc->sc_dev.dv_xname);
180 		return;
181 	}
182 
183 	/*
184 	 * Fill in the scsipi_adapter.
185 	 */
186 	memset(adapt, 0, sizeof(*adapt));
187 	adapt->adapt_dev = &sc->sc_dev;
188 	adapt->adapt_nchannels = 1;
189 	/* adapt_openings initialized below */
190 	adapt->adapt_max_periph = sc->sc_mbox_count;
191 	adapt->adapt_request = bha_scsipi_request;
192 	adapt->adapt_minphys = bha_minphys;
193 
194 	/*
195 	 * Fill in the scsipi_channel.
196 	 */
197 	memset(chan, 0, sizeof(*chan));
198 	chan->chan_adapter = adapt;
199 	chan->chan_bustype = &scsi_bustype;
200 	chan->chan_channel = 0;
201 	chan->chan_flags = SCSIPI_CHAN_CANGROW;
202 	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
203 	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
204 	chan->chan_id = sc->sc_scsi_id;
205 
206 	TAILQ_INIT(&sc->sc_free_ccb);
207 	TAILQ_INIT(&sc->sc_waiting_ccb);
208 	TAILQ_INIT(&sc->sc_allocating_ccbs);
209 
210 	if (bha_create_mailbox(sc) != 0)
211 		return;
212 
213 	bha_create_ccbs(sc, initial_ccbs);
214 	if (sc->sc_cur_ccbs < 2) {
215 		printf("%s: not enough CCBs to run\n",
216 		    sc->sc_dev.dv_xname);
217 		return;
218 	}
219 
220 	adapt->adapt_openings = sc->sc_cur_ccbs;
221 
222 	if (bha_init(sc) != 0)
223 		return;
224 
225 	(void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
226 }
227 
228 /*
229  * bha_intr:
230  *
231  *	Interrupt service routine.
232  */
233 int
234 bha_intr(arg)
235 	void *arg;
236 {
237 	struct bha_softc *sc = arg;
238 	bus_space_tag_t iot = sc->sc_iot;
239 	bus_space_handle_t ioh = sc->sc_ioh;
240 	u_char sts;
241 
242 #ifdef BHADEBUG
243 	printf("%s: bha_intr ", sc->sc_dev.dv_xname);
244 #endif /* BHADEBUG */
245 
246 	/*
247 	 * First acknowledge the interrupt.  Then, if it's not telling us
248 	 * about a completed operation, just return.
249 	 */
250 	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
251 	if ((sts & BHA_INTR_ANYINTR) == 0)
252 		return (0);
253 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
254 
255 #ifdef BHADIAG
256 	/* Make sure we clear CCB_SENDING before finishing a CCB. */
257 	bha_collect_mbo(sc);
258 #endif
259 
260 	/* Mail box out empty? */
261 	if (sts & BHA_INTR_MBOA) {
262 		struct bha_toggle toggle;
263 
264 		toggle.cmd.opcode = BHA_MBO_INTR_EN;
265 		toggle.cmd.enable = 0;
266 		bha_cmd(iot, ioh, sc->sc_dev.dv_xname,
267 		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
268 		    0, (u_char *)0);
269 		bha_start_ccbs(sc);
270 	}
271 
272 	/* Mail box in full? */
273 	if (sts & BHA_INTR_MBIF)
274 		bha_finish_ccbs(sc);
275 
276 	return (1);
277 }
278 
279 /*****************************************************************************
280  * SCSI interface routines
281  *****************************************************************************/
282 
283 /*
284  * bha_scsipi_request:
285  *
286  *	Perform a request for the SCSIPI layer.
287  */
288 void
289 bha_scsipi_request(chan, req, arg)
290 	struct scsipi_channel *chan;
291 	scsipi_adapter_req_t req;
292 	void *arg;
293 {
294 	struct scsipi_adapter *adapt = chan->chan_adapter;
295 	struct bha_softc *sc = (void *)adapt->adapt_dev;
296 	struct scsipi_xfer *xs;
297 	struct scsipi_periph *periph;
298 	bus_dma_tag_t dmat = sc->sc_dmat;
299 	struct bha_ccb *ccb;
300 	int error, seg, flags, s;
301 
302 	switch (req) {
303 	case ADAPTER_REQ_RUN_XFER:
304 		xs = arg;
305 		periph = xs->xs_periph;
306 		flags = xs->xs_control;
307 
308 		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));
309 
310 		/* Get a CCB to use. */
311 		ccb = bha_get_ccb(sc);
312 #ifdef DIAGNOSTIC
313 		/*
314 		 * This should never happen as we track the resources
315 		 * in the mid-layer.
316 		 */
317 		if (ccb == NULL) {
318 			scsipi_printaddr(periph);
319 			printf("unable to allocate ccb\n");
320 			panic("bha_scsipi_request");
321 		}
322 #endif
323 
324 		ccb->xs = xs;
325 		ccb->timeout = xs->timeout;
326 
327 		/*
328 		 * Put all the arguments for the xfer in the ccb
329 		 */
330 		if (flags & XS_CTL_RESET) {
331 			ccb->opcode = BHA_RESET_CCB;
332 			ccb->scsi_cmd_length = 0;
333 		} else {
334 			/* can't use S/G if zero length */
335 			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
336 						   : BHA_INITIATOR_CCB);
337 			memcpy(&ccb->scsi_cmd, xs->cmd,
338 			    ccb->scsi_cmd_length = xs->cmdlen);
339 		}
340 
341 		if (xs->datalen) {
342 			/*
343 			 * Map the DMA transfer.
344 			 */
345 #ifdef TFS
346 			if (flags & XS_CTL_DATA_UIO) {
347 				error = bus_dmamap_load_uio(dmat,
348 				    ccb->dmamap_xfer, (struct uio *)xs->data,
349 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
350 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
351 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
352 				      BUS_DMA_WRITE));
353 			} else
354 #endif /* TFS */
355 			{
356 				error = bus_dmamap_load(dmat,
357 				    ccb->dmamap_xfer, xs->data, xs->datalen,
358 				    NULL,
359 				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
360 				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
361 				     ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
362 				      BUS_DMA_WRITE));
363 			}
364 
365 			switch (error) {
366 			case 0:
367 				break;
368 
369 			case ENOMEM:
370 			case EAGAIN:
371 				xs->error = XS_RESOURCE_SHORTAGE;
372 				goto out_bad;
373 
374 			default:
375 				xs->error = XS_DRIVER_STUFFUP;
376 				printf("%s: error %d loading DMA map\n",
377 				    sc->sc_dev.dv_xname, error);
378  out_bad:
379 				bha_free_ccb(sc, ccb);
380 				scsipi_done(xs);
381 				return;
382 			}
383 
384 			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
385 			    ccb->dmamap_xfer->dm_mapsize,
386 			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
387 			    BUS_DMASYNC_PREWRITE);
388 
389 			/*
390 			 * Load the hardware scatter/gather map with the
391 			 * contents of the DMA map.
392 			 */
393 			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
394 				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
395 				    ccb->scat_gath[seg].seg_addr);
396 				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
397 				    ccb->scat_gath[seg].seg_len);
398 			}
399 
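			/*
			 * Point the CCB at its own scatter/gather list:
			 * data_addr gets the bus address of the embedded
			 * scat_gath array, data_length its length in bytes.
			 */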
400 			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
401 			    scat_gath), ccb->data_addr);
402 			ltophys(ccb->dmamap_xfer->dm_nsegs *
403 			    sizeof(struct bha_scat_gath), ccb->data_length);
404 		} else {
405 			/*
406 			 * No data xfer, use non S/G values.
407 			 */
408 			ltophys(0, ccb->data_addr);
409 			ltophys(0, ccb->data_length);
410 		}
411 
412 		if (XS_CTL_TAGTYPE(xs) != 0) {
413 			ccb->tag_enable = 1;
414 			ccb->tag_type = xs->xs_tag_type & 0x03;
415 		} else {
416 			ccb->tag_enable = 0;
417 			ccb->tag_type = 0;
418 		}
419 
420 		ccb->data_out = 0;
421 		ccb->data_in = 0;
422 		ccb->target = periph->periph_target;
423 		ccb->lun = periph->periph_lun;
424 		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
425 		    ccb->sense_ptr);
426 		ccb->req_sense_length = sizeof(ccb->scsi_sense);
427 		ccb->host_stat = 0x00;
428 		ccb->target_stat = 0x00;
429 		ccb->link_id = 0;
430 		ltophys(0, ccb->link_addr);
431 
432 		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
433 
434 		s = splbio();
435 		bha_queue_ccb(sc, ccb);
436 		splx(s);
437 
438 		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
439 		if ((flags & XS_CTL_POLL) == 0)
440 			return;
441 
442 		/*
443 		 * If we can't use interrupts, poll on completion
444 		 */
445 		if (bha_poll(sc, xs, ccb->timeout)) {
446 			bha_timeout(ccb);
447 			if (bha_poll(sc, xs, ccb->timeout))
448 				bha_timeout(ccb);
449 		}
450 		return;
451 
452 	case ADAPTER_REQ_GROW_RESOURCES:
453 		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
454 			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
455 			return;
456 		}
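		/*
		 * Reuse "seg" to remember the old CCB count so we can
		 * report how many new openings were created.
		 */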
457 		seg = sc->sc_cur_ccbs;
458 		bha_create_ccbs(sc, bha_ccbs_per_group);
459 		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
460 		return;
461 
462 	case ADAPTER_REQ_SET_XFER_MODE:
463 		/*
464 		 * Can't really do this on the Buslogic.  It has its
465 		 * own setup info.  But we do know how to query what
466 		 * the settings are.
467 		 */
468 		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
469 		return;
470 	}
471 }
472 
473 /*
474  * bha_minphys:
475  *
476  *	Limit a transfer to our maximum transfer size.
477  */
478 void
479 bha_minphys(bp)
480 	struct buf *bp;
481 {
482 
483 	if (bp->b_bcount > BHA_MAXXFER)
484 		bp->b_bcount = BHA_MAXXFER;
485 	minphys(bp);
486 }
487 
488 /*****************************************************************************
489  * SCSI job execution helper routines
490  *****************************************************************************/
491 
492 /*
493  * bha_get_xfer_mode:
494  *
495  *	Negotiate the xfer mode for the specified periph, and report
496  *	back the mode to the midlayer.
497  *
498  *	NOTE: we must be called at splbio().
499  */
500 void
501 bha_get_xfer_mode(sc, xm)
502 	struct bha_softc *sc;
503 	struct scsipi_xfer_mode *xm;
504 {
505 	struct bha_setup hwsetup;
506 	struct bha_period hwperiod;
507 	struct bha_sync *bs;
508 	int toff = xm->xm_target & 7, tmask = (1 << toff);
509 	int wide, period, offset, rlen;
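	/*
	 * toff and tmask select this target's entry; the adapter
	 * reports targets 0-7 and 8-15 in separate low/high halves.
	 */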
510 
511 	/*
512 	 * Issue an Inquire Setup Information.  We can extract
513 	 * sync and wide information from here.
514 	 */
515 	rlen = sizeof(hwsetup.reply) +
516 	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
517 	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
518 	hwsetup.cmd.len = rlen;
519 	bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
520 	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
521 	    rlen, (u_char *)&hwsetup.reply);
522 
523 	xm->xm_mode = 0;
524 	xm->xm_period = 0;
525 	xm->xm_offset = 0;
526 
527 	/*
528 	 * First check for wide.  On later boards, we can check
529 	 * directly in the setup info if wide is currently active.
530 	 *
531 	 * On earlier boards, we have to make an educated guess.
532 	 */
533 	if (sc->sc_flags & BHAF_WIDE) {
534 		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
535 			if (xm->xm_target > 7) {
536 				wide =
537 				    hwsetup.reply_w.high_wide_active & tmask;
538 			} else {
539 				wide =
540 				    hwsetup.reply_w.low_wide_active & tmask;
541 			}
542 			if (wide)
543 				xm->xm_mode |= PERIPH_CAP_WIDE16;
544 		} else {
545 			/* XXX Check `wide permitted' in the config info. */
546 			xm->xm_mode |= PERIPH_CAP_WIDE16;
547 		}
548 	}
549 
550 	/*
551 	 * Now get basic sync info.
552 	 */
553 	bs = (xm->xm_target > 7) ?
554 	     &hwsetup.reply_w.sync_high[toff] :
555 	     &hwsetup.reply.sync_low[toff];
556 
557 	if (bs->valid) {
558 		xm->xm_mode |= PERIPH_CAP_SYNC;
559 		period = (bs->period * 50) + 20;
560 		offset = bs->offset;
561 
562 		/*
563 		 * On boards that can do Fast and Ultra, use the Inquire Period
564 		 * command to get the period.
565 		 */
566 		if (sc->sc_firmware[0] >= '3') {
567 			rlen = sizeof(hwperiod.reply) +
568 			    ((sc->sc_flags & BHAF_WIDE) ?
569 			      sizeof(hwperiod.reply_w) : 0);
570 			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
571 			hwperiod.cmd.len = rlen;
572 			bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
573 			    sizeof(hwperiod.cmd), (u_char *)&hwperiod.cmd,
574 			    rlen, (u_char *)&hwperiod.reply);
575 
576 			if (xm->xm_target > 7)
577 				period = hwperiod.reply_w.period[toff];
578 			else
579 				period = hwperiod.reply.period[toff];
580 
581 			period *= 10;
582 		}
583 
584 		xm->xm_period =
585 		    scsipi_sync_period_to_factor(period * 10);
586 		xm->xm_offset = offset;
587 	}
588 
589 	/*
590 	 * Now check for tagged queueing support.
591 	 *
592 	 * XXX Check `tags permitted' in the config info.
593 	 */
594 	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
595 		xm->xm_mode |= PERIPH_CAP_TQING;
596 
597 	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
598 }
599 
600 /*
601  * bha_done:
602  *
603  *	A CCB has completed execution.  Pass the status back to the
604  *	upper layer.
605  */
606 void
607 bha_done(sc, ccb)
608 	struct bha_softc *sc;
609 	struct bha_ccb *ccb;
610 {
611 	bus_dma_tag_t dmat = sc->sc_dmat;
612 	struct scsipi_xfer *xs = ccb->xs;
613 
614 	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));
615 
616 #ifdef BHADIAG
617 	if (ccb->flags & CCB_SENDING) {
618 		printf("%s: exiting ccb still in transit!\n",
619 		    sc->sc_dev.dv_xname);
620 		Debugger();
621 		return;
622 	}
623 #endif
624 	if ((ccb->flags & CCB_ALLOC) == 0) {
625 		printf("%s: exiting ccb not allocated!\n",
626 		    sc->sc_dev.dv_xname);
627 		Debugger();
628 		return;
629 	}
630 
631 	/*
632 	 * If we were a data transfer, unload the map that described
633 	 * the data buffer.
634 	 */
635 	if (xs->datalen) {
636 		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
637 		    ccb->dmamap_xfer->dm_mapsize,
638 		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
639 		    BUS_DMASYNC_POSTWRITE);
640 		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
641 	}
642 
643 	if (xs->error == XS_NOERROR) {
644 		if (ccb->host_stat != BHA_OK) {
645 			switch (ccb->host_stat) {
646 			case BHA_SEL_TIMEOUT:	/* No response */
647 				xs->error = XS_SELTIMEOUT;
648 				break;
649 			default:	/* Other scsi protocol messes */
650 				printf("%s: host_stat %x\n",
651 				    sc->sc_dev.dv_xname, ccb->host_stat);
652 				xs->error = XS_DRIVER_STUFFUP;
653 				break;
654 			}
655 		} else if (ccb->target_stat != SCSI_OK) {
656 			switch (ccb->target_stat) {
657 			case SCSI_CHECK:
658 				memcpy(&xs->sense.scsi_sense,
659 				    &ccb->scsi_sense,
660 				    sizeof(xs->sense.scsi_sense));
661 				xs->error = XS_SENSE;
662 				break;
663 			case SCSI_BUSY:
664 				xs->error = XS_BUSY;
665 				break;
666 			default:
667 				printf("%s: target_stat %x\n",
668 				    sc->sc_dev.dv_xname, ccb->target_stat);
669 				xs->error = XS_DRIVER_STUFFUP;
670 				break;
671 			}
672 		} else
673 			xs->resid = 0;
674 	}
675 
676 	bha_free_ccb(sc, ccb);
677 	scsipi_done(xs);
678 }
679 
680 /*
681  * bha_poll:
682  *
683  *	Poll for completion of the specified job.
684  */
685 int
686 bha_poll(sc, xs, count)
687 	struct bha_softc *sc;
688 	struct scsipi_xfer *xs;
689 	int count;
690 {
691 	bus_space_tag_t iot = sc->sc_iot;
692 	bus_space_handle_t ioh = sc->sc_ioh;
693 
694 	/* timeouts are in msec, so we loop in 1000 usec cycles */
695 	while (count) {
696 		/*
697 		 * If we had interrupts enabled, would we
698 		 * have got an interrupt?
699 		 */
700 		if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
701 		    BHA_INTR_ANYINTR)
702 			bha_intr(sc);
703 		if (xs->xs_status & XS_STS_DONE)
704 			return (0);
705 		delay(1000);	/* only happens in boot so ok */
706 		count--;
707 	}
708 	return (1);
709 }
710 
711 /*
712  * bha_timeout:
713  *
714  *	CCB timeout handler.
715  */
716 void
717 bha_timeout(arg)
718 	void *arg;
719 {
720 	struct bha_ccb *ccb = arg;
721 	struct scsipi_xfer *xs = ccb->xs;
722 	struct scsipi_periph *periph = xs->xs_periph;
723 	struct bha_softc *sc =
724 	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
725 	int s;
726 
727 	scsipi_printaddr(periph);
728 	printf("timed out");
729 
730 	s = splbio();
731 
732 #ifdef BHADIAG
733 	/*
734 	 * If the ccb's mbx is not free, then the board has gone Far East?
735 	 */
736 	bha_collect_mbo(sc);
737 	if (ccb->flags & CCB_SENDING) {
738 		printf("%s: not taking commands!\n", sc->sc_dev.dv_xname);
739 		Debugger();
740 	}
741 #endif
742 
743 	/*
744 	 * If it has been through before, then
745 	 * a previous abort has failed, don't
746 	 * try abort again
747 	 */
748 	if (ccb->flags & CCB_ABORT) {
749 		/* abort timed out */
750 		printf(" AGAIN\n");
751 		/* XXX Must reset! */
752 	} else {
753 		/* abort the operation that has timed out */
754 		printf("\n");
755 		ccb->xs->error = XS_TIMEOUT;
756 		ccb->timeout = BHA_ABORT_TIMEOUT;
757 		ccb->flags |= CCB_ABORT;
758 		bha_queue_ccb(sc, ccb);
759 	}
760 
761 	splx(s);
762 }
763 
764 /*****************************************************************************
765  * Misc. subroutines.
766  *****************************************************************************/
767 
768 /*
769  * bha_cmd:
770  *
771  *	Send a command to the BusLogic controller.
772  */
773 int
774 bha_cmd(iot, ioh, name, icnt, ibuf, ocnt, obuf)
775 	bus_space_tag_t iot;
776 	bus_space_handle_t ioh;
777 	char *name;
778 	int icnt, ocnt;
779 	u_char *ibuf, *obuf;
780 {
781 	int i;
782 	int wait;
783 	u_char sts;
784 	u_char opcode = ibuf[0];
785 
786 	/*
787 	 * Calculate a reasonable timeout for the command.
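	 * The value is consumed in 50-usec polling steps below, so
	 * 20000 corresponds to roughly one second; the device-inquiry
	 * commands are given about 90 seconds.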
788 	 */
789 	switch (opcode) {
790 	case BHA_INQUIRE_DEVICES:
791 	case BHA_INQUIRE_DEVICES_2:
792 		wait = 90 * 20000;
793 		break;
794 	default:
795 		wait = 1 * 20000;
796 		break;
797 	}
798 
799 	/*
800 	 * Wait for the adapter to go idle, unless it's one of
801 	 * the commands which don't need this
802 	 */
803 	if (opcode != BHA_MBO_INTR_EN) {
804 		for (i = 20000; i; i--) {	/* 1 sec? */
805 			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
806 			if (sts & BHA_STAT_IDLE)
807 				break;
808 			delay(50);
809 		}
810 		if (!i) {
811 			printf("%s: bha_cmd, host not idle(0x%x)\n",
812 			    name, sts);
813 			return (1);
814 		}
815 	}
816 
817 	/*
818 	 * Now that it is idle, if we expect output, preflush the
819 	 * queue feeding to us.
820 	 */
821 	if (ocnt) {
822 		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
823 		    BHA_STAT_DF)
824 			bus_space_read_1(iot, ioh, BHA_DATA_PORT);
825 	}
826 
827 	/*
828 	 * Output the command and its arguments one byte at a time; before
829 	 * writing each byte, check that the command/data port is empty.
830 	 */
831 	while (icnt--) {
832 		for (i = wait; i; i--) {
833 			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
834 			if (!(sts & BHA_STAT_CDF))
835 				break;
836 			delay(50);
837 		}
838 		if (!i) {
839 			if (opcode != BHA_INQUIRE_REVISION)
840 				printf("%s: bha_cmd, cmd/data port full\n",
841 				    name);
842 			goto bad;
843 		}
844 		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
845 	}
846 
847 	/*
848 	 * If we expect input, loop that many times, each time waiting
849 	 * for the data register to contain valid data.
850 	 */
851 	while (ocnt--) {
852 		for (i = wait; i; i--) {
853 			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
854 			if (sts & BHA_STAT_DF)
855 				break;
856 			delay(50);
857 		}
858 		if (!i) {
859 #ifdef BHADEBUG
860 			if (opcode != BHA_INQUIRE_REVISION)
861 				printf("%s: bha_cmd, cmd/data port empty %d\n",
862 				    name, ocnt);
863 #endif /* BHADEBUG */
864 			goto bad;
865 		}
866 		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
867 	}
868 
869 	/*
870 	 * Wait for the board to report a finished instruction.
871 	 * We may get an extra interrupt for the HACC signal, but this is
872 	 * unimportant.
873 	 */
874 	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
875 		for (i = 20000; i; i--) {	/* 1 sec? */
876 			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
877 			/* XXX Need to save this in the interrupt handler? */
878 			if (sts & BHA_INTR_HACC)
879 				break;
880 			delay(50);
881 		}
882 		if (!i) {
883 			printf("%s: bha_cmd, host not finished(0x%x)\n",
884 			    name, sts);
885 			return (1);
886 		}
887 	}
888 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
889 	return (0);
890 
891 bad:
892 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
893 	return (1);
894 }
895 
896 /*
897  * bha_find:
898  *
899  *	Find the board.
900  */
901 int
902 bha_find(iot, ioh)
903 	bus_space_tag_t iot;
904 	bus_space_handle_t ioh;
905 {
906 	int i;
907 	u_char sts;
908 	struct bha_extended_inquire inquire;
909 
910 	/* Check something is at the ports we need to access */
911 	sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
912 	if (sts == 0xFF)
913 		return (0);
914 
915 	/*
916 	 * Reset the board.  If it doesn't respond, assume
917 	 * that it's not there; good for the probe.
918 	 */
919 
920 	bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
921 	    BHA_CTRL_HRST | BHA_CTRL_SRST);
922 
923 	delay(100);
924 	for (i = BHA_RESET_TIMEOUT; i; i--) {
925 		sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
926 		if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
927 			break;
928 		delay(1000);
929 	}
930 	if (!i) {
931 #ifdef BHADEBUG
932 		if (bha_debug)
933 			printf("bha_find: No answer from buslogic board\n");
934 #endif /* BHADEBUG */
935 		return (0);
936 	}
937 
938 	/*
939 	 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
940 	 * interface. The native bha interface is not compatible with
941 	 * an aha 1542.  We need to ensure that we never match an
942 	 * Adaptec 1542. We must also avoid sending Adaptec-compatible
943 	 * commands to a real bha, lest it go into 1542 emulation mode.
944 	 * (On an indirect bus like ISA, we should always probe for BusLogic
945 	 * interfaces before Adaptec interfaces).
946 	 */
947 
948 	/*
949 	 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
950 	 * for an extended-geometry register.  The 1542[AB] don't have one.
951 	 */
952 	sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
953 	if (sts == 0xFF)
954 		return (0);
955 
956 	/*
957 	 * Check that we actually know how to use this board.
958 	 */
959 	delay(1000);
960 	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
961 	inquire.cmd.len = sizeof(inquire.reply);
962 	i = bha_cmd(iot, ioh, "(bha_find)",
963 	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
964 	    sizeof(inquire.reply), (u_char *)&inquire.reply);
965 
966 	/*
967 	 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
968 	 * have the extended-geometry register and also respond to
969 	 * BHA_INQUIRE_EXTENDED.  Make sure we never match such cards,
970 	 * by checking the size of the reply is what a BusLogic card returns.
971 	 */
972 	if (i) {
973 #ifdef BHADEBUG
974 		printf("bha_find: board returned %d instead of %d to %s\n",
975 		       i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
976 #endif
977 		return (0);
978 	}
979 
980 	/* OK, we know we've found a BusLogic adapter. */
981 
982 	switch (inquire.reply.bus_type) {
983 	case BHA_BUS_TYPE_24BIT:
984 	case BHA_BUS_TYPE_32BIT:
985 		break;
986 	case BHA_BUS_TYPE_MCA:
987 		/* We don't grok MicroChannel (yet). */
988 		return (0);
989 	default:
990 		printf("bha_find: illegal bus type %c\n",
991 		    inquire.reply.bus_type);
992 		return (0);
993 	}
994 
995 	return (1);
996 }
997 
998 
999 /*
1000  * bha_inquire_config:
1001  *
1002  *	Determine irq/drq.
1003  */
1004 int
1005 bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh,
1006 	    struct bha_probe_data *sc)
1007 {
1008 	int irq, drq;
1009 	struct bha_config config;
1010 
1011 	/*
1012 	 * Assume we have a board at this stage; set up the DMA channel
1013 	 * from the jumpers and save the interrupt level.
1014 	 */
1015 	delay(1000);
1016 	config.cmd.opcode = BHA_INQUIRE_CONFIG;
1017 	bha_cmd(iot, ioh, "(bha_inquire_config)",
1018 	    sizeof(config.cmd), (u_char *)&config.cmd,
1019 	    sizeof(config.reply), (u_char *)&config.reply);
1020 	switch (config.reply.chan) {
1021 	case EISADMA:
1022 		drq = -1;
1023 		break;
1024 	case CHAN0:
1025 		drq = 0;
1026 		break;
1027 	case CHAN5:
1028 		drq = 5;
1029 		break;
1030 	case CHAN6:
1031 		drq = 6;
1032 		break;
1033 	case CHAN7:
1034 		drq = 7;
1035 		break;
1036 	default:
1037 		printf("bha: illegal drq setting %x\n",
1038 		    config.reply.chan);
1039 		return (0);
1040 	}
1041 
1042 	switch (config.reply.intr) {
1043 	case INT9:
1044 		irq = 9;
1045 		break;
1046 	case INT10:
1047 		irq = 10;
1048 		break;
1049 	case INT11:
1050 		irq = 11;
1051 		break;
1052 	case INT12:
1053 		irq = 12;
1054 		break;
1055 	case INT14:
1056 		irq = 14;
1057 		break;
1058 	case INT15:
1059 		irq = 15;
1060 		break;
1061 	default:
1062 		printf("bha: illegal irq setting %x\n",
1063 		    config.reply.intr);
1064 		return (0);
1065 	}
1066 
1067 	/* if we want to fill in softc, do so now */
1068 	if (sc != NULL) {
1069 		sc->sc_irq = irq;
1070 		sc->sc_drq = drq;
1071 	}
1072 
1073 	return (1);
1074 }
1075 
1076 int
1077 bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh,
1078     struct bha_probe_data *bpd)
1079 {
1080 	return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd);
1081 }
1082 
1083 /*
1084  * bha_disable_isacompat:
1085  *
1086  *	Disable the ISA-compatibility ioports on PCI bha devices,
1087  *	to ensure they're not autoconfigured a second time as an ISA bha.
1088  */
1089 int
1090 bha_disable_isacompat(sc)
1091 	struct bha_softc *sc;
1092 {
1093 	struct bha_isadisable isa_disable;
1094 
1095 	isa_disable.cmd.opcode = BHA_MODIFY_IOPORT;
1096 	isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1;
1097 	bha_cmd(sc->sc_iot, sc->sc_ioh, sc->sc_dev.dv_xname,
1098 	    sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd,
1099 	    0, (u_char *)0);
1100 	return (0);
1101 }
1102 
1103 /*
1104  * bha_info:
1105  *
1106  *	Get information about the board, and report it.  We
1107  *	return the initial number of CCBs, 0 if we failed.
1108  */
1109 int
1110 bha_info(sc)
1111 	struct bha_softc *sc;
1112 {
1113 	bus_space_tag_t iot = sc->sc_iot;
1114 	bus_space_handle_t ioh = sc->sc_ioh;
1115 	struct bha_extended_inquire inquire;
1116 	struct bha_config config;
1117 	struct bha_devices devices;
1118 	struct bha_setup setup;
1119 	struct bha_model model;
1120 	struct bha_revision revision;
1121 	struct bha_digit digit;
1122 	int i, j, initial_ccbs, rlen;
1123 	char *name = sc->sc_dev.dv_xname;
1124 	char *p;
1125 
1126 	/*
1127 	 * Fetch the extended inquire information.
1128 	 */
1129 	inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
1130 	inquire.cmd.len = sizeof(inquire.reply);
1131 	bha_cmd(iot, ioh, name,
1132 	    sizeof(inquire.cmd), (u_char *)&inquire.cmd,
1133 	    sizeof(inquire.reply), (u_char *)&inquire.reply);
1134 
1135 	/*
1136 	 * Fetch the configuration information.
1137 	 */
1138 	config.cmd.opcode = BHA_INQUIRE_CONFIG;
1139 	bha_cmd(iot, ioh, name,
1140 	    sizeof(config.cmd), (u_char *)&config.cmd,
1141 	    sizeof(config.reply), (u_char *)&config.reply);
1142 
1143 	sc->sc_scsi_id = config.reply.scsi_dev;
1144 
1145 	/*
1146 	 * Get the firmware revision.
1147 	 */
1148 	p = sc->sc_firmware;
1149 	revision.cmd.opcode = BHA_INQUIRE_REVISION;
1150 	bha_cmd(iot, ioh, name,
1151 	    sizeof(revision.cmd), (u_char *)&revision.cmd,
1152 	    sizeof(revision.reply), (u_char *)&revision.reply);
1153 	*p++ = revision.reply.firm_revision;
1154 	*p++ = '.';
1155 	*p++ = revision.reply.firm_version;
1156 	digit.cmd.opcode = BHA_INQUIRE_REVISION_3;
1157 	bha_cmd(iot, ioh, name,
1158 	    sizeof(digit.cmd), (u_char *)&digit.cmd,
1159 	    sizeof(digit.reply), (u_char *)&digit.reply);
1160 	*p++ = digit.reply.digit;
1161 	if (revision.reply.firm_revision >= '3' ||
1162 	    (revision.reply.firm_revision == '3' &&
1163 	     revision.reply.firm_version >= '3')) {
1164 		digit.cmd.opcode = BHA_INQUIRE_REVISION_4;
1165 		bha_cmd(iot, ioh, name,
1166 		    sizeof(digit.cmd), (u_char *)&digit.cmd,
1167 		    sizeof(digit.reply), (u_char *)&digit.reply);
1168 		*p++ = digit.reply.digit;
1169 	}
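	/* Trim trailing spaces and NULs from the firmware string. */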
1170 	while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0'))
1171 		p--;
1172 	*p = '\0';
1173 
1174 	/*
1175 	 * Get the model number.
1176 	 *
1177 	 * Some boards do not handle the Inquire Board Model Number
1178 	 * command correctly, or don't give correct information.
1179 	 *
1180 	 * So, we use the Firmware Revision and Extended Setup
1181 	 * information to fixup the model number in these cases.
1182 	 *
1183 	 * The firmware version indicates:
1184 	 *
1185 	 *	5.xx	BusLogic "W" Series Host Adapters
1186 	 *		BT-948/958/958D
1187 	 *
1188 	 *	4.xx	BusLogic "C" Series Host Adapters
1189 	 *		BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
1190 	 *
1191 	 *	3.xx	BusLogic "S" Series Host Adapters
1192 	 *		BT-747S/747D/757S/757D/445S/545S/542D
1193 	 *		BT-542B/742A (revision H)
1194 	 *
1195 	 *	2.xx	BusLogic "A" Series Host Adapters
1196 	 *		BT-542B/742A (revision G and below)
1197 	 *
1198 	 *	0.xx	AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
1199 	 */
1200 	if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT &&
1201 	    sc->sc_firmware[0] < '3')
1202 		sprintf(sc->sc_model, "542B");
1203 	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
1204 	    sc->sc_firmware[0] == '2' &&
1205 	    (sc->sc_firmware[2] == '1' ||
1206 	     (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '0')))
1207 		sprintf(sc->sc_model, "742A");
1208 	else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
1209 	    sc->sc_firmware[0] == '0')
1210 		sprintf(sc->sc_model, "747A");
1211 	else {
1212 		p = sc->sc_model;
1213 		model.cmd.opcode = BHA_INQUIRE_MODEL;
1214 		model.cmd.len = sizeof(model.reply);
1215 		bha_cmd(iot, ioh, name,
1216 		    sizeof(model.cmd), (u_char *)&model.cmd,
1217 		    sizeof(model.reply), (u_char *)&model.reply);
1218 		*p++ = model.reply.id[0];
1219 		*p++ = model.reply.id[1];
1220 		*p++ = model.reply.id[2];
1221 		*p++ = model.reply.id[3];
1222 		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
1223 			p--;
1224 		*p++ = model.reply.version[0];
1225 		*p++ = model.reply.version[1];
1226 		while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
1227 			p--;
1228 		*p = '\0';
1229 	}
1230 
1231 	/* Enable round-robin scheme - appeared at firmware rev. 3.31. */
1232 	if (strcmp(sc->sc_firmware, "3.31") >= 0)
1233 		sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN;
1234 
1235 	/*
1236 	 * Determine some characteristics about our bus.
1237 	 */
1238 	if (inquire.reply.scsi_flags & BHA_SCSI_WIDE)
1239 		sc->sc_flags |= BHAF_WIDE;
1240 	if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL)
1241 		sc->sc_flags |= BHAF_DIFFERENTIAL;
1242 	if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA)
1243 		sc->sc_flags |= BHAF_ULTRA;
1244 
1245 	/*
1246 	 * Determine some characteristics of the board.
1247 	 */
1248 	sc->sc_max_dmaseg = inquire.reply.sg_limit;
1249 
1250 	/*
1251 	 * Determine the maximum CCB count and whether or not
1252 	 * tagged queueing is available on this host adapter.
1253 	 *
1254 	 * Tagged queueing works on:
1255 	 *
1256 	 *	"W" Series adapters
1257 	 *	"C" Series adapters with firmware >= 4.22
1258 	 *	"S" Series adapters with firmware >= 3.35
1259 	 *
1260 	 * The internal CCB counts are:
1261 	 *
1262 	 *	192	BT-948/958/958D
1263 	 *	100	BT-946C/956C/956CD/747C/757C/757CD/445C
1264 	 *	50	BT-545C/540CF
1265 	 *	30	BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
1266 	 */
1267 	switch (sc->sc_firmware[0]) {
1268 	case '5':
1269 		sc->sc_max_ccbs = 192;
1270 		sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1271 		break;
1272 
1273 	case '4':
1274 		if (sc->sc_model[0] == '5')
1275 			sc->sc_max_ccbs = 50;
1276 		else
1277 			sc->sc_max_ccbs = 100;
1278 		if (strcmp(sc->sc_firmware, "4.22") >= 0)
1279 			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1280 		break;
1281 
1282 	case '3':
1283 		if (strcmp(sc->sc_firmware, "3.35") >= 0)
1284 			sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1285 		/* FALLTHROUGH */
1286 
1287 	default:
1288 		sc->sc_max_ccbs = 30;
1289 	}
1290 
1291 	/*
1292 	 * Set the mailbox count to precisely the number of HW CCBs
1293 	 * available.  A mailbox isn't required while a CCB is executing,
1294 	 * but this allows us to actually enqueue up to our resource
1295 	 * limit.
1296 	 *
1297 	 * This will keep the mailbox count small on boards which don't
1298 	 * have strict round-robin (they have to scan the entire set of
1299 	 * mailboxes each time they run a command).
1300 	 */
1301 	sc->sc_mbox_count = sc->sc_max_ccbs;
1302 
1303 	/*
1304 	 * Obtain setup information.
1305 	 */
1306 	rlen = sizeof(setup.reply) +
1307 	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0);
1308 	setup.cmd.opcode = BHA_INQUIRE_SETUP;
1309 	setup.cmd.len = rlen;
1310 	bha_cmd(iot, ioh, name,
1311 	    sizeof(setup.cmd), (u_char *)&setup.cmd,
1312 	    rlen, (u_char *)&setup.reply);
1313 
1314 	printf("%s: model BT-%s, firmware %s\n", sc->sc_dev.dv_xname,
1315 	    sc->sc_model, sc->sc_firmware);
1316 
1317 	printf("%s: %d H/W CCBs", sc->sc_dev.dv_xname, sc->sc_max_ccbs);
1318 	if (setup.reply.sync_neg)
1319 		printf(", sync");
1320 	if (setup.reply.parity)
1321 		printf(", parity");
1322 	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
1323 		printf(", tagged queueing");
1324 	if (sc->sc_flags & BHAF_WIDE_LUN)
1325 		printf(", wide LUN support");
1326 	printf("\n");
1327 
1328 	/*
1329 	 * Poll targets 0 - 7.
1330 	 */
1331 	devices.cmd.opcode = BHA_INQUIRE_DEVICES;
1332 	bha_cmd(iot, ioh, name,
1333 	    sizeof(devices.cmd), (u_char *)&devices.cmd,
1334 	    sizeof(devices.reply), (u_char *)&devices.reply);
1335 
1336 	/* Count installed units. */
1337 	initial_ccbs = 0;
1338 	for (i = 0; i < 8; i++) {
1339 		for (j = 0; j < 8; j++) {
1340 			if (((devices.reply.lun_map[i] >> j) & 1) == 1)
1341 				initial_ccbs++;
1342 		}
1343 	}
1344 
1345 	/*
1346 	 * Poll targets 8 - 15 if we have a wide bus.
1347 	 */
1348 	if (sc->sc_flags & BHAF_WIDE) {
1349 		devices.cmd.opcode = BHA_INQUIRE_DEVICES_2;
1350 		bha_cmd(iot, ioh, name,
1351 		    sizeof(devices.cmd), (u_char *)&devices.cmd,
1352 		    sizeof(devices.reply), (u_char *)&devices.reply);
1353 
1354 		for (i = 0; i < 8; i++) {
1355 			for (j = 0; j < 8; j++) {
1356 				if (((devices.reply.lun_map[i] >> j) & 1) == 1)
1357 					initial_ccbs++;
1358 			}
1359 		}
1360 	}
1361 
1362 	/*
1363 	 * Double the initial CCB count, for good measure.
1364 	 */
1365 	initial_ccbs *= 2;
1366 
1367 	/*
1368 	 * Sanity check the initial CCB count; don't create more than
1369 	 * we can enqueue (sc_max_ccbs), and make sure there are some
1370 	 * at all.
1371 	 */
1372 	if (initial_ccbs > sc->sc_max_ccbs)
1373 		initial_ccbs = sc->sc_max_ccbs;
1374 	if (initial_ccbs == 0)
1375 		initial_ccbs = 2;
1376 
1377 	return (initial_ccbs);
1378 }
1379 
1380 /*
1381  * bha_init:
1382  *
1383  *	Initialize the board.
1384  */
1385 int
1386 bha_init(sc)
1387 	struct bha_softc *sc;
1388 {
1389 	char *name = sc->sc_dev.dv_xname;
1390 	struct bha_toggle toggle;
1391 	struct bha_mailbox mailbox;
1392 	struct bha_mbx_out *mbo;
1393 	struct bha_mbx_in *mbi;
1394 	int i;
1395 
1396 	/*
1397 	 * Set up the mailbox.  We always run the mailbox in round-robin.
1398 	 */
1399 	for (i = 0; i < sc->sc_mbox_count; i++) {
1400 		mbo = &sc->sc_mbo[i];
1401 		mbi = &sc->sc_mbi[i];
1402 
1403 		mbo->cmd = BHA_MBO_FREE;
1404 		BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1405 
1406 		mbi->comp_stat = BHA_MBI_FREE;
1407 		BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1408 	}
1409 
1410 	sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0];
1411 	sc->sc_tmbi = &sc->sc_mbi[0];
1412 
1413 	sc->sc_mbofull = 0;
1414 
1415 	/*
1416 	 * If the board supports strict round-robin, enable that.
1417 	 */
1418 	if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) {
1419 		toggle.cmd.opcode = BHA_ROUND_ROBIN;
1420 		toggle.cmd.enable = 1;
1421 		bha_cmd(sc->sc_iot, sc->sc_ioh, name,
1422 		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
1423 		    0, NULL);
1424 	}
1425 
1426 	/*
1427 	 * Give the mailbox to the board.
1428 	 */
1429 	mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED;
1430 	mailbox.cmd.nmbx = sc->sc_mbox_count;
1431 	ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr);
1432 	bha_cmd(sc->sc_iot, sc->sc_ioh, name,
1433 	    sizeof(mailbox.cmd), (u_char *)&mailbox.cmd,
1434 	    0, (u_char *)0);
1435 
1436 	return (0);
1437 }
1438 
1439 /*****************************************************************************
1440  * CCB execution engine
1441  *****************************************************************************/
1442 
1443 /*
1444  * bha_queue_ccb:
1445  *
1446  *	Queue a CCB to be sent to the controller, and send it if possible.
1447  */
1448 void
1449 bha_queue_ccb(sc, ccb)
1450 	struct bha_softc *sc;
1451 	struct bha_ccb *ccb;
1452 {
1453 
1454 	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
1455 	bha_start_ccbs(sc);
1456 }
1457 
1458 /*
1459  * bha_start_ccbs:
1460  *
1461  *	Send as many CCBs as we have empty mailboxes for.
1462  */
1463 void
1464 bha_start_ccbs(sc)
1465 	struct bha_softc *sc;
1466 {
1467 	bus_space_tag_t iot = sc->sc_iot;
1468 	bus_space_handle_t ioh = sc->sc_ioh;
1469 	struct bha_ccb_group *bcg;
1470 	struct bha_mbx_out *mbo;
1471 	struct bha_ccb *ccb;
1472 
1473 	mbo = sc->sc_tmbo;
1474 
1475 	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
1476 		if (sc->sc_mbofull >= sc->sc_mbox_count) {
1477 #ifdef DIAGNOSTIC
1478 			if (sc->sc_mbofull > sc->sc_mbox_count)
1479 				panic("bha_start_ccbs: mbofull > mbox_count");
1480 #endif
1481 			/*
1482 			 * No mailboxes available; attempt to collect ones
1483 			 * that have already been used.
1484 			 */
1485 			bha_collect_mbo(sc);
1486 			if (sc->sc_mbofull == sc->sc_mbox_count) {
1487 				/*
1488 				 * Still no more available; have the
1489 				 * controller interrupt us when it
1490 				 * frees one.
1491 				 */
1492 				struct bha_toggle toggle;
1493 
1494 				toggle.cmd.opcode = BHA_MBO_INTR_EN;
1495 				toggle.cmd.enable = 1;
1496 				bha_cmd(iot, ioh, sc->sc_dev.dv_xname,
1497 				    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
1498 				    0, (u_char *)0);
1499 				break;
1500 			}
1501 		}
1502 
1503 		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
1504 #ifdef BHADIAG
1505 		ccb->flags |= CCB_SENDING;
1506 #endif
1507 
1508 		/*
1509 		 * Put the CCB in the mailbox.
1510 		 */
1511 		bcg = BHA_CCB_GROUP(ccb);
1512 		ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr +
1513 		    BHA_CCB_OFFSET(ccb), mbo->ccb_addr);
1514 		if (ccb->flags & CCB_ABORT)
1515 			mbo->cmd = BHA_MBO_ABORT;
1516 		else
1517 			mbo->cmd = BHA_MBO_START;
1518 
1519 		BHA_MBO_SYNC(sc, mbo,
1520 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1521 
1522 		/* Tell the card to poll immediately. */
1523 		bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI);
1524 
1525 		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
1526 			callout_reset(&ccb->xs->xs_callout,
1527 			    mstohz(ccb->timeout), bha_timeout, ccb);
1528 
1529 		++sc->sc_mbofull;
1530 		mbo = bha_nextmbo(sc, mbo);
1531 	}
1532 
1533 	sc->sc_tmbo = mbo;
1534 }
1535 
1536 /*
1537  * bha_finish_ccbs:
1538  *
1539  *	Finalize the execution of CCBs in our incoming mailbox.
1540  */
1541 void
1542 bha_finish_ccbs(sc)
1543 	struct bha_softc *sc;
1544 {
1545 	struct bha_mbx_in *mbi;
1546 	struct bha_ccb *ccb;
1547 	int i;
1548 
1549 	mbi = sc->sc_tmbi;
1550 
1551 	BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1552 
1553 	if (mbi->comp_stat == BHA_MBI_FREE) {
1554 		for (i = 0; i < sc->sc_mbox_count; i++) {
1555 			if (mbi->comp_stat != BHA_MBI_FREE) {
1556 #ifdef BHADIAG
1557 				/*
1558 				 * This can happen in normal operation if
1559 				 * we use all mailbox slots.
1560 				 */
1561 				printf("%s: mbi not in round-robin order\n",
1562 				    sc->sc_dev.dv_xname);
1563 #endif
1564 				goto again;
1565 			}
1566 			mbi = bha_nextmbi(sc, mbi);
1567 			BHA_MBI_SYNC(sc, mbi,
1568 			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1569 		}
1570 #ifdef BHADIAGnot
1571 		printf("%s: mbi interrupt with no full mailboxes\n",
1572 		    sc->sc_dev.dv_xname);
1573 #endif
1574 		return;
1575 	}
1576 
1577  again:
1578 	do {
1579 		ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr));
1580 		if (ccb == NULL) {
1581 			printf("%s: bad mbi ccb pointer 0x%08x; skipping\n",
1582 			    sc->sc_dev.dv_xname, phystol(mbi->ccb_addr));
1583 			goto next;
1584 		}
1585 
1586 		BHA_CCB_SYNC(sc, ccb,
1587 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1588 
1589 #ifdef BHADEBUG
1590 		if (bha_debug) {
1591 			struct scsi_generic *cmd = &ccb->scsi_cmd;
1592 			printf("op=%x %x %x %x %x %x\n",
1593 			    cmd->opcode, cmd->bytes[0], cmd->bytes[1],
1594 			    cmd->bytes[2], cmd->bytes[3], cmd->bytes[4]);
1595 			printf("comp_stat %x for mbi addr = 0x%p, ",
1596 			    mbi->comp_stat, mbi);
1597 			printf("ccb addr = %p\n", ccb);
1598 		}
1599 #endif /* BHADEBUG */
1600 
1601 		switch (mbi->comp_stat) {
1602 		case BHA_MBI_OK:
1603 		case BHA_MBI_ERROR:
1604 			if ((ccb->flags & CCB_ABORT) != 0) {
1605 				/*
1606 				 * If we already started an abort, wait for it
1607 				 * to complete before clearing the CCB.  We
1608 				 * could instead just clear CCB_SENDING, but
1609 				 * what if the mailbox was already received?
1610 				 * The worst that happens here is that we clear
1611 				 * the CCB a bit later than we need to.  BFD.
1612 				 */
1613 				goto next;
1614 			}
1615 			break;
1616 
1617 		case BHA_MBI_ABORT:
1618 		case BHA_MBI_UNKNOWN:
1619 			/*
1620 			 * Even if the CCB wasn't found, we clear it anyway.
1621 			 * See preceding comment.
1622 			 */
1623 			break;
1624 
1625 		default:
1626 			printf("%s: bad mbi comp_stat %02x; skipping\n",
1627 			    sc->sc_dev.dv_xname, mbi->comp_stat);
1628 			goto next;
1629 		}
1630 
1631 		callout_stop(&ccb->xs->xs_callout);
1632 		bha_done(sc, ccb);
1633 
1634 	next:
1635 		mbi->comp_stat = BHA_MBI_FREE;
1636 		BHA_CCB_SYNC(sc, ccb,
1637 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1638 
1639 		mbi = bha_nextmbi(sc, mbi);
1640 		BHA_MBI_SYNC(sc, mbi,
1641 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1642 	} while (mbi->comp_stat != BHA_MBI_FREE);
1643 
1644 	sc->sc_tmbi = mbi;
1645 }
1646 
1647 /*****************************************************************************
1648  * Mailbox management functions.
1649  *****************************************************************************/
1650 
1651 /*
1652  * bha_create_mailbox:
1653  *
1654  *	Create the mailbox structures.  Helper function for bha_attach().
1655  *
1656  *	NOTE: The Buslogic hardware only gets one DMA address for the
1657  *	mailbox!  It expects:
1658  *
1659  *		mailbox_out[mailbox_size]
1660  *		mailbox_in[mailbox_size]
1661  */
1662 int
1663 bha_create_mailbox(sc)
1664 	struct bha_softc *sc;
1665 {
1666 	bus_dma_segment_t seg;
1667 	size_t size;
1668 	int error, rseg;
1669 
1670 	size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) +
1671 	       (sizeof(struct bha_mbx_in)  * sc->sc_mbox_count);
1672 
1673 	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg,
1674 	    1, &rseg, sc->sc_dmaflags);
1675 	if (error) {
1676 		printf("%s: unable to allocate mailboxes, error = %d\n",
1677 		    sc->sc_dev.dv_xname, error);
1678 		goto bad_0;
1679 	}
1680 
1681 	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
1682 	    (caddr_t *)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT);
1683 	if (error) {
1684 		printf("%s: unable to map mailboxes, error = %d\n",
1685 		    sc->sc_dev.dv_xname, error);
1686 		goto bad_1;
1687 	}
1688 
1689 	memset(sc->sc_mbo, 0, size);
1690 
1691 	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1692 	    sc->sc_dmaflags, &sc->sc_dmamap_mbox);
1693 	if (error) {
1694 		printf("%s: unable to create mailbox DMA map, error = %d\n",
1695 		    sc->sc_dev.dv_xname, error);
1696 		goto bad_2;
1697 	}
1698 
1699 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox,
1700 	    sc->sc_mbo, size, NULL, 0);
1701 	if (error) {
1702 		printf("%s: unable to load mailbox DMA map, error = %d\n",
1703 		    sc->sc_dev.dv_xname, error);
1704 		goto bad_3;
1705 	}
1706 
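	/*
	 * The in-coming mailboxes start immediately after the out-going
	 * mailboxes in the single allocation, as the controller expects
	 * (see the NOTE above).
	 */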
1707 	sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count);
1708 
1709 	return (0);
1710 
1711  bad_3:
1712 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox);
1713  bad_2:
1714 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_mbo, size);
1715  bad_1:
1716 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1717  bad_0:
1718 	return (error);
1719 }
1720 
1721 /*
1722  * bha_collect_mbo:
1723  *
1724  *	Garbage collect mailboxes that are no longer in use.
1725  */
1726 void
1727 bha_collect_mbo(sc)
1728 	struct bha_softc *sc;
1729 {
1730 	struct bha_mbx_out *mbo;
1731 #ifdef BHADIAG
1732 	struct bha_ccb *ccb;
1733 #endif
1734 
1735 	mbo = sc->sc_cmbo;
1736 
1737 	while (sc->sc_mbofull > 0) {
1738 		BHA_MBO_SYNC(sc, mbo,
1739 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1740 		if (mbo->cmd != BHA_MBO_FREE)
1741 			break;
1742 
1743 #ifdef BHADIAG
1744 		ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr));
1745 		ccb->flags &= ~CCB_SENDING;
1746 #endif
1747 
1748 		--sc->sc_mbofull;
1749 		mbo = bha_nextmbo(sc, mbo);
1750 	}
1751 
1752 	sc->sc_cmbo = mbo;
1753 }
1754 
1755 /*****************************************************************************
1756  * CCB management functions
1757  *****************************************************************************/
1758 
1759 __inline void bha_reset_ccb __P((struct bha_ccb *));
1760 
1761 __inline void
1762 bha_reset_ccb(ccb)
1763 	struct bha_ccb *ccb;
1764 {
1765 
1766 	ccb->flags = 0;
1767 }
1768 
1769 /*
1770  * bha_create_ccbs:
1771  *
1772  *	Create a set of CCBs.
1773  *
1774  *	We determine the target CCB count, and then keep creating them
1775  *	until we reach the target, or fail.  CCBs that are allocated
1776  *	but not "created" are left on the allocating list.
1777  */
1778 void
1779 bha_create_ccbs(sc, count)
1780 	struct bha_softc *sc;
1781 	int count;
1782 {
1783 	struct bha_ccb_group *bcg;
1784 	struct bha_ccb *ccb;
1785 	bus_dma_segment_t seg;
1786 	bus_dmamap_t ccbmap;
1787 	int target, i, error, rseg;
1788 
1789 	/*
1790 	 * If the current CCB count is already the max number we're
1791 	 * allowed to have, bail out now.
1792 	 */
1793 	if (sc->sc_cur_ccbs == sc->sc_max_ccbs)
1794 		return;
1795 
1796 	/*
1797 	 * Compute our target count, and clamp it down to the max
1798 	 * number we're allowed to have.
1799 	 */
1800 	target = sc->sc_cur_ccbs + count;
1801 	if (target > sc->sc_max_ccbs)
1802 		target = sc->sc_max_ccbs;
1803 
1804 	/*
1805 	 * If there are CCBs on the allocating list, don't allocate a
1806 	 * CCB group yet.
1807 	 */
1808 	if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL)
1809 		goto have_allocating_ccbs;
1810 
1811  allocate_group:
1812 	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
1813 	    PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT);
1814 	if (error) {
1815 		printf("%s: unable to allocate CCB group, error = %d\n",
1816 		    sc->sc_dev.dv_xname, error);
1817 		goto bad_0;
1818 	}
1819 
1820 	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
1821 	    (caddr_t *)&bcg,
1822 	    sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1823 	if (error) {
1824 		printf("%s: unable to map CCB group, error = %d\n",
1825 		    sc->sc_dev.dv_xname, error);
1826 		goto bad_1;
1827 	}
1828 
1829 	memset(bcg, 0, PAGE_SIZE);
1830 
1831 	error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
1832 	    1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap);
1833 	if (error) {
1834 		printf("%s: unable to create CCB group DMA map, error = %d\n",
1835 		    sc->sc_dev.dv_xname, error);
1836 		goto bad_2;
1837 	}
1838 
1839 	error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL,
1840 	    sc->sc_dmaflags | BUS_DMA_NOWAIT);
1841 	if (error) {
1842 		printf("%s: unable to load CCB group DMA map, error = %d\n",
1843 		    sc->sc_dev.dv_xname, error);
1844 		goto bad_3;
1845 	}
1846 
1847 	bcg->bcg_dmamap = ccbmap;
1848 
1849 #ifdef DIAGNOSTIC
1850 	if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) !=
1851 	    BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1]))
1852 		panic("bha_create_ccbs: CCB group size botch");
1853 #endif
1854 
1855 	/*
1856 	 * Add all of the CCBs in this group to the allocating list.
1857 	 */
1858 	for (i = 0; i < bha_ccbs_per_group; i++) {
1859 		ccb = &bcg->bcg_ccbs[i];
1860 		TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain);
1861 	}
1862 
1863  have_allocating_ccbs:
1864 	/*
1865 	 * Loop over the allocating list until we reach our CCB target.
1866 	 * If we run out on the list, we'll allocate another group's
1867 	 * worth.
1868 	 */
1869 	while (sc->sc_cur_ccbs < target) {
1870 		ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs);
1871 		if (ccb == NULL)
1872 			goto allocate_group;
1873 		if (bha_init_ccb(sc, ccb) != 0) {
1874 			/*
1875 			 * We were unable to initialize the CCB.
1876 			 * This is likely due to a resource shortage,
1877 			 * so bail out now.
1878 			 */
1879 			return;
1880 		}
1881 	}
1882 
1883 	/*
1884 	 * If we got here, we've reached our target!
1885 	 */
1886 	return;
1887 
1888  bad_3:
1889 	bus_dmamap_destroy(sc->sc_dmat, ccbmap);
1890  bad_2:
1891 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)bcg, PAGE_SIZE);
1892  bad_1:
1893 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1894  bad_0:
1895 	return;
1896 }
1897 
1898 /*
1899  * bha_init_ccb:
1900  *
1901  *	Initialize a CCB; helper function for bha_create_ccbs().
1902  */
1903 int
1904 bha_init_ccb(sc, ccb)
1905 	struct bha_softc *sc;
1906 	struct bha_ccb *ccb;
1907 {
1908 	struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb);
1909 	int hashnum, error;
1910 
1911 	/*
1912 	 * Create the DMA map for this CCB.
1913 	 *
1914 	 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages
1915 	 * XXX in the ISA case.  A better solution is needed.
1916 	 */
1917 	error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG,
1918 	    BHA_MAXXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags,
1919 	    &ccb->dmamap_xfer);
1920 	if (error) {
1921 		printf("%s: unable to create CCB DMA map, error = %d\n",
1922 		    sc->sc_dev.dv_xname, error);
1923 		return (error);
1924 	}
1925 
1926 	TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain);
1927 
1928 	/*
1929 	 * Put the CCB into the phystokv hash table.
1930 	 */
1931 	ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr +
1932 	    BHA_CCB_OFFSET(ccb);
1933 	hashnum = CCB_HASH(ccb->hashkey);
1934 	ccb->nexthash = sc->sc_ccbhash[hashnum];
1935 	sc->sc_ccbhash[hashnum] = ccb;
1936 	bha_reset_ccb(ccb);
1937 
1938 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
1939 	sc->sc_cur_ccbs++;
1940 
1941 	return (0);
1942 }
1943 
1944 /*
1945  * bha_get_ccb:
1946  *
1947  *	Get a CCB for the SCSI operation.  If there are none left,
1948  *	return NULL (the mid-layer tracks our openings for us).
1949  */
1950 struct bha_ccb *
1951 bha_get_ccb(sc)
1952 	struct bha_softc *sc;
1953 {
1954 	struct bha_ccb *ccb;
1955 	int s;
1956 
1957 	s = splbio();
1958 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1959 	if (ccb != NULL) {
1960 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
1961 		ccb->flags |= CCB_ALLOC;
1962 	}
1963 	splx(s);
1964 	return (ccb);
1965 }
1966 
1967 /*
1968  * bha_free_ccb:
1969  *
1970  *	Put a CCB back onto the free list.
1971  */
1972 void
1973 bha_free_ccb(sc, ccb)
1974 	struct bha_softc *sc;
1975 	struct bha_ccb *ccb;
1976 {
1977 	int s;
1978 
1979 	s = splbio();
1980 	bha_reset_ccb(ccb);
1981 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
1982 	splx(s);
1983 }
1984 
1985 /*
1986  * bha_ccb_phys_kv:
1987  *
1988  *	Given a CCB DMA address, locate the CCB in kernel virtual space.
1989  */
1990 struct bha_ccb *
1991 bha_ccb_phys_kv(sc, ccb_phys)
1992 	struct bha_softc *sc;
1993 	bus_addr_t ccb_phys;
1994 {
1995 	int hashnum = CCB_HASH(ccb_phys);
1996 	struct bha_ccb *ccb = sc->sc_ccbhash[hashnum];
1997 
1998 	while (ccb) {
1999 		if (ccb->hashkey == ccb_phys)
2000 			break;
2001 		ccb = ccb->nexthash;
2002 	}
2003 	return (ccb);
2004 }
2005