/*	$OpenBSD: aic7xxx_openbsd.c,v 1.42 2009/02/16 21:19:06 miod Exp $	*/
/*	$NetBSD: aic7xxx_osm.c,v 1.14 2003/11/02 11:07:44 wiz Exp $	*/

/*
 * Bus independent OpenBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Steve Murphree, Jr.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <dev/ic/aic7xxx_openbsd.h>
#include <dev/ic/aic7xxx_inline.h>

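/*
 * AHC_TMODE_ENABLE is (per the FreeBSD OSM it was carried over from) a
 * bitmask of controller units that are permitted to operate in target
 * mode; the default of 0 leaves every controller initiator-only, which
 * is all this OpenBSD shim supports anyway.
 */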
#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


int	ahc_action(struct scsi_xfer *);
int	ahc_execute_scb(void *, bus_dma_segment_t *, int);
int	ahc_poll(struct ahc_softc *, int);
int	ahc_setup_data(struct ahc_softc *, struct scsi_xfer *, struct scb *);

void	ahc_minphys(struct buf *, struct scsi_link *);
void	ahc_adapter_req_set_xfer_mode(struct ahc_softc *, struct scb *);


struct cfdriver ahc_cd = {
	NULL, "ahc", DV_DULL
};

static struct scsi_adapter ahc_switch =
{
	ahc_action,
	ahc_minphys,
	0,
	0,
};
/* The structure below provides a default device struct for our scsi_link. */
static struct scsi_device ahc_dev =
{
	NULL, /* Use default error handler */
	NULL, /* have a queue, served by this */
	NULL, /* have no async handler */
	NULL, /* Use default 'done' routine */
};
80 
81 /*
82  * Attach all the sub-devices we can find
83  */
84 int
85 ahc_attach(struct ahc_softc *ahc)
86 {
87 	struct scsibus_attach_args saa;
88 	int s;
89 
90         s = splbio();
91 
92 	/*
93 	 * fill in the prototype scsi_links.
94 	 */
95 	ahc->sc_channel.adapter_target = ahc->our_id;
96 	if (ahc->features & AHC_WIDE)
97 		ahc->sc_channel.adapter_buswidth = 16;
98 	ahc->sc_channel.adapter_softc = ahc;
99 	ahc->sc_channel.adapter = &ahc_switch;
100 	ahc->sc_channel.openings = 16;
101 	ahc->sc_channel.device = &ahc_dev;
102 
103 	if (ahc->features & AHC_TWIN) {
104 		/* Configure the second scsi bus */
105 		ahc->sc_channel_b = ahc->sc_channel;
106 		ahc->sc_channel_b.adapter_target = ahc->our_id_b;
107 	}
108 
109 #ifndef DEBUG
110 	if (bootverbose) {
111 		char ahc_info[256];
112 		ahc_controller_info(ahc, ahc_info, sizeof ahc_info);
113 		printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);
114 	}
115 #endif
116 
117 	ahc_intr_enable(ahc, TRUE);
118 
119 	if (ahc->flags & AHC_RESET_BUS_A)
120 		ahc_reset_channel(ahc, 'A', TRUE);
121 	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
122 		ahc_reset_channel(ahc, 'B', TRUE);
123 
124 	bzero(&saa, sizeof(saa));
125 	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
126 		saa.saa_sc_link = &ahc->sc_channel;
127 		ahc->sc_child = config_found((void *)&ahc->sc_dev,
128 		    &saa, scsiprint);
129 		if (ahc->features & AHC_TWIN) {
130 			saa.saa_sc_link = &ahc->sc_channel_b;
131 			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
132 			    &saa, scsiprint);
133 		}
134 	} else {
135 		if (ahc->features & AHC_TWIN) {
136 			saa.saa_sc_link = &ahc->sc_channel_b;
137 			ahc->sc_child = config_found((void *)&ahc->sc_dev,
138 			    &saa, scsiprint);
139 		}
140 		saa.saa_sc_link = &ahc->sc_channel;
141 		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
142 		    &saa, scsiprint);
143 	}
144 
145 	splx(s);
146 	return (1);
147 }

/*
 * Catch an interrupt from the adapter
 */
int
ahc_platform_intr(void *arg)
{
	struct	ahc_softc *ahc = (struct ahc_softc *)arg;

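	/*
	 * The controller DMAs completion status back into the hardware
	 * SCB array; sync that map so ahc_intr() sees the device's
	 * writes before it walks the completion queue.
	 */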
	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsi_xfer *xs = scb->xs;
	int s;

	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	timeout_del(&xs->stimeout);

	if (xs->datalen) {
		int op;

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/* Translate the CAM status code to a SCSI error code. */
	switch (xs->error) {
	case CAM_SCSI_STATUS_ERROR:
	case CAM_REQ_INPROG:
	case CAM_REQ_CMP:
		switch (xs->status) {
		case SCSI_TASKSET_FULL:
			/* SCSI Layer won't requeue, so we force infinite
			 * retries until queue space is available. XS_BUSY
			 * is dangerous because if the NOSLEEP flag is set
			 * it can cause the I/O to return EIO. XS_BUSY code
			 * falls through to XS_TIMEOUT anyway.
			 */
			xs->error = XS_TIMEOUT;
			xs->retries++;
			break;
		case SCSI_BUSY:
			xs->error = XS_BUSY;
			break;
		case SCSI_CHECK:
		case SCSI_TERMINATED:
			if ((scb->flags & SCB_SENSE) == 0) {
				/* CHECK on CHECK? */
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;
			break;
		default:
			xs->error = XS_NOERROR;
			break;
		}
		break;
	case CAM_BUSY:
		xs->error = XS_BUSY;
		break;
	case CAM_CMD_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	case CAM_BDR_SENT:
	case CAM_SCSI_BUS_RESET:
		xs->error = XS_RESET;
		break;
	case CAM_REQUEUE_REQ:
		xs->error = XS_TIMEOUT;
		xs->retries++;
		break;
	case CAM_SEL_TIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state. */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&xs->sense, 0, sizeof(struct scsi_sense_data));
		memcpy(&xs->sense, ahc_get_sense_buf(ahc, scb),
		    aic_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK);
		xs->error = XS_SENSE;
	}

	s = splbio();
	ahc_free_scb(ahc, scb);
	xs->flags |= ITSDONE;
	scsi_done(xs);
	splx(s);
}

void
ahc_minphys(struct buf *bp, struct scsi_link *sl)
{
	/*
	 * Even though the card can transfer up to 16megs per command
	 * we are limited by the number of segments in the dma segment
	 * list that we can hold.  The worst case is that all pages are
	 * discontinuous physically, hence the "page per segment" limit
	 * enforced here.
	 */
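	/*
	 * For example, assuming AHC_NSEG were 32 and PAGE_SIZE 4KB
	 * (see aic7xxx_openbsd.h for the real values), transfers would
	 * be capped at 31 * 4KB = 124KB per command.
	 */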
	if (bp->b_bcount > ((AHC_NSEG - 1) * PAGE_SIZE)) {
		bp->b_bcount = ((AHC_NSEG - 1) * PAGE_SIZE);
	}
	minphys(bp);
}

int
ahc_action(struct scsi_xfer *xs)
{
	struct ahc_softc *ahc;
	struct scb *scb;
	struct hardware_scb *hscb;
	u_int target_id;
	u_int our_id;
	int s;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("ahc_action\n"));
	ahc = (struct ahc_softc *)xs->sc_link->adapter_softc;

	target_id = xs->sc_link->target;
	our_id = SCSI_SCSI_ID(ahc, xs->sc_link);

	/*
	 * get an scb to use.
	 */
	s = splbio();
	if ((scb = ahc_get_scb(ahc)) == NULL) {
		splx(s);
		return (NO_CCB);
	}
	splx(s);

	hscb = scb->hscb;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("start scb(%p)\n", scb));
	scb->xs = xs;
	timeout_set(&xs->stimeout, ahc_timeout, scb);

	/*
	 * Put all the arguments for the xfer in the scb
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahc, xs->sc_link, target_id, our_id);
	hscb->lun = xs->sc_link->lun;
	if (xs->flags & SCSI_RESET) {
		hscb->cdb_len = 0;
		scb->flags |= SCB_DEVICE_RESET;
		hscb->control |= MK_MESSAGE;
		return (ahc_execute_scb(scb, NULL, 0));
	}

	return (ahc_setup_data(ahc, xs, scb));
}

int
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct	scb *scb;
	struct	scsi_xfer *xs;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;

	u_int	mask;
	int	s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = CAM_REQ_INPROG;
	xs->status = 0;
	ahc = (struct ahc_softc *)xs->sc_link->adapter_softc;

	if (nsegments != 0) {
		struct	ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
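		/*
		 * Each ahc_dma_seg carries a 32-bit address in 'addr'; on
		 * controllers with extended (39-bit) addressing the extra
		 * address bits are folded into the top seven bits of the
		 * length word, which is what the 0x7F000000 mask below
		 * extracts from ds_addr.
		 */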
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = aic_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = aic_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = aic_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);

		sg--;
		sg->len |= aic_htole32(AHC_DMA_LAST_SEG);

		bus_dmamap_sync(ahc->parent_dmat, scb->sg_map->sg_dmamap,
		    0, scb->sg_map->sg_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = aic_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	s = splbio();

	/*
	 * Last chance to check whether this SCB needs to
	 * be aborted.
	 */
	if (xs->flags & ITSDONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);

		ahc_free_scb(ahc, scb);
		splx(s);
		return (COMPLETE);
	}

	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= TAG_ENB;

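	/*
	 * Flush our updates to the hardware SCB (control, scsirate,
	 * scsioffset) out to memory before the sequencer can DMA it.
	 */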
	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	if (!(xs->flags & SCSI_POLL))
		timeout_add_msec(&xs->stimeout, xs->timeout);

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 *
	 * This really should not be of any
	 * concern, as we take care to avoid this
	 * in ahc_done().  XXX smurph
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			if (xs->flags & SCSI_POLL)
				goto poll;
			else {
				splx(s);
				return (SUCCESSFULLY_QUEUED);
			}
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->flags & SCSI_POLL)) {
		if (ahc->inited_target[xs->sc_link->target] == 0) {
			struct	ahc_devinfo devinfo;

			ahc_adapter_req_set_xfer_mode(ahc, scb);
			ahc_scb_devinfo(ahc, &devinfo, scb);
			ahc_update_neg_request(ahc, &devinfo, tstate, tinfo,
			    AHC_NEG_IF_NON_ASYNC);

			ahc->inited_target[xs->sc_link->target] = 1;
		}
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */
poll:
	SC_DEBUG(xs->sc_link, SDEV_DB3, ("cmd_poll\n"));

	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->flags & SCSI_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->flags & ITSDONE));

	splx(s);
	return (COMPLETE);
}

int
ahc_poll(struct ahc_softc *ahc, int wait)
{
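	/*
	 * 'wait' is interpreted in milliseconds: each iteration below
	 * delays for 1ms (DELAY(1000) microseconds) before re-sampling
	 * INTSTAT for a pending interrupt.
	 */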
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

int
ahc_setup_data(struct ahc_softc *ahc, struct scsi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;
	int s;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;
	xs->error = CAM_REQ_INPROG;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		s = splbio();
		ahc_free_scb(ahc, scb);
		xs->error = XS_DRIVER_STUFFUP;
		xs->flags |= ITSDONE;
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}

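	/*
	 * CDBs of 12 bytes or less fit directly in the hardware SCB's
	 * shared data area; larger CDBs are kept in the separate cdb32
	 * array and SCB_CDB32_PTR marks the SCB so the queueing path
	 * hands the sequencer a pointer to it instead.
	 */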
	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
					scb->dmamap, xs->data,
					xs->datalen, NULL,
					(xs->flags & SCSI_NOSLEEP) ?
					BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			       "= %d\n",
			       ahc_name(ahc), error);
#endif
			s = splbio();
			ahc_free_scb(ahc, scb);
			splx(s);
			return (TRY_AGAIN_LATER);	/* XXX fvdl */
		}
		error = ahc_execute_scb(scb,
					scb->dmamap->dm_segs,
					scb->dmamap->dm_nsegs);
		return error;
	} else {
		return ahc_execute_scb(scb, NULL, 0);
	}
}

void
ahc_timeout(void *arg)
{
	struct	scb *scb, *list_scb;
	struct	ahc_softc *ahc;
	int	s;
	int	found;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->xs->sc_link->adapter_softc;

	s = splbio();

#ifdef AHC_DEBUG
	printf("%s: SCB %d timed out\n", ahc_name(ahc), scb->hscb->tag);
	ahc_dump_card_state(ahc);
#endif

	ahc_pause(ahc);

	if (scb->flags & SCB_ACTIVE) {
		channel = SCB_GET_CHANNEL(ahc, scb);
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. They're about to be
		 * aborted so no need for them to timeout.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (list_scb->xs)
				timeout_del(&list_scb->xs->stimeout);
		}
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
#ifdef AHC_DEBUG
		printf("%s: Issued Channel %c Bus Reset %d SCBs aborted\n",
		    ahc_name(ahc), channel, found);
#endif
	}

	ahc_unpause(ahc);
	splx(s);
}


void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int alg)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
			    devinfo->target, &tstate);

	/* XXXX Need to check quirks before doing this! XXXX */

	switch (alg) {
	case AHC_QUEUE_BASIC:
	case AHC_QUEUE_TAGGED:
		tstate->tagenable |= devinfo->target_mask;
		break;
	case AHC_QUEUE_NONE:
		tstate->tagenable &= ~devinfo->target_mask;
		break;
	}
}

int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	if (sizeof(struct ahc_platform_data) > 0) {
		ahc->platform_data = malloc(sizeof(struct ahc_platform_data),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ahc->platform_data == NULL)
			return (ENOMEM);
	}

	return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) > 0)
		free(ahc->platform_data, M_DEVBUF);
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
		ac_code code, void *opt_arg)
{
	/* Nothing to do here for OpenBSD */
}

void
ahc_adapter_req_set_xfer_mode(struct ahc_softc *ahc, struct scb *scb)
{
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	struct ahc_syncrate *syncrate;
	struct ahc_devinfo devinfo;
	u_int16_t quirks;
	u_int width, ppr_options, period, offset;
	int s;

	s = splbio();

	ahc_scb_devinfo(ahc, &devinfo, scb);
	quirks = scb->xs->sc_link->quirks;
	tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
	    devinfo.our_scsiid, devinfo.target, &tstate);

	tstate->discenable |= (ahc->user_discenable & devinfo.target_mask);

	if (quirks & SDEV_NOTAGS)
		tstate->tagenable &= ~devinfo.target_mask;
	else if (ahc->user_tagenable & devinfo.target_mask)
		tstate->tagenable |= devinfo.target_mask;

	if (quirks & SDEV_NOWIDE)
		width = MSG_EXT_WDTR_BUS_8_BIT;
	else
		width = MSG_EXT_WDTR_BUS_16_BIT;

	ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
	if (width > tinfo->user.width)
		width = tinfo->user.width;
	ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

	if (quirks & SDEV_NOSYNC) {
		period = 0;
		offset = 0;
	} else {
		period = tinfo->user.period;
		offset = tinfo->user.offset;
	}

	/* XXX Look at saved INQUIRY flags for PPR capabilities XXX */
	ppr_options = tinfo->user.ppr_options;
	/* XXX Other reasons to avoid ppr? XXX */
	if (width < MSG_EXT_WDTR_BUS_16_BIT)
		ppr_options = 0;

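	/*
	 * Packetized (IU) transfers only make sense with disconnection
	 * and tagged queueing enabled, so drop the IU option if either
	 * has been disabled for this target.
	 */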
	if ((tstate->discenable & devinfo.target_mask) == 0 ||
	    (tstate->tagenable & devinfo.target_mask) == 0)
		ppr_options &= ~MSG_EXT_PPR_PROT_IUS;

	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
	    AHC_SYNCRATE_MAX);
	ahc_validate_offset(ahc, NULL, syncrate, &offset, width,
	    ROLE_UNKNOWN);

	if (offset == 0) {
		period = 0;
		ppr_options = 0;
	}

	if (ppr_options != 0 && tinfo->user.transport_version >= 3) {
		tinfo->goal.transport_version = tinfo->user.transport_version;
		tinfo->curr.transport_version = tinfo->user.transport_version;
	}

	ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options,
	    AHC_TRANS_GOAL, FALSE);

	splx(s);
}