/*	$NetBSD: aic7xxx_osm.c,v 1.37 2010/02/24 22:37:57 dyoung Exp $	*/

/*
 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.37 2010/02/24 22:37:57 dyoung Exp $");

#include <dev/ic/aic7xxx_osm.h>
#include <dev/ic/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


static void	ahc_action(struct scsipi_channel *chan,
			   scsipi_adapter_req_t req, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments);
static int	ahc_poll(struct ahc_softc *ahc, int wait);
static void	ahc_setup_data(struct ahc_softc *ahc,
			       struct scsipi_xfer *xs, struct scb *scb);
static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
static int	ahc_ioctl(struct scsipi_channel *channel, u_long cmd,
			  void *addr, int flag, struct proc *p);

static bool	ahc_pmf_suspend(device_t, const pmf_qual_t *);
static bool	ahc_pmf_resume(device_t, const pmf_qual_t *);
static bool	ahc_pmf_shutdown(device_t, int);


/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	u_long	s;
	int i;
	char ahc_info[256];

	LIST_INIT(&ahc->pending_scbs);
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);

	ahc_lock(ahc, &s);

	ahc->sc_adapter.adapt_dev = ahc->sc_dev;
	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

	ahc->sc_adapter.adapt_openings = ahc->scb_data->numscbs - 1;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;
	ahc->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	if (ahc->features & AHC_TWIN) {
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info, sizeof(ahc_info));
	printf("%s: %s\n", device_xname(ahc->sc_dev), ahc_info);

	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
	} else {
		if (ahc->features & AHC_TWIN)
			ahc->sc_child = config_found(ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
		ahc->sc_child_b = config_found(ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
	}

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	if (!pmf_device_register1(ahc->sc_dev,
	    ahc_pmf_suspend, ahc_pmf_resume, ahc_pmf_shutdown))
		aprint_error_dev(ahc->sc_dev,
		    "couldn't establish power handler\n");

	ahc_unlock(ahc, &s);
	return (1);
}

/*
 * XXX we should call the real suspend and resume functions here,
 *     but the pmf(9) support in the cardbus backend is still untested
 */

static bool
ahc_pmf_suspend(device_t dev, const pmf_qual_t *qual)
{
	struct ahc_softc *sc = device_private(dev);
#if 0
	return (ahc_suspend(sc) == 0);
#else
	ahc_shutdown(sc);
	return true;
#endif
}

static bool
ahc_pmf_resume(device_t dev, const pmf_qual_t *qual)
{
#if 0
	struct ahc_softc *sc = device_private(dev);

	return (ahc_resume(sc) == 0);
#else
	return true;
#endif
}

static bool
ahc_pmf_shutdown(device_t dev, int howto)
{
	struct ahc_softc *sc = device_private(dev);

	/* Disable all interrupt sources by resetting the controller */
	ahc_shutdown(sc);

	return true;
}

/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
	struct	ahc_softc *ahc;

	ahc = arg;
	ahc_intr(ahc);
}

/*
 * We have an SCB which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (!(list_scb->xs->xs_control & XS_CTL_POLL)) {
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
				    (list_scb->xs->timeout / 1000) * hz :
				    (list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		       ahc_name(ahc), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		       ahc_get_sense_buf(ahc, scb),
		       sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	if (scb->flags & SCB_FREEZE_QUEUE) {
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}

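/*
 * Adapter ioctl handler.  Only SCBUSIORESET (reset the SCSI bus)
 * is currently supported.
 */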
static int
ahc_ioctl(struct scsipi_channel *channel, u_long cmd, void *addr,
    int flag, struct proc *p)
{
	struct ahc_softc *ahc;
	int s, ret = ENOTTY;

	ahc = device_private(channel->chan_adapter->adapt_dev);

	switch (cmd) {
	case SCBUSIORESET:
		s = splbio();
		ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
		    TRUE);
		splx(s);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

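/*
 * Dispatch a request from the scsipi layer: run a transfer,
 * grow the SCB pool, or set the transfer mode for a target.
 */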
static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahc_softc *ahc;
	int s;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	ahc = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	  {
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_long ss;

		xs = arg;
		periph = xs->xs_periph;

		target_id = periph->periph_target;
		our_id = ahc->our_id;

		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &ss);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			xs->error = XS_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &ss);
			scsipi_done(xs);
			return;
		}
		ahc_unlock(ahc, &ss);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0);
		}

		ahc_setup_data(ahc, xs, scb);

		break;
	  }
	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
#endif
		chan->chan_adapter->adapt_openings += ahc_alloc_scbs(ahc);
		if (ahc->scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	    {
		struct scsipi_xfer_mode *xm = arg;
		struct ahc_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		char channel;
		u_int ppr_options = 0, period, offset;
		struct ahc_syncrate *syncrate;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';
		s = splbio();
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
		    &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahc->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahc->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahc->user_discenable & devinfo.target_mask);
			ahc->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahc->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
		    AHC_SYNCRATE_MAX);
		ahc_validate_offset(ahc, NULL, syncrate, &offset,
		    width, ROLE_UNKNOWN);

		if (offset == 0) {
			period = 0;
			ppr_options = 0;
		}

		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
		    ppr_options, AHC_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	    }
	}

	return;
}

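/*
 * Finish setting up an SCB once its data (if any) has been mapped:
 * build the SG list, fill in the hardware SCB and queue it to the
 * controller, polling for completion when interrupts can't be used.
 */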
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct	scb *scb;
	struct scsipi_xfer *xs;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;

	u_int	mask;
	u_long	s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = 0;
	xs->status = 0;
	xs->xs_status = 0;
	ahc = device_private(
	    xs->xs_periph->periph_channel->chan_adapter->adapt_dev);

	if (nsegments != 0) {
		struct ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & AHC_SG_HIGH_ADDR_MASK);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);

		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if (xs->xs_tag_type)
		scb->hscb->control |= xs->xs_tag_type;

#if 1	/* This looks like it makes sense at first, but it can loop */
	if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
	     && tinfo->goal.offset == 0
	     && tinfo->goal.ppr_options == 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else
#endif
	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

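	/*
	 * Convert the timeout from milliseconds to ticks; for large
	 * timeouts, divide before multiplying to avoid overflow.
	 */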
	if (!(xs->xs_control & XS_CTL_POLL)) {
		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
			      ahc_timeout, scb);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->xs_control & XS_CTL_POLL)) {
		ahc_unlock(ahc, &s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */

	SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->xs_control & XS_CTL_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->xs_status & XS_STS_DONE));
	ahc_unlock(ahc, &s);

	return;
}

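/*
 * Poll the controller for a pending interrupt for up to `wait'
 * milliseconds, then service it.  Returns EIO on timeout.
 */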
static int
ahc_poll(struct ahc_softc *ahc, int wait)
{
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr(ahc);
	return (0);
}

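/*
 * Copy the CDB into the hardware SCB and, if the transfer carries
 * data, load the DMA map before handing the SCB to ahc_execute_scb().
 */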
static void
ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		u_long s;

		ahc_set_transaction_status(scb, CAM_REQ_INVALID);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		scsipi_done(xs);
		return;
	}

	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
					scb->dmamap, xs->data,
					xs->datalen, NULL,
					((xs->xs_control & XS_CTL_NOSLEEP) ?
					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
					BUS_DMA_STREAMING |
					((xs->xs_control & XS_CTL_DATA_IN) ?
					 BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			       "= %d\n",
			       ahc_name(ahc), error);
#endif
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		ahc_execute_scb(scb,
				scb->dmamap->dm_segs,
				scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

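/*
 * Mark an SCB as the recovery SCB: freeze the channel(s) and stop
 * the timeouts of all other pending SCBs while we recover.
 */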
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
{
	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		scsipi_channel_freeze(&ahc->sc_channel, 1);
		if (ahc->features & AHC_TWIN)
			scsipi_channel_freeze(&ahc->sc_channel_b, 1);

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them.  We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			callout_stop(&list_scb->xs->xs_callout);
		}
	}
}

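/*
 * Handle the expiry of a command's timeout: attempt recovery with a
 * bus device reset, escalating to a full bus reset if recovery has
 * already been tried for this SCB.
 */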
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	u_long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = arg;
	ahc = scb->ahc_softc;

	ahc_lock(ahc, &s);

	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				newtimeout = MAX(active_scb->xs->timeout,
						 scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
				    (newtimeout / 1000) * hz :
				    (newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			callout_reset(&active_scb->xs->xs_callout,
				      2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				callout_reset(&scb->xs->xs_callout, 2 * hz,
					      ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
					"Flags = 0x%x\n", scb->hscb->tag,
					scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}

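/*
 * Record whether tagged queuing is enabled for a device on behalf
 * of the core driver.
 */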
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
			    devinfo->target, &tstate);

	if (enable)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;
}

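/*
 * Allocate the platform-specific per-controller data.
 */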
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	if (sizeof(struct ahc_platform_data) == 0)
		return 0;
	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
				    M_NOWAIT);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

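/*
 * Release the platform-specific per-controller data.
 */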
void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) == 0)
		return;
	free(ahc->platform_data, M_DEVBUF);
}

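/*
 * Compare two softcs for sorting; no platform-specific ordering
 * is imposed here.
 */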
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

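/*
 * Detach the child scsibus instances, deregister the power handler
 * and release the controller's resources.
 */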
int
ahc_detach(struct ahc_softc *ahc, int flags)
{
	int rv = 0;

	ahc_intr_enable(ahc, FALSE);
	if (ahc->sc_child != NULL)
		rv = config_detach(ahc->sc_child, flags);
	if (rv == 0 && ahc->sc_child_b != NULL)
		rv = config_detach(ahc->sc_child_b, flags);

	pmf_device_deregister(ahc->sc_dev);
	ahc_free(ahc);

	return (rv);
}


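/*
 * Translate an asynchronous event notification (transfer negotiation
 * complete, bus reset, ...) from the core driver into the
 * corresponding scsipi event.
 */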
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
			    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
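		/* FALLTHROUGH */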
	case AC_SENT_BDR:
	default:
		break;
	}
}
1156