xref: /netbsd/sys/dev/scsipi/scsipi_base.c (revision 6550d01e)
1 /*	$NetBSD: scsipi_base.c,v 1.155 2010/11/13 13:52:11 uebayasi Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.155 2010/11/13 13:52:11 uebayasi Exp $");
35 
36 #include "opt_scsi.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/buf.h>
42 #include <sys/uio.h>
43 #include <sys/malloc.h>
44 #include <sys/pool.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/proc.h>
48 #include <sys/kthread.h>
49 #include <sys/hash.h>
50 
51 #include <dev/scsipi/scsi_spc.h>
52 #include <dev/scsipi/scsipi_all.h>
53 #include <dev/scsipi/scsipi_disk.h>
54 #include <dev/scsipi/scsipiconf.h>
55 #include <dev/scsipi/scsipi_base.h>
56 
57 #include <dev/scsipi/scsi_all.h>
58 #include <dev/scsipi/scsi_message.h>
59 
60 #include <machine/param.h>
61 
62 static int	scsipi_complete(struct scsipi_xfer *);
63 static void	scsipi_request_sense(struct scsipi_xfer *);
64 static int	scsipi_enqueue(struct scsipi_xfer *);
65 static void	scsipi_run_queue(struct scsipi_channel *chan);
66 
67 static void	scsipi_completion_thread(void *);
68 
69 static void	scsipi_get_tag(struct scsipi_xfer *);
70 static void	scsipi_put_tag(struct scsipi_xfer *);
71 
72 static int	scsipi_get_resource(struct scsipi_channel *);
73 static void	scsipi_put_resource(struct scsipi_channel *);
74 
75 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
76 		    struct scsipi_max_openings *);
77 static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
78 		    struct scsipi_xfer_mode *);
79 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
80 
81 static struct pool scsipi_xfer_pool;
82 
83 /*
84  * scsipi_init:
85  *
86  *	Called when a scsibus or atapibus is attached to the system
87  *	to initialize shared data structures.
88  */
89 void
90 scsipi_init(void)
91 {
92 	static int scsipi_init_done;
93 
94 	if (scsipi_init_done)
95 		return;
96 	scsipi_init_done = 1;
97 
98 	/* Initialize the scsipi_xfer pool. */
99 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
100 	    0, 0, "scxspl", NULL, IPL_BIO);
101 	if (pool_prime(&scsipi_xfer_pool,
102 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
103 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
104 	}
105 }
106 
107 /*
108  * scsipi_channel_init:
109  *
110  *	Initialize a scsipi_channel when it is attached.
111  */
112 int
113 scsipi_channel_init(struct scsipi_channel *chan)
114 {
115 	struct scsipi_adapter *adapt = chan->chan_adapter;
116 	int i;
117 
118 	/* Initialize shared data. */
119 	scsipi_init();
120 
121 	/* Initialize the queues. */
122 	TAILQ_INIT(&chan->chan_queue);
123 	TAILQ_INIT(&chan->chan_complete);
124 
125 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
126 		LIST_INIT(&chan->chan_periphtab[i]);
127 
128 	/*
129 	 * Create the asynchronous completion thread.
130 	 */
131 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
132 	    &chan->chan_thread, "%s", chan->chan_name)) {
133 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
134 		    "channel %d\n", chan->chan_channel);
135 		panic("scsipi_channel_init");
136 	}
137 
138 	return (0);
139 }
140 
141 /*
142  * scsipi_channel_shutdown:
143  *
144  *	Shutdown a scsipi_channel.
145  */
146 void
147 scsipi_channel_shutdown(struct scsipi_channel *chan)
148 {
149 
150 	/*
151 	 * Shut down the completion thread.
152 	 */
153 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
154 	wakeup(&chan->chan_complete);
155 
156 	/*
157 	 * Now wait for the thread to exit.
158 	 */
159 	while (chan->chan_thread != NULL)
160 		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
161 }
162 
163 static uint32_t
164 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
165 {
166 	uint32_t hash;
167 
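	/*
	 * Chain the two hash32_buf() calls so the hash of the target
	 * ID seeds the hash of the LUN, folding both into one bucket
	 * index.
	 */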
168 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
169 	hash = hash32_buf(&l, sizeof(l), hash);
170 
171 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
172 }
173 
174 /*
175  * scsipi_insert_periph:
176  *
177  *	Insert a periph into the channel.
178  */
179 void
180 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
181 {
182 	uint32_t hash;
183 	int s;
184 
185 	hash = scsipi_chan_periph_hash(periph->periph_target,
186 	    periph->periph_lun);
187 
188 	s = splbio();
189 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
190 	splx(s);
191 }
192 
193 /*
194  * scsipi_remove_periph:
195  *
196  *	Remove a periph from the channel.
197  */
198 void
199 scsipi_remove_periph(struct scsipi_channel *chan,
200     struct scsipi_periph *periph)
201 {
202 	int s;
203 
204 	s = splbio();
205 	LIST_REMOVE(periph, periph_hash);
206 	splx(s);
207 }
208 
209 /*
210  * scsipi_lookup_periph:
211  *
212  *	Lookup a periph on the specified channel.
213  */
214 struct scsipi_periph *
215 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
216 {
217 	struct scsipi_periph *periph;
218 	uint32_t hash;
219 	int s;
220 
221 	if (target >= chan->chan_ntargets ||
222 	    lun >= chan->chan_nluns)
223 		return (NULL);
224 
225 	hash = scsipi_chan_periph_hash(target, lun);
226 
227 	s = splbio();
228 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
229 		if (periph->periph_target == target &&
230 		    periph->periph_lun == lun)
231 			break;
232 	}
233 	splx(s);
234 
235 	return (periph);
236 }
237 
238 /*
239  * scsipi_get_resource:
240  *
241  *	Allocate a single xfer `resource' from the channel.
242  *
243  *	NOTE: Must be called at splbio().
244  */
245 static int
246 scsipi_get_resource(struct scsipi_channel *chan)
247 {
248 	struct scsipi_adapter *adapt = chan->chan_adapter;
249 
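	/*
	 * Openings are accounted per-channel when the adapter sets
	 * SCSIPI_CHAN_OPENINGS; otherwise they are shared among all
	 * of the adapter's channels.
	 */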
250 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
251 		if (chan->chan_openings > 0) {
252 			chan->chan_openings--;
253 			return (1);
254 		}
255 		return (0);
256 	}
257 
258 	if (adapt->adapt_openings > 0) {
259 		adapt->adapt_openings--;
260 		return (1);
261 	}
262 	return (0);
263 }
264 
265 /*
266  * scsipi_grow_resources:
267  *
268  *	Attempt to grow resources for a channel.  If this succeeds,
269  *	we allocate one for our caller.
270  *
271  *	NOTE: Must be called at splbio().
272  */
273 static inline int
274 scsipi_grow_resources(struct scsipi_channel *chan)
275 {
276 
277 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
278 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
279 			scsipi_adapter_request(chan,
280 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
281 			return (scsipi_get_resource(chan));
282 		}
283 		/*
284 		 * Ask the channel thread to do it.  It'll have to thaw
285 		 * the queue.
286 		 */
287 		scsipi_channel_freeze(chan, 1);
288 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
289 		wakeup(&chan->chan_complete);
290 		return (0);
291 	}
292 
293 	return (0);
294 }
295 
296 /*
297  * scsipi_put_resource:
298  *
299  *	Free a single xfer `resource' to the channel.
300  *
301  *	NOTE: Must be called at splbio().
302  */
303 static void
304 scsipi_put_resource(struct scsipi_channel *chan)
305 {
306 	struct scsipi_adapter *adapt = chan->chan_adapter;
307 
308 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
309 		chan->chan_openings++;
310 	else
311 		adapt->adapt_openings++;
312 }
313 
314 /*
315  * scsipi_get_tag:
316  *
317  *	Get a tag ID for the specified xfer.
318  *
319  *	NOTE: Must be called at splbio().
320  */
321 static void
322 scsipi_get_tag(struct scsipi_xfer *xs)
323 {
324 	struct scsipi_periph *periph = xs->xs_periph;
325 	int bit, tag;
326 	u_int word;
327 
328 	bit = 0;	/* XXX gcc */
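	/*
	 * Scan the periph's free-tag bitmap for a set bit; each word
	 * tracks 32 tags.  ffs() returns a 1-based bit index, or 0 if
	 * the word has no free tags.
	 */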
329 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
330 		bit = ffs(periph->periph_freetags[word]);
331 		if (bit != 0)
332 			break;
333 	}
334 #ifdef DIAGNOSTIC
335 	if (word == PERIPH_NTAGWORDS) {
336 		scsipi_printaddr(periph);
337 		printf("no free tags\n");
338 		panic("scsipi_get_tag");
339 	}
340 #endif
341 
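	/*
	 * Convert ffs()'s 1-based result to a bit number, claim the
	 * tag in the bitmap, and encode the tag ID as word * 32 + bit.
	 */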
342 	bit -= 1;
343 	periph->periph_freetags[word] &= ~(1 << bit);
344 	tag = (word << 5) | bit;
345 
346 	/* XXX Should eventually disallow this completely. */
347 	if (tag >= periph->periph_openings) {
348 		scsipi_printaddr(periph);
349 		printf("WARNING: tag %d greater than available openings %d\n",
350 		    tag, periph->periph_openings);
351 	}
352 
353 	xs->xs_tag_id = tag;
354 }
355 
356 /*
357  * scsipi_put_tag:
358  *
359  *	Put the tag ID for the specified xfer back into the pool.
360  *
361  *	NOTE: Must be called at splbio().
362  */
363 static void
364 scsipi_put_tag(struct scsipi_xfer *xs)
365 {
366 	struct scsipi_periph *periph = xs->xs_periph;
367 	int word, bit;
368 
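	/*
	 * Recover the bitmap word and bit from the tag ID (the inverse
	 * of the encoding in scsipi_get_tag()) and mark the tag free.
	 */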
369 	word = xs->xs_tag_id >> 5;
370 	bit = xs->xs_tag_id & 0x1f;
371 
372 	periph->periph_freetags[word] |= (1 << bit);
373 }
374 
375 /*
376  * scsipi_get_xs:
377  *
378  *	Allocate an xfer descriptor and associate it with the
379  *	specified peripheral.  If the peripheral has no more
380  *	available command openings, we either block waiting for
381  *	one to become available, or fail.
382  */
383 struct scsipi_xfer *
384 scsipi_get_xs(struct scsipi_periph *periph, int flags)
385 {
386 	struct scsipi_xfer *xs;
387 	int s;
388 
389 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
390 
391 	KASSERT(!cold);
392 
393 #ifdef DIAGNOSTIC
394 	/*
395 	 * URGENT commands can never be ASYNC.
396 	 */
397 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
398 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
399 		scsipi_printaddr(periph);
400 		printf("URGENT and ASYNC\n");
401 		panic("scsipi_get_xs");
402 	}
403 #endif
404 
405 	s = splbio();
406 	/*
407 	 * Wait for a command opening to become available.  Rules:
408 	 *
409 	 *	- All xfers must wait for an available opening.
410 	 *	  Exception: URGENT xfers can proceed when
411 	 *	  active == openings, because we use the opening
412 	 *	  of the command we're recovering for.
413 	 *	- If the periph has sense pending, only URGENT & REQSENSE
414 	 *	  xfers may proceed.
415 	 *
416 	 *	- If the periph is recovering, only URGENT xfers may
417 	 *	  proceed.
418 	 *
419 	 *	- If the periph is currently executing a recovery
420 	 *	  command, URGENT commands must block, because only
421 	 *	  one recovery command can execute at a time.
422 	 */
423 	for (;;) {
424 		if (flags & XS_CTL_URGENT) {
425 			if (periph->periph_active > periph->periph_openings)
426 				goto wait_for_opening;
427 			if (periph->periph_flags & PERIPH_SENSE) {
428 				if ((flags & XS_CTL_REQSENSE) == 0)
429 					goto wait_for_opening;
430 			} else {
431 				if ((periph->periph_flags &
432 				    PERIPH_RECOVERY_ACTIVE) != 0)
433 					goto wait_for_opening;
434 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
435 			}
436 			break;
437 		}
438 		if (periph->periph_active >= periph->periph_openings ||
439 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
440 			goto wait_for_opening;
441 		periph->periph_active++;
442 		break;
443 
444  wait_for_opening:
445 		if (flags & XS_CTL_NOSLEEP) {
446 			splx(s);
447 			return (NULL);
448 		}
449 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
450 		periph->periph_flags |= PERIPH_WAITING;
451 		(void) tsleep(periph, PRIBIO, "getxs", 0);
452 	}
453 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
454 	xs = pool_get(&scsipi_xfer_pool,
455 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
456 	if (xs == NULL) {
457 		if (flags & XS_CTL_URGENT) {
458 			if ((flags & XS_CTL_REQSENSE) == 0)
459 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
460 		} else
461 			periph->periph_active--;
462 		scsipi_printaddr(periph);
463 		printf("unable to allocate %sscsipi_xfer\n",
464 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
465 	}
466 	splx(s);
467 
468 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
469 
470 	if (xs != NULL) {
471 		memset(xs, 0, sizeof(*xs));
472 		callout_init(&xs->xs_callout, 0);
473 		xs->xs_periph = periph;
474 		xs->xs_control = flags;
475 		xs->xs_status = 0;
476 		s = splbio();
477 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
478 		splx(s);
479 	}
480 	return (xs);
481 }
482 
483 /*
484  * scsipi_put_xs:
485  *
486  *	Release an xfer descriptor, decreasing the outstanding command
487  *	count for the peripheral.  If there is a thread waiting for
488  *	an opening, wake it up.  If not, kick any queued I/O the
489  *	peripheral may have.
490  *
491  *	NOTE: Must be called at splbio().
492  */
493 void
494 scsipi_put_xs(struct scsipi_xfer *xs)
495 {
496 	struct scsipi_periph *periph = xs->xs_periph;
497 	int flags = xs->xs_control;
498 
499 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
500 
501 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
502 	callout_destroy(&xs->xs_callout);
503 	pool_put(&scsipi_xfer_pool, xs);
504 
505 #ifdef DIAGNOSTIC
506 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
507 	    periph->periph_active == 0) {
508 		scsipi_printaddr(periph);
509 		printf("recovery without a command to recover for\n");
510 		panic("scsipi_put_xs");
511 	}
512 #endif
513 
514 	if (flags & XS_CTL_URGENT) {
515 		if ((flags & XS_CTL_REQSENSE) == 0)
516 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
517 	} else
518 		periph->periph_active--;
519 	if (periph->periph_active == 0 &&
520 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
521 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
522 		wakeup(&periph->periph_active);
523 	}
524 
525 	if (periph->periph_flags & PERIPH_WAITING) {
526 		periph->periph_flags &= ~PERIPH_WAITING;
527 		wakeup(periph);
528 	} else {
529 		if (periph->periph_switch->psw_start != NULL &&
530 		    device_is_active(periph->periph_dev)) {
531 			SC_DEBUG(periph, SCSIPI_DB2,
532 			    ("calling private start()\n"));
533 			(*periph->periph_switch->psw_start)(periph);
534 		}
535 	}
536 }
537 
538 /*
539  * scsipi_channel_freeze:
540  *
541  *	Freeze a channel's xfer queue.
542  */
543 void
544 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
545 {
546 	int s;
547 
548 	s = splbio();
549 	chan->chan_qfreeze += count;
550 	splx(s);
551 }
552 
553 /*
554  * scsipi_channel_thaw:
555  *
556  *	Thaw a channel's xfer queue.
557  */
558 void
559 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
560 {
561 	int s;
562 
563 	s = splbio();
564 	chan->chan_qfreeze -= count;
565 	/*
566 	 * Don't let the freeze count go negative.
567 	 *
568 	 * Presumably the adapter driver could keep track of this,
569 	 * but it might just be easier to do this here so as to allow
570 	 * multiple callers, including those outside the adapter driver.
571 	 */
572 	if (chan->chan_qfreeze < 0) {
573 		chan->chan_qfreeze = 0;
574 	}
575 	splx(s);
576 	/*
577 	 * Kick the channel's queue here.  Note, we may be running in
578 	 * interrupt context (softclock or HBA's interrupt), so the adapter
579 	 * driver had better not sleep.
580 	 */
581 	if (chan->chan_qfreeze == 0)
582 		scsipi_run_queue(chan);
583 }
584 
585 /*
586  * scsipi_channel_timed_thaw:
587  *
588  *	Thaw a channel after some time has expired. This will also
589  * 	run the channel's queue if the freeze count has reached 0.
590  */
591 void
592 scsipi_channel_timed_thaw(void *arg)
593 {
594 	struct scsipi_channel *chan = arg;
595 
596 	scsipi_channel_thaw(chan, 1);
597 }
598 
599 /*
600  * scsipi_periph_freeze:
601  *
602  *	Freeze a device's xfer queue.
603  */
604 void
605 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
606 {
607 	int s;
608 
609 	s = splbio();
610 	periph->periph_qfreeze += count;
611 	splx(s);
612 }
613 
614 /*
615  * scsipi_periph_thaw:
616  *
617  *	Thaw a device's xfer queue.
618  */
619 void
620 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
621 {
622 	int s;
623 
624 	s = splbio();
625 	periph->periph_qfreeze -= count;
626 #ifdef DIAGNOSTIC
627 	if (periph->periph_qfreeze < 0) {
628 		static const char pc[] = "periph freeze count < 0";
629 		scsipi_printaddr(periph);
630 		printf("%s\n", pc);
631 		panic(pc);
632 	}
633 #endif
634 	if (periph->periph_qfreeze == 0 &&
635 	    (periph->periph_flags & PERIPH_WAITING) != 0)
636 		wakeup(periph);
637 	splx(s);
638 }
639 
640 /*
641  * scsipi_periph_timed_thaw:
642  *
643  *	Thaw a device after some time has expired.
644  */
645 void
646 scsipi_periph_timed_thaw(void *arg)
647 {
648 	int s;
649 	struct scsipi_periph *periph = arg;
650 
651 	callout_stop(&periph->periph_callout);
652 
653 	s = splbio();
654 	scsipi_periph_thaw(periph, 1);
655 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
656 		/*
657 		 * Kick the channel's queue here.  Note, we're running in
658 		 * interrupt context (softclock), so the adapter driver
659 		 * had better not sleep.
660 		 */
661 		scsipi_run_queue(periph->periph_channel);
662 	} else {
663 		/*
664 		 * Tell the completion thread to kick the channel's queue here.
665 		 */
666 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
667 		wakeup(&periph->periph_channel->chan_complete);
668 	}
669 	splx(s);
670 }
671 
672 /*
673  * scsipi_wait_drain:
674  *
675  *	Wait for a periph's pending xfers to drain.
676  */
677 void
678 scsipi_wait_drain(struct scsipi_periph *periph)
679 {
680 	int s;
681 
682 	s = splbio();
683 	while (periph->periph_active != 0) {
684 		periph->periph_flags |= PERIPH_WAITDRAIN;
685 		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
686 	}
687 	splx(s);
688 }
689 
690 /*
691  * scsipi_kill_pending:
692  *
693  *	Kill off all pending xfers for a periph.
694  *
695  *	NOTE: Must be called at splbio().
696  */
697 void
698 scsipi_kill_pending(struct scsipi_periph *periph)
699 {
700 
701 	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
702 	scsipi_wait_drain(periph);
703 }
704 
705 /*
706  * scsipi_print_cdb:
707  * prints a command descriptor block (for debugging purposes, error
708  * messages, SCSIVERBOSE, ...)
709  */
710 void
711 scsipi_print_cdb(struct scsipi_generic *cmd)
712 {
713 	int i, j;
714 
715  	printf("0x%02x", cmd->opcode);
716 
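 	/*
 	 * The opcode's group (its high-order three bits) determines
 	 * the length of the CDB.
 	 */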
717  	switch (CDB_GROUPID(cmd->opcode)) {
718  	case CDB_GROUPID_0:
719  		j = CDB_GROUP0;
720  		break;
721  	case CDB_GROUPID_1:
722  		j = CDB_GROUP1;
723  		break;
724  	case CDB_GROUPID_2:
725  		j = CDB_GROUP2;
726  		break;
727  	case CDB_GROUPID_3:
728  		j = CDB_GROUP3;
729  		break;
730  	case CDB_GROUPID_4:
731  		j = CDB_GROUP4;
732  		break;
733  	case CDB_GROUPID_5:
734  		j = CDB_GROUP5;
735  		break;
736  	case CDB_GROUPID_6:
737  		j = CDB_GROUP6;
738  		break;
739  	case CDB_GROUPID_7:
740  		j = CDB_GROUP7;
741  		break;
742  	default:
743  		j = 0;
744  	}
745  	if (j == 0)
746  		j = sizeof (cmd->bytes);
747  	for (i = 0; i < j-1; i++) /* already done the opcode */
748  		printf(" %02x", cmd->bytes[i]);
749 }
750 
751 /*
752  * scsipi_interpret_sense:
753  *
754  *	Look at the returned sense and act on the error, determining
755  *	the unix error number to pass back.  (0 = report no error)
756  *
757  *	NOTE: If we return ERESTART, we are expected to have
758  *	thawed the device!
759  *
760  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
761  */
762 int
763 scsipi_interpret_sense(struct scsipi_xfer *xs)
764 {
765 	struct scsi_sense_data *sense;
766 	struct scsipi_periph *periph = xs->xs_periph;
767 	u_int8_t key;
768 	int error;
769 	u_int32_t info;
770 	static const char *error_mes[] = {
771 		"soft error (corrected)",
772 		"not ready", "medium error",
773 		"non-media hardware failure", "illegal request",
774 		"unit attention", "readonly device",
775 		"no data found", "vendor unique",
776 		"copy aborted", "command aborted",
777 		"search returned equal", "volume overflow",
778 		"verify miscompare", "unknown error key"
779 	};
780 
781 	sense = &xs->sense.scsi_sense;
782 #ifdef SCSIPI_DEBUG
783 	if (periph->periph_dbflags & SCSIPI_DB1) {
784 		int count;
785 		scsipi_printaddr(periph);
786 		printf(" sense debug information:\n");
787 		printf("\tcode 0x%x valid %d\n",
788 			SSD_RCODE(sense->response_code),
789 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
790 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
791 			sense->segment,
792 			SSD_SENSE_KEY(sense->flags),
793 			sense->flags & SSD_ILI ? 1 : 0,
794 			sense->flags & SSD_EOM ? 1 : 0,
795 			sense->flags & SSD_FILEMARK ? 1 : 0);
796 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
797 			"extra bytes\n",
798 			sense->info[0],
799 			sense->info[1],
800 			sense->info[2],
801 			sense->info[3],
802 			sense->extra_len);
803 		printf("\textra: ");
804 		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
805 			printf("0x%x ", sense->csi[count]);
806 		printf("\n");
807 	}
808 #endif
809 
810 	/*
811 	 * If the periph has its own error handler, call it first.
812 	 * If it returns a legit error value, return that, otherwise
813 	 * it wants us to continue with normal error processing.
814 	 */
815 	if (periph->periph_switch->psw_error != NULL) {
816 		SC_DEBUG(periph, SCSIPI_DB2,
817 		    ("calling private err_handler()\n"));
818 		error = (*periph->periph_switch->psw_error)(xs);
819 		if (error != EJUSTRETURN)
820 			return (error);
821 	}
822 	/* otherwise use the default */
823 	switch (SSD_RCODE(sense->response_code)) {
824 
825 		/*
826 		 * Old SCSI-1 and SASI devices respond with
827 		 * codes other than 70.
828 		 */
829 	case 0x00:		/* no error (command completed OK) */
830 		return (0);
831 	case 0x04:		/* drive not ready after it was selected */
832 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
833 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
834 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
835 			return (0);
836 		/* XXX - display some sort of error here? */
837 		return (EIO);
838 	case 0x20:		/* invalid command */
839 		if ((xs->xs_control &
840 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
841 			return (0);
842 		return (EINVAL);
843 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
844 		return (EACCES);
845 
846 		/*
847 		 * If it's code 70, use the extended stuff and
848 		 * interpret the key
849 		 */
850 	case 0x71:		/* delayed error */
851 		scsipi_printaddr(periph);
852 		key = SSD_SENSE_KEY(sense->flags);
853 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
854 		/* FALLTHROUGH */
855 	case 0x70:
856 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
857 			info = _4btol(sense->info);
858 		else
859 			info = 0;
860 		key = SSD_SENSE_KEY(sense->flags);
861 
862 		switch (key) {
863 		case SKEY_NO_SENSE:
864 		case SKEY_RECOVERED_ERROR:
865 			if (xs->resid == xs->datalen && xs->datalen) {
866 				/*
867 				 * Why is this here?
868 				 */
869 				xs->resid = 0;	/* not short read */
870 			}
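			/* FALLTHROUGH */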
871 		case SKEY_EQUAL:
872 			error = 0;
873 			break;
874 		case SKEY_NOT_READY:
875 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
876 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
877 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
878 				return (0);
879 			if (sense->asc == 0x3A) {
880 				error = ENODEV; /* Medium not present */
881 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
882 					return (error);
883 			} else
884 				error = EIO;
885 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
886 				return (error);
887 			break;
888 		case SKEY_ILLEGAL_REQUEST:
889 			if ((xs->xs_control &
890 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
891 				return (0);
892 			/*
893 			 * Handle the case where a device reports
894 			 * Logical Unit Not Supported during discovery.
895 			 */
896 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
897 			    sense->asc == 0x25 &&
898 			    sense->ascq == 0x00)
899 				return (EINVAL);
900 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
901 				return (EIO);
902 			error = EINVAL;
903 			break;
904 		case SKEY_UNIT_ATTENTION:
905 			if (sense->asc == 0x29 &&
906 			    sense->ascq == 0x00) {
907 				/* device or bus reset */
908 				return (ERESTART);
909 			}
910 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
911 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
912 			if ((xs->xs_control &
913 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
914 				/* XXX Should reupload any transient state. */
915 				(periph->periph_flags &
916 				 PERIPH_REMOVABLE) == 0) {
917 				return (ERESTART);
918 			}
919 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
920 				return (EIO);
921 			error = EIO;
922 			break;
923 		case SKEY_DATA_PROTECT:
924 			error = EROFS;
925 			break;
926 		case SKEY_BLANK_CHECK:
927 			error = 0;
928 			break;
929 		case SKEY_ABORTED_COMMAND:
930 			if (xs->xs_retries != 0) {
931 				xs->xs_retries--;
932 				error = ERESTART;
933 			} else
934 				error = EIO;
935 			break;
936 		case SKEY_VOLUME_OVERFLOW:
937 			error = ENOSPC;
938 			break;
939 		default:
940 			error = EIO;
941 			break;
942 		}
943 
944 		/* Print verbose decode if appropriate and possible */
945 		if ((key == 0) ||
946 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
947 		    (scsipi_print_sense(xs, 0) != 0))
948 			return (error);
949 
950 		/* Print brief(er) sense information */
951 		scsipi_printaddr(periph);
952 		printf("%s", error_mes[key - 1]);
953 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
954 			switch (key) {
955 			case SKEY_NOT_READY:
956 			case SKEY_ILLEGAL_REQUEST:
957 			case SKEY_UNIT_ATTENTION:
958 			case SKEY_DATA_PROTECT:
959 				break;
960 			case SKEY_BLANK_CHECK:
961 				printf(", requested size: %d (decimal)",
962 				    info);
963 				break;
964 			case SKEY_ABORTED_COMMAND:
965 				if (xs->xs_retries)
966 					printf(", retrying");
967 				printf(", cmd 0x%x, info 0x%x",
968 				    xs->cmd->opcode, info);
969 				break;
970 			default:
971 				printf(", info = %d (decimal)", info);
972 			}
973 		}
974 		if (sense->extra_len != 0) {
975 			int n;
976 			printf(", data =");
977 			for (n = 0; n < sense->extra_len; n++)
978 				printf(" %02x",
979 				    sense->csi[n]);
980 		}
981 		printf("\n");
982 		return (error);
983 
984 	/*
985 	 * Some other code, just report it
986 	 */
987 	default:
988 #if    defined(SCSIDEBUG) || defined(DEBUG)
989 	{
990 		static const char *uc = "undecodable sense error";
991 		int i;
992 		u_int8_t *cptr = (u_int8_t *) sense;
993 		scsipi_printaddr(periph);
994 		if (xs->cmd == &xs->cmdstore) {
995 			printf("%s for opcode 0x%x, data=",
996 			    uc, xs->cmdstore.opcode);
997 		} else {
998 			printf("%s, data=", uc);
999 		}
1000 		for (i = 0; i < sizeof (*sense); i++)
1001 			printf(" 0x%02x", *(cptr++) & 0xff);
1002 		printf("\n");
1003 	}
1004 #else
1005 		scsipi_printaddr(periph);
1006 		printf("Sense Error Code 0x%x",
1007 			SSD_RCODE(sense->response_code));
1008 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1009 			struct scsi_sense_data_unextended *usense =
1010 			    (struct scsi_sense_data_unextended *)sense;
1011 			printf(" at block no. %d (decimal)",
1012 			    _3btol(usense->block));
1013 		}
1014 		printf("\n");
1015 #endif
1016 		return (EIO);
1017 	}
1018 }
1019 
1020 /*
1021  * scsipi_test_unit_ready:
1022  *
1023  *	Issue a `test unit ready' request.
1024  */
1025 int
1026 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1027 {
1028 	struct scsi_test_unit_ready cmd;
1029 	int retries;
1030 
1031 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
1032 	if (periph->periph_quirks & PQUIRK_NOTUR)
1033 		return (0);
1034 
1035 	if (flags & XS_CTL_DISCOVERY)
1036 		retries = 0;
1037 	else
1038 		retries = SCSIPIRETRIES;
1039 
1040 	memset(&cmd, 0, sizeof(cmd));
1041 	cmd.opcode = SCSI_TEST_UNIT_READY;
1042 
1043 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1044 	    retries, 10000, NULL, flags));
1045 }
1046 
1047 /*
1048  * scsipi_inquire:
1049  *
1050  *	Ask the device about itself.
1051  */
1052 int
1053 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1054     int flags)
1055 {
1056 	struct scsipi_inquiry cmd;
1057 	int error;
1058 	int retries;
1059 
1060 	if (flags & XS_CTL_DISCOVERY)
1061 		retries = 0;
1062 	else
1063 		retries = SCSIPIRETRIES;
1064 
1065 	/*
1066 	 * If we request more data than the device can provide, it SHOULD just
1067 	 * return a short response.  However, some devices error with an
1068 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1069 	 * failure modes (such as the GL641USB flash adapter, which goes loony
1070 	 * and sends corrupted CRCs).  To work around this, and to bring our
1071 	 * behavior more in line with other OSes, we do a shorter inquiry,
1072 	 * covering all the SCSI-2 information, first, and then request more
1073 	 * data iff the "additional length" field indicates there is more.
1074 	 * - mycroft, 2003/10/16
1075 	 */
1076 	memset(&cmd, 0, sizeof(cmd));
1077 	cmd.opcode = INQUIRY;
1078 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1079 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1080 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
1081 	    10000, NULL, flags | XS_CTL_DATA_IN);
1082 	if (!error &&
1083 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1084 #if 0
1085 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
1086 #endif
1087 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1088 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1089 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
1090 		    10000, NULL, flags | XS_CTL_DATA_IN);
1091 #if 0
1092 printf("inquire: error=%d\n", error);
1093 #endif
1094 	}
1095 
1096 #ifdef SCSI_OLD_NOINQUIRY
1097 	/*
1098 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1099 	 * This board doesn't support the INQUIRY command at all.
1100 	 */
1101 	if (error == EINVAL || error == EACCES) {
1102 		/*
1103 		 * Conjure up an INQUIRY response.
1104 		 */
1105 		inqbuf->device = (error == EINVAL ?
1106 			 SID_QUAL_LU_PRESENT :
1107 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1108 		inqbuf->dev_qual2 = 0;
1109 		inqbuf->version = 0;
1110 		inqbuf->response_format = SID_FORMAT_SCSI1;
1111 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1112 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1113 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1114 		error = 0;
1115 	}
1116 
1117 	/*
1118 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1119 	 * This board gives an empty response to an INQUIRY command.
1120 	 */
1121 	else if (error == 0 &&
1122 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1123 	    inqbuf->dev_qual2 == 0 &&
1124 	    inqbuf->version == 0 &&
1125 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
1126 		/*
1127 		 * Fill out the INQUIRY response.
1128 		 */
1129 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1130 		inqbuf->dev_qual2 = SID_REMOVABLE;
1131 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1132 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1133 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1134 	}
1135 #endif /* SCSI_OLD_NOINQUIRY */
1136 
1137 	return error;
1138 }
1139 
1140 /*
1141  * scsipi_prevent:
1142  *
1143  *	Prevent or allow the user to remove the media
1144  */
1145 int
1146 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1147 {
1148 	struct scsi_prevent_allow_medium_removal cmd;
1149 
1150 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1151 		return 0;
1152 
1153 	memset(&cmd, 0, sizeof(cmd));
1154 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1155 	cmd.how = type;
1156 
1157 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1158 	    SCSIPIRETRIES, 5000, NULL, flags));
1159 }
1160 
1161 /*
1162  * scsipi_start:
1163  *
1164  *	Send a START UNIT.
1165  */
1166 int
1167 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1168 {
1169 	struct scsipi_start_stop cmd;
1170 
1171 	memset(&cmd, 0, sizeof(cmd));
1172 	cmd.opcode = START_STOP;
1173 	cmd.byte2 = 0x00;
1174 	cmd.how = type;
1175 
1176 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1177 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
1178 }
1179 
1180 /*
1181  * scsipi_mode_sense, scsipi_mode_sense_big:
1182  *	get a sense page from a device
1183  */
1184 
1185 int
1186 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1187     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1188     int timeout)
1189 {
1190 	struct scsi_mode_sense_6 cmd;
1191 
1192 	memset(&cmd, 0, sizeof(cmd));
1193 	cmd.opcode = SCSI_MODE_SENSE_6;
1194 	cmd.byte2 = byte2;
1195 	cmd.page = page;
1196 	cmd.length = len & 0xff;
1197 
1198 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1199 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1200 }
1201 
1202 int
1203 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1204     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1205     int timeout)
1206 {
1207 	struct scsi_mode_sense_10 cmd;
1208 
1209 	memset(&cmd, 0, sizeof(cmd));
1210 	cmd.opcode = SCSI_MODE_SENSE_10;
1211 	cmd.byte2 = byte2;
1212 	cmd.page = page;
1213 	_lto2b(len, cmd.length);
1214 
1215 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1216 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1217 }
1218 
1219 int
1220 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1221     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1222     int timeout)
1223 {
1224 	struct scsi_mode_select_6 cmd;
1225 
1226 	memset(&cmd, 0, sizeof(cmd));
1227 	cmd.opcode = SCSI_MODE_SELECT_6;
1228 	cmd.byte2 = byte2;
1229 	cmd.length = len & 0xff;
1230 
1231 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1232 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1233 }
1234 
1235 int
1236 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1237     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1238     int timeout)
1239 {
1240 	struct scsi_mode_select_10 cmd;
1241 
1242 	memset(&cmd, 0, sizeof(cmd));
1243 	cmd.opcode = SCSI_MODE_SELECT_10;
1244 	cmd.byte2 = byte2;
1245 	_lto2b(len, cmd.length);
1246 
1247 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1248 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1249 }
1250 
1251 /*
1252  * scsipi_done:
1253  *
1254  *	This routine is called by an adapter's interrupt handler when
1255  *	an xfer is completed.
1256  */
1257 void
1258 scsipi_done(struct scsipi_xfer *xs)
1259 {
1260 	struct scsipi_periph *periph = xs->xs_periph;
1261 	struct scsipi_channel *chan = periph->periph_channel;
1262 	int s, freezecnt;
1263 
1264 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1265 #ifdef SCSIPI_DEBUG
1266 	if (periph->periph_dbflags & SCSIPI_DB1)
1267 		show_scsipi_cmd(xs);
1268 #endif
1269 
1270 	s = splbio();
1271 	/*
1272 	 * The resource this command was using is now free.
1273 	 */
1274 	if (xs->xs_status & XS_STS_DONE) {
1275 		/* XXX in certain circumstances, such as a device
1276 		 * being detached, an xs that has already been
1277 		 * scsipi_done()'d by the main thread will be done'd
1278 		 * again by scsibusdetach(). Putting the xs on the
1279 		 * chan_complete queue causes list corruption and
1280 		 * everyone dies. This prevents that, but perhaps
1281 		 * there should be better coordination somewhere such
1282 		 * that this won't ever happen (and can be turned into
1283 		 * a KASSERT()).
1284 		 */
1285 		splx(s);
1286 		goto out;
1287 	}
1288 	scsipi_put_resource(chan);
1289 	xs->xs_periph->periph_sent--;
1290 
1291 	/*
1292 	 * If the command was tagged, free the tag.
1293 	 */
1294 	if (XS_CTL_TAGTYPE(xs) != 0)
1295 		scsipi_put_tag(xs);
1296 	else
1297 		periph->periph_flags &= ~PERIPH_UNTAG;
1298 
1299 	/* Mark the command as `done'. */
1300 	xs->xs_status |= XS_STS_DONE;
1301 
1302 #ifdef DIAGNOSTIC
1303 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1304 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1305 		panic("scsipi_done: ASYNC and POLL");
1306 #endif
1307 
1308 	/*
1309 	 * If the xfer had an error of any sort, freeze the
1310 	 * periph's queue.  Freeze it again if we were requested
1311 	 * to do so in the xfer.
1312 	 */
1313 	freezecnt = 0;
1314 	if (xs->error != XS_NOERROR)
1315 		freezecnt++;
1316 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1317 		freezecnt++;
1318 	if (freezecnt != 0)
1319 		scsipi_periph_freeze(periph, freezecnt);
1320 
1321 	/*
1322 	 * Record the xfer with a pending sense, in case a SCSI reset is
1323 	 * received before the thread is woken up.
1324 	 */
1325 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1326 		periph->periph_flags |= PERIPH_SENSE;
1327 		periph->periph_xscheck = xs;
1328 	}
1329 
1330 	/*
1331 	 * If this was an xfer that was not to complete asynchronously,
1332 	 * let the requesting thread perform error checking/handling
1333 	 * in its context.
1334 	 */
1335 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1336 		splx(s);
1337 		/*
1338 		 * If it's a polling job, just return, to unwind the
1339 		 * call graph.  We don't need to restart the queue,
1340 		 * because polling jobs are treated specially, and
1341 		 * are really only used during crash dumps anyway
1342 		 * (XXX or during boot-time autoconfiguration of
1343 		 * ATAPI devices).
1344 		 */
1345 		if (xs->xs_control & XS_CTL_POLL)
1346 			return;
1347 		wakeup(xs);
1348 		goto out;
1349 	}
1350 
1351 	/*
1352 	 * Catch the extremely common case of I/O completing
1353 	 * without error; no use in taking a context switch
1354 	 * if we can handle it in interrupt context.
1355 	 */
1356 	if (xs->error == XS_NOERROR) {
1357 		splx(s);
1358 		(void) scsipi_complete(xs);
1359 		goto out;
1360 	}
1361 
1362 	/*
1363 	 * There is an error on this xfer.  Put it on the channel's
1364 	 * completion queue, and wake up the completion thread.
1365 	 */
1366 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1367 	splx(s);
1368 	wakeup(&chan->chan_complete);
1369 
1370  out:
1371 	/*
1372 	 * If there are more xfers on the channel's queue, attempt to
1373 	 * run them.
1374 	 */
1375 	scsipi_run_queue(chan);
1376 }
1377 
1378 /*
1379  * scsipi_complete:
1380  *
1381  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1382  *
1383  *	NOTE: This routine MUST be called with valid thread context
1384  *	except for the case where the following two conditions are
1385  *	true:
1386  *
1387  *		xs->error == XS_NOERROR
1388  *		XS_CTL_ASYNC is set in xs->xs_control
1389  *
1390  *	The semantics of this routine can be tricky, so here is an
1391  *	explanation:
1392  *
1393  *		0		Xfer completed successfully.
1394  *
1395  *		ERESTART	Xfer had an error, but was restarted.
1396  *
1397  *		anything else	Xfer had an error, return value is Unix
1398  *				errno.
1399  *
1400  *	If the return value is anything but ERESTART:
1401  *
1402  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1403  *		  the pool.
1404  *		- If there is a buf associated with the xfer,
1405  *		  it has been biodone()'d.
1406  */
1407 static int
1408 scsipi_complete(struct scsipi_xfer *xs)
1409 {
1410 	struct scsipi_periph *periph = xs->xs_periph;
1411 	struct scsipi_channel *chan = periph->periph_channel;
1412 	int error, s;
1413 
1414 #ifdef DIAGNOSTIC
1415 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1416 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1417 #endif
1418 	/*
1419 	 * If command terminated with a CHECK CONDITION, we need to issue a
1420 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1421 	 * we'll have the real status.
1422 	 * Must be processed at splbio() to avoid missing a SCSI bus reset
1423 	 * for this command.
1424 	 */
1425 	s = splbio();
1426 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1427 		/* request sense for a request sense? */
1428 		if (xs->xs_control & XS_CTL_REQSENSE) {
1429 			scsipi_printaddr(periph);
1430 			printf("request sense for a request sense?\n");
1431 			/* XXX maybe we should reset the device ? */
1432 			/* we've been frozen because xs->error != XS_NOERROR */
1433 			scsipi_periph_thaw(periph, 1);
1434 			splx(s);
1435 			if (xs->resid < xs->datalen) {
1436 				printf("we read %d bytes of sense anyway:\n",
1437 				    xs->datalen - xs->resid);
1438 				scsipi_print_sense_data((void *)xs->data, 0);
1439 			}
1440 			return EINVAL;
1441 		}
1442 		scsipi_request_sense(xs);
1443 	}
1444 	splx(s);
1445 
1446 	/*
1447 	 * If it's a user-level request, bypass all usual completion
1448 	 * processing and let the user work it out.
1449 	 */
1450 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1451 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1452 		if (xs->error != XS_NOERROR)
1453 			scsipi_periph_thaw(periph, 1);
1454 		scsipi_user_done(xs);
1455 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1456 		return 0;
1457 	}
1458 
1459 	switch (xs->error) {
1460 	case XS_NOERROR:
1461 		error = 0;
1462 		break;
1463 
1464 	case XS_SENSE:
1465 	case XS_SHORTSENSE:
1466 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1467 		break;
1468 
1469 	case XS_RESOURCE_SHORTAGE:
1470 		/*
1471 		 * XXX Should freeze channel's queue.
1472 		 */
1473 		scsipi_printaddr(periph);
1474 		printf("adapter resource shortage\n");
1475 		/* FALLTHROUGH */
1476 
1477 	case XS_BUSY:
1478 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1479 			struct scsipi_max_openings mo;
1480 
1481 			/*
1482 			 * We set the openings to active - 1, assuming that
1483 			 * the command that got us here is the first one that
1484 			 * can't fit into the device's queue.  If that's not
1485 			 * the case, I guess we'll find out soon enough.
1486 			 */
1487 			mo.mo_target = periph->periph_target;
1488 			mo.mo_lun = periph->periph_lun;
1489 			if (periph->periph_active < periph->periph_openings)
1490 				mo.mo_openings = periph->periph_active - 1;
1491 			else
1492 				mo.mo_openings = periph->periph_openings - 1;
1493 #ifdef DIAGNOSTIC
1494 			if (mo.mo_openings < 0) {
1495 				scsipi_printaddr(periph);
1496 				printf("QUEUE FULL resulted in < 0 openings\n");
1497 				panic("scsipi_done");
1498 			}
1499 #endif
1500 			if (mo.mo_openings == 0) {
1501 				scsipi_printaddr(periph);
1502 				printf("QUEUE FULL resulted in 0 openings\n");
1503 				mo.mo_openings = 1;
1504 			}
1505 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1506 			error = ERESTART;
1507 		} else if (xs->xs_retries != 0) {
1508 			xs->xs_retries--;
1509 			/*
1510 			 * Wait one second, and try again.
1511 			 */
1512 			if ((xs->xs_control & XS_CTL_POLL) ||
1513 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1514 				/* XXX: quite extreme */
1515 				kpause("xsbusy", false, hz, NULL);
1516 			} else if (!callout_pending(&periph->periph_callout)) {
1517 				scsipi_periph_freeze(periph, 1);
1518 				callout_reset(&periph->periph_callout,
1519 				    hz, scsipi_periph_timed_thaw, periph);
1520 			}
1521 			error = ERESTART;
1522 		} else
1523 			error = EBUSY;
1524 		break;
1525 
1526 	case XS_REQUEUE:
1527 		error = ERESTART;
1528 		break;
1529 
1530 	case XS_SELTIMEOUT:
1531 	case XS_TIMEOUT:
1532 		/*
1533 		 * If the device hasn't gone away, honor retry counts.
1534 		 *
1535 		 * Note that if we're in the middle of probing it,
1536 		 * it won't be found because it isn't here yet, so
1537 		 * we won't honor the retry count in that case.
1538 		 */
1539 		if (scsipi_lookup_periph(chan, periph->periph_target,
1540 		    periph->periph_lun) && xs->xs_retries != 0) {
1541 			xs->xs_retries--;
1542 			error = ERESTART;
1543 		} else
1544 			error = EIO;
1545 		break;
1546 
1547 	case XS_RESET:
1548 		if (xs->xs_control & XS_CTL_REQSENSE) {
1549 			/*
1550 			 * request sense interrupted by reset: signal it
1551 			 * with EINTR return code.
1552 			 */
1553 			error = EINTR;
1554 		} else {
1555 			if (xs->xs_retries != 0) {
1556 				xs->xs_retries--;
1557 				error = ERESTART;
1558 			} else
1559 				error = EIO;
1560 		}
1561 		break;
1562 
1563 	case XS_DRIVER_STUFFUP:
1564 		scsipi_printaddr(periph);
1565 		printf("generic HBA error\n");
1566 		error = EIO;
1567 		break;
1568 	default:
1569 		scsipi_printaddr(periph);
1570 		printf("invalid return code from adapter: %d\n", xs->error);
1571 		error = EIO;
1572 		break;
1573 	}
1574 
1575 	s = splbio();
1576 	if (error == ERESTART) {
1577 		/*
1578 		 * If we get here, the periph has been thawed and frozen
1579 		 * again if we had to issue recovery commands.  Alternatively,
1580 		 * it may have been frozen again and in a timed thaw.  In
1581 		 * any case, we thaw the periph once we re-enqueue the
1582 		 * command.  Once the periph is fully thawed, it will begin
1583 		 * operation again.
1584 		 */
1585 		xs->error = XS_NOERROR;
1586 		xs->status = SCSI_OK;
1587 		xs->xs_status &= ~XS_STS_DONE;
1588 		xs->xs_requeuecnt++;
1589 		error = scsipi_enqueue(xs);
1590 		if (error == 0) {
1591 			scsipi_periph_thaw(periph, 1);
1592 			splx(s);
1593 			return (ERESTART);
1594 		}
1595 	}
1596 
1597 	/*
1598 	 * scsipi_done() froze the periph's queue if xs->error was not
1599 	 * XS_NOERROR.  Thaw it here.
1600 	 */
1601 	if (xs->error != XS_NOERROR)
1602 		scsipi_periph_thaw(periph, 1);
1603 
1604 	if (periph->periph_switch->psw_done)
1605 		periph->periph_switch->psw_done(xs, error);
1606 
1607 	if (xs->xs_control & XS_CTL_ASYNC)
1608 		scsipi_put_xs(xs);
1609 	splx(s);
1610 
1611 	return (error);
1612 }
1613 
1614 /*
1615  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1616  * returns with a CHECK_CONDITION status. Must be called in valid thread
1617  * context and at splbio().
1618  */
1619 
1620 static void
1621 scsipi_request_sense(struct scsipi_xfer *xs)
1622 {
1623 	struct scsipi_periph *periph = xs->xs_periph;
1624 	int flags, error;
1625 	struct scsi_request_sense cmd;
1626 
1627 	periph->periph_flags |= PERIPH_SENSE;
1628 
1629 	/* if command was polling, request sense will too */
1630 	flags = xs->xs_control & XS_CTL_POLL;
1631 	/* Polling commands can't sleep */
1632 	if (flags)
1633 		flags |= XS_CTL_NOSLEEP;
1634 
1635 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1636 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1637 
1638 	memset(&cmd, 0, sizeof(cmd));
1639 	cmd.opcode = SCSI_REQUEST_SENSE;
1640 	cmd.length = sizeof(struct scsi_sense_data);
1641 
1642 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1643 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1644 	    0, 1000, NULL, flags);
1645 	periph->periph_flags &= ~PERIPH_SENSE;
1646 	periph->periph_xscheck = NULL;
1647 	switch (error) {
1648 	case 0:
1649 		/* we have a valid sense */
1650 		xs->error = XS_SENSE;
1651 		return;
1652 	case EINTR:
1653 		/* REQUEST_SENSE interrupted by bus reset. */
1654 		xs->error = XS_RESET;
1655 		return;
1656 	case EIO:
1657 		 /* request sense couldn't be performed */
1658 		/*
1659 		 * XXX this isn't quite right but we don't have anything
1660 		 * better for now
1661 		 */
1662 		xs->error = XS_DRIVER_STUFFUP;
1663 		return;
1664 	default:
1665 		 /* Notify that request sense failed. */
1666 		xs->error = XS_DRIVER_STUFFUP;
1667 		scsipi_printaddr(periph);
1668 		printf("request sense failed with error %d\n", error);
1669 		return;
1670 	}
1671 }
1672 
1673 /*
1674  * scsipi_enqueue:
1675  *
1676  *	Enqueue an xfer on a channel.
1677  */
1678 static int
1679 scsipi_enqueue(struct scsipi_xfer *xs)
1680 {
1681 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1682 	struct scsipi_xfer *qxs;
1683 	int s;
1684 
1685 	s = splbio();
1686 
1687 	/*
1688 	 * If the xfer is to be polled, and there are already jobs on
1689 	 * the queue, we can't proceed.
1690 	 */
1691 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1692 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
1693 		splx(s);
1694 		xs->error = XS_DRIVER_STUFFUP;
1695 		return (EAGAIN);
1696 	}
1697 
1698 	/*
1699 	 * If we have an URGENT xfer, it's an error recovery command
1700 	 * and it should just go on the head of the channel's queue.
1701 	 */
1702 	if (xs->xs_control & XS_CTL_URGENT) {
1703 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1704 		goto out;
1705 	}
1706 
1707 	/*
1708 	 * If this xfer has already been on the queue before, we
1709 	 * need to reinsert it in the correct order.  That order is:
1710 	 *
1711 	 *	Immediately before the first xfer for this periph
1712 	 *	with a requeuecnt less than xs->xs_requeuecnt.
1713 	 *
1714 	 * Failing that, at the end of the queue.  (We'll end up
1715 	 * there naturally.)
1716 	 */
1717 	if (xs->xs_requeuecnt != 0) {
1718 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1719 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
1720 			if (qxs->xs_periph == xs->xs_periph &&
1721 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
1722 				break;
1723 		}
1724 		if (qxs != NULL) {
1725 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1726 			    channel_q);
1727 			goto out;
1728 		}
1729 	}
1730 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1731  out:
1732 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
1733 		scsipi_periph_thaw(xs->xs_periph, 1);
1734 	splx(s);
1735 	return (0);
1736 }
1737 
1738 /*
1739  * scsipi_run_queue:
1740  *
1741  *	Start as many xfers as possible running on the channel.
1742  */
1743 static void
1744 scsipi_run_queue(struct scsipi_channel *chan)
1745 {
1746 	struct scsipi_xfer *xs;
1747 	struct scsipi_periph *periph;
1748 	int s;
1749 
1750 	for (;;) {
1751 		s = splbio();
1752 
1753 		/*
1754 		 * If the channel is frozen, we can't do any work right
1755 		 * now.
1756 		 */
1757 		if (chan->chan_qfreeze != 0) {
1758 			splx(s);
1759 			return;
1760 		}
1761 
1762 		/*
1763 		 * Look for work to do, and make sure we can do it.
1764 		 */
1765 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1766 		     xs = TAILQ_NEXT(xs, channel_q)) {
1767 			periph = xs->xs_periph;
1768 
1769 			if ((periph->periph_sent >= periph->periph_openings) ||
1770 			    periph->periph_qfreeze != 0 ||
1771 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
1772 				continue;
1773 
1774 			if ((periph->periph_flags &
1775 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1776 			    (xs->xs_control & XS_CTL_URGENT) == 0)
1777 				continue;
1778 
1779 			/*
1780 			 * We can issue this xfer!
1781 			 */
1782 			goto got_one;
1783 		}
1784 
1785 		/*
1786 		 * Can't find any work to do right now.
1787 		 */
1788 		splx(s);
1789 		return;
1790 
1791  got_one:
1792 		/*
1793 		 * Have an xfer to run.  Allocate a resource from
1794 		 * the adapter to run it.  If we can't allocate that
1795 		 * resource, we don't dequeue the xfer.
1796 		 */
1797 		if (scsipi_get_resource(chan) == 0) {
1798 			/*
1799 			 * Adapter is out of resources.  If the adapter
1800 			 * supports it, attempt to grow them.
1801 			 */
1802 			if (scsipi_grow_resources(chan) == 0) {
1803 				/*
1804 				 * Wasn't able to grow resources,
1805 				 * nothing more we can do.
1806 				 */
1807 				if (xs->xs_control & XS_CTL_POLL) {
1808 					scsipi_printaddr(xs->xs_periph);
1809 					printf("polling command but no "
1810 					    "adapter resources\n");
1811 					/* We'll panic shortly... */
1812 				}
1813 				splx(s);
1814 
1815 				/*
1816 				 * XXX: We should be able to note that
1817 				 * XXX: that resources are needed here!
1818 				 */
1819 				return;
1820 			}
1821 			/*
1822 			 * scsipi_grow_resources() allocated the resource
1823 			 * for us.
1824 			 */
1825 		}
1826 
1827 		/*
1828 		 * We have a resource to run this xfer, do it!
1829 		 */
1830 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1831 
1832 		/*
1833 		 * If the command is to be tagged, allocate a tag ID
1834 		 * for it.
1835 		 */
1836 		if (XS_CTL_TAGTYPE(xs) != 0)
1837 			scsipi_get_tag(xs);
1838 		else
1839 			periph->periph_flags |= PERIPH_UNTAG;
1840 		periph->periph_sent++;
1841 		splx(s);
1842 
1843 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1844 	}
1845 #ifdef DIAGNOSTIC
1846 	panic("scsipi_run_queue: impossible");
1847 #endif
1848 }
1849 
1850 /*
1851  * scsipi_execute_xs:
1852  *
1853  *	Begin execution of an xfer, waiting for it to complete, if necessary.
1854  */
1855 int
1856 scsipi_execute_xs(struct scsipi_xfer *xs)
1857 {
1858 	struct scsipi_periph *periph = xs->xs_periph;
1859 	struct scsipi_channel *chan = periph->periph_channel;
1860 	int oasync, async, poll, error, s;
1861 
1862 	KASSERT(!cold);
1863 
1864 	(chan->chan_bustype->bustype_cmd)(xs);
1865 
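	/*
	 * Reset the xfer's completion state: nothing has been
	 * transferred yet, so resid starts at the full data length.
	 */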
1866 	xs->xs_status &= ~XS_STS_DONE;
1867 	xs->error = XS_NOERROR;
1868 	xs->resid = xs->datalen;
1869 	xs->status = SCSI_OK;
1870 
1871 #ifdef SCSIPI_DEBUG
1872 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1873 		printf("scsipi_execute_xs: ");
1874 		show_scsipi_xs(xs);
1875 		printf("\n");
1876 	}
1877 #endif
1878 
1879 	/*
1880 	 * Deal with command tagging:
1881 	 *
1882 	 *	- If the device's current operating mode doesn't
1883 	 *	  include tagged queueing, clear the tag mask.
1884 	 *
1885 	 *	- If the device's current operating mode *does*
1886 	 *	  include tagged queueing, set the tag_type in
1887 	 *	  the xfer to the appropriate byte for the tag
1888 	 *	  message.
1889 	 */
1890 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1891 		(xs->xs_control & XS_CTL_REQSENSE)) {
1892 		xs->xs_control &= ~XS_CTL_TAGMASK;
1893 		xs->xs_tag_type = 0;
1894 	} else {
1895 		/*
1896 		 * If the request doesn't specify a tag, give Head
1897 		 * tags to URGENT operations and Ordered tags to
1898 		 * everything else.
1899 		 */
1900 		if (XS_CTL_TAGTYPE(xs) == 0) {
1901 			if (xs->xs_control & XS_CTL_URGENT)
1902 				xs->xs_control |= XS_CTL_HEAD_TAG;
1903 			else
1904 				xs->xs_control |= XS_CTL_ORDERED_TAG;
1905 		}
1906 
1907 		switch (XS_CTL_TAGTYPE(xs)) {
1908 		case XS_CTL_ORDERED_TAG:
1909 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1910 			break;
1911 
1912 		case XS_CTL_SIMPLE_TAG:
1913 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1914 			break;
1915 
1916 		case XS_CTL_HEAD_TAG:
1917 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1918 			break;
1919 
1920 		default:
1921 			scsipi_printaddr(periph);
1922 			printf("invalid tag mask 0x%08x\n",
1923 			    XS_CTL_TAGTYPE(xs));
1924 			panic("scsipi_execute_xs");
1925 		}
1926 	}
1927 
1928 	/* If the adapter wants us to poll, poll. */
1929 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1930 		xs->xs_control |= XS_CTL_POLL;
1931 
1932 	/*
1933 	 * If we don't yet have a completion thread, or we are to poll for
1934 	 * completion, clear the ASYNC flag.
1935 	 */
1936 	oasync = (xs->xs_control & XS_CTL_ASYNC);
1937 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1938 		xs->xs_control &= ~XS_CTL_ASYNC;
1939 
1940 	async = (xs->xs_control & XS_CTL_ASYNC);
1941 	poll = (xs->xs_control & XS_CTL_POLL);
1942 
1943 #ifdef DIAGNOSTIC
1944 	if (oasync != 0 && xs->bp == NULL)
1945 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1946 #endif
1947 
1948 	/*
1949 	 * Enqueue the transfer.  If we're not polling for completion, this
1950 	 * should ALWAYS return `no error'.
1951 	 */
1952 	error = scsipi_enqueue(xs);
1953 	if (error) {
1954 		if (poll == 0) {
1955 			scsipi_printaddr(periph);
1956 			printf("not polling, but enqueue failed with %d\n",
1957 			    error);
1958 			panic("scsipi_execute_xs");
1959 		}
1960 
1961 		scsipi_printaddr(periph);
1962 		printf("should have flushed queue?\n");
1963 		goto free_xs;
1964 	}
1965 
1966  restarted:
1967 	scsipi_run_queue(chan);
1968 
1969 	/*
1970 	 * The xfer is enqueued, and possibly running.  If it's to be
1971 	 * completed asynchronously, just return now.
1972 	 */
1973 	if (async)
1974 		return (0);
1975 
1976 	/*
1977 	 * Not an asynchronous command; wait for it to complete.
1978 	 */
1979 	s = splbio();
1980 	while ((xs->xs_status & XS_STS_DONE) == 0) {
1981 		if (poll) {
1982 			scsipi_printaddr(periph);
1983 			printf("polling command not done\n");
1984 			panic("scsipi_execute_xs");
1985 		}
1986 		(void) tsleep(xs, PRIBIO, "xscmd", 0);
1987 	}
1988 	splx(s);
1989 
1990 	/*
1991 	 * Command is complete.  scsipi_done() has awakened us to perform
1992 	 * the error handling.
1993 	 */
1994 	error = scsipi_complete(xs);
1995 	if (error == ERESTART)
1996 		goto restarted;
1997 
1998 	/*
1999 	 * If it was meant to run async and we cleared async ourselves,
2000 	 * don't return an error here; it has already been handled.
2001 	 */
2002 	if (oasync)
2003 		error = 0;
2004 	/*
2005 	 * Command completed successfully or fatal error occurred.  Fall
2006 	 * into....
2007 	 */
2008  free_xs:
2009 	s = splbio();
2010 	scsipi_put_xs(xs);
2011 	splx(s);
2012 
2013 	/*
2014 	 * Kick the queue, keep it running in case it stopped for some
2015 	 * reason.
2016 	 */
2017 	scsipi_run_queue(chan);
2018 
2019 	return (error);
2020 }
2021 
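/*
 * Note on scsipi_execute_xs() usage (added commentary, derived from
 * the code above): callers that set XS_CTL_ASYNC must supply xs->bp
 * (see the DIAGNOSTIC check) and get a return of 0 as soon as the
 * xfer has been enqueued and the queue kicked; error handling then
 * happens via the completion thread below.  All other callers sleep
 * in tsleep() until scsipi_done() marks the xfer XS_STS_DONE, and an
 * ERESTART from scsipi_complete() causes the command to be re-run.
 */
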
2022 /*
2023  * scsipi_completion_thread:
2024  *
2025  *	This is the completion thread.  We wait for errors on
2026  *	asynchronous xfers, and perform the error handling
2027  *	function, restarting the command, if necessary.
2028  */
2029 static void
2030 scsipi_completion_thread(void *arg)
2031 {
2032 	struct scsipi_channel *chan = arg;
2033 	struct scsipi_xfer *xs;
2034 	int s;
2035 
2036 	if (chan->chan_init_cb)
2037 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2038 
2039 	s = splbio();
2040 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2041 	splx(s);
2042 	for (;;) {
2043 		s = splbio();
2044 		xs = TAILQ_FIRST(&chan->chan_complete);
2045 		if (xs == NULL && chan->chan_tflags == 0) {
2046 			/* nothing to do; wait */
2047 			(void) tsleep(&chan->chan_complete, PRIBIO,
2048 			    "sccomp", 0);
2049 			splx(s);
2050 			continue;
2051 		}
2052 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2053 			/* call chan_callback from thread context */
2054 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2055 			chan->chan_callback(chan, chan->chan_callback_arg);
2056 			splx(s);
2057 			continue;
2058 		}
2059 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2060 			/* attempt to get more openings for this channel */
2061 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2062 			scsipi_adapter_request(chan,
2063 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2064 			scsipi_channel_thaw(chan, 1);
2065 			splx(s);
2066 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2067 				kpause("scsizzz", FALSE, hz/10, NULL);
2068 			continue;
2069 		}
2070 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2071 			/* explicitly run the queues for this channel */
2072 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2073 			scsipi_run_queue(chan);
2074 			splx(s);
2075 			continue;
2076 		}
2077 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2078 			splx(s);
2079 			break;
2080 		}
2081 		if (xs) {
2082 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2083 			splx(s);
2084 
2085 			/*
2086 			 * Have an xfer with an error; process it.
2087 			 */
2088 			(void) scsipi_complete(xs);
2089 
2090 			/*
2091 			 * Kick the queue; keep it running if it was stopped
2092 			 * for some reason.
2093 			 */
2094 			scsipi_run_queue(chan);
2095 		} else {
2096 			splx(s);
2097 		}
2098 	}
2099 
2100 	chan->chan_thread = NULL;
2101 
2102 	/* In case parent is waiting for us to exit. */
2103 	wakeup(&chan->chan_thread);
2104 
2105 	kthread_exit(0);
2106 }

2107 /*
2108  * scsipi_thread_call_callback:
2109  *
2110  *	Request to call a callback from the completion thread.
2111  */
2112 int
2113 scsipi_thread_call_callback(struct scsipi_channel *chan,
2114     void (*callback)(struct scsipi_channel *, void *), void *arg)
2115 {
2116 	int s;
2117 
2118 	s = splbio();
2119 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2120 		/* kernel thread doesn't exist yet */
2121 		splx(s);
2122 		return ESRCH;
2123 	}
2124 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2125 		splx(s);
2126 		return EBUSY;
2127 	}
2128 	scsipi_channel_freeze(chan, 1);
2129 	chan->chan_callback = callback;
2130 	chan->chan_callback_arg = arg;
2131 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2132 	wakeup(&chan->chan_complete);
2133 	splx(s);
2134 	return (0);
2135 }
2136 
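/*
 * Illustrative sketch (added commentary, not original source): an
 * adapter driver with a hypothetical recovery routine could defer
 * that work to the completion thread like so:
 *
 *	static void
 *	mydrv_recover(struct scsipi_channel *chan, void *arg)
 *	{
 *		...runs in thread context...
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	if (scsipi_thread_call_callback(chan, mydrv_recover, sc) != 0)
 *		...thread not running (ESRCH) or busy (EBUSY)...
 *
 * Note that scsipi_thread_call_callback() freezes the channel by one
 * count and the completion thread does not thaw it, so the callback
 * is expected to do so itself.
 */
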
2137 /*
2138  * scsipi_async_event:
2139  *
2140  *	Handle an asynchronous event from an adapter.
2141  */
2142 void
2143 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2144     void *arg)
2145 {
2146 	int s;
2147 
2148 	s = splbio();
2149 	switch (event) {
2150 	case ASYNC_EVENT_MAX_OPENINGS:
2151 		scsipi_async_event_max_openings(chan,
2152 		    (struct scsipi_max_openings *)arg);
2153 		break;
2154 
2155 	case ASYNC_EVENT_XFER_MODE:
2156 		scsipi_async_event_xfer_mode(chan,
2157 		    (struct scsipi_xfer_mode *)arg);
2158 		break;
2159 	case ASYNC_EVENT_RESET:
2160 		scsipi_async_event_channel_reset(chan);
2161 		break;
2162 	}
2163 	splx(s);
2164 }
2165 
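/*
 * Illustrative example (added commentary): an adapter that detects a
 * bus reset in its interrupt handler could notify the midlayer with:
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 *
 * ASYNC_EVENT_RESET takes no argument; ASYNC_EVENT_MAX_OPENINGS and
 * ASYNC_EVENT_XFER_MODE pass a struct scsipi_max_openings or a
 * struct scsipi_xfer_mode, respectively.
 */
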
2166 /*
2167  * scsipi_print_xfer_mode:
2168  *
2169  *	Print a periph's capabilities.
2170  */
2171 void
2172 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2173 {
2174 	int period, freq, speed, mbs;
2175 
2176 	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2177 		return;
2178 
2179 	aprint_normal_dev(periph->periph_dev, "");
2180 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2181 		period = scsipi_sync_factor_to_period(periph->periph_period);
2182 		aprint_normal("sync (%d.%02dns offset %d)",
2183 		    period / 100, period % 100, periph->periph_offset);
2184 	} else
2185 		aprint_normal("async");
2186 
2187 	if (periph->periph_mode & PERIPH_CAP_WIDE32)
2188 		aprint_normal(", 32-bit");
2189 	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2190 		aprint_normal(", 16-bit");
2191 	else
2192 		aprint_normal(", 8-bit");
2193 
2194 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2195 		freq = scsipi_sync_factor_to_freq(periph->periph_period);
2196 		speed = freq;
2197 		if (periph->periph_mode & PERIPH_CAP_WIDE32)
2198 			speed *= 4;
2199 		else if (periph->periph_mode &
2200 		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2201 			speed *= 2;
2202 		mbs = speed / 1000;
2203 		if (mbs > 0)
2204 			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2205 		else
2206 			aprint_normal(" (%dKB/s)", speed % 1000);
2207 	}
2208 
2209 	aprint_normal(" transfers");
2210 
2211 	if (periph->periph_mode & PERIPH_CAP_TQING)
2212 		aprint_normal(", tagged queueing");
2213 
2214 	aprint_normal("\n");
2215 }
2216 
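/*
 * Sample output (added commentary, worked from the code above): a
 * device at factor 0x0c (period 50.00 ns), offset 15, 16-bit wide,
 * with tagged queueing enabled would print:
 *
 *	sd0: sync (50.00ns offset 15), 16-bit (40.000MB/s) transfers,
 *	tagged queueing
 *
 * where "sd0" stands in for the actual periph device name.
 */
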
2217 /*
2218  * scsipi_async_event_max_openings:
2219  *
2220  *	Update the maximum number of outstanding commands a
2221  *	device may have.
2222  */
2223 static void
2224 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2225     struct scsipi_max_openings *mo)
2226 {
2227 	struct scsipi_periph *periph;
2228 	int minlun, maxlun;
2229 
2230 	if (mo->mo_lun == -1) {
2231 		/*
2232 		 * Wildcarded; apply it to all LUNs.
2233 		 */
2234 		minlun = 0;
2235 		maxlun = chan->chan_nluns - 1;
2236 	} else
2237 		minlun = maxlun = mo->mo_lun;
2238 
2239 	/* XXX This could really suck with a large LUN space. */
2240 	for (; minlun <= maxlun; minlun++) {
2241 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2242 		if (periph == NULL)
2243 			continue;
2244 
2245 		if (mo->mo_openings < periph->periph_openings)
2246 			periph->periph_openings = mo->mo_openings;
2247 		else if (mo->mo_openings > periph->periph_openings &&
2248 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2249 			periph->periph_openings = mo->mo_openings;
2250 	}
2251 }
2252 
2253 /*
2254  * scsipi_async_event_xfer_mode:
2255  *
2256  *	Update the xfer mode for all periphs sharing the
2257  *	specified I_T Nexus.
2258  */
2259 static void
2260 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2261     struct scsipi_xfer_mode *xm)
2262 {
2263 	struct scsipi_periph *periph;
2264 	int lun, announce, mode, period, offset;
2265 
2266 	for (lun = 0; lun < chan->chan_nluns; lun++) {
2267 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2268 		if (periph == NULL)
2269 			continue;
2270 		announce = 0;
2271 
2272 		/*
2273 		 * Clamp the xfer mode down to this periph's capabilities.
2274 		 */
2275 		mode = xm->xm_mode & periph->periph_cap;
2276 		if (mode & PERIPH_CAP_SYNC) {
2277 			period = xm->xm_period;
2278 			offset = xm->xm_offset;
2279 		} else {
2280 			period = 0;
2281 			offset = 0;
2282 		}
2283 
2284 		/*
2285 		 * If we do not have a valid xfer mode yet, or the parameters
2286 		 * are different, announce them.
2287 		 */
2288 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2289 		    periph->periph_mode != mode ||
2290 		    periph->periph_period != period ||
2291 		    periph->periph_offset != offset)
2292 			announce = 1;
2293 
2294 		periph->periph_mode = mode;
2295 		periph->periph_period = period;
2296 		periph->periph_offset = offset;
2297 		periph->periph_flags |= PERIPH_MODE_VALID;
2298 
2299 		if (announce)
2300 			scsipi_print_xfer_mode(periph);
2301 	}
2302 }
2303 
2304 /*
2305  * scsipi_set_xfer_mode:
2306  *
2307  *	Set the xfer mode for the specified I_T Nexus.
2308  */
2309 void
2310 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2311 {
2312 	struct scsipi_xfer_mode xm;
2313 	struct scsipi_periph *itperiph;
2314 	int lun, s;
2315 
2316 	/*
2317 	 * Go to the minimal xfer mode.
2318 	 */
2319 	xm.xm_target = target;
2320 	xm.xm_mode = 0;
2321 	xm.xm_period = 0;			/* ignored */
2322 	xm.xm_offset = 0;			/* ignored */
2323 
2324 	/*
2325 	 * Find the first LUN we know about on this I_T Nexus.
2326 	 */
2327 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2328 		itperiph = scsipi_lookup_periph(chan, target, lun);
2329 		if (itperiph != NULL)
2330 			break;
2331 	}
2332 	if (itperiph != NULL) {
2333 		xm.xm_mode = itperiph->periph_cap;
2334 		/*
2335 		 * Now issue the request to the adapter.
2336 		 */
2337 		s = splbio();
2338 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2339 		splx(s);
2340 		/*
2341 		 * If we want this to happen immediately, issue a dummy
2342 		 * command, since most adapters can't really negotiate unless
2343 		 * they're executing a job.
2344 		 */
2345 		if (immed != 0) {
2346 			(void) scsipi_test_unit_ready(itperiph,
2347 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2348 			    XS_CTL_IGNORE_NOT_READY |
2349 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2350 		}
2351 	}
2352 }
2353 
2354 /*
2355  * scsipi_async_event_channel_reset:
2356  *
2357  *	Handle a SCSI bus reset.
2358  *	Called at splbio.
2359  */
2360 static void
2361 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2362 {
2363 	struct scsipi_xfer *xs, *xs_next;
2364 	struct scsipi_periph *periph;
2365 	int target, lun;
2366 
2367 	/*
2368 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2369 	 * commands as reset, since their sense data is no longer available.
2370 	 * We can't call scsipi_done() from here, as the commands have not
2371 	 * been sent to the adapter yet (that would corrupt the accounting).
2372 	 */
2373 
2374 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2375 		xs_next = TAILQ_NEXT(xs, channel_q);
2376 		if (xs->xs_control & XS_CTL_REQSENSE) {
2377 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2378 			xs->error = XS_RESET;
2379 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2380 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2381 				    channel_q);
2382 		}
2383 	}
2384 	wakeup(&chan->chan_complete);
2385 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
2386 	for (target = 0; target < chan->chan_ntargets; target++) {
2387 		if (target == chan->chan_id)
2388 			continue;
2389 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2390 			periph = scsipi_lookup_periph(chan, target, lun);
2391 			if (periph) {
2392 				xs = periph->periph_xscheck;
2393 				if (xs)
2394 					xs->error = XS_RESET;
2395 			}
2396 		}
2397 	}
2398 }
2399 
2400 /*
2401  * scsipi_target_detach:
2402  *
2403  *	Detach all periphs associated with an I_T Nexus.
2404  *	Must be called from a valid thread context.
2405  */
2406 int
2407 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2408     int flags)
2409 {
2410 	struct scsipi_periph *periph;
2411 	int ctarget, mintarget, maxtarget;
2412 	int clun, minlun, maxlun;
2413 	int error;
2414 
2415 	if (target == -1) {
2416 		mintarget = 0;
2417 		maxtarget = chan->chan_ntargets;
2418 	} else {
2419 		if (target == chan->chan_id)
2420 			return EINVAL;
2421 		if (target < 0 || target >= chan->chan_ntargets)
2422 			return EINVAL;
2423 		mintarget = target;
2424 		maxtarget = target + 1;
2425 	}
2426 
2427 	if (lun == -1) {
2428 		minlun = 0;
2429 		maxlun = chan->chan_nluns;
2430 	} else {
2431 		if (lun < 0 || lun >= chan->chan_nluns)
2432 			return EINVAL;
2433 		minlun = lun;
2434 		maxlun = lun + 1;
2435 	}
2436 
2437 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2438 		if (ctarget == chan->chan_id)
2439 			continue;
2440 
2441 		for (clun = minlun; clun < maxlun; clun++) {
2442 			periph = scsipi_lookup_periph(chan, ctarget, clun);
2443 			if (periph == NULL)
2444 				continue;
2445 			error = config_detach(periph->periph_dev, flags);
2446 			if (error)
2447 				return (error);
2448 		}
2449 	}
2450 	return (0);
2451 }
2452 
2453 /*
2454  * scsipi_adapter_addref:
2455  *
2456  *	Add a reference to the specified adapter, enabling the
2457  *	adapter if necessary.
2458  */
2459 int
2460 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2461 {
2462 	int s, error = 0;
2463 
2464 	s = splbio();
2465 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2466 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2467 		if (error)
2468 			adapt->adapt_refcnt--;
2469 	}
2470 	splx(s);
2471 	return (error);
2472 }
2473 
2474 /*
2475  * scsipi_adapter_delref:
2476  *
2477  *	Delete a reference to the specified adapter, disabling the
2478  *	adapter if possible.
2479  */
2480 void
2481 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2482 {
2483 	int s;
2484 
2485 	s = splbio();
2486 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2487 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2488 	splx(s);
2489 }
2490 
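/*
 * Sketch of typical use (added commentary): a driver that needs the
 * adapter enabled for the duration of an operation could bracket it:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return error;
 *	...issue requests...
 *	scsipi_adapter_delref(adapt);
 *
 * The first reference enables the adapter via
 * (*adapt_enable)(adapt_dev, 1); dropping the last reference
 * disables it again.
 */
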
2491 static struct scsipi_syncparam {
2492 	int	ss_factor;
2493 	int	ss_period;	/* ns * 100 */
2494 } scsipi_syncparams[] = {
2495 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2496 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2497 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2498 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2499 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2500 };
2501 static const int scsipi_nsyncparams =
2502     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2503 
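/*
 * scsipi_sync_period_to_factor:
 *
 *	Convert a synchronous period (in units of ns * 100) to a sync
 *	factor: the table entry with the shortest period not less than
 *	the one requested, falling back to the SCSI-2 formula
 *	(period in ns) / 4 for slower periods.
 */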
2504 int
2505 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2506 {
2507 	int i;
2508 
2509 	for (i = 0; i < scsipi_nsyncparams; i++) {
2510 		if (period <= scsipi_syncparams[i].ss_period)
2511 			return (scsipi_syncparams[i].ss_factor);
2512 	}
2513 
2514 	return ((period / 100) / 4);
2515 }
2516 
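/*
 * scsipi_sync_factor_to_period:
 *
 *	Convert a sync factor to its period in units of ns * 100,
 *	using the table for the special Ultra-and-faster factors and
 *	the SCSI-2 formula (factor * 4 ns) otherwise.
 */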
2517 int
2518 scsipi_sync_factor_to_period(int factor)
2519 {
2520 	int i;
2521 
2522 	for (i = 0; i < scsipi_nsyncparams; i++) {
2523 		if (factor == scsipi_syncparams[i].ss_factor)
2524 			return (scsipi_syncparams[i].ss_period);
2525 	}
2526 
2527 	return ((factor * 4) * 100);
2528 }
2529 
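/*
 * scsipi_sync_factor_to_freq:
 *
 *	Convert a sync factor to a transfer frequency in kHz.  For
 *	example, factor 0x0c (FAST-20) has a period of 5000 (50.00 ns),
 *	giving 100000000 / 5000 = 20000 kHz, i.e. 20 MHz.
 */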
2530 int
2531 scsipi_sync_factor_to_freq(int factor)
2532 {
2533 	int i;
2534 
2535 	for (i = 0; i < scsipi_nsyncparams; i++) {
2536 		if (factor == scsipi_syncparams[i].ss_factor)
2537 			return (100000000 / scsipi_syncparams[i].ss_period);
2538 	}
2539 
2540 	return (10000000 / ((factor * 4) * 10));
2541 }
2542 
2543 #ifdef SCSIPI_DEBUG
2544 /*
2545  * Given a scsipi_xfer, dump the request, in all its glory.
2546  */
2547 void
2548 show_scsipi_xs(struct scsipi_xfer *xs)
2549 {
2550 
2551 	printf("xs(%p): ", xs);
2552 	printf("xs_control(0x%08x)", xs->xs_control);
2553 	printf("xs_status(0x%08x)", xs->xs_status);
2554 	printf("periph(%p)", xs->xs_periph);
2555 	printf("retr(0x%x)", xs->xs_retries);
2556 	printf("timo(0x%x)", xs->timeout);
2557 	printf("cmd(%p)", xs->cmd);
2558 	printf("len(0x%x)", xs->cmdlen);
2559 	printf("data(%p)", xs->data);
2560 	printf("len(0x%x)", xs->datalen);
2561 	printf("res(0x%x)", xs->resid);
2562 	printf("err(0x%x)", xs->error);
2563 	printf("bp(%p)", xs->bp);
2564 	show_scsipi_cmd(xs);
2565 }
2566 
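/*
 * Dump the CDB bytes of an xfer (or note that it is a reset), then
 * hex-dump up to the first 64 bytes of its data.
 */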
2567 void
2568 show_scsipi_cmd(struct scsipi_xfer *xs)
2569 {
2570 	u_char *b = (u_char *) xs->cmd;
2571 	int i = 0;
2572 
2573 	scsipi_printaddr(xs->xs_periph);
2574 	printf(" command: ");
2575 
2576 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2577 		while (i < xs->cmdlen) {
2578 			if (i)
2579 				printf(",");
2580 			printf("0x%x", b[i++]);
2581 		}
2582 		printf("-[%d bytes]\n", xs->datalen);
2583 		if (xs->datalen)
2584 			show_mem(xs->data, min(64, xs->datalen));
2585 	} else
2586 		printf("-RESET-\n");
2587 }
2588 
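/*
 * Hex-dump `num' bytes starting at `address', 16 per line.
 */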
2589 void
2590 show_mem(u_char *address, int num)
2591 {
2592 	int x;
2593 
2594 	printf("------------------------------");
2595 	for (x = 0; x < num; x++) {
2596 		if ((x % 16) == 0)
2597 			printf("\n%03d: ", x);
2598 		printf("%02x ", *address++);
2599 	}
2600 	printf("\n------------------------------\n");
2601 }
2602 #endif /* SCSIPI_DEBUG */
2603