xref: /netbsd/sys/dev/scsipi/scsipi_base.c (revision c4a72b64)
1 /*	$NetBSD: scsipi_base.c,v 1.82 2002/11/24 11:52:13 scw Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *        This product includes software developed by the NetBSD
22  *        Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.82 2002/11/24 11:52:13 scw Exp $");
42 
43 #include "opt_scsi.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/uio.h>
50 #include <sys/malloc.h>
51 #include <sys/pool.h>
52 #include <sys/errno.h>
53 #include <sys/device.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/hash.h>
57 
58 #include <dev/scsipi/scsipi_all.h>
59 #include <dev/scsipi/scsipi_disk.h>
60 #include <dev/scsipi/scsipiconf.h>
61 #include <dev/scsipi/scsipi_base.h>
62 
63 #include <dev/scsipi/scsi_all.h>
64 #include <dev/scsipi/scsi_message.h>
65 
66 int	scsipi_complete __P((struct scsipi_xfer *));
67 void	scsipi_request_sense __P((struct scsipi_xfer *));
68 int	scsipi_enqueue __P((struct scsipi_xfer *));
69 void	scsipi_run_queue __P((struct scsipi_channel *chan));
70 
71 void	scsipi_completion_thread __P((void *));
72 
73 void	scsipi_get_tag __P((struct scsipi_xfer *));
74 void	scsipi_put_tag __P((struct scsipi_xfer *));
75 
76 int	scsipi_get_resource __P((struct scsipi_channel *));
77 void	scsipi_put_resource __P((struct scsipi_channel *));
78 __inline int scsipi_grow_resources __P((struct scsipi_channel *));
79 
80 void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
81 	    struct scsipi_max_openings *));
82 void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
83 	    struct scsipi_xfer_mode *));
84 void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));
85 
86 struct pool scsipi_xfer_pool;
87 
88 /*
89  * scsipi_init:
90  *
91  *	Called when a scsibus or atapibus is attached to the system
92  *	to initialize shared data structures.
93  */
94 void
95 scsipi_init()
96 {
97 	static int scsipi_init_done;
98 
99 	if (scsipi_init_done)
100 		return;
101 	scsipi_init_done = 1;
102 
103 	/* Initialize the scsipi_xfer pool. */
104 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
105 	    0, 0, "scxspl", NULL);
106 }
107 
108 /*
109  * scsipi_channel_init:
110  *
111  *	Initialize a scsipi_channel when it is attached.
112  */
113 int
114 scsipi_channel_init(chan)
115 	struct scsipi_channel *chan;
116 {
117 	int i;
118 
119 	/* Initialize shared data. */
120 	scsipi_init();
121 
122 	/* Initialize the queues. */
123 	TAILQ_INIT(&chan->chan_queue);
124 	TAILQ_INIT(&chan->chan_complete);
125 
126 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
127 		LIST_INIT(&chan->chan_periphtab[i]);
128 
129 	/*
130 	 * Create the asynchronous completion thread.
131 	 */
132 	kthread_create(scsipi_create_completion_thread, chan);
133 	return (0);
134 }
135 
136 /*
137  * scsipi_channel_shutdown:
138  *
139  *	Shutdown a scsipi_channel.
140  */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread: set the shutdown flag and
	 * wake the thread, which sleeps on chan_complete.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.  Presumably the thread
	 * clears chan_thread and wakes us on its way out -- the thread
	 * body is not in this file; confirm there.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}
158 
159 static uint32_t
160 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
161 {
162 	uint32_t hash;
163 
164 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
165 	hash = hash32_buf(&l, sizeof(l), hash);
166 
167 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
168 }
169 
170 /*
171  * scsipi_insert_periph:
172  *
173  *	Insert a periph into the channel.
174  */
175 void
176 scsipi_insert_periph(chan, periph)
177 	struct scsipi_channel *chan;
178 	struct scsipi_periph *periph;
179 {
180 	uint32_t hash;
181 	int s;
182 
183 	hash = scsipi_chan_periph_hash(periph->periph_target,
184 	    periph->periph_lun);
185 
186 	s = splbio();
187 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
188 	splx(s);
189 }
190 
191 /*
192  * scsipi_remove_periph:
193  *
194  *	Remove a periph from the channel.
195  */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	/* Unlink from its hash bucket under splbio; LIST_REMOVE needs
	 * no bucket head since entries link back into the list. */
	s = splbio();
	LIST_REMOVE(periph, periph_hash);
	splx(s);
}
207 
208 /*
209  * scsipi_lookup_periph:
210  *
211  *	Lookup a periph on the specified channel.
212  */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	uint32_t hash;
	int s;

	/*
	 * Reject addresses beyond the channel's limits.
	 * NOTE(review): negative target/lun values are not rejected
	 * here -- presumably callers never pass them; confirm.
	 */
	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	hash = scsipi_chan_periph_hash(target, lun);

	/* Walk the bucket for an exact target/lun match. */
	s = splbio();
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	splx(s);

	/* NULL if the loop ran off the end of the bucket. */
	return (periph);
}
238 
239 /*
240  * scsipi_get_resource:
241  *
242  *	Allocate a single xfer `resource' from the channel.
243  *
244  *	NOTE: Must be called at splbio().
245  */
246 int
247 scsipi_get_resource(chan)
248 	struct scsipi_channel *chan;
249 {
250 	struct scsipi_adapter *adapt = chan->chan_adapter;
251 
252 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
253 		if (chan->chan_openings > 0) {
254 			chan->chan_openings--;
255 			return (1);
256 		}
257 		return (0);
258 	}
259 
260 	if (adapt->adapt_openings > 0) {
261 		adapt->adapt_openings--;
262 		return (1);
263 	}
264 	return (0);
265 }
266 
267 /*
268  * scsipi_grow_resources:
269  *
270  *	Attempt to grow resources for a channel.  If this succeeds,
271  *	we allocate one for our caller.
272  *
273  *	NOTE: Must be called at splbio().
274  */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			/*
			 * No completion thread on this channel: ask the
			 * adapter synchronously, then try to claim the
			 * newly-grown resource for our caller.
			 */
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	/* Adapter cannot grow resources on this channel. */
	return (0);
}
298 
299 /*
300  * scsipi_put_resource:
301  *
302  *	Free a single xfer `resource' to the channel.
303  *
304  *	NOTE: Must be called at splbio().
305  */
306 void
307 scsipi_put_resource(chan)
308 	struct scsipi_channel *chan;
309 {
310 	struct scsipi_adapter *adapt = chan->chan_adapter;
311 
312 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
313 		chan->chan_openings++;
314 	else
315 		adapt->adapt_openings++;
316 }
317 
318 /*
319  * scsipi_get_tag:
320  *
321  *	Get a tag ID for the specified xfer.
322  *
323  *	NOTE: Must be called at splbio().
324  */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	/*
	 * Free tags are a bitmap, 32 tags per word; find the first
	 * word with a free (set) bit.  ffs() returns a 1-based bit
	 * position, or 0 if the word is empty.
	 */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif
	/*
	 * NOTE(review): without DIAGNOSTIC, exhausting the tag supply
	 * would continue here with bit == 0 and word out of range --
	 * presumably callers never over-commit openings; confirm.
	 */

	/* Convert the 1-based ffs() result to a bit index, claim it. */
	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}
359 
360 /*
361  * scsipi_put_tag:
362  *
363  *	Put the tag ID for the specified xfer back into the pool.
364  *
365  *	NOTE: Must be called at splbio().
366  */
367 void
368 scsipi_put_tag(xs)
369 	struct scsipi_xfer *xs;
370 {
371 	struct scsipi_periph *periph = xs->xs_periph;
372 	int word, bit;
373 
374 	word = xs->xs_tag_id >> 5;
375 	bit = xs->xs_tag_id & 0x1f;
376 
377 	periph->periph_freetags[word] |= (1 << bit);
378 }
379 
380 /*
381  * scsipi_get_xs:
382  *
383  *	Allocate an xfer descriptor and associate it with the
384  *	specified peripheral.  If the peripheral has no more
385  *	available command openings, we either block waiting for
386  *	one to become available, or fail.
387  */
388 struct scsipi_xfer *
389 scsipi_get_xs(periph, flags)
390 	struct scsipi_periph *periph;
391 	int flags;
392 {
393 	struct scsipi_xfer *xs;
394 	int s;
395 
396 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
397 
398 	/*
399 	 * If we're cold, make sure we poll.
400 	 */
401 	if (cold)
402 		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
403 
404 #ifdef DIAGNOSTIC
405 	/*
406 	 * URGENT commands can never be ASYNC.
407 	 */
408 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
409 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
410 		scsipi_printaddr(periph);
411 		printf("URGENT and ASYNC\n");
412 		panic("scsipi_get_xs");
413 	}
414 #endif
415 
416 	s = splbio();
417 	/*
418 	 * Wait for a command opening to become available.  Rules:
419 	 *
420 	 *	- All xfers must wait for an available opening.
421 	 *	  Exception: URGENT xfers can proceed when
422 	 *	  active == openings, because we use the opening
423 	 *	  of the command we're recovering for.
424 	 *	- if the periph has sense pending, only URGENT & REQSENSE
425 	 *	  xfers may proceed.
426 	 *
427 	 *	- If the periph is recovering, only URGENT xfers may
428 	 *	  proceed.
429 	 *
430 	 *	- If the periph is currently executing a recovery
431 	 *	  command, URGENT commands must block, because only
432 	 *	  one recovery command can execute at a time.
433 	 */
434 	for (;;) {
435 		if (flags & XS_CTL_URGENT) {
436 			if (periph->periph_active > periph->periph_openings)
437 				goto wait_for_opening;
438 			if (periph->periph_flags & PERIPH_SENSE) {
439 				if ((flags & XS_CTL_REQSENSE) == 0)
440 					goto wait_for_opening;
441 			} else {
442 				if ((periph->periph_flags &
443 				    PERIPH_RECOVERY_ACTIVE) != 0)
444 					goto wait_for_opening;
445 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
446 			}
447 			break;
448 		}
449 		if (periph->periph_active >= periph->periph_openings ||
450 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
451 			goto wait_for_opening;
452 		periph->periph_active++;
453 		break;
454 
455  wait_for_opening:
456 		if (flags & XS_CTL_NOSLEEP) {
457 			splx(s);
458 			return (NULL);
459 		}
460 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
461 		periph->periph_flags |= PERIPH_WAITING;
462 		(void) tsleep(periph, PRIBIO, "getxs", 0);
463 	}
464 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
465 	xs = pool_get(&scsipi_xfer_pool,
466 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
467 	if (xs == NULL) {
468 		if (flags & XS_CTL_URGENT) {
469 			if ((flags & XS_CTL_REQSENSE) == 0)
470 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
471 		} else
472 			periph->periph_active--;
473 		scsipi_printaddr(periph);
474 		printf("unable to allocate %sscsipi_xfer\n",
475 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
476 	}
477 	splx(s);
478 
479 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
480 
481 	if (xs != NULL) {
482 		callout_init(&xs->xs_callout);
483 		memset(xs, 0, sizeof(*xs));
484 		xs->xs_periph = periph;
485 		xs->xs_control = flags;
486 		xs->xs_status = 0;
487 		s = splbio();
488 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
489 		splx(s);
490 	}
491 	return (xs);
492 }
493 
494 /*
495  * scsipi_put_xs:
496  *
497  *	Release an xfer descriptor, decreasing the outstanding command
498  *	count for the peripheral.  If there is a thread waiting for
499  *	an opening, wake it up.  If not, kick any queued I/O the
500  *	peripheral may have.
501  *
502  *	NOTE: Must be called at splbio().
503  */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;	/* cache: xs is freed below */

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	/* Unlink and return the xs to the pool; xs must not be
	 * touched after pool_put(). */
	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recovery for\n");
		panic("scsipi_put_xs");
	}
#endif

	/*
	 * URGENT xfers borrow the opening of the command being
	 * recovered (see scsipi_get_xs), so only non-URGENT xfers
	 * decrement the active count; completed recovery commands
	 * clear the recovery-active flag instead.
	 */
	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	/* Wake scsipi_wait_drain() once the last xfer is gone. */
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	/*
	 * Hand the freed opening to a thread sleeping in
	 * scsipi_get_xs(), or else kick the periph's start routine.
	 */
	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}
547 
548 /*
549  * scsipi_channel_freeze:
550  *
551  *	Freeze a channel's xfer queue.
552  */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	/* Raise the freeze count under splbio; the queue is kicked
	 * again when scsipi_channel_thaw() drops it back to zero. */
	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}
564 
565 /*
566  * scsipi_channel_thaw:
567  *
568  *	Thaw a channel's xfer queue.
569  */
570 void
571 scsipi_channel_thaw(chan, count)
572 	struct scsipi_channel *chan;
573 	int count;
574 {
575 	int s;
576 
577 	s = splbio();
578 	chan->chan_qfreeze -= count;
579 	/*
580 	 * Don't let the freeze count go negative.
581 	 *
582 	 * Presumably the adapter driver could keep track of this,
583 	 * but it might just be easier to do this here so as to allow
584 	 * multiple callers, including those outside the adapter driver.
585 	 */
586 	if (chan->chan_qfreeze < 0) {
587 		chan->chan_qfreeze = 0;
588 	}
589 	splx(s);
590 	/*
591 	 * Kick the channel's queue here.  Note, we may be running in
592 	 * interrupt context (softclock or HBA's interrupt), so the adapter
593 	 * driver had better not sleep.
594 	 */
595 	if (chan->chan_qfreeze == 0)
596 		scsipi_run_queue(chan);
597 }
598 
599 /*
600  * scsipi_channel_timed_thaw:
601  *
602  *	Thaw a channel after some time has expired. This will also
603  * 	run the channel's queue if the freeze count has reached 0.
604  */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{

	/* Callout handler: arg is the channel to thaw by one level;
	 * scsipi_channel_thaw() runs the queue if the count hits 0. */
	scsipi_channel_thaw((struct scsipi_channel *)arg, 1);
}
613 
614 /*
615  * scsipi_periph_freeze:
616  *
617  *	Freeze a device's xfer queue.
618  */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	/* Raise the per-periph freeze count under splbio; the matching
	 * scsipi_periph_thaw() wakes waiters when it reaches zero. */
	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}
630 
631 /*
632  * scsipi_periph_thaw:
633  *
634  *	Thaw a device's xfer queue.
635  */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	/* Unlike the channel freeze count (which is clamped), a
	 * negative periph freeze count indicates a bug: panic. */
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	/* Fully thawed: wake any thread sleeping on the periph for
	 * an opening (see scsipi_get_xs). */
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}
658 
659 /*
660  * scsipi_periph_timed_thaw:
661  *
662  *	Thaw a device after some time has expired.
663  */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	/* Cancel any still-pending thaw callout for this periph. */
	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}
691 
692 /*
693  * scsipi_wait_drain:
694  *
695  *	Wait for a periph's pending xfers to drain.
696  */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	/* Sleep until the last outstanding xfer completes;
	 * scsipi_put_xs() wakes us via periph_active when it sees
	 * PERIPH_WAITDRAIN set and the active count hit zero. */
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}
710 
711 /*
712  * scsipi_kill_pending:
713  *
714  *	Kill off all pending xfers for a periph.
715  *
716  *	NOTE: Must be called at splbio().
717  */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	/* Let the bus-specific code abort everything in flight. */
	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	/* Wait for the killed xfers to finish completion processing. */
	scsipi_wait_drain(periph);
}
730 
731 /*
732  * scsipi_interpret_sense:
733  *
734  *	Look at the returned sense and act on the error, determining
735  *	the unix error number to pass back.  (0 = report no error)
736  *
737  *	NOTE: If we return ERESTART, we are expected to have
738  *	thawed the device!
739  *
740  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
741  */
742 int
743 scsipi_interpret_sense(xs)
744 	struct scsipi_xfer *xs;
745 {
746 	struct scsipi_sense_data *sense;
747 	struct scsipi_periph *periph = xs->xs_periph;
748 	u_int8_t key;
749 	u_int32_t info;
750 	int error;
751 #ifndef	SCSIVERBOSE
752 	static char *error_mes[] = {
753 		"soft error (corrected)",
754 		"not ready", "medium error",
755 		"non-media hardware failure", "illegal request",
756 		"unit attention", "readonly device",
757 		"no data found", "vendor unique",
758 		"copy aborted", "command aborted",
759 		"search returned equal", "volume overflow",
760 		"verify miscompare", "unknown error key"
761 	};
762 #endif
763 
764 	sense = &xs->sense.scsi_sense;
765 #ifdef SCSIPI_DEBUG
766 	if (periph->periph_flags & SCSIPI_DB1) {
767 		int count;
768 		scsipi_printaddr(periph);
769 		printf(" sense debug information:\n");
770 		printf("\tcode 0x%x valid 0x%x\n",
771 			sense->error_code & SSD_ERRCODE,
772 			sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
773 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
774 			sense->segment,
775 			sense->flags & SSD_KEY,
776 			sense->flags & SSD_ILI ? 1 : 0,
777 			sense->flags & SSD_EOM ? 1 : 0,
778 			sense->flags & SSD_FILEMARK ? 1 : 0);
779 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
780 			"extra bytes\n",
781 			sense->info[0],
782 			sense->info[1],
783 			sense->info[2],
784 			sense->info[3],
785 			sense->extra_len);
786 		printf("\textra: ");
787 		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
788 			printf("0x%x ", sense->cmd_spec_info[count]);
789 		printf("\n");
790 	}
791 #endif
792 
793 	/*
794 	 * If the periph has it's own error handler, call it first.
795 	 * If it returns a legit error value, return that, otherwise
796 	 * it wants us to continue with normal error processing.
797 	 */
798 	if (periph->periph_switch->psw_error != NULL) {
799 		SC_DEBUG(periph, SCSIPI_DB2,
800 		    ("calling private err_handler()\n"));
801 		error = (*periph->periph_switch->psw_error)(xs);
802 		if (error != EJUSTRETURN)
803 			return (error);
804 	}
805 	/* otherwise use the default */
806 	switch (sense->error_code & SSD_ERRCODE) {
807 
808 		/*
809 		 * Old SCSI-1 and SASI devices respond with
810 		 * codes other than 70.
811 		 */
812 	case 0x00:		/* no error (command completed OK) */
813 		return (0);
814 	case 0x04:		/* drive not ready after it was selected */
815 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
816 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
817 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
818 			return (0);
819 		/* XXX - display some sort of error here? */
820 		return (EIO);
821 	case 0x20:		/* invalid command */
822 		if ((xs->xs_control &
823 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
824 			return (0);
825 		return (EINVAL);
826 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
827 		return (EACCES);
828 
829 		/*
830 		 * If it's code 70, use the extended stuff and
831 		 * interpret the key
832 		 */
833 	case 0x71:		/* delayed error */
834 		scsipi_printaddr(periph);
835 		key = sense->flags & SSD_KEY;
836 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
837 		/* FALLTHROUGH */
838 	case 0x70:
839 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
840 			info = _4btol(sense->info);
841 		else
842 			info = 0;
843 		key = sense->flags & SSD_KEY;
844 
845 		switch (key) {
846 		case SKEY_NO_SENSE:
847 		case SKEY_RECOVERED_ERROR:
848 			if (xs->resid == xs->datalen && xs->datalen) {
849 				/*
850 				 * Why is this here?
851 				 */
852 				xs->resid = 0;	/* not short read */
853 			}
854 		case SKEY_EQUAL:
855 			error = 0;
856 			break;
857 		case SKEY_NOT_READY:
858 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
859 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
860 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
861 				return (0);
862 			if (sense->add_sense_code == 0x3A) {
863 				error = ENODEV; /* Medium not present */
864 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
865 					return (error);
866 			} else
867 				error = EIO;
868 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
869 				return (error);
870 			break;
871 		case SKEY_ILLEGAL_REQUEST:
872 			if ((xs->xs_control &
873 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
874 				return (0);
875 			/*
876 			 * Handle the case where a device reports
877 			 * Logical Unit Not Supported during discovery.
878 			 */
879 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
880 			    sense->add_sense_code == 0x25 &&
881 			    sense->add_sense_code_qual == 0x00)
882 				return (EINVAL);
883 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
884 				return (EIO);
885 			error = EINVAL;
886 			break;
887 		case SKEY_UNIT_ATTENTION:
888 			if (sense->add_sense_code == 0x29 &&
889 			    sense->add_sense_code_qual == 0x00) {
890 				/* device or bus reset */
891 				return (ERESTART);
892 			}
893 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
894 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
895 			if ((xs->xs_control &
896 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
897 				/* XXX Should reupload any transient state. */
898 				(periph->periph_flags &
899 				 PERIPH_REMOVABLE) == 0) {
900 				return (ERESTART);
901 			}
902 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
903 				return (EIO);
904 			error = EIO;
905 			break;
906 		case SKEY_WRITE_PROTECT:
907 			error = EROFS;
908 			break;
909 		case SKEY_BLANK_CHECK:
910 			error = 0;
911 			break;
912 		case SKEY_ABORTED_COMMAND:
913 			error = ERESTART;
914 			break;
915 		case SKEY_VOLUME_OVERFLOW:
916 			error = ENOSPC;
917 			break;
918 		default:
919 			error = EIO;
920 			break;
921 		}
922 
923 #ifdef SCSIVERBOSE
924 		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
925 			scsipi_print_sense(xs, 0);
926 #else
927 		if (key) {
928 			scsipi_printaddr(periph);
929 			printf("%s", error_mes[key - 1]);
930 			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
931 				switch (key) {
932 				case SKEY_NOT_READY:
933 				case SKEY_ILLEGAL_REQUEST:
934 				case SKEY_UNIT_ATTENTION:
935 				case SKEY_WRITE_PROTECT:
936 					break;
937 				case SKEY_BLANK_CHECK:
938 					printf(", requested size: %d (decimal)",
939 					    info);
940 					break;
941 				case SKEY_ABORTED_COMMAND:
942 					if (xs->xs_retries)
943 						printf(", retrying");
944 					printf(", cmd 0x%x, info 0x%x",
945 					    xs->cmd->opcode, info);
946 					break;
947 				default:
948 					printf(", info = %d (decimal)", info);
949 				}
950 			}
951 			if (sense->extra_len != 0) {
952 				int n;
953 				printf(", data =");
954 				for (n = 0; n < sense->extra_len; n++)
955 					printf(" %02x",
956 					    sense->cmd_spec_info[n]);
957 			}
958 			printf("\n");
959 		}
960 #endif
961 		return (error);
962 
963 	/*
964 	 * Some other code, just report it
965 	 */
966 	default:
967 #if    defined(SCSIDEBUG) || defined(DEBUG)
968 	{
969 		static char *uc = "undecodable sense error";
970 		int i;
971 		u_int8_t *cptr = (u_int8_t *) sense;
972 		scsipi_printaddr(periph);
973 		if (xs->cmd == &xs->cmdstore) {
974 			printf("%s for opcode 0x%x, data=",
975 			    uc, xs->cmdstore.opcode);
976 		} else {
977 			printf("%s, data=", uc);
978 		}
979 		for (i = 0; i < sizeof (sense); i++)
980 			printf(" 0x%02x", *(cptr++) & 0xff);
981 		printf("\n");
982 	}
983 #else
984 		scsipi_printaddr(periph);
985 		printf("Sense Error Code 0x%x",
986 			sense->error_code & SSD_ERRCODE);
987 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
988 			struct scsipi_sense_data_unextended *usense =
989 			    (struct scsipi_sense_data_unextended *)sense;
990 			printf(" at block no. %d (decimal)",
991 			    _3btol(usense->block));
992 		}
993 		printf("\n");
994 #endif
995 		return (EIO);
996 	}
997 }
998 
999 /*
1000  * scsipi_size:
1001  *
1002  *	Find out from the device what its capacity is.
1003  */
1004 u_long
1005 scsipi_size(periph, flags)
1006 	struct scsipi_periph *periph;
1007 	int flags;
1008 {
1009 	struct scsipi_read_cap_data rdcap;
1010 	struct scsipi_read_capacity scsipi_cmd;
1011 
1012 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1013 	scsipi_cmd.opcode = READ_CAPACITY;
1014 
1015 	/*
1016 	 * If the command works, interpret the result as a 4 byte
1017 	 * number of blocks
1018 	 */
1019 	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1020 	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
1021 	    SCSIPIRETRIES, 20000, NULL,
1022 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
1023 		return (0);
1024 
1025 	return (_4btol(rdcap.addr) + 1);
1026 }
1027 
1028 /*
1029  * scsipi_test_unit_ready:
1030  *
1031  *	Issue a `test unit ready' request.
1032  */
1033 int
1034 scsipi_test_unit_ready(periph, flags)
1035 	struct scsipi_periph *periph;
1036 	int flags;
1037 {
1038 	struct scsipi_test_unit_ready scsipi_cmd;
1039 
1040 	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
1041 	if (periph->periph_quirks & PQUIRK_NOTUR)
1042 		return (0);
1043 
1044 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1045 	scsipi_cmd.opcode = TEST_UNIT_READY;
1046 
1047 	return (scsipi_command(periph,
1048 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1049 	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
1050 }
1051 
1052 /*
1053  * scsipi_inquire:
1054  *
1055  *	Ask the device about itself.
1056  */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;
	int error;

	/* Build a standard INQUIRY asking for the full inquiry data. */
	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	error = scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags);

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 * (EINVAL/EACCES map to sense codes 0x20/0x25 in
		 * scsipi_interpret_sense, i.e. invalid command /
		 * invalid LUN on this board.)
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "ACB-4000        ",
			sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
		 inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
		 inqbuf->dev_qual2 == 0 &&
		 inqbuf->version == 0 &&
		 inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = 3 + 28;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  ", sizeof(inqbuf->vendor));
		memcpy(inqbuf->product, "MT-02 QIC       ",
			sizeof(inqbuf->product));
		memcpy(inqbuf->revision, "    ", sizeof(inqbuf->revision));
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
1124 
1125 /*
1126  * scsipi_prevent:
1127  *
1128  *	Prevent or allow the user to remove the media
1129  */
1130 int
1131 scsipi_prevent(periph, type, flags)
1132 	struct scsipi_periph *periph;
1133 	int type, flags;
1134 {
1135 	struct scsipi_prevent scsipi_cmd;
1136 
1137 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1138 		return (0);
1139 
1140 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1141 	scsipi_cmd.opcode = PREVENT_ALLOW;
1142 	scsipi_cmd.how = type;
1143 
1144 	return (scsipi_command(periph,
1145 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1146 	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1147 }
1148 
1149 /*
1150  * scsipi_start:
1151  *
1152  *	Send a START UNIT.
1153  */
1154 int
1155 scsipi_start(periph, type, flags)
1156 	struct scsipi_periph *periph;
1157 	int type, flags;
1158 {
1159 	struct scsipi_start_stop scsipi_cmd;
1160 
1161 	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
1162 		return 0;
1163 
1164 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1165 	scsipi_cmd.opcode = START_STOP;
1166 	scsipi_cmd.byte2 = 0x00;
1167 	scsipi_cmd.how = type;
1168 
1169 	return (scsipi_command(periph,
1170 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1171 	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1172 	    NULL, flags));
1173 }
1174 
1175 /*
1176  * scsipi_mode_sense, scsipi_mode_sense_big:
1177  *	get a sense page from a device
1178  */
1179 
1180 int
1181 scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
1182 	struct scsipi_periph *periph;
1183 	int byte2, page, len, flags, retries, timeout;
1184 	struct scsipi_mode_header *data;
1185 {
1186 	struct scsipi_mode_sense scsipi_cmd;
1187 	int error;
1188 
1189 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1190 	scsipi_cmd.opcode = MODE_SENSE;
1191 	scsipi_cmd.byte2 = byte2;
1192 	scsipi_cmd.page = page;
1193 	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1194 		_lto2b(len, scsipi_cmd.u_len.atapi.length);
1195 	else
1196 		scsipi_cmd.u_len.scsi.length = len & 0xff;
1197 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1198 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1199 	    flags | XS_CTL_DATA_IN);
1200 	SC_DEBUG(periph, SCSIPI_DB2,
1201 	    ("scsipi_mode_sense: error=%d\n", error));
1202 	return (error);
1203 }
1204 
1205 int
1206 scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
1207 	struct scsipi_periph *periph;
1208 	int byte2, page, len, flags, retries, timeout;
1209 	struct scsipi_mode_header_big *data;
1210 {
1211 	struct scsipi_mode_sense_big scsipi_cmd;
1212 	int error;
1213 
1214 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1215 	scsipi_cmd.opcode = MODE_SENSE_BIG;
1216 	scsipi_cmd.byte2 = byte2;
1217 	scsipi_cmd.page = page;
1218 	_lto2b(len, scsipi_cmd.length);
1219 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1220 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1221 	    flags | XS_CTL_DATA_IN);
1222 	SC_DEBUG(periph, SCSIPI_DB2,
1223 	    ("scsipi_mode_sense_big: error=%d\n", error));
1224 	return (error);
1225 }
1226 
1227 int
1228 scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
1229 	struct scsipi_periph *periph;
1230 	int byte2, len, flags, retries, timeout;
1231 	struct scsipi_mode_header *data;
1232 {
1233 	struct scsipi_mode_select scsipi_cmd;
1234 	int error;
1235 
1236 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1237 	scsipi_cmd.opcode = MODE_SELECT;
1238 	scsipi_cmd.byte2 = byte2;
1239 	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1240 		_lto2b(len, scsipi_cmd.u_len.atapi.length);
1241 	else
1242 		scsipi_cmd.u_len.scsi.length = len & 0xff;
1243 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1244 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1245 	    flags | XS_CTL_DATA_OUT);
1246 	SC_DEBUG(periph, SCSIPI_DB2,
1247 	    ("scsipi_mode_select: error=%d\n", error));
1248 	return (error);
1249 }
1250 
1251 int
1252 scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
1253 	struct scsipi_periph *periph;
1254 	int byte2, len, flags, retries, timeout;
1255 	struct scsipi_mode_header_big *data;
1256 {
1257 	struct scsipi_mode_select_big scsipi_cmd;
1258 	int error;
1259 
1260 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1261 	scsipi_cmd.opcode = MODE_SELECT_BIG;
1262 	scsipi_cmd.byte2 = byte2;
1263 	_lto2b(len, scsipi_cmd.length);
1264 	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1265 	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1266 	    flags | XS_CTL_DATA_OUT);
1267 	SC_DEBUG(periph, SCSIPI_DB2,
1268 	    ("scsipi_mode_select: error=%d\n", error));
1269 	return (error);
1270 }
1271 
1272 /*
1273  * scsipi_done:
1274  *
1275  *	This routine is called by an adapter's interrupt handler when
1276  *	an xfer is completed.
1277  */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	/* May be called from interrupt context; block bio interrupts. */
	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.  Otherwise clear
	 * PERIPH_UNTAG so the run queue may issue commands to this
	 * periph again.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.  (scsipi_complete() thaws the
	 * matching count later.)
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is waked up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		/* Wake the thread sleeping in scsipi_execute_xs(). */
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
1385 
1386 /*
1387  * scsipi_complete:
1388  *
1389  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1390  *
1391  *	NOTE: This routine MUST be called with valid thread context
1392  *	except for the case where the following two conditions are
1393  *	true:
1394  *
1395  *		xs->error == XS_NOERROR
1396  *		XS_CTL_ASYNC is set in xs->xs_control
1397  *
1398  *	The semantics of this routine can be tricky, so here is an
1399  *	explanation:
1400  *
1401  *		0		Xfer completed successfully.
1402  *
1403  *		ERESTART	Xfer had an error, but was restarted.
1404  *
1405  *		anything else	Xfer had an error, return value is Unix
1406  *				errno.
1407  *
1408  *	If the return value is anything but ERESTART:
1409  *
1410  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1411  *		  the pool.
1412  *		- If there is a buf associated with the xfer,
1413  *		  it has been biodone()'d.
1414  */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			printf("request sense for a request sense ?\n");
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			if (xs->resid < xs->datalen) {
				printf("we read %d bytes of sense anyway:\n",
				    xs->datalen - xs->resid);
#ifdef SCSIVERBOSE
				scsipi_print_sense_data((void *)xs->data, 0);
#endif
			}
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);

	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}

	/*
	 * Map the xfer's completion status to a Unix errno, or to
	 * ERESTART if the xfer should be retried.
	 */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Let the bus-specific code interpret the sense data. */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if ((xs->xs_control & XS_CTL_POLL) ||
			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
				/* Can't sleep here; busy-wait instead. */
				delay(1000000);
			} else if (!callout_active(&periph->periph_callout)) {
				/* Freeze, and thaw again in one second. */
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_SELTIMEOUT:
	case XS_TIMEOUT:
		/*
		 * If the device hasn't gone away, honor retry counts.
		 *
		 * Note that if we're in the middle of probing it,
		 * it won't be found because it isn't here yet so
		 * we won't honor the retry count in that case.
		 */
		if (scsipi_lookup_periph(chan, periph->periph_target,
		    periph->periph_lun) && xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("generic HBA error\n");
		error = EIO;
		break;
	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* Re-enqueue failed; fall through to normal completion. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);

	/*
	 * Set buffer fields in case the periph
	 * switch done func uses them
	 */
	if ((bp = xs->bp) != NULL) {
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
	}

	/* Give the periph driver a chance at post-processing. */
	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);

	if (bp)
		biodone(bp);

	/* Async xfers are freed here; sync callers free their own. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
1642 
1643 /*
1644  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1645  * returns with a CHECK_CONDITION status. Must be called in valid thread
1646  * context and at splbio().
1647  */
1648 
void
scsipi_request_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags, error;
	struct scsipi_sense cmd;

	periph->periph_flags |= PERIPH_SENSE;

	/* if command was polling, request sense will too */
	flags = xs->xs_control & XS_CTL_POLL;
	/* Polling commands can't sleep */
	if (flags)
		flags |= XS_CTL_NOSLEEP;

	/*
	 * URGENT puts the REQUEST SENSE at the head of the queue;
	 * THAW/FREEZE bracket the periph freeze done by scsipi_done().
	 */
	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = REQUEST_SENSE;
	cmd.length = sizeof(struct scsipi_sense_data);

	/* Sense data is deposited directly into the original xfer. */
	error = scsipi_command(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
	    0, 1000, NULL, flags);
	periph->periph_flags &= ~PERIPH_SENSE;
	periph->periph_xscheck = NULL;
	/* Translate the outcome into the original xfer's error code. */
	switch(error) {
	case 0:
		/* we have a valid sense */
		xs->error = XS_SENSE;
		return;
	case EINTR:
		/* REQUEST_SENSE interrupted by bus reset. */
		xs->error = XS_RESET;
		return;
	case EIO:
		 /* request sense couldn't be performed */
		/*
		 * XXX this isn't quite right but we don't have anything
		 * better for now
		 */
		xs->error = XS_DRIVER_STUFFUP;
		return;
	default:
		 /* Notify that request sense failed. */
		xs->error = XS_DRIVER_STUFFUP;
		scsipi_printaddr(periph);
		printf("request sense failed with error %d\n", error);
		return;
	}
}
1703 
1704 /*
1705  * scsipi_enqueue:
1706  *
1707  *	Enqueue an xfer on a channel.
1708  */
1709 int
1710 scsipi_enqueue(xs)
1711 	struct scsipi_xfer *xs;
1712 {
1713 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1714 	struct scsipi_xfer *qxs;
1715 	int s;
1716 
1717 	s = splbio();
1718 
1719 	/*
1720 	 * If the xfer is to be polled, and there are already jobs on
1721 	 * the queue, we can't proceed.
1722 	 */
1723 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1724 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
1725 		splx(s);
1726 		xs->error = XS_DRIVER_STUFFUP;
1727 		return (EAGAIN);
1728 	}
1729 
1730 	/*
1731 	 * If we have an URGENT xfer, it's an error recovery command
1732 	 * and it should just go on the head of the channel's queue.
1733 	 */
1734 	if (xs->xs_control & XS_CTL_URGENT) {
1735 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1736 		goto out;
1737 	}
1738 
1739 	/*
1740 	 * If this xfer has already been on the queue before, we
1741 	 * need to reinsert it in the correct order.  That order is:
1742 	 *
1743 	 *	Immediately before the first xfer for this periph
1744 	 *	with a requeuecnt less than xs->xs_requeuecnt.
1745 	 *
1746 	 * Failing that, at the end of the queue.  (We'll end up
1747 	 * there naturally.)
1748 	 */
1749 	if (xs->xs_requeuecnt != 0) {
1750 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1751 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
1752 			if (qxs->xs_periph == xs->xs_periph &&
1753 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
1754 				break;
1755 		}
1756 		if (qxs != NULL) {
1757 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1758 			    channel_q);
1759 			goto out;
1760 		}
1761 	}
1762 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1763  out:
1764 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
1765 		scsipi_periph_thaw(xs->xs_periph, 1);
1766 	splx(s);
1767 	return (0);
1768 }
1769 
1770 /*
1771  * scsipi_run_queue:
1772  *
1773  *	Start as many xfers as possible running on the channel.
1774  */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	/* Loop until the queue is empty or nothing is runnable. */
	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/*
			 * Skip periphs that are at their opening limit,
			 * frozen, or still running an untagged command.
			 */
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			/*
			 * During recovery/sense only URGENT (recovery)
			 * commands may be issued to the periph.
			 */
			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

 got_one:
		/*
		 * Have an xfer to run.  Allocate a resource from
		 * the adapter to run it.  If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources.  If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.  Otherwise mark the periph so no second
		 * command is started until this one completes.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		/* Hand the xfer to the adapter (may complete immediately). */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
1882 
1883 /*
1884  * scsipi_execute_xs:
1885  *
1886  *	Begin execution of an xfer, waiting for it to complete, if necessary.
1887  */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int oasync, async, poll, retries, error, s;

	/* Reset completion state before (re)starting the xfer. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
		(xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adapter wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	oasync =  (xs->xs_control & XS_CTL_ASYNC);
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;		/* for polling commands */

#ifdef DIAGNOSTIC
	if (oasync != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
 try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

 restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			/* A polled xfer must be done by the adapter call. */
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * If it was meant to run async and we cleared async ourselves,
	 * don't return an error here. It has already been handled
	 */
	if (oasync)
		error = EJUSTRETURN;
	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
 free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
2060 
2061 /*
2062  * scsipi_completion_thread:
2063  *
2064  *	This is the completion thread.  We wait for errors on
2065  *	asynchronous xfers, and perform the error handling
2066  *	function, restarting the command, if necessary.
2067  */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	/* Run the adapter's channel-init hook from thread context. */
	if (chan->chan_init_cb)
		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);

	s = splbio();
	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
	splx(s);
	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL && chan->chan_tflags  == 0) {
			/* nothing to do; wait */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
			/* call chan_callback from thread context */
			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
			chan->chan_callback(chan, chan->chan_callback_arg);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
			/* attempt to get more openings for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			scsipi_channel_thaw(chan, 1);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
			/* explicitly run the queues for this channel */
			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
			scsipi_run_queue(chan);
			splx(s);
			continue;
		}
		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
			/* channel is being torn down; exit the loop */
			splx(s);
			break;
		}
		if (xs) {
			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
			splx(s);

			/*
			 * Have an xfer with an error; process it.
			 */
			(void) scsipi_complete(xs);

			/*
			 * Kick the queue; keep it running if it was stopped
			 * for some reason.
			 */
			scsipi_run_queue(chan);
		} else {
			splx(s);
		}
	}

	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
2145 
2146 /*
2147  * scsipi_create_completion_thread:
2148  *
2149  *	Callback to actually create the completion thread.
2150  */
2151 void
2152 scsipi_create_completion_thread(arg)
2153 	void *arg;
2154 {
2155 	struct scsipi_channel *chan = arg;
2156 	struct scsipi_adapter *adapt = chan->chan_adapter;
2157 
2158 	if (kthread_create1(scsipi_completion_thread, chan,
2159 	    &chan->chan_thread, "%s", chan->chan_name)) {
2160 		printf("%s: unable to create completion thread for "
2161 		    "channel %d\n", adapt->adapt_dev->dv_xname,
2162 		    chan->chan_channel);
2163 		panic("scsipi_create_completion_thread");
2164 	}
2165 }
2166 
2167 /*
2168  * scsipi_thread_call_callback:
2169  *
2170  * 	request to call a callback from the completion thread
2171  */
2172 int
2173 scsipi_thread_call_callback(chan, callback, arg)
2174 	struct scsipi_channel *chan;
2175 	void (*callback) __P((struct scsipi_channel *, void *));
2176 	void *arg;
2177 {
2178 	int s;
2179 
2180 	s = splbio();
2181 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2182 		/* kernel thread doesn't exist yet */
2183 		splx(s);
2184 		return ESRCH;
2185 	}
2186 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2187 		splx(s);
2188 		return EBUSY;
2189 	}
2190 	scsipi_channel_freeze(chan, 1);
2191 	chan->chan_callback = callback;
2192 	chan->chan_callback_arg = arg;
2193 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2194 	wakeup(&chan->chan_complete);
2195 	splx(s);
2196 	return(0);
2197 }
2198 
2199 /*
2200  * scsipi_async_event:
2201  *
2202  *	Handle an asynchronous event from an adapter.
2203  */
2204 void
2205 scsipi_async_event(chan, event, arg)
2206 	struct scsipi_channel *chan;
2207 	scsipi_async_event_t event;
2208 	void *arg;
2209 {
2210 	int s;
2211 
2212 	s = splbio();
2213 	switch (event) {
2214 	case ASYNC_EVENT_MAX_OPENINGS:
2215 		scsipi_async_event_max_openings(chan,
2216 		    (struct scsipi_max_openings *)arg);
2217 		break;
2218 
2219 	case ASYNC_EVENT_XFER_MODE:
2220 		scsipi_async_event_xfer_mode(chan,
2221 		    (struct scsipi_xfer_mode *)arg);
2222 		break;
2223 	case ASYNC_EVENT_RESET:
2224 		scsipi_async_event_channel_reset(chan);
2225 		break;
2226 	}
2227 	splx(s);
2228 }
2229 
2230 /*
2231  * scsipi_print_xfer_mode:
2232  *
2233  *	Print a periph's capabilities.
2234  */
2235 void
2236 scsipi_print_xfer_mode(periph)
2237 	struct scsipi_periph *periph;
2238 {
2239 	int period, freq, speed, mbs;
2240 
2241 	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2242 		return;
2243 
2244 	printf("%s: ", periph->periph_dev->dv_xname);
2245 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2246 		period = scsipi_sync_factor_to_period(periph->periph_period);
2247 		printf("sync (%d.%dns offset %d)",
2248 		    period / 10, period % 10, periph->periph_offset);
2249 	} else
2250 		printf("async");
2251 
2252 	if (periph->periph_mode & PERIPH_CAP_WIDE32)
2253 		printf(", 32-bit");
2254 	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2255 		printf(", 16-bit");
2256 	else
2257 		printf(", 8-bit");
2258 
2259 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2260 		freq = scsipi_sync_factor_to_freq(periph->periph_period);
2261 		speed = freq;
2262 		if (periph->periph_mode & PERIPH_CAP_WIDE32)
2263 			speed *= 4;
2264 		else if (periph->periph_mode &
2265 		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2266 			speed *= 2;
2267 		mbs = speed / 1000;
2268 		if (mbs > 0)
2269 			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2270 		else
2271 			printf(" (%dKB/s)", speed % 1000);
2272 	}
2273 
2274 	printf(" transfers");
2275 
2276 	if (periph->periph_mode & PERIPH_CAP_TQING)
2277 		printf(", tagged queueing");
2278 
2279 	printf("\n");
2280 }
2281 
2282 /*
2283  * scsipi_async_event_max_openings:
2284  *
2285  *	Update the maximum number of outstanding commands a
2286  *	device may have.
2287  */
2288 void
2289 scsipi_async_event_max_openings(chan, mo)
2290 	struct scsipi_channel *chan;
2291 	struct scsipi_max_openings *mo;
2292 {
2293 	struct scsipi_periph *periph;
2294 	int minlun, maxlun;
2295 
2296 	if (mo->mo_lun == -1) {
2297 		/*
2298 		 * Wildcarded; apply it to all LUNs.
2299 		 */
2300 		minlun = 0;
2301 		maxlun = chan->chan_nluns - 1;
2302 	} else
2303 		minlun = maxlun = mo->mo_lun;
2304 
2305 	/* XXX This could really suck with a large LUN space. */
2306 	for (; minlun <= maxlun; minlun++) {
2307 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2308 		if (periph == NULL)
2309 			continue;
2310 
2311 		if (mo->mo_openings < periph->periph_openings)
2312 			periph->periph_openings = mo->mo_openings;
2313 		else if (mo->mo_openings > periph->periph_openings &&
2314 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2315 			periph->periph_openings = mo->mo_openings;
2316 	}
2317 }
2318 
2319 /*
2320  * scsipi_async_event_xfer_mode:
2321  *
2322  *	Update the xfer mode for all periphs sharing the
2323  *	specified I_T Nexus.
2324  */
2325 void
2326 scsipi_async_event_xfer_mode(chan, xm)
2327 	struct scsipi_channel *chan;
2328 	struct scsipi_xfer_mode *xm;
2329 {
2330 	struct scsipi_periph *periph;
2331 	int lun, announce, mode, period, offset;
2332 
2333 	for (lun = 0; lun < chan->chan_nluns; lun++) {
2334 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2335 		if (periph == NULL)
2336 			continue;
2337 		announce = 0;
2338 
2339 		/*
2340 		 * Clamp the xfer mode down to this periph's capabilities.
2341 		 */
2342 		mode = xm->xm_mode & periph->periph_cap;
2343 		if (mode & PERIPH_CAP_SYNC) {
2344 			period = xm->xm_period;
2345 			offset = xm->xm_offset;
2346 		} else {
2347 			period = 0;
2348 			offset = 0;
2349 		}
2350 
2351 		/*
2352 		 * If we do not have a valid xfer mode yet, or the parameters
2353 		 * are different, announce them.
2354 		 */
2355 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2356 		    periph->periph_mode != mode ||
2357 		    periph->periph_period != period ||
2358 		    periph->periph_offset != offset)
2359 			announce = 1;
2360 
2361 		periph->periph_mode = mode;
2362 		periph->periph_period = period;
2363 		periph->periph_offset = offset;
2364 		periph->periph_flags |= PERIPH_MODE_VALID;
2365 
2366 		if (announce)
2367 			scsipi_print_xfer_mode(periph);
2368 	}
2369 }
2370 
2371 /*
2372  * scsipi_set_xfer_mode:
2373  *
2374  *	Set the xfer mode for the specified I_T Nexus.
2375  */
2376 void
2377 scsipi_set_xfer_mode(chan, target, immed)
2378 	struct scsipi_channel *chan;
2379 	int target, immed;
2380 {
2381 	struct scsipi_xfer_mode xm;
2382 	struct scsipi_periph *itperiph;
2383 	int lun, s;
2384 
2385 	/*
2386 	 * Go to the minimal xfer mode.
2387 	 */
2388 	xm.xm_target = target;
2389 	xm.xm_mode = 0;
2390 	xm.xm_period = 0;			/* ignored */
2391 	xm.xm_offset = 0;			/* ignored */
2392 
2393 	/*
2394 	 * Find the first LUN we know about on this I_T Nexus.
2395 	 */
2396 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2397 		itperiph = scsipi_lookup_periph(chan, target, lun);
2398 		if (itperiph != NULL)
2399 			break;
2400 	}
2401 	if (itperiph != NULL) {
2402 		xm.xm_mode = itperiph->periph_cap;
2403 		/*
2404 		 * Now issue the request to the adapter.
2405 		 */
2406 		s = splbio();
2407 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2408 		splx(s);
2409 		/*
2410 		 * If we want this to happen immediately, issue a dummy
2411 		 * command, since most adapters can't really negotiate unless
2412 		 * they're executing a job.
2413 		 */
2414 		if (immed != 0) {
2415 			(void) scsipi_test_unit_ready(itperiph,
2416 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2417 			    XS_CTL_IGNORE_NOT_READY |
2418 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2419 		}
2420 	}
2421 }
2422 
2423 /*
2424  * scsipi_channel_reset:
2425  *
2426  *	handle scsi bus reset
2427  * called at splbio
2428  */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
	 * commands; as the sense is not available any more.
	 * can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	/*
	 * Walk the channel queue.  xs_next is captured before any
	 * removal, because TAILQ_REMOVE invalidates TAILQ_NEXT on the
	 * removed element.
	 */
	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			/*
			 * Async transfers are handed to the completion
			 * queue; synchronous ones are presumably picked
			 * up by their sleeping owner (NOTE(review):
			 * verify against the wait path).
			 */
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	/* Kick anyone sleeping on the completion queue. */
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		/* Skip the adapter's own ID. */
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun <  chan->chan_nluns; lun++) {
			periph = scsipi_lookup_periph(chan, target, lun);
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
2469 
2470 /*
2471  * scsipi_target_detach:
2472  *
2473  *	detach all periph associated with a I_T
2474  * 	must be called from valid thread context
2475  */
2476 int
2477 scsipi_target_detach(chan, target, lun, flags)
2478 	struct scsipi_channel *chan;
2479 	int target, lun;
2480 	int flags;
2481 {
2482 	struct scsipi_periph *periph;
2483 	int ctarget, mintarget, maxtarget;
2484 	int clun, minlun, maxlun;
2485 	int error;
2486 
2487 	if (target == -1) {
2488 		mintarget = 0;
2489 		maxtarget = chan->chan_ntargets;
2490 	} else {
2491 		if (target == chan->chan_id)
2492 			return EINVAL;
2493 		if (target < 0 || target >= chan->chan_ntargets)
2494 			return EINVAL;
2495 		mintarget = target;
2496 		maxtarget = target + 1;
2497 	}
2498 
2499 	if (lun == -1) {
2500 		minlun = 0;
2501 		maxlun = chan->chan_nluns;
2502 	} else {
2503 		if (lun < 0 || lun >= chan->chan_nluns)
2504 			return EINVAL;
2505 		minlun = lun;
2506 		maxlun = lun + 1;
2507 	}
2508 
2509 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2510 		if (ctarget == chan->chan_id)
2511 			continue;
2512 
2513 		for (clun = minlun; clun < maxlun; clun++) {
2514 			periph = scsipi_lookup_periph(chan, ctarget, clun);
2515 			if (periph == NULL)
2516 				continue;
2517 			error = config_detach(periph->periph_dev, flags);
2518 			if (error)
2519 				return (error);
2520 			scsipi_remove_periph(chan, periph);
2521 			free(periph, M_DEVBUF);
2522 		}
2523 	}
2524 	return(0);
2525 }
2526 
2527 /*
2528  * scsipi_adapter_addref:
2529  *
2530  *	Add a reference to the adapter pointed to by the provided
2531  *	link, enabling the adapter if necessary.
2532  */
2533 int
2534 scsipi_adapter_addref(adapt)
2535 	struct scsipi_adapter *adapt;
2536 {
2537 	int s, error = 0;
2538 
2539 	s = splbio();
2540 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2541 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2542 		if (error)
2543 			adapt->adapt_refcnt--;
2544 	}
2545 	splx(s);
2546 	return (error);
2547 }
2548 
2549 /*
2550  * scsipi_adapter_delref:
2551  *
2552  *	Delete a reference to the adapter pointed to by the provided
2553  *	link, disabling the adapter if possible.
2554  */
2555 void
2556 scsipi_adapter_delref(adapt)
2557 	struct scsipi_adapter *adapt;
2558 {
2559 	int s;
2560 
2561 	s = splbio();
2562 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2563 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2564 	splx(s);
2565 }
2566 
/*
 * Table of the special sync-factor codes and their transfer periods
 * (in units of ns * 10), ordered by increasing period.  Factors not
 * in the table follow the linear rule: period(ns) = factor * 4.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x09,		125 },
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * Map a sync period (ns * 10) to the smallest tabled factor whose
 * period covers it; fall back to the linear rule past the table.
 */
int
scsipi_sync_period_to_factor(period)
	int period;		/* ns * 10 */
{
	const struct scsipi_syncparam *sp;

	for (sp = &scsipi_syncparams[0];
	     sp != &scsipi_syncparams[scsipi_nsyncparams]; sp++) {
		if (period <= sp->ss_period)
			return (sp->ss_factor);
	}

	return ((period / 10) / 4);
}

/*
 * Map a sync factor to its transfer period (ns * 10).
 */
int
scsipi_sync_factor_to_period(factor)
	int factor;
{
	const struct scsipi_syncparam *sp;

	for (sp = &scsipi_syncparams[0];
	     sp != &scsipi_syncparams[scsipi_nsyncparams]; sp++) {
		if (sp->ss_factor == factor)
			return (sp->ss_period);
	}

	return ((factor * 4) * 10);
}

/*
 * Map a sync factor to its transfer frequency, in kHz
 * (10000000 / (ns * 10) == 1000000 / ns).
 */
int
scsipi_sync_factor_to_freq(factor)
	int factor;
{
	const struct scsipi_syncparam *sp;

	for (sp = &scsipi_syncparams[0];
	     sp != &scsipi_syncparams[scsipi_nsyncparams]; sp++) {
		if (sp->ss_factor == factor)
			return (10000000 / sp->ss_period);
	}

	return (10000000 / ((factor * 4) * 10));
}
2620 
2621 #ifdef SCSIPI_DEBUG
2622 /*
2623  * Given a scsipi_xfer, dump the request, in all it's glory
2624  */
/*
 * Dump every interesting field of a scsipi_xfer to the console, then
 * decode the command itself via show_scsipi_cmd().
 */
void
show_scsipi_xs(xs)
	struct scsipi_xfer *xs;
{

	printf("xs(%p): ", xs);
	printf("xs_control(0x%08x)", xs->xs_control);
	printf("xs_status(0x%08x)", xs->xs_status);
	printf("periph(%p)", xs->xs_periph);
	printf("retr(0x%x)", xs->xs_retries);
	printf("timo(0x%x)", xs->timeout);
	printf("cmd(%p)", xs->cmd);
	printf("len(0x%x)", xs->cmdlen);
	printf("data(%p)", xs->data);
	printf("len(0x%x)", xs->datalen);
	printf("res(0x%x)", xs->resid);
	printf("err(0x%x)", xs->error);
	printf("bp(%p)", xs->bp);
	show_scsipi_cmd(xs);
}
2645 
2646 void
2647 show_scsipi_cmd(xs)
2648 	struct scsipi_xfer *xs;
2649 {
2650 	u_char *b = (u_char *) xs->cmd;
2651 	int i = 0;
2652 
2653 	scsipi_printaddr(xs->xs_periph);
2654 	printf(" command: ");
2655 
2656 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2657 		while (i < xs->cmdlen) {
2658 			if (i)
2659 				printf(",");
2660 			printf("0x%x", b[i++]);
2661 		}
2662 		printf("-[%d bytes]\n", xs->datalen);
2663 		if (xs->datalen)
2664 			show_mem(xs->data, min(64, xs->datalen));
2665 	} else
2666 		printf("-RESET-\n");
2667 }
2668 
2669 void
2670 show_mem(address, num)
2671 	u_char *address;
2672 	int num;
2673 {
2674 	int x;
2675 
2676 	printf("------------------------------");
2677 	for (x = 0; x < num; x++) {
2678 		if ((x % 16) == 0)
2679 			printf("\n%03d: ", x);
2680 		printf("%02x ", *address++);
2681 	}
2682 	printf("\n------------------------------\n");
2683 }
2684 #endif /* SCSIPI_DEBUG */
2685