/*	$NetBSD: scsipi_base.c,v 1.189 2022/04/09 23:38:32 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.189 2022/04/09 23:38:32 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_scsi.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/hash.h>
#include <sys/atomic.h>

#include <dev/scsipi/scsi_sdt.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

#include <machine/param.h>

SDT_PROVIDER_DEFINE(scsi);

SDT_PROBE_DEFINE3(scsi, base, tag, get,
    "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
SDT_PROBE_DEFINE3(scsi, base, tag, put,
    "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);

SDT_PROBE_DEFINE3(scsi, base, adapter, request__start,
    "struct scsipi_channel *"/*chan*/,
    "scsipi_adapter_req_t"/*req*/,
    "void *"/*arg*/);
SDT_PROBE_DEFINE3(scsi, base, adapter, request__done,
    "struct scsipi_channel *"/*chan*/,
    "scsipi_adapter_req_t"/*req*/,
    "void *"/*arg*/);

SDT_PROBE_DEFINE1(scsi, base, queue, batch__start,
    "struct scsipi_channel *"/*chan*/);
SDT_PROBE_DEFINE2(scsi, base, queue, run,
    "struct scsipi_channel *"/*chan*/,
    "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, queue, batch__done,
    "struct scsipi_channel *"/*chan*/);

SDT_PROBE_DEFINE1(scsi, base, xfer, execute,  "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, enqueue,  "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, done,  "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, redone,  "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, complete,  "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, restart,  "struct scsipi_xfer *"/*xs*/);
SDT_PROBE_DEFINE1(scsi, base, xfer, free,  "struct scsipi_xfer *"/*xs*/);

static int	scsipi_complete(struct scsipi_xfer *);
static void	scsipi_request_sense(struct scsipi_xfer *);
static int	scsipi_enqueue(struct scsipi_xfer *);
static void	scsipi_run_queue(struct scsipi_channel *chan);

static void	scsipi_completion_thread(void *);

static void	scsipi_get_tag(struct scsipi_xfer *);
static void	scsipi_put_tag(struct scsipi_xfer *);

static int	scsipi_get_resource(struct scsipi_channel *);
static void	scsipi_put_resource(struct scsipi_channel *);

static void	scsipi_async_event_max_openings(struct scsipi_channel *,
		    struct scsipi_max_openings *);
static void	scsipi_async_event_channel_reset(struct scsipi_channel *);

static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);

static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);

static void	scsipi_update_timeouts(struct scsipi_xfer *xs);

static struct pool scsipi_xfer_pool;

int scsipi_xs_count = 0;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init(void)
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", NULL, IPL_BIO);
	pool_prime(&scsipi_xfer_pool, 1);

	scsipi_ioctl_init();
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
		LIST_INIT(&chan->chan_periphtab[i]);

	/*
	 * Create the asynchronous completion thread.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
	    &chan->chan_thread, "%s", chan->chan_name)) {
		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
		    "channel %d\n", chan->chan_channel);
		panic("scsipi_channel_init");
	}

	return 0;
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(struct scsipi_channel *chan)
{

	mutex_enter(chan_mtx(chan));
	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	cv_broadcast(chan_cv_complete(chan));

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
	mutex_exit(chan_mtx(chan));
}

static uint32_t
scsipi_chan_periph_hash(uint64_t t, uint64_t l)
{
	uint32_t hash;

	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
	hash = hash32_buf(&l, sizeof(l), hash);

	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
}
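
/*
 * Example (illustrative sketch, not compiled): the target and LUN are
 * folded into a bucket index for chan_periphtab[], so a periph at
 * target 2, LUN 0 always lands in (and is looked up from) the same
 * short chain:
 *
 *	uint32_t h = scsipi_chan_periph_hash(2, 0);
 *	LIST_INSERT_HEAD(&chan->chan_periphtab[h], periph, periph_hash);
 *
 * Because the hash depends only on (target, lun), lookups recompute it
 * and walk one bucket instead of every periph on the channel.
 */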

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
{
	uint32_t hash;

	hash = scsipi_chan_periph_hash(periph->periph_target,
	    periph->periph_lun);

	mutex_enter(chan_mtx(chan));
	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(struct scsipi_channel *chan,
    struct scsipi_periph *periph)
{

	LIST_REMOVE(periph, periph_hash);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
static struct scsipi_periph *
scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
{
	struct scsipi_periph *periph;
	uint32_t hash;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return NULL;

	hash = scsipi_chan_periph_hash(target, lun);

	if (lock)
		mutex_enter(chan_mtx(chan));
	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
		if (periph->periph_target == target &&
		    periph->periph_lun == lun)
			break;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	return periph;
}

struct scsipi_periph *
scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, false);
}

struct scsipi_periph *
scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
{
	return scsipi_lookup_periph_internal(chan, target, lun, true);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static int
scsipi_get_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return 1;
		}
		return 0;
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return 1;
	}
	return 0;
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called with channel lock held
 */
static inline int
scsipi_grow_resources(struct scsipi_channel *chan)
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			mutex_exit(chan_mtx(chan));
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			mutex_enter(chan_mtx(chan));
			return scsipi_get_resource(chan);
		}
		/*
		 * Ask the channel thread to do it.  It'll have to thaw
		 * the queue.
		 */
		scsipi_channel_freeze_locked(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		cv_broadcast(chan_cv_complete(chan));
		return 0;
	}

	return 0;
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_resource(struct scsipi_channel *chan)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}
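
/*
 * Example (illustrative sketch, not compiled): callers pair these under
 * the channel lock; a command is handed to the adapter only if an
 * opening was reserved, and the opening is returned on completion:
 *
 *	mutex_enter(chan_mtx(chan));
 *	if (scsipi_get_resource(chan) == 0)
 *		... leave the xfer queued until an opening frees up ...
 *	mutex_exit(chan_mtx(chan));
 *
 * Whether chan_openings or adapt_openings is decremented depends on
 * SCSIPI_CHAN_OPENINGS, i.e. whether openings are accounted per channel
 * or shared across the whole adapter.
 */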

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_get_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int bit, tag;
	u_int word;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	bit = 0;	/* XXX gcc */
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1U << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
	SDT_PROBE3(scsi, base, tag, get,
	    xs, xs->xs_tag_id, xs->xs_tag_type);
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called with channel lock held
 */
static void
scsipi_put_tag(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	SDT_PROBE3(scsi, base, tag, put,
	    xs, xs->xs_tag_id, xs->xs_tag_type);

	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1U << bit);
}
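
/*
 * Worked example (illustrative): tag IDs are encoded as positions in
 * the periph_freetags[] bitmap, 32 bits per word, five bits of bit
 * index per tag.  Tag 37 decodes as
 *
 *	word = 37 >> 5   == 1
 *	bit  = 37 & 0x1f == 5
 *
 * so scsipi_put_tag() sets bit 5 of periph_freetags[1], and a later
 * scsipi_get_tag() can find that bit via ffs() and re-encode the same
 * ID with tag = (1 << 5) | 5 == 37.
 */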

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 *
 *	When this routine is called with the channel lock held
 *	the flags must include XS_CTL_NOSLEEP.
 */
struct scsipi_xfer *
scsipi_get_xs(struct scsipi_periph *periph, int flags)
{
	struct scsipi_xfer *xs;
	bool lock = (flags & XS_CTL_NOSLEEP) == 0;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	KASSERT(!cold);

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	if (lock)
		mutex_enter(chan_mtx(periph->periph_channel));
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			KASSERT(!lock);
			return NULL;
		}
		KASSERT(lock);
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		cv_wait(periph_cv_periph(periph),
		    chan_mtx(periph->periph_channel));
	}
	if (lock)
		mutex_exit(chan_mtx(periph->periph_channel));

	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
		if (lock)
			mutex_enter(chan_mtx(periph->periph_channel));
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		if (lock)
			mutex_exit(chan_mtx(periph->periph_channel));
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		memset(xs, 0, sizeof(*xs));
		callout_init(&xs->xs_callout, 0);
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_enter(chan_mtx(periph->periph_channel));
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
		if ((flags & XS_CTL_NOSLEEP) == 0)
			mutex_exit(chan_mtx(periph->periph_channel));
	}
	return xs;
}
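
/*
 * Usage sketch (illustrative, not compiled): a periph driver running in
 * a context that must not sleep passes XS_CTL_NOSLEEP and copes with a
 * NULL return by leaving the request on its own queue:
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP | XS_CTL_DATA_IN);
 *	if (xs == NULL)
 *		return;		// retry when an opening frees up
 *
 * A thread-context caller may instead omit XS_CTL_NOSLEEP and block in
 * cv_wait()/pool_get() until an opening and a descriptor are available.
 */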

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_put_xs(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SDT_PROBE1(scsi, base, xfer, free,  xs);
	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	callout_destroy(&xs->xs_callout);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		cv_broadcast(periph_cv_active(periph));
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		cv_broadcast(periph_cv_periph(periph));
	} else {
		if (periph->periph_switch->psw_start != NULL &&
		    device_is_active(periph->periph_dev)) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze += count;
	if (lock)
		mutex_exit(chan_mtx(chan));
}

static void
scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
{

	chan->chan_qfreeze += count;
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(struct scsipi_channel *chan, int count)
{
	bool lock = chan_running(chan) > 0;

	if (lock)
		mutex_enter(chan_mtx(chan));
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	if (lock)
		mutex_exit(chan_mtx(chan));

	/*
	 * Nothing more to do until the channel is running.
	 */
	if (!lock)
		return;

	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired. This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(void *arg)
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}
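
/*
 * Example (illustrative sketch; sc_callout is a hypothetical softc
 * member, not part of this file): an adapter driver that must pause a
 * channel, e.g. after a bus error, typically freezes it and schedules
 * this callout to lift the freeze later; the final thaw kicks the queue:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 *
 * The freeze count nests, so each freeze must eventually be matched by
 * exactly one thaw.
 */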

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze += count;
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
{

	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		cv_broadcast(periph_cv_periph(periph));
}

void
scsipi_periph_freeze(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_freeze_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

void
scsipi_periph_thaw(struct scsipi_periph *periph, int count)
{

	mutex_enter(chan_mtx(periph->periph_channel));
	scsipi_periph_thaw_locked(periph, count);
	mutex_exit(chan_mtx(periph->periph_channel));
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(void *arg)
{
	struct scsipi_periph *periph = arg;
	struct scsipi_channel *chan = periph->periph_channel;

	callout_stop(&periph->periph_callout);

	mutex_enter(chan_mtx(chan));
	scsipi_periph_thaw_locked(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		mutex_exit(chan_mtx(chan));
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		cv_broadcast(chan_cv_complete(chan));
		mutex_exit(chan_mtx(chan));
	}
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	mutex_enter(chan_mtx(chan));
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
	mutex_exit(chan_mtx(chan));
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called with channel lock held
 */
void
scsipi_kill_pending(struct scsipi_periph *periph)
{
	struct scsipi_channel *chan = periph->periph_channel;

	(*chan->chan_bustype->bustype_kill_pending)(periph);
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		cv_wait(periph_cv_active(periph), chan_mtx(chan));
	}
}

/*
 * scsipi_print_cdb:
 * prints a command descriptor block (for debug purpose, error messages,
 * SCSIVERBOSE, ...)
 */
void
scsipi_print_cdb(struct scsipi_generic *cmd)
{
	int i, j;

	printf("0x%02x", cmd->opcode);

	switch (CDB_GROUPID(cmd->opcode)) {
	case CDB_GROUPID_0:
		j = CDB_GROUP0;
		break;
	case CDB_GROUPID_1:
		j = CDB_GROUP1;
		break;
	case CDB_GROUPID_2:
		j = CDB_GROUP2;
		break;
	case CDB_GROUPID_3:
		j = CDB_GROUP3;
		break;
	case CDB_GROUPID_4:
		j = CDB_GROUP4;
		break;
	case CDB_GROUPID_5:
		j = CDB_GROUP5;
		break;
	case CDB_GROUPID_6:
		j = CDB_GROUP6;
		break;
	case CDB_GROUPID_7:
		j = CDB_GROUP7;
		break;
	default:
		j = 0;
	}
	if (j == 0)
		j = sizeof (cmd->bytes);
	for (i = 0; i < j-1; i++) /* already done the opcode */
		printf(" %02x", cmd->bytes[i]);
}
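
/*
 * Worked example (illustrative): the top three bits of the opcode
 * select the CDB group and therefore the command length.  READ(10) is
 * opcode 0x28, so CDB_GROUPID(0x28) == 1 and j = CDB_GROUP1 (10 bytes);
 * the routine prints the opcode followed by the remaining 9 CDB bytes.
 */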

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(struct scsipi_xfer *xs)
{
	struct scsi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	int error;
	u_int32_t info;
	static const char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1) {
		int count, len;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid %d\n",
			SSD_RCODE(sense->response_code),
			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
			sense->segment,
			SSD_SENSE_KEY(sense->flags),
			sense->flags & SSD_ILI ? 1 : 0,
			sense->flags & SSD_EOM ? 1 : 0,
			sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
			"extra bytes\n",
			sense->info[0],
			sense->info[1],
			sense->info[2],
			sense->info[3],
			sense->extra_len);
		len = SSD_ADD_BYTES_LIM(sense);
		printf("\textra (up to %d bytes): ", len);
		for (count = 0; count < len; count++)
			printf("0x%x ", sense->csi[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return error;
	}
	/* otherwise use the default */
	switch (SSD_RCODE(sense->response_code)) {

		/*
		 * Old SCSI-1 and SASI devices respond with
		 * codes other than 70.
		 */
	case 0x00:		/* no error (command completed OK) */
		return 0;
	case 0x04:		/* drive not ready after it was selected */
		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
			return 0;
		/* XXX - display some sort of error here? */
		return EIO;
	case 0x20:		/* invalid command */
		if ((xs->xs_control &
		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
			return 0;
		return EINVAL;
	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
		return EACCES;

		/*
		 * If it's code 70, use the extended stuff and
		 * interpret the key
		 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = SSD_SENSE_KEY(sense->flags);
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->response_code & SSD_RCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = SSD_SENSE_KEY(sense->flags);

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
			error = 0;
			break;
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return 0;
			if (sense->asc == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return error;
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return error;
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return 0;
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->asc == 0x25 &&
			    sense->ascq == 0x00)
				return EINVAL;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->asc == 0x29 &&
			    sense->ascq == 0x00) {
				/* device or bus reset */
				return ERESTART;
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
				/* XXX Should reupload any transient state. */
				(periph->periph_flags &
				 PERIPH_REMOVABLE) == 0) {
				return ERESTART;
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return EIO;
			error = EIO;
			break;
		case SKEY_DATA_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

		/* Print verbose decode if appropriate and possible */
		if ((key == 0) ||
		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
		    (scsipi_print_sense(xs, 0) != 0))
			return error;

		/* Print brief(er) sense information */
		scsipi_printaddr(periph);
		printf("%s", error_mes[key - 1]);
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			switch (key) {
			case SKEY_NOT_READY:
			case SKEY_ILLEGAL_REQUEST:
			case SKEY_UNIT_ATTENTION:
			case SKEY_DATA_PROTECT:
				break;
			case SKEY_BLANK_CHECK:
				printf(", requested size: %d (decimal)",
				    info);
				break;
			case SKEY_ABORTED_COMMAND:
				if (xs->xs_retries)
					printf(", retrying");
				printf(", cmd 0x%x, info 0x%x",
				    xs->cmd->opcode, info);
				break;
			default:
				printf(", info = %d (decimal)", info);
			}
		}
		if (sense->extra_len != 0) {
			int n;
			printf(", data =");
			for (n = 0; n < sense->extra_len; n++)
				printf(" %02x",
				    sense->csi[n]);
		}
		printf("\n");
		return error;

	/*
	 * Some other code, just report it
	 */
	default:
#if    defined(SCSIDEBUG) || defined(DEBUG)
	{
		static const char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
			SSD_RCODE(sense->response_code));
		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
			struct scsi_sense_data_unextended *usense =
			    (struct scsi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return EIO;
	}
}
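
/*
 * Worked example (illustrative): a removable drive with no medium
 * returns fixed-format sense with response code 0x70, sense key
 * SKEY_NOT_READY (0x2) and ASC 0x3A.  The switch above then clears
 * PERIPH_MEDIA_LOADED and maps the condition to ENODEV, or suppresses
 * the message entirely when the caller passed XS_CTL_SILENT_NODEV.
 */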

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
{
	struct scsi_test_unit_ready cmd;
	int retries;

	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return 0;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    retries, 10000, NULL, flags);
}

static const struct scsipi_inquiry3_pattern {
	const char vendor[8];
	const char product[16];
	const char revision[4];
} scsipi_inquiry3_quirk[] = {
	{ "ES-6600 ", "", "" },
};

static int
scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
{
	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
		const struct scsipi_inquiry3_pattern *q =
		    &scsipi_inquiry3_quirk[i];
#define MATCH(field) \
    (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
			return 0;
	}
	return 1;
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
    int flags)
{
	struct scsipi_inquiry cmd;
	int error;
	int retries;

	if (flags & XS_CTL_DISCOVERY)
		retries = 0;
	else
		retries = SCSIPIRETRIES;

	/*
	 * If we request more data than the device can provide, it SHOULD just
	 * return a short response.  However, some devices error with an
	 * ILLEGAL REQUEST sense code, and yet others have even more special
	 * failure modes (such as the GL641USB flash adapter, which goes loony
	 * and sends corrupted CRCs).  To work around this, and to bring our
	 * behavior more in line with other OSes, we do a shorter inquiry,
	 * covering all the SCSI-2 information, first, and then request more
	 * data iff the "additional length" field indicates there is more.
	 * - mycroft, 2003/10/16
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = INQUIRY;
	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
	    10000, NULL, flags | XS_CTL_DATA_IN);
	if (!error &&
	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
	    if (scsipi_inquiry3_ok(inqbuf)) {
#if 0
printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
#endif
		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
		    10000, NULL, flags | XS_CTL_DATA_IN);
#if 0
printf("inquire: error=%d\n", error);
#endif
	    }
	}

#ifdef SCSI_OLD_NOINQUIRY
	/*
	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
	 * This board doesn't support the INQUIRY command at all.
	 */
	if (error == EINVAL || error == EACCES) {
		/*
		 * Conjure up an INQUIRY response.
		 */
		inqbuf->device = (error == EINVAL ?
			 SID_QUAL_LU_PRESENT :
			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
		inqbuf->dev_qual2 = 0;
		inqbuf->version = 0;
		inqbuf->response_format = SID_FORMAT_SCSI1;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
		error = 0;
	}

	/*
	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
	 * This board gives an empty response to an INQUIRY command.
	 */
	else if (error == 0 &&
	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
	    inqbuf->dev_qual2 == 0 &&
	    inqbuf->version == 0 &&
	    inqbuf->response_format == SID_FORMAT_SCSI1) {
		/*
		 * Fill out the INQUIRY response.
		 */
		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
		inqbuf->dev_qual2 = SID_REMOVABLE;
		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
	}
#endif /* SCSI_OLD_NOINQUIRY */

	return error;
}
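
/*
 * Worked example (illustrative; the constants live in scsipi_all.h,
 * where SCSIPI_INQUIRY_LENGTH_SCSI2 is 36): the INQUIRY "additional
 * length" byte counts data past the first 4 header bytes.  A device
 * reporting additional_length = 31 fits entirely in the SCSI-2 probe
 * (31 <= 36 - 4), so no second command is sent; one reporting, say, 91
 * triggers the longer SCSI-3 INQUIRY, unless the device matches an
 * entry in scsipi_inquiry3_quirk[].
 */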

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
{
	struct scsi_prevent_allow_medium_removal cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cmd.how = type;

	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(struct scsipi_periph *periph, int type, int flags)
{
	struct scsipi_start_stop cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = START_STOP;
	cmd.byte2 = 0x00;
	cmd.how = type;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a sense page from a device
 */

int
scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_6;
	cmd.byte2 = byte2;
	cmd.page = page;
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}

int
scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_sense_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SENSE_10;
	cmd.byte2 = byte2;
	cmd.page = page;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
}
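
/*
 * Worked example (illustrative): the 6-byte MODE SENSE carries a single
 * allocation-length byte, hence "cmd.length = len & 0xff" above, while
 * the 10-byte form stores the length big-endian in two bytes via
 * _lto2b().  For len = 0x1234, _lto2b(0x1234, cmd.length) yields
 *
 *	cmd.length[0] = 0x12;	cmd.length[1] = 0x34;
 */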

int
scsipi_mode_select(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_6 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_6;
	cmd.byte2 = byte2;
	cmd.length = len & 0xff;

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}

int
scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
    struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
    int timeout)
{
	struct scsi_mode_select_10 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MODE_SELECT_10;
	cmd.byte2 = byte2;
	_lto2b(len, cmd.length);

	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
}

/*
 * scsipi_get_opcodeinfo:
 *
 * query the device for supported commands and their timeout
 * building a timeout lookup table if timeout information is available.
 */
void
scsipi_get_opcodeinfo(struct scsipi_periph *periph)
{
	u_int8_t *data;
	int len = 16*1024;
	int rc;
	struct scsi_repsuppopcode cmd;

	/* refrain from asking for supported opcodes */
	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
	    periph->periph_type == T_PROCESSOR || /* spec. */
	    periph->periph_type == T_CDROM) /* spec. */
		return;

	scsipi_free_opcodeinfo(periph);

	/*
	 * query REPORT SUPPORTED OPERATION CODES
	 * if OK
	 *   enumerate all codes
	 *     if timeout exists insert maximum into opcode table
	 */

	data = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_MAINTENANCE_IN;
	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
	cmd.repoption = RSOC_RCTD|RSOC_ALL;
	_lto4b(len, cmd.alloclen);

	rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
			    (void *)data, len, 0, 1000, NULL,
			    XS_CTL_DATA_IN|XS_CTL_SILENT);

	if (rc == 0) {
		int count;
		int dlen = _4btol(data);
		u_int8_t *c = data + 4;

		SC_DEBUG(periph, SCSIPI_DB3,
			 ("supported opcode timeout-values loaded\n"));
		SC_DEBUG(periph, SCSIPI_DB3,
			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));

		struct scsipi_opcodes *tot = malloc(sizeof(struct scsipi_opcodes),
		    M_DEVBUF, M_WAITOK|M_ZERO);

		count = 0;
		while (tot != NULL &&
		       dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
			struct scsi_repsupopcode_all_commands_descriptor *acd
				= (struct scsi_repsupopcode_all_commands_descriptor *)c;
#ifdef SCSIPI_DEBUG
			int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
#endif
			dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
			c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
			SC_DEBUG(periph, SCSIPI_DB3,
				 ("0x%02x(%2d) ", acd->opcode, cdblen));

			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;

			if (acd->flags & RSOC_ACD_SERVACTV) {
				SC_DEBUGN(periph, SCSIPI_DB3,
					 ("0x%02x%02x ",
					  acd->serviceaction[0],
					  acd->serviceaction[1]));
			} else {
				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
			}

			if (acd->flags & RSOC_ACD_CTDP
			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
				struct scsi_repsupopcode_timeouts_descriptor *td
					= (struct scsi_repsupopcode_timeouts_descriptor *)c;
				long nomto = _4btol(td->nom_process_timeout);
				long cmdto = _4btol(td->cmd_process_timeout);
				long t = (cmdto > nomto) ? cmdto : nomto;

				dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
				c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);

				SC_DEBUGN(periph, SCSIPI_DB3,
					  ("0x%02x %10ld %10ld",
					   td->cmd_specific,
					   nomto, cmdto));

				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
					tot->opcode_info[acd->opcode].ti_timeout = t;
					++count;
				}
			}
			SC_DEBUGN(periph, SCSIPI_DB3,("\n"));
		}

		if (count > 0) {
			periph->periph_opcs = tot;
		} else {
			free(tot, M_DEVBUF);
			SC_DEBUG(periph, SCSIPI_DB3,
				 ("no usable timeout values available\n"));
		}
	} else {
		SC_DEBUG(periph, SCSIPI_DB3,
			 ("SCSI_MAINTENANCE_IN"
			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
			  " - no device provided timeout "
			  "values available\n", rc));
	}

	free(data, M_DEVBUF);
}

/*
 * scsipi_update_timeouts:
 *
 *	Override timeout value if device/config provided
 *	timeouts are available.
 */
static void
scsipi_update_timeouts(struct scsipi_xfer *xs)
{
	struct scsipi_opcodes *opcs;
	u_int8_t cmd;
	int timeout;
	struct scsipi_opinfo *oi;

	if (xs->timeout <= 0) {
		return;
	}

	opcs = xs->xs_periph->periph_opcs;

	if (opcs == NULL) {
		return;
	}

	cmd = xs->cmd->opcode;
	oi = &opcs->opcode_info[cmd];

	timeout = 1000 * (int)oi->ti_timeout;

	if (timeout > xs->timeout && timeout < 86400000) {
		/*
		 * pick up device configured timeouts if they
		 * are longer than the requested ones but less
		 * than a day
		 */
#ifdef SCSIPI_DEBUG
		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
				 ("Overriding command 0x%02x "
				  "timeout of %d with %d ms\n",
				  cmd, xs->timeout, timeout));
			oi->ti_flags |= SCSIPI_TI_LOGGED;
		}
#endif
		xs->timeout = timeout;
	}
}
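
/*
 * Worked example (illustrative): ti_timeout holds the device-reported
 * value in seconds, so a device that reported a 30-second maximum for
 * an opcode yields timeout = 1000 * 30 = 30000 ms.  An xfer submitted
 * with a 10000 ms timeout is then bumped to 30000 ms, while a reported
 * value of a day or more (>= 86400000 ms) is ignored as implausible.
 */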

/*
 * scsipi_free_opcodeinfo:
 *
 * free the opcode information table
 */
void
scsipi_free_opcodeinfo(struct scsipi_periph *periph)
{
	if (periph->periph_opcs != NULL) {
		free(periph->periph_opcs, M_DEVBUF);
	}

	periph->periph_opcs = NULL;
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	mutex_enter(chan_mtx(chan));
	SDT_PROBE1(scsi, base, xfer, done,  xs);
	/*
	 * The resource this command was using is now free.
	 */
	if (xs->xs_status & XS_STS_DONE) {
		/* XXX in certain circumstances, such as a device
		 * being detached, a xs that has already been
		 * scsipi_done()'d by the main thread will be done'd
		 * again by scsibusdetach(). Putting the xs on the
		 * chan_complete queue causes list corruption and
		 * everyone dies. This prevents that, but perhaps
		 * there should be better coordination somewhere such
		 * that this won't ever happen (and can be turned into
		 * a KASSERT()).
		 */
		SDT_PROBE1(scsi, base, xfer, redone,  xs);
		mutex_exit(chan_mtx(chan));
		goto out;
	}
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze_locked(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL) {
			mutex_exit(chan_mtx(chan));
			return;
		}
		cv_broadcast(xs_cv(xs));
		mutex_exit(chan_mtx(chan));
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		mutex_exit(chan_mtx(chan));
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	cv_broadcast(chan_cv_complete(chan));
	mutex_exit(chan_mtx(chan));

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
1708 
1709 /*
1710  * scsipi_complete:
1711  *
1712  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1713  *
1714  *	NOTE: This routine MUST be called with valid thread context
1715  *	except for the case where the following two conditions are
1716  *	true:
1717  *
1718  *		xs->error == XS_NOERROR
1719  *		XS_CTL_ASYNC is set in xs->xs_control
1720  *
1721  *	The semantics of this routine can be tricky, so here is an
1722  *	explanation:
1723  *
1724  *		0		Xfer completed successfully.
1725  *
1726  *		ERESTART	Xfer had an error, but was restarted.
1727  *
1728  *		anything else	Xfer had an error, return value is Unix
1729  *				errno.
1730  *
1731  *	If the return value is anything but ERESTART:
1732  *
1733  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1734  *		  the pool.
1735  *		- If there is a buf associated with the xfer,
1736  *		  it has been biodone()'d.
1737  */
1738 static int
scsipi_complete(struct scsipi_xfer * xs)1739 scsipi_complete(struct scsipi_xfer *xs)
1740 {
1741 	struct scsipi_periph *periph = xs->xs_periph;
1742 	struct scsipi_channel *chan = periph->periph_channel;
1743 	int error;
1744 
1745 	SDT_PROBE1(scsi, base, xfer, complete,  xs);
1746 
1747 #ifdef DIAGNOSTIC
1748 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1749 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1750 #endif
1751 	/*
1752 	 * If command terminated with a CHECK CONDITION, we need to issue a
1753 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1754 	 * we'll have the real status.
1755 	 * Must be processed with channel lock held to avoid missing
1756 	 * a SCSI bus reset for this command.
1757 	 */
1758 	mutex_enter(chan_mtx(chan));
1759 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1760 		/* request sense for a request sense ? */
1761 		if (xs->xs_control & XS_CTL_REQSENSE) {
1762 			scsipi_printaddr(periph);
1763 			printf("request sense for a request sense ?\n");
1764 			/* XXX maybe we should reset the device ? */
1765 			/* we've been frozen because xs->error != XS_NOERROR */
1766 			scsipi_periph_thaw_locked(periph, 1);
1767 			mutex_exit(chan_mtx(chan));
1768 			if (xs->resid < xs->datalen) {
1769 				printf("we read %d bytes of sense anyway:\n",
1770 				    xs->datalen - xs->resid);
1771 				scsipi_print_sense_data((void *)xs->data, 0);
1772 			}
1773 			return EINVAL;
1774 		}
1775 		mutex_exit(chan_mtx(chan)); // XXX allows other commands to queue or run
1776 		scsipi_request_sense(xs);
1777 	} else
1778 		mutex_exit(chan_mtx(chan));
1779 
1780 	/*
1781 	 * If it's a user level request, bypass all usual completion
1782 	 * processing; let the user work it out.
1783 	 */
1784 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1785 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1786 		mutex_enter(chan_mtx(chan));
1787 		if (xs->error != XS_NOERROR)
1788 			scsipi_periph_thaw_locked(periph, 1);
1789 		mutex_exit(chan_mtx(chan));
1790 		scsipi_user_done(xs);
1791 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1792 		return 0;
1793 	}
1794 
1795 	switch (xs->error) {
1796 	case XS_NOERROR:
1797 		error = 0;
1798 		break;
1799 
1800 	case XS_SENSE:
1801 	case XS_SHORTSENSE:
1802 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1803 		break;
1804 
1805 	case XS_RESOURCE_SHORTAGE:
1806 		/*
1807 		 * XXX Should freeze channel's queue.
1808 		 */
1809 		scsipi_printaddr(periph);
1810 		printf("adapter resource shortage\n");
1811 		/* FALLTHROUGH */
1812 
1813 	case XS_BUSY:
1814 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1815 			struct scsipi_max_openings mo;
1816 
1817 			/*
1818 			 * We set the openings to active - 1, assuming that
1819 			 * the command that got us here is the first one that
1820 			 * can't fit into the device's queue.  If that's not
1821 			 * the case, I guess we'll find out soon enough.
1822 			 */
1823 			mo.mo_target = periph->periph_target;
1824 			mo.mo_lun = periph->periph_lun;
1825 			if (periph->periph_active < periph->periph_openings)
1826 				mo.mo_openings = periph->periph_active - 1;
1827 			else
1828 				mo.mo_openings = periph->periph_openings - 1;
1829 #ifdef DIAGNOSTIC
1830 			if (mo.mo_openings < 0) {
1831 				scsipi_printaddr(periph);
1832 				printf("QUEUE FULL resulted in < 0 openings\n");
1833 				panic("scsipi_done");
1834 			}
1835 #endif
1836 			if (mo.mo_openings == 0) {
1837 				scsipi_printaddr(periph);
1838 				printf("QUEUE FULL resulted in 0 openings\n");
1839 				mo.mo_openings = 1;
1840 			}
1841 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1842 			error = ERESTART;
1843 		} else if (xs->xs_retries != 0) {
1844 			xs->xs_retries--;
1845 			/*
1846 			 * Wait one second, and try again.
1847 			 */
1848 			mutex_enter(chan_mtx(chan));
1849 			if ((xs->xs_control & XS_CTL_POLL) ||
1850 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1851 				/* XXX: quite extreme */
1852 				kpause("xsbusy", false, hz, chan_mtx(chan));
1853 			} else if (!callout_pending(&periph->periph_callout)) {
1854 				scsipi_periph_freeze_locked(periph, 1);
1855 				callout_reset(&periph->periph_callout,
1856 				    hz, scsipi_periph_timed_thaw, periph);
1857 			}
1858 			mutex_exit(chan_mtx(chan));
1859 			error = ERESTART;
1860 		} else
1861 			error = EBUSY;
1862 		break;
1863 
1864 	case XS_REQUEUE:
1865 		error = ERESTART;
1866 		break;
1867 
1868 	case XS_SELTIMEOUT:
1869 	case XS_TIMEOUT:
1870 		/*
1871 		 * If the device hasn't gone away, honor retry counts.
1872 		 *
1873 		 * Note that if we're in the middle of probing it,
1874 		 * it won't be found because it isn't here yet so
1875 		 * we won't honor the retry count in that case.
1876 		 */
1877 		if (scsipi_lookup_periph(chan, periph->periph_target,
1878 		    periph->periph_lun) && xs->xs_retries != 0) {
1879 			xs->xs_retries--;
1880 			error = ERESTART;
1881 		} else
1882 			error = EIO;
1883 		break;
1884 
1885 	case XS_RESET:
1886 		if (xs->xs_control & XS_CTL_REQSENSE) {
1887 			/*
1888 			 * request sense interrupted by reset: signal it
1889 			 * with EINTR return code.
1890 			 */
1891 			error = EINTR;
1892 		} else {
1893 			if (xs->xs_retries != 0) {
1894 				xs->xs_retries--;
1895 				error = ERESTART;
1896 			} else
1897 				error = EIO;
1898 		}
1899 		break;
1900 
1901 	case XS_DRIVER_STUFFUP:
1902 		scsipi_printaddr(periph);
1903 		printf("generic HBA error\n");
1904 		error = EIO;
1905 		break;
1906 	default:
1907 		scsipi_printaddr(periph);
1908 		printf("invalid return code from adapter: %d\n", xs->error);
1909 		error = EIO;
1910 		break;
1911 	}
1912 
1913 	mutex_enter(chan_mtx(chan));
1914 	if (error == ERESTART) {
1915 		SDT_PROBE1(scsi, base, xfer, restart,  xs);
1916 		/*
1917 		 * If we get here, the periph has been thawed and frozen
1918 		 * again if we had to issue recovery commands.  Alternatively,
1919 		 * it may have been frozen again and in a timed thaw.  In
1920 		 * any case, we thaw the periph once we re-enqueue the
1921 		 * command.  Once the periph is fully thawed, it will begin
1922 		 * operation again.
1923 		 */
1924 		xs->error = XS_NOERROR;
1925 		xs->status = SCSI_OK;
1926 		xs->xs_status &= ~XS_STS_DONE;
1927 		xs->xs_requeuecnt++;
1928 		error = scsipi_enqueue(xs);
1929 		if (error == 0) {
1930 			scsipi_periph_thaw_locked(periph, 1);
1931 			mutex_exit(chan_mtx(chan));
1932 			return ERESTART;
1933 		}
1934 	}
1935 
1936 	/*
1937 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1938 	 * Thaw it here.
1939 	 */
1940 	if (xs->error != XS_NOERROR)
1941 		scsipi_periph_thaw_locked(periph, 1);
1942 	mutex_exit(chan_mtx(chan));
1943 
1944 	if (periph->periph_switch->psw_done)
1945 		periph->periph_switch->psw_done(xs, error);
1946 
1947 	mutex_enter(chan_mtx(chan));
1948 	if (xs->xs_control & XS_CTL_ASYNC)
1949 		scsipi_put_xs(xs);
1950 	mutex_exit(chan_mtx(chan));
1951 
1952 	return error;
1953 }
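
/*
 * Illustrative sketch (not part of the driver): given the contract
 * documented above, a synchronous caller must treat ERESTART as "the
 * xfer was re-enqueued" and wait for it to complete again:
 *
 *	do {
 *		wait until (xs->xs_status & XS_STS_DONE) is set;
 *		error = scsipi_complete(xs);
 *	} while (error == ERESTART);
 *
 * scsipi_execute_xs() below implements exactly this loop.
 */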
1954 
1955 /*
1956  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1957  * returns with a CHECK_CONDITION status. Must be called in valid thread
1958  * context.
1959  */
1960 
1961 static void
1962 scsipi_request_sense(struct scsipi_xfer *xs)
1963 {
1964 	struct scsipi_periph *periph = xs->xs_periph;
1965 	int flags, error;
1966 	struct scsi_request_sense cmd;
1967 
1968 	periph->periph_flags |= PERIPH_SENSE;
1969 
1970 	/* if command was polling, request sense will too */
1971 	flags = xs->xs_control & XS_CTL_POLL;
1972 	/* Polling commands can't sleep */
1973 	if (flags)
1974 		flags |= XS_CTL_NOSLEEP;
1975 
1976 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1977 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1978 
1979 	memset(&cmd, 0, sizeof(cmd));
1980 	cmd.opcode = SCSI_REQUEST_SENSE;
1981 	cmd.length = sizeof(struct scsi_sense_data);
1982 
1983 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1984 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1985 	    0, 1000, NULL, flags);
1986 	periph->periph_flags &= ~PERIPH_SENSE;
1987 	periph->periph_xscheck = NULL;
1988 	switch (error) {
1989 	case 0:
1990 		/* we have a valid sense */
1991 		xs->error = XS_SENSE;
1992 		return;
1993 	case EINTR:
1994 		/* REQUEST_SENSE interrupted by bus reset. */
1995 		xs->error = XS_RESET;
1996 		return;
1997 	case EIO:
1998 		/*
1999 		 * Request sense couldn't be performed.
2000 		 * XXX this isn't quite right but we don't have anything
2001 		 * better for now.
2002 		 */
2003 		xs->error = XS_DRIVER_STUFFUP;
2004 		return;
2005 	default:
2006 		/* Notify that request sense failed. */
2007 		xs->error = XS_DRIVER_STUFFUP;
2008 		scsipi_printaddr(periph);
2009 		printf("request sense failed with error %d\n", error);
2010 		return;
2011 	}
2012 }
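
/*
 * For reference, an illustrative view of the 6-byte CDB built above,
 * assuming the standard struct scsi_request_sense layout:
 *
 *	byte 0:    0x03 (SCSI_REQUEST_SENSE opcode)
 *	bytes 1-3: reserved, zeroed by the memset
 *	byte 4:    allocation length = sizeof(struct scsi_sense_data)
 *	byte 5:    control, zero
 */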
2013 
2014 /*
2015  * scsipi_enqueue:
2016  *
2017  *	Enqueue an xfer on a channel.
2018  */
2019 static int
2020 scsipi_enqueue(struct scsipi_xfer *xs)
2021 {
2022 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
2023 	struct scsipi_xfer *qxs;
2024 
2025 	SDT_PROBE1(scsi, base, xfer, enqueue,  xs);
2026 
2027 	/*
2028 	 * If the xfer is to be polled, and there are already jobs on
2029 	 * the queue, we can't proceed.
2030 	 */
2031 	KASSERT(mutex_owned(chan_mtx(chan)));
2032 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
2033 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
2034 		xs->error = XS_DRIVER_STUFFUP;
2035 		return EAGAIN;
2036 	}
2037 
2038 	/*
2039 	 * If we have an URGENT xfer, it's an error recovery command
2040 	 * and it should just go on the head of the channel's queue.
2041 	 */
2042 	if (xs->xs_control & XS_CTL_URGENT) {
2043 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
2044 		goto out;
2045 	}
2046 
2047 	/*
2048 	 * If this xfer has already been on the queue before, we
2049 	 * need to reinsert it in the correct order.  That order is:
2050 	 *
2051 	 *	Immediately before the first xfer for this periph
2052 	 *	with a requeuecnt less than xs->xs_requeuecnt.
2053 	 *
2054 	 * Failing that, at the end of the queue.  (We'll end up
2055 	 * there naturally.)
2056 	 */
2057 	if (xs->xs_requeuecnt != 0) {
2058 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
2059 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
2060 			if (qxs->xs_periph == xs->xs_periph &&
2061 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
2062 				break;
2063 		}
2064 		if (qxs != NULL) {
2065 			TAILQ_INSERT_BEFORE(qxs, xs,
2066 			    channel_q);
2067 			goto out;
2068 		}
2069 	}
2070 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
2071  out:
2072 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
2073 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
2074 	return 0;
2075 }
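
/*
 * Worked example of the reinsertion rule above (illustrative): if the
 * queue already holds xfers for this periph with requeuecnt 3, 1 and
 * 0, re-enqueueing an xfer with requeuecnt 2 places it between the 3
 * and the 1, so commands that have been bounced more often (and were
 * therefore issued earlier) stay ahead of newer ones.
 */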
2076 
2077 /*
2078  * scsipi_run_queue:
2079  *
2080  *	Start as many xfers as possible running on the channel.
2081  */
2082 static void
2083 scsipi_run_queue(struct scsipi_channel *chan)
2084 {
2085 	struct scsipi_xfer *xs;
2086 	struct scsipi_periph *periph;
2087 
2088 	SDT_PROBE1(scsi, base, queue, batch__start,  chan);
2089 	for (;;) {
2090 		mutex_enter(chan_mtx(chan));
2091 
2092 		/*
2093 		 * If the channel is frozen, we can't do any work right
2094 		 * now.
2095 		 */
2096 		if (chan->chan_qfreeze != 0) {
2097 			mutex_exit(chan_mtx(chan));
2098 			break;
2099 		}
2100 
2101 		/*
2102 		 * Look for work to do, and make sure we can do it.
2103 		 */
2104 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
2105 		     xs = TAILQ_NEXT(xs, channel_q)) {
2106 			periph = xs->xs_periph;
2107 
2108 			if ((periph->periph_sent >= periph->periph_openings) ||
2109 			    periph->periph_qfreeze != 0 ||
2110 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
2111 				continue;
2112 
2113 			if ((periph->periph_flags &
2114 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
2115 			    (xs->xs_control & XS_CTL_URGENT) == 0)
2116 				continue;
2117 
2118 			/*
2119 			 * We can issue this xfer!
2120 			 */
2121 			goto got_one;
2122 		}
2123 
2124 		/*
2125 		 * Can't find any work to do right now.
2126 		 */
2127 		mutex_exit(chan_mtx(chan));
2128 		break;
2129 
2130  got_one:
2131 		/*
2132 		 * Have an xfer to run.  Allocate a resource from
2133 		 * the adapter to run it.  If we can't allocate that
2134 		 * resource, we don't dequeue the xfer.
2135 		 */
2136 		if (scsipi_get_resource(chan) == 0) {
2137 			/*
2138 			 * Adapter is out of resources.  If the adapter
2139 			 * supports it, attempt to grow them.
2140 			 */
2141 			if (scsipi_grow_resources(chan) == 0) {
2142 				/*
2143 				 * Wasn't able to grow resources,
2144 				 * nothing more we can do.
2145 				 */
2146 				if (xs->xs_control & XS_CTL_POLL) {
2147 					scsipi_printaddr(xs->xs_periph);
2148 					printf("polling command but no "
2149 					    "adapter resources\n");
2150 					/* We'll panic shortly... */
2151 				}
2152 				mutex_exit(chan_mtx(chan));
2153 
2154 				/*
2155 				 * XXX: We should be able to note
2156 				 * XXX: that resources are needed here!
2157 				 */
2158 				break;
2159 			}
2160 			/*
2161 			 * scsipi_grow_resources() allocated the resource
2162 			 * for us.
2163 			 */
2164 		}
2165 
2166 		/*
2167 		 * We have a resource to run this xfer, do it!
2168 		 */
2169 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2170 
2171 		/*
2172 		 * If the command is to be tagged, allocate a tag ID
2173 		 * for it.
2174 		 */
2175 		if (XS_CTL_TAGTYPE(xs) != 0)
2176 			scsipi_get_tag(xs);
2177 		else
2178 			periph->periph_flags |= PERIPH_UNTAG;
2179 		periph->periph_sent++;
2180 		mutex_exit(chan_mtx(chan));
2181 
2182 		SDT_PROBE2(scsi, base, queue, run,  chan, xs);
2183 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
2184 	}
2185 	SDT_PROBE1(scsi, base, queue, batch__done,  chan);
2186 }
2187 
2188 /*
2189  * scsipi_execute_xs:
2190  *
2191  *	Begin execution of an xfer, waiting for it to complete, if necessary.
2192  */
2193 int
2194 scsipi_execute_xs(struct scsipi_xfer *xs)
2195 {
2196 	struct scsipi_periph *periph = xs->xs_periph;
2197 	struct scsipi_channel *chan = periph->periph_channel;
2198 	int oasync, async, poll, error;
2199 
2200 	KASSERT(!cold);
2201 
2202 	scsipi_update_timeouts(xs);
2203 
2204 	(chan->chan_bustype->bustype_cmd)(xs);
2205 
2206 	xs->xs_status &= ~XS_STS_DONE;
2207 	xs->error = XS_NOERROR;
2208 	xs->resid = xs->datalen;
2209 	xs->status = SCSI_OK;
2210 	SDT_PROBE1(scsi, base, xfer, execute,  xs);
2211 
2212 #ifdef SCSIPI_DEBUG
2213 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
2214 		printf("scsipi_execute_xs: ");
2215 		show_scsipi_xs(xs);
2216 		printf("\n");
2217 	}
2218 #endif
2219 
2220 	/*
2221 	 * Deal with command tagging:
2222 	 *
2223 	 *	- If the device's current operating mode doesn't
2224 	 *	  include tagged queueing, clear the tag mask.
2225 	 *
2226 	 *	- If the device's current operating mode *does*
2227 	 *	  include tagged queueing, set the tag_type in
2228 	 *	  the xfer to the appropriate byte for the tag
2229 	 *	  message.
2230 	 */
2231 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
2232 		(xs->xs_control & XS_CTL_REQSENSE)) {
2233 		xs->xs_control &= ~XS_CTL_TAGMASK;
2234 		xs->xs_tag_type = 0;
2235 	} else {
2236 		/*
2237 		 * If the request doesn't specify a tag, give Head
2238 		 * tags to URGENT operations and Simple tags to
2239 		 * everything else.
2240 		 */
2241 		if (XS_CTL_TAGTYPE(xs) == 0) {
2242 			if (xs->xs_control & XS_CTL_URGENT)
2243 				xs->xs_control |= XS_CTL_HEAD_TAG;
2244 			else
2245 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
2246 		}
2247 
2248 		switch (XS_CTL_TAGTYPE(xs)) {
2249 		case XS_CTL_ORDERED_TAG:
2250 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
2251 			break;
2252 
2253 		case XS_CTL_SIMPLE_TAG:
2254 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
2255 			break;
2256 
2257 		case XS_CTL_HEAD_TAG:
2258 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
2259 			break;
2260 
2261 		default:
2262 			scsipi_printaddr(periph);
2263 			printf("invalid tag mask 0x%08x\n",
2264 			    XS_CTL_TAGTYPE(xs));
2265 			panic("scsipi_execute_xs");
2266 		}
2267 	}
2268 
2269 	/* If the adapter wants us to poll, poll. */
2270 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
2271 		xs->xs_control |= XS_CTL_POLL;
2272 
2273 	/*
2274 	 * If we don't yet have a completion thread, or we are to poll for
2275 	 * completion, clear the ASYNC flag.
2276 	 */
2277 	oasync = (xs->xs_control & XS_CTL_ASYNC);
2278 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2279 		xs->xs_control &= ~XS_CTL_ASYNC;
2280 
2281 	async = (xs->xs_control & XS_CTL_ASYNC);
2282 	poll = (xs->xs_control & XS_CTL_POLL);
2283 
2284 #ifdef DIAGNOSTIC
2285 	if (oasync != 0 && xs->bp == NULL)
2286 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2287 #endif
2288 
2289 	/*
2290 	 * Enqueue the transfer.  If we're not polling for completion, this
2291 	 * should ALWAYS return `no error'.
2292 	 */
2293 	error = scsipi_enqueue(xs);
2294 	if (error) {
2295 		if (poll == 0) {
2296 			scsipi_printaddr(periph);
2297 			printf("not polling, but enqueue failed with %d\n",
2298 			    error);
2299 			panic("scsipi_execute_xs");
2300 		}
2301 
2302 		scsipi_printaddr(periph);
2303 		printf("should have flushed queue?\n");
2304 		goto free_xs;
2305 	}
2306 
2307 	mutex_exit(chan_mtx(chan));
2308  restarted:
2309 	scsipi_run_queue(chan);
2310 	mutex_enter(chan_mtx(chan));
2311 
2312 	/*
2313 	 * The xfer is enqueued, and possibly running.  If it's to be
2314 	 * completed asynchronously, just return now.
2315 	 */
2316 	if (async)
2317 		return 0;
2318 
2319 	/*
2320 	 * Not an asynchronous command; wait for it to complete.
2321 	 */
2322 	while ((xs->xs_status & XS_STS_DONE) == 0) {
2323 		if (poll) {
2324 			scsipi_printaddr(periph);
2325 			printf("polling command not done\n");
2326 			panic("scsipi_execute_xs");
2327 		}
2328 		cv_wait(xs_cv(xs), chan_mtx(chan));
2329 	}
2330 
2331 	/*
2332 	 * Command is complete.  scsipi_done() has awakened us to perform
2333 	 * the error handling.
2334 	 */
2335 	mutex_exit(chan_mtx(chan));
2336 	error = scsipi_complete(xs);
2337 	if (error == ERESTART)
2338 		goto restarted;
2339 
2340 	/*
2341 	 * If it was meant to run async and we cleared async ourselves,
2342 	 * don't return an error here; it has already been handled.
2343 	 */
2344 	if (oasync)
2345 		error = 0;
2346 	/*
2347 	 * Command completed successfully or fatal error occurred.  Fall
2348 	 * into....
2349 	 */
2350 	mutex_enter(chan_mtx(chan));
2351  free_xs:
2352 	scsipi_put_xs(xs);
2353 	mutex_exit(chan_mtx(chan));
2354 
2355 	/*
2356 	 * Kick the queue, keep it running in case it stopped for some
2357 	 * reason.
2358 	 */
2359 	scsipi_run_queue(chan);
2360 
2361 	mutex_enter(chan_mtx(chan));
2362 	return error;
2363 }
2364 
2365 /*
2366  * scsipi_completion_thread:
2367  *
2368  *	This is the completion thread.  We wait for errors on
2369  *	asynchronous xfers, and perform the error handling
2370  *	function, restarting the command, if necessary.
2371  */
2372 static void
2373 scsipi_completion_thread(void *arg)
2374 {
2375 	struct scsipi_channel *chan = arg;
2376 	struct scsipi_xfer *xs;
2377 
2378 	if (chan->chan_init_cb)
2379 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2380 
2381 	mutex_enter(chan_mtx(chan));
2382 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2383 	for (;;) {
2384 		xs = TAILQ_FIRST(&chan->chan_complete);
2385 		if (xs == NULL && chan->chan_tflags == 0) {
2386 			/* nothing to do; wait */
2387 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
2388 			continue;
2389 		}
2390 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2391 			/* call chan_callback from thread context */
2392 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2393 			chan->chan_callback(chan, chan->chan_callback_arg);
2394 			continue;
2395 		}
2396 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2397 			/* attempt to get more openings for this channel */
2398 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2399 			mutex_exit(chan_mtx(chan));
2400 			scsipi_adapter_request(chan,
2401 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2402 			scsipi_channel_thaw(chan, 1);
2403 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2404 				kpause("scsizzz", false, hz/10, NULL);
2405 			mutex_enter(chan_mtx(chan));
2406 			continue;
2407 		}
2408 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2409 			/* explicitly run the queues for this channel */
2410 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2411 			mutex_exit(chan_mtx(chan));
2412 			scsipi_run_queue(chan);
2413 			mutex_enter(chan_mtx(chan));
2414 			continue;
2415 		}
2416 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2417 			break;
2418 		}
2419 		if (xs) {
2420 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2421 			mutex_exit(chan_mtx(chan));
2422 
2423 			/*
2424 			 * Have an xfer with an error; process it.
2425 			 */
2426 			(void) scsipi_complete(xs);
2427 
2428 			/*
2429 			 * Kick the queue; keep it running if it was stopped
2430 			 * for some reason.
2431 			 */
2432 			scsipi_run_queue(chan);
2433 			mutex_enter(chan_mtx(chan));
2434 		}
2435 	}
2436 
2437 	chan->chan_thread = NULL;
2438 
2439 	/* In case parent is waiting for us to exit. */
2440 	cv_broadcast(chan_cv_thread(chan));
2441 	mutex_exit(chan_mtx(chan));
2442 
2443 	kthread_exit(0);
2444 }
2445 /*
2446  * scsipi_thread_call_callback:
2447  *
2448  * 	request to call a callback from the completion thread
2449  *	Request to call a callback from the completion thread.
2450 int
2451 scsipi_thread_call_callback(struct scsipi_channel *chan,
2452     void (*callback)(struct scsipi_channel *, void *), void *arg)
2453 {
2454 
2455 	mutex_enter(chan_mtx(chan));
2456 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2457 		/* kernel thread doesn't exist yet */
2458 		mutex_exit(chan_mtx(chan));
2459 		return ESRCH;
2460 	}
2461 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2462 		mutex_exit(chan_mtx(chan));
2463 		return EBUSY;
2464 	}
2465 	scsipi_channel_freeze(chan, 1);
2466 	chan->chan_callback = callback;
2467 	chan->chan_callback_arg = arg;
2468 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2469 	cv_broadcast(chan_cv_complete(chan));
2470 	mutex_exit(chan_mtx(chan));
2471 	return 0;
2472 }
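
/*
 * Illustrative usage sketch; mydrv_reset_done and sc are hypothetical
 * names, not part of this file:
 *
 *	static void
 *	mydrv_reset_done(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct mydrv_softc *sc = arg;
 *
 *		finish recovery in thread context, then presumably undo
 *		the freeze taken by scsipi_thread_call_callback():
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	error = scsipi_thread_call_callback(chan, mydrv_reset_done, sc);
 *	(ESRCH: no completion thread yet; EBUSY: a callback is pending)
 */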
2473 
2474 /*
2475  * scsipi_async_event:
2476  *
2477  *	Handle an asynchronous event from an adapter.
2478  */
2479 void
2480 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2481     void *arg)
2482 {
2483 	bool lock = chan_running(chan) > 0;
2484 
2485 	if (lock)
2486 		mutex_enter(chan_mtx(chan));
2487 	switch (event) {
2488 	case ASYNC_EVENT_MAX_OPENINGS:
2489 		scsipi_async_event_max_openings(chan,
2490 		    (struct scsipi_max_openings *)arg);
2491 		break;
2492 
2493 	case ASYNC_EVENT_XFER_MODE:
2494 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
2495 			chan->chan_bustype->bustype_async_event_xfer_mode(
2496 			    chan, arg);
2497 		}
2498 		break;
2499 	case ASYNC_EVENT_RESET:
2500 		scsipi_async_event_channel_reset(chan);
2501 		break;
2502 	}
2503 	if (lock)
2504 		mutex_exit(chan_mtx(chan));
2505 }
2506 
2507 /*
2508  * scsipi_async_event_max_openings:
2509  *
2510  *	Update the maximum number of outstanding commands a
2511  *	device may have.
2512  */
2513 static void
2514 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2515     struct scsipi_max_openings *mo)
2516 {
2517 	struct scsipi_periph *periph;
2518 	int minlun, maxlun;
2519 
2520 	if (mo->mo_lun == -1) {
2521 		/*
2522 		 * Wildcarded; apply it to all LUNs.
2523 		 */
2524 		minlun = 0;
2525 		maxlun = chan->chan_nluns - 1;
2526 	} else
2527 		minlun = maxlun = mo->mo_lun;
2528 
2529 	/* XXX This could really suck with a large LUN space. */
2530 	for (; minlun <= maxlun; minlun++) {
2531 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
2532 		if (periph == NULL)
2533 			continue;
2534 
2535 		if (mo->mo_openings < periph->periph_openings)
2536 			periph->periph_openings = mo->mo_openings;
2537 		else if (mo->mo_openings > periph->periph_openings &&
2538 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2539 			periph->periph_openings = mo->mo_openings;
2540 	}
2541 }
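
/*
 * Illustrative sketch (values hypothetical): an HBA driver that
 * decided target 2 should be limited to 4 tags on every LUN could
 * report it as:
 *
 *	struct scsipi_max_openings mo = {
 *		.mo_target = 2,
 *		.mo_lun = -1,		wildcard: all LUNs
 *		.mo_openings = 4,
 *	};
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */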
2542 
2543 /*
2544  * scsipi_set_xfer_mode:
2545  *
2546  *	Set the xfer mode for the specified I_T Nexus.
2547  */
2548 void
2549 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2550 {
2551 	struct scsipi_xfer_mode xm;
2552 	struct scsipi_periph *itperiph;
2553 	int lun;
2554 
2555 	/*
2556 	 * Go to the minimal xfer mode.
2557 	 */
2558 	xm.xm_target = target;
2559 	xm.xm_mode = 0;
2560 	xm.xm_period = 0;			/* ignored */
2561 	xm.xm_offset = 0;			/* ignored */
2562 
2563 	/*
2564 	 * Find the first LUN we know about on this I_T Nexus.
2565 	 */
2566 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2567 		itperiph = scsipi_lookup_periph(chan, target, lun);
2568 		if (itperiph != NULL)
2569 			break;
2570 	}
2571 	if (itperiph != NULL) {
2572 		xm.xm_mode = itperiph->periph_cap;
2573 		/*
2574 		 * Now issue the request to the adapter.
2575 		 */
2576 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2577 		/*
2578 		 * If we want this to happen immediately, issue a dummy
2579 		 * command, since most adapters can't really negotiate unless
2580 		 * they're executing a job.
2581 		 */
2582 		if (immed != 0) {
2583 			(void) scsipi_test_unit_ready(itperiph,
2584 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2585 			    XS_CTL_IGNORE_NOT_READY |
2586 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2587 		}
2588 	}
2589 }
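
/*
 * Illustrative sketch: a driver that detects a negotiation problem on
 * target 3 can drop it back to the minimal mode and force an immediate
 * renegotiation with:
 *
 *	scsipi_set_xfer_mode(chan, 3, 1);
 */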
2590 
2591 /*
2592  * scsipi_async_event_channel_reset:
2593  *
2594  *	Handle a SCSI bus reset.
2595  *	Called with the channel lock held.
2596  */
2597 static void
2598 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2599 {
2600 	struct scsipi_xfer *xs, *xs_next;
2601 	struct scsipi_periph *periph;
2602 	int target, lun;
2603 
2604 	/*
2605 	 * Channel has been reset.  Also mark pending REQUEST_SENSE
2606 	 * commands as reset, since their sense data is no longer available.
2607 	 * We can't call scsipi_done() from here, as the command has not
2608 	 * been sent to the adapter yet (that would corrupt the accounting).
2609 	 */
2610 
2611 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2612 		xs_next = TAILQ_NEXT(xs, channel_q);
2613 		if (xs->xs_control & XS_CTL_REQSENSE) {
2614 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2615 			xs->error = XS_RESET;
2616 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2617 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2618 				    channel_q);
2619 		}
2620 	}
2621 	cv_broadcast(chan_cv_complete(chan));
2622 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
2623 	for (target = 0; target < chan->chan_ntargets; target++) {
2624 		if (target == chan->chan_id)
2625 			continue;
2626 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2627 			periph = scsipi_lookup_periph_locked(chan, target, lun);
2628 			if (periph) {
2629 				xs = periph->periph_xscheck;
2630 				if (xs)
2631 					xs->error = XS_RESET;
2632 			}
2633 		}
2634 	}
2635 }
2636 
2637 /*
2638  * scsipi_target_detach:
2639  *
2640  *	Detach all periphs associated with an I_T nexus.
2641  *	Must be called from valid thread context.
2642  */
2643 int
2644 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2645     int flags)
2646 {
2647 	struct scsipi_periph *periph;
2648 	device_t tdev;
2649 	int ctarget, mintarget, maxtarget;
2650 	int clun, minlun, maxlun;
2651 	int error = 0;
2652 
2653 	if (target == -1) {
2654 		mintarget = 0;
2655 		maxtarget = chan->chan_ntargets;
2656 	} else {
2657 		if (target == chan->chan_id)
2658 			return EINVAL;
2659 		if (target < 0 || target >= chan->chan_ntargets)
2660 			return EINVAL;
2661 		mintarget = target;
2662 		maxtarget = target + 1;
2663 	}
2664 
2665 	if (lun == -1) {
2666 		minlun = 0;
2667 		maxlun = chan->chan_nluns;
2668 	} else {
2669 		if (lun < 0 || lun >= chan->chan_nluns)
2670 			return EINVAL;
2671 		minlun = lun;
2672 		maxlun = lun + 1;
2673 	}
2674 
2675 	/* for config_detach */
2676 	KERNEL_LOCK(1, curlwp);
2677 
2678 	mutex_enter(chan_mtx(chan));
2679 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2680 		if (ctarget == chan->chan_id)
2681 			continue;
2682 
2683 		for (clun = minlun; clun < maxlun; clun++) {
2684 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
2685 			if (periph == NULL)
2686 				continue;
2687 			tdev = periph->periph_dev;
2688 			mutex_exit(chan_mtx(chan));
2689 			error = config_detach(tdev, flags);
2690 			if (error)
2691 				goto out;
2692 			mutex_enter(chan_mtx(chan));
2693 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
2694 		}
2695 	}
2696 	mutex_exit(chan_mtx(chan));
2697 
2698 out:
2699 	KERNEL_UNLOCK_ONE(curlwp);
2700 
2701 	return error;
2702 }
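
/*
 * Illustrative sketch: detaching every periph on a channel (wildcard
 * target and LUN), forcing the detach:
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */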
2703 
2704 /*
2705  * scsipi_adapter_addref:
2706  *
2707  *	Add a reference to the adapter pointed to by the provided
2708  *	link, enabling the adapter if necessary.
2709  */
2710 int
2711 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2712 {
2713 	int error = 0;
2714 
2715 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
2716 	    && adapt->adapt_enable != NULL) {
2717 		scsipi_adapter_lock(adapt);
2718 		error = scsipi_adapter_enable(adapt, 1);
2719 		scsipi_adapter_unlock(adapt);
2720 		if (error)
2721 			atomic_dec_uint(&adapt->adapt_refcnt);
2722 	}
2723 	return error;
2724 }
2725 
2726 /*
2727  * scsipi_adapter_delref:
2728  *
2729  *	Delete a reference to the adapter pointed to by the provided
2730  *	link, disabling the adapter if possible.
2731  */
2732 void
2733 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2734 {
2735 
2736 	membar_release();
2737 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
2738 	    && adapt->adapt_enable != NULL) {
2739 		membar_acquire();
2740 		scsipi_adapter_lock(adapt);
2741 		(void) scsipi_adapter_enable(adapt, 0);
2742 		scsipi_adapter_unlock(adapt);
2743 	}
2744 }
2745 
2746 static struct scsipi_syncparam {
2747 	int	ss_factor;
2748 	int	ss_period;	/* ns * 100 */
2749 } scsipi_syncparams[] = {
2750 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2751 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2752 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2753 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2754 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2755 };
2756 static const int scsipi_nsyncparams =
2757     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2758 
2759 int
2760 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2761 {
2762 	int i;
2763 
2764 	for (i = 0; i < scsipi_nsyncparams; i++) {
2765 		if (period <= scsipi_syncparams[i].ss_period)
2766 			return scsipi_syncparams[i].ss_factor;
2767 	}
2768 
2769 	return (period / 100) / 4;
2770 }
2771 
2772 int
2773 scsipi_sync_factor_to_period(int factor)
2774 {
2775 	int i;
2776 
2777 	for (i = 0; i < scsipi_nsyncparams; i++) {
2778 		if (factor == scsipi_syncparams[i].ss_factor)
2779 			return scsipi_syncparams[i].ss_period;
2780 	}
2781 
2782 	return (factor * 4) * 100;
2783 }
2784 
2785 int
2786 scsipi_sync_factor_to_freq(int factor)
2787 {
2788 	int i;
2789 
2790 	for (i = 0; i < scsipi_nsyncparams; i++) {
2791 		if (factor == scsipi_syncparams[i].ss_factor)
2792 			return 100000000 / scsipi_syncparams[i].ss_period;
2793 	}
2794 
2795 	return 10000000 / ((factor * 4) * 10);
2796 }
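
/*
 * Worked examples for the conversions above (values taken from the
 * table; note the frequency is returned in kHz):
 *
 *	scsipi_sync_period_to_factor(2500) == 0x0a   (25 ns, Ultra2)
 *	scsipi_sync_factor_to_period(0x0c) == 5000   (50 ns, Ultra)
 *	scsipi_sync_factor_to_freq(0x0c)   == 20000  (20 MHz)
 *
 * Off-table factors use the conventional SCSI-2 encoding, e.g.
 * factor 25 -> period 25 * 4 * 100 == 10000 (100 ns) -> 10000 (10 MHz).
 */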
2797 
2798 static inline void
2799 scsipi_adapter_lock(struct scsipi_adapter *adapt)
2800 {
2801 
2802 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2803 		KERNEL_LOCK(1, NULL);
2804 }
2805 
2806 static inline void
2807 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
2808 {
2809 
2810 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2811 		KERNEL_UNLOCK_ONE(NULL);
2812 }
2813 
2814 void
2815 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
2816 {
2817 	struct scsipi_adapter *adapt = chan->chan_adapter;
2818 
2819 	scsipi_adapter_lock(adapt);
2820 	(adapt->adapt_minphys)(bp);
2821 	scsipi_adapter_unlock(adapt);
2822 }
2823 
2824 void
2825 scsipi_adapter_request(struct scsipi_channel *chan,
2826 	scsipi_adapter_req_t req, void *arg)
2827 
2828 {
2829 	struct scsipi_adapter *adapt = chan->chan_adapter;
2830 
2831 	scsipi_adapter_lock(adapt);
2832 	SDT_PROBE3(scsi, base, adapter, request__start,  chan, req, arg);
2833 	(adapt->adapt_request)(chan, req, arg);
2834 	SDT_PROBE3(scsi, base, adapter, request__done,  chan, req, arg);
2835 	scsipi_adapter_unlock(adapt);
2836 }
2837 
2838 int
2839 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
2840 	void *data, int flag, struct proc *p)
2841 {
2842 	struct scsipi_adapter *adapt = chan->chan_adapter;
2843 	int error;
2844 
2845 	if (adapt->adapt_ioctl == NULL)
2846 		return ENOTTY;
2847 
2848 	scsipi_adapter_lock(adapt);
2849 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
2850 	scsipi_adapter_unlock(adapt);
2851 	return error;
2852 }
2853 
2854 int
2855 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
2856 {
2857 	int error;
2858 
2859 	scsipi_adapter_lock(adapt);
2860 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
2861 	scsipi_adapter_unlock(adapt);
2862 	return error;
2863 }
2864 
2865 #ifdef SCSIPI_DEBUG
2866 /*
2867  * Given a scsipi_xfer, dump the request, in all its glory
2868  */
2869 void
2870 show_scsipi_xs(struct scsipi_xfer *xs)
2871 {
2872 
2873 	printf("xs(%p): ", xs);
2874 	printf("xs_control(0x%08x)", xs->xs_control);
2875 	printf("xs_status(0x%08x)", xs->xs_status);
2876 	printf("periph(%p)", xs->xs_periph);
2877 	printf("retr(0x%x)", xs->xs_retries);
2878 	printf("timo(0x%x)", xs->timeout);
2879 	printf("cmd(%p)", xs->cmd);
2880 	printf("len(0x%x)", xs->cmdlen);
2881 	printf("data(%p)", xs->data);
2882 	printf("len(0x%x)", xs->datalen);
2883 	printf("res(0x%x)", xs->resid);
2884 	printf("err(0x%x)", xs->error);
2885 	printf("bp(%p)", xs->bp);
2886 	show_scsipi_cmd(xs);
2887 }
2888 
2889 void
2890 show_scsipi_cmd(struct scsipi_xfer *xs)
2891 {
2892 	u_char *b = (u_char *) xs->cmd;
2893 	int i = 0;
2894 
2895 	scsipi_printaddr(xs->xs_periph);
2896 	printf(" command: ");
2897 
2898 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2899 		while (i < xs->cmdlen) {
2900 			if (i)
2901 				printf(",");
2902 			printf("0x%x", b[i++]);
2903 		}
2904 		printf("-[%d bytes]\n", xs->datalen);
2905 		if (xs->datalen)
2906 			show_mem(xs->data, uimin(64, xs->datalen));
2907 	} else
2908 		printf("-RESET-\n");
2909 }
2910 
2911 void
2912 show_mem(u_char *address, int num)
2913 {
2914 	int x;
2915 
2916 	printf("------------------------------");
2917 	for (x = 0; x < num; x++) {
2918 		if ((x % 16) == 0)
2919 			printf("\n%03d: ", x);
2920 		printf("%02x ", *address++);
2921 	}
2922 	printf("\n------------------------------\n");
2923 }
2924 #endif /* SCSIPI_DEBUG */
2925