xref: /freebsd/sys/cam/cam_periph.c (revision e17f5b1d)
1 /*-
2  * Common functions for CAM "type" (peripheral) drivers.
3  *
4  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5  *
6  * Copyright (c) 1997, 1998 Justin T. Gibbs.
7  * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification, immediately at the beginning of the file.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/types.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 #include <sys/bio.h>
41 #include <sys/conf.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/buf.h>
45 #include <sys/proc.h>
46 #include <sys/devicestat.h>
47 #include <sys/bus.h>
48 #include <sys/sbuf.h>
49 #include <sys/sysctl.h>
50 #include <vm/vm.h>
51 #include <vm/vm_extern.h>
52 
53 #include <cam/cam.h>
54 #include <cam/cam_ccb.h>
55 #include <cam/cam_queue.h>
56 #include <cam/cam_xpt_periph.h>
57 #include <cam/cam_periph.h>
58 #include <cam/cam_debug.h>
59 #include <cam/cam_sim.h>
60 
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
63 #include <cam/scsi/scsi_pass.h>
64 
65 static	u_int		camperiphnextunit(struct periph_driver *p_drv,
66 					  u_int newunit, int wired,
67 					  path_id_t pathid, target_id_t target,
68 					  lun_id_t lun);
69 static	u_int		camperiphunit(struct periph_driver *p_drv,
70 				      path_id_t pathid, target_id_t target,
71 				      lun_id_t lun);
72 static	void		camperiphdone(struct cam_periph *periph,
73 					union ccb *done_ccb);
74 static  void		camperiphfree(struct cam_periph *periph);
75 static int		camperiphscsistatuserror(union ccb *ccb,
76 					        union ccb **orig_ccb,
77 						 cam_flags camflags,
78 						 u_int32_t sense_flags,
79 						 int *openings,
80 						 u_int32_t *relsim_flags,
81 						 u_int32_t *timeout,
82 						 u_int32_t  *action,
83 						 const char **action_string);
84 static	int		camperiphscsisenseerror(union ccb *ccb,
85 					        union ccb **orig_ccb,
86 					        cam_flags camflags,
87 					        u_int32_t sense_flags,
88 					        int *openings,
89 					        u_int32_t *relsim_flags,
90 					        u_int32_t *timeout,
91 					        u_int32_t *action,
92 					        const char **action_string);
93 static void		cam_periph_devctl_notify(union ccb *ccb);
94 
95 static int nperiph_drivers;
96 static int initialized = 0;
97 struct periph_driver **periph_drivers;
98 
99 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");
100 
101 static int periph_selto_delay = 1000;
102 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
103 static int periph_noresrc_delay = 500;
104 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
105 static int periph_busy_delay = 500;
106 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
107 
108 static u_int periph_mapmem_thresh = 65536;
109 SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
110     &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");
111 
112 void
113 periphdriver_register(void *data)
114 {
115 	struct periph_driver *drv = (struct periph_driver *)data;
116 	struct periph_driver **newdrivers, **old;
117 	int ndrivers;
118 
119 again:
120 	ndrivers = nperiph_drivers + 2;
121 	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
122 			    M_WAITOK);
123 	xpt_lock_buses();
124 	if (ndrivers != nperiph_drivers + 2) {
125 		/*
126 		 * Lost race against itself; go around.
127 		 */
128 		xpt_unlock_buses();
129 		free(newdrivers, M_CAMPERIPH);
130 		goto again;
131 	}
132 	if (periph_drivers)
133 		bcopy(periph_drivers, newdrivers,
134 		      sizeof(*newdrivers) * nperiph_drivers);
135 	newdrivers[nperiph_drivers] = drv;
136 	newdrivers[nperiph_drivers + 1] = NULL;
137 	old = periph_drivers;
138 	periph_drivers = newdrivers;
139 	nperiph_drivers++;
140 	xpt_unlock_buses();
141 	if (old)
142 		free(old, M_CAMPERIPH);
143 	/* If driver marked as early or it is late now, initialize it. */
144 	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
145 	    initialized > 1)
146 		(*drv->init)();
147 }
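
/*
 * Example (illustrative sketch, not part of this file): peripheral drivers
 * normally do not call periphdriver_register() directly.  They declare a
 * struct periph_driver and pass it to the PERIPHDRIVER_DECLARE() macro from
 * <cam/cam_periph.h>, which arranges for this function to be called at boot
 * or module load time.  Using a hypothetical "foo" driver:
 *
 *	static periph_init_t fooinit;
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units)
 *	};
 *
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */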
148 
149 int
150 periphdriver_unregister(void *data)
151 {
152 	struct periph_driver *drv = (struct periph_driver *)data;
153 	int error, n;
154 
155 	/* If driver marked as early or it is late now, deinitialize it. */
156 	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
157 	    initialized > 1) {
158 		if (drv->deinit == NULL) {
159 			printf("CAM periph driver '%s' doesn't have deinit.\n",
160 			    drv->driver_name);
161 			return (EOPNOTSUPP);
162 		}
163 		error = drv->deinit();
164 		if (error != 0)
165 			return (error);
166 	}
167 
168 	xpt_lock_buses();
169 	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
170 		;
171 	KASSERT(n < nperiph_drivers,
172 	    ("Periph driver '%s' was not registered", drv->driver_name));
173 	for (; n + 1 < nperiph_drivers; n++)
174 		periph_drivers[n] = periph_drivers[n + 1];
175 	periph_drivers[n + 1] = NULL;
176 	nperiph_drivers--;
177 	xpt_unlock_buses();
178 	return (0);
179 }
180 
181 void
182 periphdriver_init(int level)
183 {
184 	int	i, early;
185 
186 	initialized = max(initialized, level);
187 	for (i = 0; periph_drivers[i] != NULL; i++) {
188 		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
189 		if (early == initialized)
190 			(*periph_drivers[i]->init)();
191 	}
192 }
193 
194 cam_status
195 cam_periph_alloc(periph_ctor_t *periph_ctor,
196 		 periph_oninv_t *periph_oninvalidate,
197 		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
198 		 char *name, cam_periph_type type, struct cam_path *path,
199 		 ac_callback_t *ac_callback, ac_code code, void *arg)
200 {
201 	struct		periph_driver **p_drv;
202 	struct		cam_sim *sim;
203 	struct		cam_periph *periph;
204 	struct		cam_periph *cur_periph;
205 	path_id_t	path_id;
206 	target_id_t	target_id;
207 	lun_id_t	lun_id;
208 	cam_status	status;
209 	u_int		init_level;
210 
211 	init_level = 0;
212 	/*
213 	 * Handle Hot-Plug scenarios.  If there is already a peripheral
214 	 * of our type assigned to this path, we are likely waiting for
215 	 * final close on an old, invalidated, peripheral.  If this is
216 	 * the case, queue up a deferred call to the peripheral's async
217 	 * handler.  If it looks like a mistaken re-allocation, complain.
218 	 */
219 	if ((periph = cam_periph_find(path, name)) != NULL) {
220 
221 		if ((periph->flags & CAM_PERIPH_INVALID) != 0
222 		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
223 			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
224 			periph->deferred_callback = ac_callback;
225 			periph->deferred_ac = code;
226 			return (CAM_REQ_INPROG);
227 		} else {
228 			printf("cam_periph_alloc: attempt to re-allocate "
229 			       "valid device %s%d rejected flags %#x "
230 			       "refcount %d\n", periph->periph_name,
231 			       periph->unit_number, periph->flags,
232 			       periph->refcount);
233 		}
234 		return (CAM_REQ_INVALID);
235 	}
236 
237 	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
238 					     M_NOWAIT|M_ZERO);
239 
240 	if (periph == NULL)
241 		return (CAM_RESRC_UNAVAIL);
242 
243 	init_level++;
244 
245 
246 	sim = xpt_path_sim(path);
247 	path_id = xpt_path_path_id(path);
248 	target_id = xpt_path_target_id(path);
249 	lun_id = xpt_path_lun_id(path);
250 	periph->periph_start = periph_start;
251 	periph->periph_dtor = periph_dtor;
252 	periph->periph_oninval = periph_oninvalidate;
253 	periph->type = type;
254 	periph->periph_name = name;
255 	periph->scheduled_priority = CAM_PRIORITY_NONE;
256 	periph->immediate_priority = CAM_PRIORITY_NONE;
257 	periph->refcount = 1;		/* Dropped by invalidation. */
258 	periph->sim = sim;
259 	SLIST_INIT(&periph->ccb_list);
260 	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
261 	if (status != CAM_REQ_CMP)
262 		goto failure;
263 	periph->path = path;
264 
265 	xpt_lock_buses();
266 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
267 		if (strcmp((*p_drv)->driver_name, name) == 0)
268 			break;
269 	}
270 	if (*p_drv == NULL) {
271 		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
272 		xpt_unlock_buses();
273 		xpt_free_path(periph->path);
274 		free(periph, M_CAMPERIPH);
275 		return (CAM_REQ_INVALID);
276 	}
277 	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
278 	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
279 	while (cur_periph != NULL
280 	    && cur_periph->unit_number < periph->unit_number)
281 		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
282 	if (cur_periph != NULL) {
283 		KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list"));
284 		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
285 	} else {
286 		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
287 		(*p_drv)->generation++;
288 	}
289 	xpt_unlock_buses();
290 
291 	init_level++;
292 
293 	status = xpt_add_periph(periph);
294 	if (status != CAM_REQ_CMP)
295 		goto failure;
296 
297 	init_level++;
298 	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));
299 
300 	status = periph_ctor(periph, arg);
301 
302 	if (status == CAM_REQ_CMP)
303 		init_level++;
304 
305 failure:
306 	switch (init_level) {
307 	case 4:
308 		/* Initialized successfully */
309 		break;
310 	case 3:
311 		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
312 		xpt_remove_periph(periph);
313 		/* FALLTHROUGH */
314 	case 2:
315 		xpt_lock_buses();
316 		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
317 		xpt_unlock_buses();
318 		xpt_free_path(periph->path);
319 		/* FALLTHROUGH */
320 	case 1:
321 		free(periph, M_CAMPERIPH);
322 		/* FALLTHROUGH */
323 	case 0:
324 		/* No cleanup to perform. */
325 		break;
326 	default:
327 		panic("%s: Unknown init level", __func__);
328 	}
329 	return(status);
330 }
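
/*
 * Example (illustrative sketch): a driver's async callback usually calls
 * cam_periph_alloc() when it receives AC_FOUND_DEVICE, passing its
 * constructor, invalidation hook, destructor and start routine.  With
 * hypothetical fooregister()/foooninvalidate()/foocleanup()/foostart()
 * handlers and the async callback fooasync():
 *
 *	status = cam_periph_alloc(fooregister, foooninvalidate, foocleanup,
 *	    foostart, "foo", CAM_PERIPH_BIO, path, fooasync,
 *	    AC_FOUND_DEVICE, cgd);
 *	if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *		printf("fooasync: unable to attach to new device "
 *		    "due to status %#x\n", status);
 */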
331 
332 /*
333  * Find a peripheral structure with the specified path, target, lun,
334  * and (optionally) type.  If the name is NULL, this function will return
335  * the first peripheral driver that matches the specified path.
336  */
337 struct cam_periph *
338 cam_periph_find(struct cam_path *path, char *name)
339 {
340 	struct periph_driver **p_drv;
341 	struct cam_periph *periph;
342 
343 	xpt_lock_buses();
344 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
345 
346 		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
347 			continue;
348 
349 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
350 			if (xpt_path_comp(periph->path, path) == 0) {
351 				xpt_unlock_buses();
352 				cam_periph_assert(periph, MA_OWNED);
353 				return(periph);
354 			}
355 		}
356 		if (name != NULL) {
357 			xpt_unlock_buses();
358 			return(NULL);
359 		}
360 	}
361 	xpt_unlock_buses();
362 	return(NULL);
363 }
364 
365 /*
366  * Find peripheral driver instances attached to the specified path.
367  */
368 int
369 cam_periph_list(struct cam_path *path, struct sbuf *sb)
370 {
371 	struct sbuf local_sb;
372 	struct periph_driver **p_drv;
373 	struct cam_periph *periph;
374 	int count;
375 	int sbuf_alloc_len;
376 
377 	sbuf_alloc_len = 16;
378 retry:
379 	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
380 	count = 0;
381 	xpt_lock_buses();
382 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
383 
384 		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
385 			if (xpt_path_comp(periph->path, path) != 0)
386 				continue;
387 
388 			if (sbuf_len(&local_sb) != 0)
389 				sbuf_cat(&local_sb, ",");
390 
391 			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
392 				    periph->unit_number);
393 
394 			if (sbuf_error(&local_sb) == ENOMEM) {
395 				sbuf_alloc_len *= 2;
396 				xpt_unlock_buses();
397 				sbuf_delete(&local_sb);
398 				goto retry;
399 			}
400 			count++;
401 		}
402 	}
403 	xpt_unlock_buses();
404 	sbuf_finish(&local_sb);
405 	if (sbuf_len(sb) != 0)
406 		sbuf_cat(sb, ",");
407 	sbuf_cat(sb, sbuf_data(&local_sb));
408 	sbuf_delete(&local_sb);
409 	return (count);
410 }
411 
412 int
413 cam_periph_acquire(struct cam_periph *periph)
414 {
415 	int status;
416 
417 	if (periph == NULL)
418 		return (EINVAL);
419 
420 	status = ENOENT;
421 	xpt_lock_buses();
422 	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
423 		periph->refcount++;
424 		status = 0;
425 	}
426 	xpt_unlock_buses();
427 
428 	return (status);
429 }
430 
431 void
432 cam_periph_doacquire(struct cam_periph *periph)
433 {
434 
435 	xpt_lock_buses();
436 	KASSERT(periph->refcount >= 1,
437 	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
438 	periph->refcount++;
439 	xpt_unlock_buses();
440 }
441 
442 void
443 cam_periph_release_locked_buses(struct cam_periph *periph)
444 {
445 
446 	cam_periph_assert(periph, MA_OWNED);
447 	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
448 	if (--periph->refcount == 0)
449 		camperiphfree(periph);
450 }
451 
452 void
453 cam_periph_release_locked(struct cam_periph *periph)
454 {
455 
456 	if (periph == NULL)
457 		return;
458 
459 	xpt_lock_buses();
460 	cam_periph_release_locked_buses(periph);
461 	xpt_unlock_buses();
462 }
463 
464 void
465 cam_periph_release(struct cam_periph *periph)
466 {
467 	struct mtx *mtx;
468 
469 	if (periph == NULL)
470 		return;
471 
472 	cam_periph_assert(periph, MA_NOTOWNED);
473 	mtx = cam_periph_mtx(periph);
474 	mtx_lock(mtx);
475 	cam_periph_release_locked(periph);
476 	mtx_unlock(mtx);
477 }
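
/*
 * Example (illustrative sketch): code that hands a periph pointer to
 * deferred or asynchronous work takes a reference first and drops it when
 * the work is done:
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return (ENXIO);
 *	... use periph, possibly after dropping its lock ...
 *	cam_periph_release(periph);
 */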
478 
479 /*
480  * hold/unhold act as mutual exclusion for sections of the code that
481  * need to sleep and want to make sure that other sections that
482  * will interfere are held off. This only protects exclusive sections
483  * from each other.
484  */
485 int
486 cam_periph_hold(struct cam_periph *periph, int priority)
487 {
488 	int error;
489 
490 	/*
491 	 * Increment the reference count on the peripheral
492 	 * while we wait for our lock attempt to succeed
493 	 * to ensure the peripheral doesn't disappear out
494 	 * from under us while we sleep.
495 	 */
496 
497 	if (cam_periph_acquire(periph) != 0)
498 		return (ENXIO);
499 
500 	cam_periph_assert(periph, MA_OWNED);
501 	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
502 		periph->flags |= CAM_PERIPH_LOCK_WANTED;
503 		if ((error = cam_periph_sleep(periph, periph, priority,
504 		    "caplck", 0)) != 0) {
505 			cam_periph_release_locked(periph);
506 			return (error);
507 		}
508 		if (periph->flags & CAM_PERIPH_INVALID) {
509 			cam_periph_release_locked(periph);
510 			return (ENXIO);
511 		}
512 	}
513 
514 	periph->flags |= CAM_PERIPH_LOCKED;
515 	return (0);
516 }
517 
518 void
519 cam_periph_unhold(struct cam_periph *periph)
520 {
521 
522 	cam_periph_assert(periph, MA_OWNED);
523 
524 	periph->flags &= ~CAM_PERIPH_LOCKED;
525 	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
526 		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
527 		wakeup(periph);
528 	}
529 
530 	cam_periph_release_locked(periph);
531 }
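
/*
 * Example (illustrative sketch): open/close style routines bracket their
 * sleepable setup work with hold/unhold so that only one such exclusive
 * section runs at a time.  Called with the periph lock held:
 *
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0)
 *		return (error);
 *	... issue commands, possibly sleeping ...
 *	cam_periph_unhold(periph);
 */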
532 
533 /*
534  * Look for the next unit number that is not currently in use for this
535  * peripheral type starting at "newunit".  Also exclude unit numbers that
536  * are reserved for future "hardwiring" unless we already know that this
537  * is a potential wired device.  Only assume that the device is "wired" the
538  * first time through the loop since after that we'll be looking at unit
539  * numbers that did not match a wiring entry.
540  */
541 static u_int
542 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
543 		  path_id_t pathid, target_id_t target, lun_id_t lun)
544 {
545 	struct	cam_periph *periph;
546 	char	*periph_name;
547 	int	i, val, dunit, r;
548 	const char *dname, *strval;
549 
550 	periph_name = p_drv->driver_name;
551 	for (;;newunit++) {
552 
553 		for (periph = TAILQ_FIRST(&p_drv->units);
554 		     periph != NULL && periph->unit_number != newunit;
555 		     periph = TAILQ_NEXT(periph, unit_links))
556 			;
557 
558 		if (periph != NULL && periph->unit_number == newunit) {
559 			if (wired != 0) {
560 				xpt_print(periph->path, "Duplicate Wired "
561 				    "Device entry!\n");
562 				xpt_print(periph->path, "Second device (%s "
563 				    "device at scbus%d target %d lun %d) will "
564 				    "not be wired\n", periph_name, pathid,
565 				    target, lun);
566 				wired = 0;
567 			}
568 			continue;
569 		}
570 		if (wired)
571 			break;
572 
573 		/*
574 		 * Don't match entries like "da 4" as a wired down
575 		 * device, but do match entries like "da 4 target 5"
576 		 * or even "da 4 scbus 1".
577 		 */
578 		i = 0;
579 		dname = periph_name;
580 		for (;;) {
581 			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
582 			if (r != 0)
583 				break;
584 			/* if no "target" and no specific scbus, skip */
585 			if (resource_int_value(dname, dunit, "target", &val) &&
586 			    (resource_string_value(dname, dunit, "at",&strval)||
587 			     strcmp(strval, "scbus") == 0))
588 				continue;
589 			if (newunit == dunit)
590 				break;
591 		}
592 		if (r != 0)
593 			break;
594 	}
595 	return (newunit);
596 }
597 
598 static u_int
599 camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
600 	      target_id_t target, lun_id_t lun)
601 {
602 	u_int	unit;
603 	int	wired, i, val, dunit;
604 	const char *dname, *strval;
605 	char	pathbuf[32], *periph_name;
606 
607 	periph_name = p_drv->driver_name;
608 	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
609 	unit = 0;
610 	i = 0;
611 	dname = periph_name;
612 	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
613 	     wired = 0) {
614 		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
615 			if (strcmp(strval, pathbuf) != 0)
616 				continue;
617 			wired++;
618 		}
619 		if (resource_int_value(dname, dunit, "target", &val) == 0) {
620 			if (val != target)
621 				continue;
622 			wired++;
623 		}
624 		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
625 			if (val != lun)
626 				continue;
627 			wired++;
628 		}
629 		if (wired != 0) {
630 			unit = dunit;
631 			break;
632 		}
633 	}
634 
635 	/*
636 	 * Either start from 0 looking for the next unit or from
637 	 * the unit number given in the resource config.  This way,
638 	 * if we have wildcard matches, we don't return the same
639 	 * unit number twice.
640 	 */
641 	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);
642 
643 	return (unit);
644 }
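
/*
 * Example (illustrative): the wiring entries consulted above come from the
 * kernel environment / device.hints.  Hypothetical hints wiring unit 4 of a
 * "foo" peripheral driver to bus 1, target 5, lun 0 would look like:
 *
 *	hint.foo.4.at="scbus1"
 *	hint.foo.4.target="5"
 *	hint.foo.4.lun="0"
 */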
645 
646 void
647 cam_periph_invalidate(struct cam_periph *periph)
648 {
649 
650 	cam_periph_assert(periph, MA_OWNED);
651 	/*
652 	 * We only tear down the device the first time a peripheral is
653 	 * invalidated.
654 	 */
655 	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
656 		return;
657 
658 	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
659 	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
660 		struct sbuf sb;
661 		char buffer[160];
662 
663 		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
664 		xpt_denounce_periph_sbuf(periph, &sb);
665 		sbuf_finish(&sb);
666 		sbuf_putbuf(&sb);
667 	}
668 	periph->flags |= CAM_PERIPH_INVALID;
669 	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
670 	if (periph->periph_oninval != NULL)
671 		periph->periph_oninval(periph);
672 	cam_periph_release_locked(periph);
673 }
674 
675 static void
676 camperiphfree(struct cam_periph *periph)
677 {
678 	struct periph_driver **p_drv;
679 	struct periph_driver *drv;
680 
681 	cam_periph_assert(periph, MA_OWNED);
682 	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
683 	    periph->periph_name, periph->unit_number));
684 	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
685 		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
686 			break;
687 	}
688 	if (*p_drv == NULL) {
689 		printf("camperiphfree: attempt to free non-existent periph\n");
690 		return;
691 	}
692 	/*
693 	 * Cache a pointer to the periph_driver structure.  If a
694 	 * periph_driver is added or removed from the array (see
695 	 * periphdriver_register()) while we drop the topology lock
696 	 * below, p_drv may change.  This doesn't protect against this
697 	 * particular periph_driver going away.  That will require full
698 	 * reference counting in the periph_driver infrastructure.
699 	 */
700 	drv = *p_drv;
701 
702 	/*
703 	 * We need to set this flag before dropping the topology lock, to
704 	 * let anyone who is traversing the list know that this peripheral is
705 	 * about to be freed, and that there will be no more reference count
706 	 * checks.
707 	 */
708 	periph->flags |= CAM_PERIPH_FREE;
709 
710 	/*
711 	 * The peripheral destructor semantics dictate calling with only the
712 	 * SIM mutex held.  Since it might sleep, it should not be called
713 	 * with the topology lock held.
714 	 */
715 	xpt_unlock_buses();
716 
717 	/*
718 	 * We need to call the peripheral destructor prior to removing the
719 	 * peripheral from the list.  Otherwise, we risk running into a
720 	 * scenario where the peripheral unit number may get reused
721 	 * (because it has been removed from the list), but some resources
722 	 * used by the peripheral are still hanging around.  In particular,
723 	 * the devfs nodes used by some peripherals like the pass(4) driver
724 	 * aren't fully cleaned up until the destructor is run.  If the
725 	 * unit number is reused before the devfs instance is fully gone,
726 	 * devfs will panic.
727 	 */
728 	if (periph->periph_dtor != NULL)
729 		periph->periph_dtor(periph);
730 
731 	/*
732 	 * The peripheral list is protected by the topology lock. We have to
733 	 * remove the periph from the drv list before we call deferred_ac. The
734 	 * AC_FOUND_DEVICE callback won't create a new periph if it's still there.
735 	 */
736 	xpt_lock_buses();
737 
738 	TAILQ_REMOVE(&drv->units, periph, unit_links);
739 	drv->generation++;
740 
741 	xpt_remove_periph(periph);
742 
743 	xpt_unlock_buses();
744 	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
745 		xpt_print(periph->path, "Periph destroyed\n");
746 	else
747 		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
748 
749 	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
750 		union ccb ccb;
751 		void *arg;
752 
753 		switch (periph->deferred_ac) {
754 		case AC_FOUND_DEVICE:
755 			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
756 			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
757 			xpt_action(&ccb);
758 			arg = &ccb;
759 			break;
760 		case AC_PATH_REGISTERED:
761 			xpt_path_inq(&ccb.cpi, periph->path);
762 			arg = &ccb;
763 			break;
764 		default:
765 			arg = NULL;
766 			break;
767 		}
768 		periph->deferred_callback(NULL, periph->deferred_ac,
769 					  periph->path, arg);
770 	}
771 	xpt_free_path(periph->path);
772 	free(periph, M_CAMPERIPH);
773 	xpt_lock_buses();
774 }
775 
776 /*
777  * Map user virtual pointers into kernel virtual address space, so we can
778  * access the memory.  This is now a generic function that centralizes most
779  * of the sanity checks on the data flags, if any.
780  * This also only works for up to MAXPHYS memory.  Since we use
781  * buffers to map stuff in and out, we're limited to the buffer size.
782  */
783 int
784 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
785     u_int maxmap)
786 {
787 	int numbufs, i;
788 	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
789 	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
790 	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
791 	bool misaligned[CAM_PERIPH_MAXMAPS];
792 
793 	bzero(mapinfo, sizeof(*mapinfo));
794 	if (maxmap == 0)
795 		maxmap = DFLTPHYS;	/* traditional default */
796 	else if (maxmap > MAXPHYS)
797 		maxmap = MAXPHYS;	/* for safety */
798 	switch(ccb->ccb_h.func_code) {
799 	case XPT_DEV_MATCH:
800 		if (ccb->cdm.match_buf_len == 0) {
801 			printf("cam_periph_mapmem: invalid match buffer "
802 			       "length 0\n");
803 			return(EINVAL);
804 		}
805 		if (ccb->cdm.pattern_buf_len > 0) {
806 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
807 			lengths[0] = ccb->cdm.pattern_buf_len;
808 			dirs[0] = CAM_DIR_OUT;
809 			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
810 			lengths[1] = ccb->cdm.match_buf_len;
811 			dirs[1] = CAM_DIR_IN;
812 			numbufs = 2;
813 		} else {
814 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
815 			lengths[0] = ccb->cdm.match_buf_len;
816 			dirs[0] = CAM_DIR_IN;
817 			numbufs = 1;
818 		}
819 		/*
820 		 * This request will not go to the hardware, no reason
821 		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
822 		 */
823 		maxmap = MAXPHYS;
824 		break;
825 	case XPT_SCSI_IO:
826 	case XPT_CONT_TARGET_IO:
827 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
828 			return(0);
829 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
830 			return (EINVAL);
831 		data_ptrs[0] = &ccb->csio.data_ptr;
832 		lengths[0] = ccb->csio.dxfer_len;
833 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
834 		numbufs = 1;
835 		break;
836 	case XPT_ATA_IO:
837 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
838 			return(0);
839 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
840 			return (EINVAL);
841 		data_ptrs[0] = &ccb->ataio.data_ptr;
842 		lengths[0] = ccb->ataio.dxfer_len;
843 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
844 		numbufs = 1;
845 		break;
846 	case XPT_MMC_IO:
847 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
848 			return(0);
849 		/* Two mappings: one for cmd->data and one for cmd->data->data */
850 		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
851 		lengths[0] = sizeof(struct mmc_data *);
852 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
853 		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
854 		lengths[1] = ccb->mmcio.cmd.data->len;
855 		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
856 		numbufs = 2;
857 		break;
858 	case XPT_SMP_IO:
859 		data_ptrs[0] = &ccb->smpio.smp_request;
860 		lengths[0] = ccb->smpio.smp_request_len;
861 		dirs[0] = CAM_DIR_OUT;
862 		data_ptrs[1] = &ccb->smpio.smp_response;
863 		lengths[1] = ccb->smpio.smp_response_len;
864 		dirs[1] = CAM_DIR_IN;
865 		numbufs = 2;
866 		break;
867 	case XPT_NVME_IO:
868 	case XPT_NVME_ADMIN:
869 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
870 			return (0);
871 		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
872 			return (EINVAL);
873 		data_ptrs[0] = &ccb->nvmeio.data_ptr;
874 		lengths[0] = ccb->nvmeio.dxfer_len;
875 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
876 		numbufs = 1;
877 		break;
878 	case XPT_DEV_ADVINFO:
879 		if (ccb->cdai.bufsiz == 0)
880 			return (0);
881 
882 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
883 		lengths[0] = ccb->cdai.bufsiz;
884 		dirs[0] = CAM_DIR_IN;
885 		numbufs = 1;
886 
887 		/*
888 		 * This request will not go to the hardware, no reason
889 		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
890 		 */
891 		maxmap = MAXPHYS;
892 		break;
893 	default:
894 		return(EINVAL);
895 		break; /* NOTREACHED */
896 	}
897 
898 	/*
899 	 * Check the transfer length and permissions first, so we don't
900 	 * have to unmap any previously mapped buffers.
901 	 */
902 	for (i = 0; i < numbufs; i++) {
903 		if (lengths[i] > maxmap) {
904 			printf("cam_periph_mapmem: attempt to map %lu bytes, "
905 			       "which is greater than %lu\n",
906 	       (u_long)(lengths[i]), (u_long)maxmap);
907 			return (E2BIG);
908 		}
909 
910 		/*
911 		 * The userland data pointer passed in may not be page
912 		 * aligned.  vmapbuf() truncates the address to a page
913 		 * boundary, so if the address isn't page aligned, we'll
914 		 * need enough space for the given transfer length, plus
915 		 * whatever extra space is necessary to make it to the page
916 		 * boundary.
917 		 */
918 		misaligned[i] = (lengths[i] +
919 		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK) > MAXPHYS);
920 	}
921 
922 	/*
923 	 * This keeps the kernel stack of current thread from getting
924 	 * swapped.  In low-memory situations where the kernel stack might
925 	 * otherwise get swapped out, this holds it and allows the thread
926 	 * to make progress and release the kernel mapped pages sooner.
927 	 *
928 	 * XXX KDM should I use P_NOSWAP instead?
929 	 */
930 	PHOLD(curproc);
931 
932 	for (i = 0; i < numbufs; i++) {
933 
934 		/* Save the user's data address. */
935 		mapinfo->orig[i] = *data_ptrs[i];
936 
937 		/*
938 		 * For small buffers use malloc+copyin/copyout instead of
939 		 * mapping to KVA to avoid expensive TLB shootdowns.  For
940 		 * small allocations malloc is backed by UMA, and so much
941 		 * cheaper on SMP systems.
942 		 */
943 		if ((lengths[i] <= periph_mapmem_thresh || misaligned[i]) &&
944 		    ccb->ccb_h.func_code != XPT_MMC_IO) {
945 			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
946 			    M_WAITOK);
947 			if (dirs[i] != CAM_DIR_IN) {
948 				if (copyin(mapinfo->orig[i], *data_ptrs[i],
949 				    lengths[i]) != 0) {
950 					free(*data_ptrs[i], M_CAMPERIPH);
951 					*data_ptrs[i] = mapinfo->orig[i];
952 					goto fail;
953 				}
954 			} else
955 				bzero(*data_ptrs[i], lengths[i]);
956 			continue;
957 		}
958 
959 		/*
960 		 * Get the buffer.
961 		 */
962 		mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);
963 
964 		/* put our pointer in the data slot */
965 		mapinfo->bp[i]->b_data = *data_ptrs[i];
966 
967 		/* set the transfer length, we know it's < MAXPHYS */
968 		mapinfo->bp[i]->b_bufsize = lengths[i];
969 
970 		/* set the direction */
971 		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
972 		    BIO_WRITE : BIO_READ;
973 
974 		/* Map the buffer into kernel memory. */
975 		if (vmapbuf(mapinfo->bp[i], 1) < 0) {
976 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
977 			goto fail;
978 		}
979 
980 		/* set our pointer to the new mapped area */
981 		*data_ptrs[i] = mapinfo->bp[i]->b_data;
982 	}
983 
984 	/*
985 	 * Now that we've gotten this far, change ownership to the kernel
986 	 * of the buffers so that we don't run afoul of returning to user
987 	 * space with locks (on the buffer) held.
988 	 */
989 	for (i = 0; i < numbufs; i++) {
990 		if (mapinfo->bp[i])
991 			BUF_KERNPROC(mapinfo->bp[i]);
992 	}
993 
994 	mapinfo->num_bufs_used = numbufs;
995 	return(0);
996 
997 fail:
998 	for (i--; i >= 0; i--) {
999 		if (mapinfo->bp[i]) {
1000 			vunmapbuf(mapinfo->bp[i]);
1001 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
1002 		} else
1003 			free(*data_ptrs[i], M_CAMPERIPH);
1004 		*data_ptrs[i] = mapinfo->orig[i];
1005 	}
1006 	PRELE(curproc);
1007 	return(EACCES);
1008 }
1009 
1010 /*
1011  * Unmap memory segments mapped into kernel virtual address space by
1012  * cam_periph_mapmem().
1013  */
1014 void
1015 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
1016 {
1017 	int numbufs, i;
1018 	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
1019 	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
1020 	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
1021 
1022 	if (mapinfo->num_bufs_used <= 0) {
1023 		/* nothing to free and the process wasn't held. */
1024 		return;
1025 	}
1026 
1027 	switch (ccb->ccb_h.func_code) {
1028 	case XPT_DEV_MATCH:
1029 		if (ccb->cdm.pattern_buf_len > 0) {
1030 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
1031 			lengths[0] = ccb->cdm.pattern_buf_len;
1032 			dirs[0] = CAM_DIR_OUT;
1033 			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
1034 			lengths[1] = ccb->cdm.match_buf_len;
1035 			dirs[1] = CAM_DIR_IN;
1036 			numbufs = 2;
1037 		} else {
1038 			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
1039 			lengths[0] = ccb->cdm.match_buf_len;
1040 			dirs[0] = CAM_DIR_IN;
1041 			numbufs = 1;
1042 		}
1043 		break;
1044 	case XPT_SCSI_IO:
1045 	case XPT_CONT_TARGET_IO:
1046 		data_ptrs[0] = &ccb->csio.data_ptr;
1047 		lengths[0] = ccb->csio.dxfer_len;
1048 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1049 		numbufs = 1;
1050 		break;
1051 	case XPT_ATA_IO:
1052 		data_ptrs[0] = &ccb->ataio.data_ptr;
1053 		lengths[0] = ccb->ataio.dxfer_len;
1054 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1055 		numbufs = 1;
1056 		break;
1057 	case XPT_MMC_IO:
1058 		data_ptrs[0] = (u_int8_t **)&ccb->mmcio.cmd.data;
1059 		lengths[0] = sizeof(struct mmc_data *);
1060 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1061 		data_ptrs[1] = (u_int8_t **)&ccb->mmcio.cmd.data->data;
1062 		lengths[1] = ccb->mmcio.cmd.data->len;
1063 		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
1064 		numbufs = 2;
1065 		break;
1066 	case XPT_SMP_IO:
1067 		data_ptrs[0] = &ccb->smpio.smp_request;
1068 		lengths[0] = ccb->smpio.smp_request_len;
1069 		dirs[0] = CAM_DIR_OUT;
1070 		data_ptrs[1] = &ccb->smpio.smp_response;
1071 		lengths[1] = ccb->smpio.smp_response_len;
1072 		dirs[1] = CAM_DIR_IN;
1073 		numbufs = 2;
1074 		break;
1075 	case XPT_NVME_IO:
1076 	case XPT_NVME_ADMIN:
1077 		data_ptrs[0] = &ccb->nvmeio.data_ptr;
1078 		lengths[0] = ccb->nvmeio.dxfer_len;
1079 		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
1080 		numbufs = 1;
1081 		break;
1082 	case XPT_DEV_ADVINFO:
1083 		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
1084 		lengths[0] = ccb->cdai.bufsiz;
1085 		dirs[0] = CAM_DIR_IN;
1086 		numbufs = 1;
1087 		break;
1088 	default:
1089 		/* allow ourselves to be swapped once again */
1090 		PRELE(curproc);
1091 		return;
1092 		break; /* NOTREACHED */
1093 	}
1094 
1095 	for (i = 0; i < numbufs; i++) {
1096 		if (mapinfo->bp[i]) {
1097 			/* unmap the buffer */
1098 			vunmapbuf(mapinfo->bp[i]);
1099 
1100 			/* release the buffer */
1101 			uma_zfree(pbuf_zone, mapinfo->bp[i]);
1102 		} else {
1103 			if (dirs[i] != CAM_DIR_OUT) {
1104 				copyout(*data_ptrs[i], mapinfo->orig[i],
1105 				    lengths[i]);
1106 			}
1107 			free(*data_ptrs[i], M_CAMPERIPH);
1108 		}
1109 
1110 		/* Set the user's pointer back to the original value */
1111 		*data_ptrs[i] = mapinfo->orig[i];
1112 	}
1113 
1114 	/* allow ourselves to be swapped once again */
1115 	PRELE(curproc);
1116 }
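
/*
 * Example (illustrative sketch): ioctl handlers that accept CCBs carrying
 * user-space data pointers (e.g. the pass(4) driver) wrap the dispatch in a
 * map/unmap pair; errfunc here stands in for the driver's error routine:
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxmap);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, errfunc, CAM_RETRY_SELTO,
 *		    SF_RETRY_UA, NULL);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */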
1117 
1118 int
1119 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
1120 		 int (*error_routine)(union ccb *ccb,
1121 				      cam_flags camflags,
1122 				      u_int32_t sense_flags))
1123 {
1124 	union ccb 	     *ccb;
1125 	int 		     error;
1126 	int		     found;
1127 
1128 	error = found = 0;
1129 
1130 	switch(cmd){
1131 	case CAMGETPASSTHRU:
1132 		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1133 		xpt_setup_ccb(&ccb->ccb_h,
1134 			      ccb->ccb_h.path,
1135 			      CAM_PRIORITY_NORMAL);
1136 		ccb->ccb_h.func_code = XPT_GDEVLIST;
1137 
1138 		/*
1139 		 * Basically, the point of this is that we go through
1140 		 * getting the list of devices, until we find a passthrough
1141 		 * device.  In the current version of the CAM code, the
1142 		 * only way to determine what type of device we're dealing
1143 		 * with is by its name.
1144 		 */
1145 		while (found == 0) {
1146 			ccb->cgdl.index = 0;
1147 			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
1148 			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
1149 
1150 				/* we want the next device in the list */
1151 				xpt_action(ccb);
1152 				if (strncmp(ccb->cgdl.periph_name,
1153 				    "pass", 4) == 0){
1154 					found = 1;
1155 					break;
1156 				}
1157 			}
1158 			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
1159 			    (found == 0)) {
1160 				ccb->cgdl.periph_name[0] = '\0';
1161 				ccb->cgdl.unit_number = 0;
1162 				break;
1163 			}
1164 		}
1165 
1166 		/* copy the result back out */
1167 		bcopy(ccb, addr, sizeof(union ccb));
1168 
1169 		/* and release the ccb */
1170 		xpt_release_ccb(ccb);
1171 
1172 		break;
1173 	default:
1174 		error = ENOTTY;
1175 		break;
1176 	}
1177 	return(error);
1178 }
1179 
1180 static void
1181 cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
1182 {
1183 
1184 	panic("%s: already done with ccb %p", __func__, done_ccb);
1185 }
1186 
1187 static void
1188 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
1189 {
1190 
1191 	/* Caller will release the CCB */
1192 	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
1193 	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
1194 	wakeup(&done_ccb->ccb_h.cbfcnp);
1195 }
1196 
1197 static void
1198 cam_periph_ccbwait(union ccb *ccb)
1199 {
1200 
1201 	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
1202 		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
1203 			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
1204 			    PRIBIO, "cbwait", 0);
1205 	}
1206 	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
1207 	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
1208 	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
1209 	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
1210 	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
1211 }
1212 
1213 /*
1214  * Dispatch a CCB and wait for it to complete.  If the CCB has set a
1215  * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
1216  */
1217 int
1218 cam_periph_runccb(union ccb *ccb,
1219 		  int (*error_routine)(union ccb *ccb,
1220 				       cam_flags camflags,
1221 				       u_int32_t sense_flags),
1222 		  cam_flags camflags, u_int32_t sense_flags,
1223 		  struct devstat *ds)
1224 {
1225 	struct bintime *starttime;
1226 	struct bintime ltime;
1227 	int error;
1228 	bool must_poll;
1229 	uint32_t timeout = 1;
1230 
1231 	starttime = NULL;
1232 	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
1233 	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
1234 	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
1235 	     ccb->ccb_h.func_code, ccb->ccb_h.flags));
1236 
1237 	/*
1238 	 * If the user has supplied a stats structure, and if we understand
1239 	 * this particular type of ccb, record the transaction start.
1240 	 */
1241 	if (ds != NULL &&
1242 	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
1243 	    ccb->ccb_h.func_code == XPT_ATA_IO ||
1244 	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
1245 		starttime = &ltime;
1246 		binuptime(starttime);
1247 		devstat_start_transaction(ds, starttime);
1248 	}
1249 
1250 	/*
1251 	 * We must poll the I/O while we're dumping. The scheduler is normally
1252 	 * stopped for dumping, except when we call doadump from ddb. While the
1253 	 * scheduler is running in this case, we still need to poll the I/O to
1254 	 * avoid sleeping waiting for the ccb to complete.
1255 	 *
1256 	 * A panic triggered dump stops the scheduler, any callback from the
1257 	 * shutdown_post_sync event will run with the scheduler stopped, but
1258 	 * before we're officially dumping. To avoid hanging in adashutdown
1259 	 * initiated commands (or other similar situations), we have to test for
1260 	 * SCHEDULER_STOPPED() here as well.
1261 	 *
1262 	 * To avoid locking problems, dumping/polling callers must call
1263 	 * without a periph lock held.
1264 	 */
1265 	must_poll = dumping || SCHEDULER_STOPPED();
1266 	ccb->ccb_h.cbfcnp = cam_periph_done;
1267 
1268 	/*
1269 	 * If we're polling, then we need to ensure that we have ample resources
1270 	 * in the periph.  cam_periph_error can reschedule the ccb by calling
1271 	 * xpt_action and returning ERESTART, so we have to effect the polling
1272 	 * in the do loop below.
1273 	 */
1274 	if (must_poll) {
1275 		timeout = xpt_poll_setup(ccb);
1276 	}
1277 
1278 	if (timeout == 0) {
1279 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1280 		error = EBUSY;
1281 	} else {
1282 		xpt_action(ccb);
1283 		do {
1284 			if (must_poll) {
1285 				xpt_pollwait(ccb, timeout);
1286 				timeout = ccb->ccb_h.timeout * 10;
1287 			} else {
1288 				cam_periph_ccbwait(ccb);
1289 			}
1290 			if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
1291 				error = 0;
1292 			else if (error_routine != NULL) {
1293 				ccb->ccb_h.cbfcnp = cam_periph_done;
1294 				error = (*error_routine)(ccb, camflags, sense_flags);
1295 			} else
1296 				error = 0;
1297 		} while (error == ERESTART);
1298 	}
1299 
1300 	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1301 		cam_release_devq(ccb->ccb_h.path,
1302 				 /* relsim_flags */0,
1303 				 /* openings */0,
1304 				 /* timeout */0,
1305 				 /* getcount_only */ FALSE);
1306 		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1307 	}
1308 
1309 	if (ds != NULL) {
1310 		uint32_t bytes;
1311 		devstat_tag_type tag;
1312 		bool valid = true;
1313 
1314 		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1315 			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
1316 			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
1317 		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1318 			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
1319 			tag = (devstat_tag_type)0;
1320 		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
1321 			bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
1322 			tag = (devstat_tag_type)0;
1323 		} else {
1324 			valid = false;
1325 		}
1326 		if (valid)
1327 			devstat_end_transaction(ds, bytes, tag,
1328 			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
1329 			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
1330 			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
1331 	}
1332 
1333 	return(error);
1334 }
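
/*
 * Example (illustrative sketch): a typical synchronous command from a
 * peripheral driver allocates a CCB, fills it in, runs it and releases it.
 * foo_error() and softc here are hypothetical stand-ins for the driver's
 * error handler (built on cam_periph_error()) and per-instance state:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, foo_error, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->disk->d_devstat);
 *	xpt_release_ccb(ccb);
 */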
1335 
1336 void
1337 cam_freeze_devq(struct cam_path *path)
1338 {
1339 	struct ccb_hdr ccb_h;
1340 
1341 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
1342 	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
1343 	ccb_h.func_code = XPT_NOOP;
1344 	ccb_h.flags = CAM_DEV_QFREEZE;
1345 	xpt_action((union ccb *)&ccb_h);
1346 }
1347 
1348 u_int32_t
1349 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
1350 		 u_int32_t openings, u_int32_t arg,
1351 		 int getcount_only)
1352 {
1353 	struct ccb_relsim crs;
1354 
1355 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
1356 	    relsim_flags, openings, arg, getcount_only));
1357 	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
1358 	crs.ccb_h.func_code = XPT_REL_SIMQ;
1359 	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
1360 	crs.release_flags = relsim_flags;
1361 	crs.openings = openings;
1362 	crs.release_timeout = arg;
1363 	xpt_action((union ccb *)&crs);
1364 	return (crs.qfrozen_cnt);
1365 }
1366 
1367 #define saved_ccb_ptr ppriv_ptr0
1368 static void
1369 camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
1370 {
1371 	union ccb      *saved_ccb;
1372 	cam_status	status;
1373 	struct scsi_start_stop_unit *scsi_cmd;
1374 	int		error = 0, error_code, sense_key, asc, ascq;
1375 
1376 	scsi_cmd = (struct scsi_start_stop_unit *)
1377 	    &done_ccb->csio.cdb_io.cdb_bytes;
1378 	status = done_ccb->ccb_h.status;
1379 
1380 	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1381 		if (scsi_extract_sense_ccb(done_ccb,
1382 		    &error_code, &sense_key, &asc, &ascq)) {
1383 			/*
1384 			 * If the error is "invalid field in CDB",
1385 			 * and the load/eject flag is set, turn the
1386 			 * flag off and try again.  This is just in
1387 			 * case the drive in question barfs on the
1388 			 * load eject flag.  The CAM code should set
1389 			 * the load/eject flag by default for
1390 			 * removable media.
1391 			 */
1392 			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
1393 			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
1394 			     (asc == 0x24) && (ascq == 0x00)) {
1395 				scsi_cmd->how &= ~SSS_LOEJ;
1396 				if (status & CAM_DEV_QFRZN) {
1397 					cam_release_devq(done_ccb->ccb_h.path,
1398 					    0, 0, 0, 0);
1399 					done_ccb->ccb_h.status &=
1400 					    ~CAM_DEV_QFRZN;
1401 				}
1402 				xpt_action(done_ccb);
1403 				goto out;
1404 			}
1405 		}
1406 		error = cam_periph_error(done_ccb, 0,
1407 		    SF_RETRY_UA | SF_NO_PRINT);
1408 		if (error == ERESTART)
1409 			goto out;
1410 		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
1411 			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1412 			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1413 		}
1414 	} else {
1415 		/*
1416 		 * If we have successfully taken a device from the not
1417 		 * ready to ready state, re-scan the device and re-get
1418 		 * the inquiry information.  Many devices (mostly disks)
1419 		 * don't properly report their inquiry information unless
1420 		 * they are spun up.
1421 		 */
1422 		if (scsi_cmd->opcode == START_STOP_UNIT)
1423 			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
1424 	}
1425 
1426 	/* If we tried long wait and still failed, remember that. */
1427 	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
1428 	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
1429 		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
1430 		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
1431 			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
1432 	}
1433 
1434 	/*
1435 	 * After the recovery action(s) have completed, return to the original
1436 	 * CCB.  If the recovery CCB has failed, considering its own possible
1437 	 * retries and recovery, assume we are back in the state we were in
1438 	 * originally, but with no recovery hopes left.  In that case, after
1439 	 * the final attempt below, we cancel any further retries, which also
1440 	 * blocks any new recovery attempts for this CCB, and the result will
1441 	 * be the final one returned to the CCB owner.
1442 	 */
1443 	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
1444 	bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
1445 	xpt_free_ccb(saved_ccb);
1446 	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
1447 		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
1448 	if (error != 0)
1449 		done_ccb->ccb_h.retry_count = 0;
1450 	xpt_action(done_ccb);
1451 
1452 out:
1453 	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
1454 	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
1455 }
1456 
1457 /*
1458  * Generic Async Event handler.  Peripheral drivers usually
1459  * filter out the events that require personal attention,
1460  * and leave the rest to this function.
1461  */
1462 void
1463 cam_periph_async(struct cam_periph *periph, u_int32_t code,
1464 		 struct cam_path *path, void *arg)
1465 {
1466 	switch (code) {
1467 	case AC_LOST_DEVICE:
1468 		cam_periph_invalidate(periph);
1469 		break;
1470 	default:
1471 		break;
1472 	}
1473 }
1474 
1475 void
1476 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
1477 {
1478 	struct ccb_getdevstats cgds;
1479 
1480 	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1481 	cgds.ccb_h.func_code = XPT_GDEV_STATS;
1482 	xpt_action((union ccb *)&cgds);
1483 	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
1484 }
1485 
1486 void
1487 cam_periph_freeze_after_event(struct cam_periph *periph,
1488 			      struct timeval* event_time, u_int duration_ms)
1489 {
1490 	struct timeval delta;
1491 	struct timeval duration_tv;
1492 
1493 	if (!timevalisset(event_time))
1494 		return;
1495 
1496 	microtime(&delta);
1497 	timevalsub(&delta, event_time);
1498 	duration_tv.tv_sec = duration_ms / 1000;
1499 	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
1500 	if (timevalcmp(&delta, &duration_tv, <)) {
1501 		timevalsub(&duration_tv, &delta);
1502 
1503 		duration_ms = duration_tv.tv_sec * 1000;
1504 		duration_ms += duration_tv.tv_usec / 1000;
1505 		cam_freeze_devq(periph->path);
1506 		cam_release_devq(periph->path,
1507 				RELSIM_RELEASE_AFTER_TIMEOUT,
1508 				/*reduction*/0,
1509 				/*timeout*/duration_ms,
1510 				/*getcount_only*/0);
1511 	}
1512 
1513 }
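
/*
 * For example, with a requested settle duration of 1000ms and an event that
 * happened 300ms ago, delta (300ms) is less than the duration, so the device
 * queue is frozen and then released after the remaining 700ms.  If the event
 * is already older than the requested duration, nothing is frozen.
 */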
1514 
1515 static int
1516 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
1517     cam_flags camflags, u_int32_t sense_flags,
1518     int *openings, u_int32_t *relsim_flags,
1519     u_int32_t *timeout, u_int32_t *action, const char **action_string)
1520 {
1521 	struct cam_periph *periph;
1522 	int error;
1523 
1524 	switch (ccb->csio.scsi_status) {
1525 	case SCSI_STATUS_OK:
1526 	case SCSI_STATUS_COND_MET:
1527 	case SCSI_STATUS_INTERMED:
1528 	case SCSI_STATUS_INTERMED_COND_MET:
1529 		error = 0;
1530 		break;
1531 	case SCSI_STATUS_CMD_TERMINATED:
1532 	case SCSI_STATUS_CHECK_COND:
1533 		error = camperiphscsisenseerror(ccb, orig_ccb,
1534 					        camflags,
1535 					        sense_flags,
1536 					        openings,
1537 					        relsim_flags,
1538 					        timeout,
1539 					        action,
1540 					        action_string);
1541 		break;
1542 	case SCSI_STATUS_QUEUE_FULL:
1543 	{
1544 		/* no decrement */
1545 		struct ccb_getdevstats cgds;
1546 
1547 		/*
1548 		 * First off, find out what the current
1549 		 * transaction counts are.
1550 		 */
1551 		xpt_setup_ccb(&cgds.ccb_h,
1552 			      ccb->ccb_h.path,
1553 			      CAM_PRIORITY_NORMAL);
1554 		cgds.ccb_h.func_code = XPT_GDEV_STATS;
1555 		xpt_action((union ccb *)&cgds);
1556 
1557 		/*
1558 		 * If we were the only transaction active, treat
1559 		 * the QUEUE FULL as if it were a BUSY condition.
1560 		 */
1561 		if (cgds.dev_active != 0) {
1562 			int total_openings;
1563 
1564 			/*
1565 		 	 * Reduce the number of openings to
1566 			 * be 1 less than the amount it took
1567 			 * to get a queue full bounded by the
1568 			 * minimum allowed tag count for this
1569 			 * device.
1570 		 	 */
1571 			total_openings = cgds.dev_active + cgds.dev_openings;
1572 			*openings = cgds.dev_active;
1573 			if (*openings < cgds.mintags)
1574 				*openings = cgds.mintags;
1575 			if (*openings < total_openings)
1576 				*relsim_flags = RELSIM_ADJUST_OPENINGS;
1577 			else {
1578 				/*
1579 				 * Some devices report queue full for
1580 				 * temporary resource shortages.  For
1581 				 * this reason, we allow a minimum
1582 				 * tag count to be entered via a
1583 				 * quirk entry to prevent the queue
1584 				 * count on these devices from falling
1585 				 * to a pessimistically low value.  We
1586 				 * still wait for the next successful
1587 				 * completion, however, before queueing
1588 				 * more transactions to the device.
1589 				 */
1590 				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
1591 			}
1592 			*timeout = 0;
1593 			error = ERESTART;
1594 			*action &= ~SSQ_PRINT_SENSE;
1595 			break;
1596 		}
1597 		/* FALLTHROUGH */
1598 	}
1599 	case SCSI_STATUS_BUSY:
1600 		/*
1601 		 * Restart the queue after either another
1602 		 * command completes or a 1 second timeout.
1603 		 */
1604 		periph = xpt_path_periph(ccb->ccb_h.path);
1605 		if (periph->flags & CAM_PERIPH_INVALID) {
1606 			error = EIO;
1607 			*action_string = "Periph was invalidated";
1608 		} else if ((sense_flags & SF_RETRY_BUSY) != 0 ||
1609 		    ccb->ccb_h.retry_count > 0) {
1610 			if ((sense_flags & SF_RETRY_BUSY) == 0)
1611 				ccb->ccb_h.retry_count--;
1612 			error = ERESTART;
1613 			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
1614 				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
1615 			*timeout = 1000;
1616 		} else {
1617 			error = EIO;
1618 			*action_string = "Retries exhausted";
1619 		}
1620 		break;
1621 	case SCSI_STATUS_RESERV_CONFLICT:
1622 	default:
1623 		error = EIO;
1624 		break;
1625 	}
1626 	return (error);
1627 }
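
/*
 * Worked example for the QUEUE FULL case above (illustrative numbers): with
 * dev_active == 60 and dev_openings == 4, total_openings is 64 and the tag
 * count is reduced to 60 (assuming mintags <= 60), so RELSIM_ADJUST_OPENINGS
 * is used and the command is requeued via ERESTART.  If the reduced count
 * would not actually shrink the queue, we instead wait for the next
 * successful completion using RELSIM_RELEASE_AFTER_CMDCMPLT.
 */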
1628 
1629 static int
1630 camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
1631     cam_flags camflags, u_int32_t sense_flags,
1632     int *openings, u_int32_t *relsim_flags,
1633     u_int32_t *timeout, u_int32_t *action, const char **action_string)
1634 {
1635 	struct cam_periph *periph;
1636 	union ccb *orig_ccb = ccb;
1637 	int error, recoveryccb;
1638 
1639 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1640 	if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
1641 		biotrack(ccb->csio.bio, __func__);
1642 #endif
1643 
1644 	periph = xpt_path_periph(ccb->ccb_h.path);
1645 	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
1646 	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
1647 		/*
1648 		 * If error recovery is already in progress, don't attempt
1649 		 * to process this error, but requeue it unconditionally
1650 		 * and attempt to process it once error recovery has
1651 		 * completed.  This failed command is probably related to
1652 		 * the error that caused the currently active error recovery
1653 		 * action so our current recovery efforts should also
1654 		 * address this command.  Be aware that the error recovery
1655 		 * code assumes that only one recovery action is in progress
1656 		 * on a particular peripheral instance at any given time
1657 		 * (e.g. only one saved CCB for error recovery) so it is
1658 		 * imperative that we don't violate this assumption.
1659 		 */
1660 		error = ERESTART;
1661 		*action &= ~SSQ_PRINT_SENSE;
1662 	} else {
1663 		scsi_sense_action err_action;
1664 		struct ccb_getdev cgd;
1665 
1666 		/*
1667 		 * Grab the inquiry data for this device.
1668 		 */
1669 		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
1670 		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1671 		xpt_action((union ccb *)&cgd);
1672 
1673 		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
1674 		    sense_flags);
1675 		error = err_action & SS_ERRMASK;
1676 
1677 		/*
1678 		 * Do not autostart sequential access devices
1679 		 * to avoid unexpected tape loading.
1680 		 */
1681 		if ((err_action & SS_MASK) == SS_START &&
1682 		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
1683 			*action_string = "Will not autostart a "
1684 			    "sequential access device";
1685 			goto sense_error_done;
1686 		}
1687 
1688 		/*
1689 		 * Avoid recovery recursion if recovery action is the same.
1690 		 */
1691 		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
1692 			if (((err_action & SS_MASK) == SS_START &&
1693 			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
1694 			    ((err_action & SS_MASK) == SS_TUR &&
1695 			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
1696 				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
1697 				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1698 				*timeout = 500;
1699 			}
1700 		}
1701 
1702 		/*
1703 		 * If the recovery action will consume a retry,
1704 		 * make sure we actually have retries available.
1705 		 */
1706 		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
1707 		 	if (ccb->ccb_h.retry_count > 0 &&
1708 			    (periph->flags & CAM_PERIPH_INVALID) == 0)
1709 		 		ccb->ccb_h.retry_count--;
1710 			else {
1711 				*action_string = "Retries exhausted";
1712 				goto sense_error_done;
1713 			}
1714 		}
1715 
1716 		if ((err_action & SS_MASK) >= SS_START) {
1717 			/*
1718 			 * Do common portions of commands that
1719 			 * use recovery CCBs.
1720 			 */
1721 			orig_ccb = xpt_alloc_ccb_nowait();
1722 			if (orig_ccb == NULL) {
1723 				*action_string = "Can't allocate recovery CCB";
1724 				goto sense_error_done;
1725 			}
1726 			/*
1727 			 * Clear freeze flag for original request here, as
1728 			 * this freeze will be dropped as part of ERESTART.
1729 			 */
1730 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
1731 			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
1732 		}
1733 
1734 		switch (err_action & SS_MASK) {
1735 		case SS_NOP:
1736 			*action_string = "No recovery action needed";
1737 			error = 0;
1738 			break;
1739 		case SS_RETRY:
1740 			*action_string = "Retrying command (per sense data)";
1741 			error = ERESTART;
1742 			break;
1743 		case SS_FAIL:
1744 			*action_string = "Unretryable error";
1745 			break;
1746 		case SS_START:
1747 		{
1748 			int le;
1749 
1750 			/*
1751 			 * Send a start unit command to the device, and
1752 			 * then retry the command.
1753 			 */
1754 			*action_string = "Attempting to start unit";
1755 			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1756 
1757 			/*
1758 			 * Check for removable media and set
1759 			 * load/eject flag appropriately.
1760 			 */
1761 			if (SID_IS_REMOVABLE(&cgd.inq_data))
1762 				le = TRUE;
1763 			else
1764 				le = FALSE;
1765 
1766 			scsi_start_stop(&ccb->csio,
1767 					/*retries*/1,
1768 					camperiphdone,
1769 					MSG_SIMPLE_Q_TAG,
1770 					/*start*/TRUE,
1771 					/*load/eject*/le,
1772 					/*immediate*/FALSE,
1773 					SSD_FULL_SIZE,
1774 					/*timeout*/50000);
1775 			break;
1776 		}
1777 		case SS_TUR:
1778 		{
1779 			/*
1780 			 * Send a Test Unit Ready to the device.
1781 			 * If the 'many' flag is set, we send 120
1782 			 * test unit ready commands, one every half
1783 			 * second.  Otherwise, we just send one TUR.
1784 			 * We only want to do this if the retry
1785 			 * count has not been exhausted.
1786 			 */
1787 			int retries;
1788 
1789 			if ((err_action & SSQ_MANY) != 0 && (periph->flags &
1790 			     CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) {
1791 				periph->flags |= CAM_PERIPH_RECOVERY_WAIT;
1792 				*action_string = "Polling device for readiness";
1793 				retries = 120;
1794 			} else {
1795 				*action_string = "Testing device for readiness";
1796 				retries = 1;
1797 			}
1798 			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
1799 			scsi_test_unit_ready(&ccb->csio,
1800 					     retries,
1801 					     camperiphdone,
1802 					     MSG_SIMPLE_Q_TAG,
1803 					     SSD_FULL_SIZE,
1804 					     /*timeout*/5000);
1805 
1806 			/*
1807 			 * Accomplish our 500ms delay by deferring
1808 			 * the release of our device queue appropriately.
1809 			 */
1810 			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1811 			*timeout = 500;
1812 			break;
1813 		}
1814 		default:
1815 			panic("Unhandled error action %x", err_action);
1816 		}
1817 
1818 		if ((err_action & SS_MASK) >= SS_START) {
1819 			/*
1820 			 * Drop the priority, so that the recovery
1821 			 * CCB is the first to execute.  Freeze the queue
1822 			 * after this command is sent so that we can
1823 			 * restore the old csio and have it queued in
1824 			 * the proper order before we release normal
1825 			 * transactions to the device.
1826 			 */
1827 			ccb->ccb_h.pinfo.priority--;
1828 			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1829 			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
1830 			error = ERESTART;
1831 			*orig = orig_ccb;
1832 		}
1833 
1834 sense_error_done:
1835 		*action = err_action;
1836 	}
1837 	return (error);
1838 }
1839 
1840 /*
1841  * Generic error handler.  Peripheral drivers usually filter
1842  * out the errors that they handle in a unique manner, then
1843  * call this function.
1844  */
1845 int
1846 cam_periph_error(union ccb *ccb, cam_flags camflags,
1847 		 u_int32_t sense_flags)
1848 {
1849 	struct cam_path *newpath;
1850 	union ccb  *orig_ccb, *scan_ccb;
1851 	struct cam_periph *periph;
1852 	const char *action_string;
1853 	cam_status  status;
1854 	int	    frozen, error, openings, devctl_err;
1855 	u_int32_t   action, relsim_flags, timeout;
1856 
1857 	action = SSQ_PRINT_SENSE;
1858 	periph = xpt_path_periph(ccb->ccb_h.path);
1859 	action_string = NULL;
1860 	status = ccb->ccb_h.status;
1861 	frozen = (status & CAM_DEV_QFRZN) != 0;
1862 	status &= CAM_STATUS_MASK;
1863 	devctl_err = openings = relsim_flags = timeout = 0;
1864 	orig_ccb = ccb;
1865 
1866 	/* Filter the errors that should be reported via devctl */
1867 	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
1868 	case CAM_CMD_TIMEOUT:
1869 	case CAM_REQ_ABORTED:
1870 	case CAM_REQ_CMP_ERR:
1871 	case CAM_REQ_TERMIO:
1872 	case CAM_UNREC_HBA_ERROR:
1873 	case CAM_DATA_RUN_ERR:
1874 	case CAM_SCSI_STATUS_ERROR:
1875 	case CAM_ATA_STATUS_ERROR:
1876 	case CAM_SMP_STATUS_ERROR:
1877 		devctl_err++;
1878 		break;
1879 	default:
1880 		break;
1881 	}
1882 
1883 	switch (status) {
1884 	case CAM_REQ_CMP:
1885 		error = 0;
1886 		action &= ~SSQ_PRINT_SENSE;
1887 		break;
1888 	case CAM_SCSI_STATUS_ERROR:
1889 		error = camperiphscsistatuserror(ccb, &orig_ccb,
1890 		    camflags, sense_flags, &openings, &relsim_flags,
1891 		    &timeout, &action, &action_string);
1892 		break;
1893 	case CAM_AUTOSENSE_FAIL:
1894 		error = EIO;	/* we have to kill the command */
1895 		break;
1896 	case CAM_UA_ABORT:
1897 	case CAM_UA_TERMIO:
1898 	case CAM_MSG_REJECT_REC:
1899 		/* XXX Don't know that these are correct */
1900 		error = EIO;
1901 		break;
1902 	case CAM_SEL_TIMEOUT:
1903 		if ((camflags & CAM_RETRY_SELTO) != 0) {
1904 			if (ccb->ccb_h.retry_count > 0 &&
1905 			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
1906 				ccb->ccb_h.retry_count--;
1907 				error = ERESTART;
1908 
1909 				/*
1910 				 * Wait a bit to give the device
1911 				 * time to recover before we try again.
1912 				 */
1913 				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1914 				timeout = periph_selto_delay;
1915 				break;
1916 			}
1917 			action_string = "Retries exhausted";
1918 		}
1919 		/* FALLTHROUGH */
1920 	case CAM_DEV_NOT_THERE:
1921 		error = ENXIO;
1922 		action = SSQ_LOST;
1923 		break;
1924 	case CAM_REQ_INVALID:
1925 	case CAM_PATH_INVALID:
1926 	case CAM_NO_HBA:
1927 	case CAM_PROVIDE_FAIL:
1928 	case CAM_REQ_TOO_BIG:
1929 	case CAM_LUN_INVALID:
1930 	case CAM_TID_INVALID:
1931 	case CAM_FUNC_NOTAVAIL:
1932 		error = EINVAL;
1933 		break;
1934 	case CAM_SCSI_BUS_RESET:
1935 	case CAM_BDR_SENT:
1936 		/*
1937 		 * Commands that repeatedly time out and cause these
1938 		 * kinds of error recovery actions should return
1939 		 * CAM_CMD_TIMEOUT, which allows us to safely assume
1940 		 * that this command was an innocent bystander to
1941 		 * these events and should be unconditionally
1942 		 * retried.
1943 		 */
1944 	case CAM_REQUEUE_REQ:
1945 		/* Unconditional requeue if device is still there */
1946 		if (periph->flags & CAM_PERIPH_INVALID) {
1947 			action_string = "Periph was invalidated";
1948 			error = EIO;
1949 		} else if (sense_flags & SF_NO_RETRY) {
1950 			error = EIO;
1951 			action_string = "Retry was blocked";
1952 		} else {
1953 			error = ERESTART;
1954 			action &= ~SSQ_PRINT_SENSE;
1955 		}
1956 		break;
1957 	case CAM_RESRC_UNAVAIL:
1958 		/* Wait a bit for the resource shortage to abate. */
1959 		timeout = periph_noresrc_delay;
1960 		/* FALLTHROUGH */
1961 	case CAM_BUSY:
1962 		if (timeout == 0) {
1963 			/* Wait a bit for the busy condition to abate. */
1964 			timeout = periph_busy_delay;
1965 		}
1966 		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
1967 		/* FALLTHROUGH */
1968 	case CAM_ATA_STATUS_ERROR:
1969 	case CAM_REQ_CMP_ERR:
1970 	case CAM_CMD_TIMEOUT:
1971 	case CAM_UNEXP_BUSFREE:
1972 	case CAM_UNCOR_PARITY:
1973 	case CAM_DATA_RUN_ERR:
1974 	default:
1975 		if (periph->flags & CAM_PERIPH_INVALID) {
1976 			error = EIO;
1977 			action_string = "Periph was invalidated";
1978 		} else if (ccb->ccb_h.retry_count == 0) {
1979 			error = EIO;
1980 			action_string = "Retries exhausted";
1981 		} else if (sense_flags & SF_NO_RETRY) {
1982 			error = EIO;
1983 			action_string = "Retry was blocked";
1984 		} else {
1985 			ccb->ccb_h.retry_count--;
1986 			error = ERESTART;
1987 		}
1988 		break;
1989 	}
1990 
1991 	if ((sense_flags & SF_PRINT_ALWAYS) ||
1992 	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
1993 		action |= SSQ_PRINT_SENSE;
1994 	else if (sense_flags & SF_NO_PRINT)
1995 		action &= ~SSQ_PRINT_SENSE;
1996 	if ((action & SSQ_PRINT_SENSE) != 0)
1997 		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1998 	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
1999 		if (error != ERESTART) {
2000 			if (action_string == NULL)
2001 				action_string = "Unretryable error";
2002 			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
2003 			    error, action_string);
2004 		} else if (action_string != NULL)
2005 			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
2006 		else {
2007 			xpt_print(ccb->ccb_h.path,
2008 			    "Retrying command, %d more tries remain\n",
2009 			    ccb->ccb_h.retry_count);
2010 		}
2011 	}
2012 
2013 	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
2014 		cam_periph_devctl_notify(orig_ccb);
2015 
2016 	if ((action & SSQ_LOST) != 0) {
2017 		lun_id_t lun_id;
2018 
2019 		/*
2020 		 * For a selection timeout, we consider all of the LUNs on
2021 		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
2022 		 * then we only get rid of the device(s) specified by the
2023 		 * path in the original CCB.
2024 		 */
2025 		if (status == CAM_SEL_TIMEOUT)
2026 			lun_id = CAM_LUN_WILDCARD;
2027 		else
2028 			lun_id = xpt_path_lun_id(ccb->ccb_h.path);
2029 
2030 		/* Should we do more if we can't create the path?? */
2031 		if (xpt_create_path(&newpath, periph,
2032 				    xpt_path_path_id(ccb->ccb_h.path),
2033 				    xpt_path_target_id(ccb->ccb_h.path),
2034 				    lun_id) == CAM_REQ_CMP) {
2035 
2036 			/*
2037 			 * Let peripheral drivers know that this
2038 			 * device has gone away.
2039 			 */
2040 			xpt_async(AC_LOST_DEVICE, newpath, NULL);
2041 			xpt_free_path(newpath);
2042 		}
2043 	}
2044 
2045 	/* Broadcast UNIT ATTENTIONs to all periphs. */
2046 	if ((action & SSQ_UA) != 0)
2047 		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
2048 
2049 	/* Rescan target on "Reported LUNs data has changed" */
2050 	if ((action & SSQ_RESCAN) != 0) {
2051 		if (xpt_create_path(&newpath, NULL,
2052 				    xpt_path_path_id(ccb->ccb_h.path),
2053 				    xpt_path_target_id(ccb->ccb_h.path),
2054 				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2055 
2056 			scan_ccb = xpt_alloc_ccb_nowait();
2057 			if (scan_ccb != NULL) {
2058 				scan_ccb->ccb_h.path = newpath;
2059 				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
2060 				scan_ccb->crcn.flags = 0;
2061 				xpt_rescan(scan_ccb);
2062 			} else {
2063 				xpt_print(newpath,
2064 				    "Can't allocate CCB to rescan target\n");
2065 				xpt_free_path(newpath);
2066 			}
2067 		}
2068 	}
2069 
2070 	/* Attempt a retry */
2071 	if (error == ERESTART || error == 0) {
2072 		if (frozen != 0)
2073 			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
2074 		if (error == ERESTART)
2075 			xpt_action(ccb);
2076 		if (frozen != 0)
2077 			cam_release_devq(ccb->ccb_h.path,
2078 					 relsim_flags,
2079 					 openings,
2080 					 timeout,
2081 					 /*getcount_only*/0);
2082 	}
2083 
2084 	return (error);
2085 }
2086 
2087 #define CAM_PERIPH_DEVD_MSG_SIZE	256
2088 
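/*
 * Emit a devctl(4) notification (system "CAM", subsystem "periph", type
 * "error" or "timeout") for the failed CCB so devd(8) can act on it.  The
 * message is a list of key=value attributes; a purely illustrative example
 * for a SCSI check condition might look like:
 *
 *	device=da0 serial="XYZ123" cam_status="0xc" scsi_status=2
 *	    scsi_sense="72 03 11 00" CDB="28 00 00 00 00 10 00 00 08 00"
 */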
2089 static void
2090 cam_periph_devctl_notify(union ccb *ccb)
2091 {
2092 	struct cam_periph *periph;
2093 	struct ccb_getdev *cgd;
2094 	struct sbuf sb;
2095 	int serr, sk, asc, ascq;
2096 	char *sbmsg, *type;
2097 
2098 	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
2099 	if (sbmsg == NULL)
2100 		return;
2101 
2102 	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);
2103 
2104 	periph = xpt_path_periph(ccb->ccb_h.path);
2105 	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
2106 	    periph->unit_number);
2107 
2108 	sbuf_printf(&sb, "serial=\"");
2109 	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
2110 		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
2111 		    CAM_PRIORITY_NORMAL);
2112 		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2113 		xpt_action((union ccb *)cgd);
2114 
2115 		if (cgd->ccb_h.status == CAM_REQ_CMP)
2116 			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
2117 		xpt_free_ccb((union ccb *)cgd);
2118 	}
2119 	sbuf_printf(&sb, "\" ");
2120 	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);
2121 
2122 	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
2123 	case CAM_CMD_TIMEOUT:
2124 		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
2125 		type = "timeout";
2126 		break;
2127 	case CAM_SCSI_STATUS_ERROR:
2128 		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
2129 		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
2130 			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
2131 			    serr, sk, asc, ascq);
2132 		type = "error";
2133 		break;
2134 	case CAM_ATA_STATUS_ERROR:
2135 		sbuf_printf(&sb, "RES=\"");
2136 		ata_res_sbuf(&ccb->ataio.res, &sb);
2137 		sbuf_printf(&sb, "\" ");
2138 		type = "error";
2139 		break;
2140 	default:
2141 		type = "error";
2142 		break;
2143 	}
2144 
2145 	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2146 		sbuf_printf(&sb, "CDB=\"");
2147 		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
2148 		sbuf_printf(&sb, "\" ");
2149 	} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
2150 		sbuf_printf(&sb, "ACB=\"");
2151 		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
2152 		sbuf_printf(&sb, "\" ");
2153 	}
2154 
2155 	if (sbuf_finish(&sb) == 0)
2156 		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
2157 	sbuf_delete(&sb);
2158 	free(sbmsg, M_CAMPERIPH);
2159 }
2160 
2161 /*
2162  * Sysctl to force an invalidation of the drive right now. Can be
2163  * called with CTLFLAG_MPSAFE since we take the periph lock.
2164  */
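/*
 * A peripheral driver would typically hang this handler off its own sysctl
 * tree; for example, assuming the da(4) driver attaches it under the name
 * "invalidate", the drive could be invalidated from userland with:
 *
 *	sysctl kern.cam.da.0.invalidate=1
 */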
2165 int
2166 cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
2167 {
2168 	struct cam_periph *periph;
2169 	int error, value;
2170 
2171 	periph = arg1;
2172 	value = 0;
2173 	error = sysctl_handle_int(oidp, &value, 0, req);
2174 	if (error != 0 || req->newptr == NULL || value != 1)
2175 		return (error);
2176 
2177 	cam_periph_lock(periph);
2178 	cam_periph_invalidate(periph);
2179 	cam_periph_unlock(periph);
2180 
2181 	return (0);
2182 }
2183