xref: /dragonfly/sys/bus/cam/cam_xpt.c (revision 9b5a9965)
1 /*
2  * Implementation of the Common Access Method Transport (XPT) layer.
3  *
4  * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5  * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification, immediately at the beginning of the file.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
30  * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.35 2007/07/28 23:24:34 dillon Exp $
31  */
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/types.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/time.h>
38 #include <sys/conf.h>
39 #include <sys/device.h>
40 #include <sys/fcntl.h>
41 #include <sys/md5.h>
42 #include <sys/devicestat.h>
43 #include <sys/interrupt.h>
44 #include <sys/bus.h>
45 #include <sys/thread.h>
46 #include <sys/thread2.h>
47 
48 #include <machine/clock.h>
49 
50 #include "cam.h"
51 #include "cam_ccb.h"
52 #include "cam_periph.h"
53 #include "cam_sim.h"
54 #include "cam_xpt.h"
55 #include "cam_xpt_sim.h"
56 #include "cam_xpt_periph.h"
57 #include "cam_debug.h"
58 
59 #include "scsi/scsi_all.h"
60 #include "scsi/scsi_message.h"
61 #include "scsi/scsi_pass.h"
62 #include "opt_cam.h"
63 
64 /* Datastructures internal to the xpt layer */
65 
66 /*
67  * Definition of an async handler callback block.  These are used to add
68  * SIMs and peripherals to the async callback lists.
69  */
70 struct async_node {
71 	SLIST_ENTRY(async_node)	links;
72 	u_int32_t	event_enable;	/* Async Event enables */
73 	void		(*callback)(void *arg, u_int32_t code,
74 				    struct cam_path *path, void *args);
75 	void		*callback_arg;
76 };
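
/*
 * Illustrative sketch (not part of this file): peripherals and SIMs do not
 * build async_node entries by hand; they issue an XPT_SASYNC_CB CCB and
 * xpt_action() links the node onto the appropriate callback list.  Roughly,
 * with a hypothetical handler name:
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = my_async_handler;
 *	csa.callback_arg = periph;
 *	xpt_action((union ccb *)&csa);
 */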
77 
78 SLIST_HEAD(async_list, async_node);
79 SLIST_HEAD(periph_list, cam_periph);
80 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
81 
82 /*
83  * This is the maximum number of high powered commands (e.g. start unit)
84  * that can be outstanding at a particular time.
85  */
86 #ifndef CAM_MAX_HIGHPOWER
87 #define CAM_MAX_HIGHPOWER  4
88 #endif
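
/*
 * A sketch only, assuming the option is plumbed through opt_cam.h (as it
 * is in FreeBSD's sys/conf/options): the limit can be raised from the
 * kernel configuration file, e.g.
 *
 *	options 	CAM_MAX_HIGHPOWER=8
 */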
89 
90 /* number of high powered commands that can go through right now */
91 static int num_highpower = CAM_MAX_HIGHPOWER;
92 
93 /*
94  * Structure for queueing a device in a run queue.
95  * There is one run queue for allocating new ccbs,
96  * and another for sending ccbs to the controller.
97  */
98 struct cam_ed_qinfo {
99 	cam_pinfo pinfo;
100 	struct	  cam_ed *device;
101 };
102 
103 /*
104  * The CAM EDT (Existing Device Table) contains the device information for
105  * all devices for all busses in the system.  The table contains a
106  * cam_ed structure for each device on the bus.
107  */
108 struct cam_ed {
109 	TAILQ_ENTRY(cam_ed) links;
110 	struct	cam_ed_qinfo alloc_ccb_entry;
111 	struct	cam_ed_qinfo send_ccb_entry;
112 	struct	cam_et	 *target;
113 	lun_id_t	 lun_id;
114 	struct	camq drvq;		/*
115 					 * Queue of type drivers wanting to do
116 					 * work on this device.
117 					 */
118 	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
119 	struct	async_list asyncs;	/* Async callback info for this B/T/L */
120 	struct	periph_list periphs;	/* All attached devices */
121 	u_int	generation;		/* Generation number */
122 	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
123 	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
124 					/* Storage for the inquiry data */
125 	struct	scsi_inquiry_data inq_data;
126 	u_int8_t	 inq_flags;	/*
127 					 * Current settings for inquiry flags.
128 					 * This allows us to override settings
129 					 * like disconnection and tagged
130 					 * queuing for a device.
131 					 */
132 	u_int8_t	 queue_flags;	/* Queue flags from the control page */
133 	u_int8_t	 serial_num_len;
134 	u_int8_t	 *serial_num;
135 	u_int32_t	 qfrozen_cnt;
136 	u_int32_t	 flags;
137 #define CAM_DEV_UNCONFIGURED	 	0x01
138 #define CAM_DEV_REL_TIMEOUT_PENDING	0x02
139 #define CAM_DEV_REL_ON_COMPLETE		0x04
140 #define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
141 #define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
142 #define CAM_DEV_TAG_AFTER_COUNT		0x20
143 #define CAM_DEV_INQUIRY_DATA_VALID	0x40
144 	u_int32_t	 tag_delay_count;
145 #define	CAM_TAG_DELAY_COUNT		5
146 	u_int32_t	 refcount;
147 	struct		 callout c_handle;
148 };
149 
150 /*
151  * Each target is represented by an ET (Existing Target).  These
152  * entries are created when a target is successfully probed with an
153  * identify, and removed when a device fails to respond after a number
154  * of retries, or a bus rescan finds the device missing.
155  */
156 struct cam_et {
157 	TAILQ_HEAD(, cam_ed) ed_entries;
158 	TAILQ_ENTRY(cam_et) links;
159 	struct	cam_eb	*bus;
160 	target_id_t	target_id;
161 	u_int32_t	refcount;
162 	u_int		generation;
163 	struct		timeval last_reset;	/* uptime of last reset */
164 };
165 
166 /*
167  * Each bus is represented by an EB (Existing Bus).  These entries
168  * are created by calls to xpt_bus_register and deleted by calls to
169  * xpt_bus_deregister.
170  */
171 struct cam_eb {
172 	TAILQ_HEAD(, cam_et) et_entries;
173 	TAILQ_ENTRY(cam_eb)  links;
174 	path_id_t	     path_id;
175 	struct cam_sim	     *sim;
176 	struct timeval	     last_reset;	/* uptime of last reset */
177 	u_int32_t	     flags;
178 #define	CAM_EB_RUNQ_SCHEDULED	0x01
179 	u_int32_t	     refcount;
180 	u_int		     generation;
181 };
182 
183 struct cam_path {
184 	struct cam_periph *periph;
185 	struct cam_eb	  *bus;
186 	struct cam_et	  *target;
187 	struct cam_ed	  *device;
188 };
189 
190 struct xpt_quirk_entry {
191 	struct scsi_inquiry_pattern inq_pat;
192 	u_int8_t quirks;
193 #define	CAM_QUIRK_NOLUNS	0x01
194 #define	CAM_QUIRK_NOSERIAL	0x02
195 #define	CAM_QUIRK_HILUNS	0x04
196 	u_int mintags;
197 	u_int maxtags;
198 };
199 #define	CAM_SCSI2_MAXLUN	8
200 
201 typedef enum {
202 	XPT_FLAG_OPEN		= 0x01
203 } xpt_flags;
204 
205 struct xpt_softc {
206 	xpt_flags	flags;
207 	u_int32_t	generation;
208 };
209 
210 static const char quantum[] = "QUANTUM";
211 static const char sony[] = "SONY";
212 static const char west_digital[] = "WDIGTL";
213 static const char samsung[] = "SAMSUNG";
214 static const char seagate[] = "SEAGATE";
215 static const char microp[] = "MICROP";
216 
217 static struct xpt_quirk_entry xpt_quirk_table[] =
218 {
219 	{
220 		/* Reports QUEUE FULL for temporary resource shortages */
221 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
222 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
223 	},
224 	{
225 		/* Reports QUEUE FULL for temporary resource shortages */
226 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
227 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
228 	},
229 	{
230 		/* Reports QUEUE FULL for temporary resource shortages */
231 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
232 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
233 	},
234 	{
235 		/* Broken tagged queuing drive */
236 		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
237 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
238 	},
239 	{
240 		/* Broken tagged queuing drive */
241 		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
242 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
243 	},
244 	{
245 		/* Broken tagged queuing drive */
246 		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
247 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
248 	},
249 	{
250 		/*
251 		 * Unfortunately, the Quantum Atlas III has the same
252 		 * problem as the Atlas II drives above.
253 		 * Reported by: "Johan Granlund" <johan@granlund.nu>
254 		 *
255 		 * For future reference, the drive with the problem was:
256 		 * QUANTUM QM39100TD-SW N1B0
257 		 *
258 		 * It's possible that Quantum will fix the problem in later
259 		 * firmware revisions.  If that happens, the quirk entry
260 		 * will need to be made specific to the firmware revisions
261 		 * with the problem.
262 		 *
263 		 */
264 		/* Reports QUEUE FULL for temporary resource shortages */
265 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
266 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
267 	},
268 	{
269 		/*
270 		 * 18 Gig Atlas III, same problem as the 9G version.
271 		 * Reported by: Andre Albsmeier
272 		 *		<andre.albsmeier@mchp.siemens.de>
273 		 *
274 		 * For future reference, the drive with the problem was:
275 		 * QUANTUM QM318000TD-S N491
276 		 */
277 		/* Reports QUEUE FULL for temporary resource shortages */
278 		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
279 		/*quirks*/0, /*mintags*/24, /*maxtags*/32
280 	},
281 	{
282 		/*
283 		 * Broken tagged queuing drive
284 		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
285 		 *         and: Martin Renters <martin@tdc.on.ca>
286 		 */
287 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
288 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
289 	},
290 		/*
291 		 * The Seagate Medalist Pro drives have very poor write
292 		 * performance with anything more than 2 tags.
293 		 *
294 		 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
295 		 * Drive:  <SEAGATE ST36530N 1444>
296 		 *
297 		 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
298 		 * Drive:  <SEAGATE ST34520W 1281>
299 		 *
300 		 * No one has actually reported that the 9G version
301 		 * (ST39140*) of the Medalist Pro has the same problem, but
302 		 * we're assuming that it does because the 4G and 6.5G
303 		 * versions of the drive are broken.
304 		 */
305 	{
306 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
307 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
308 	},
309 	{
310 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
311 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
312 	},
313 	{
314 		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
315 		/*quirks*/0, /*mintags*/2, /*maxtags*/2
316 	},
317 	{
318 		/*
319 		 * Slow when tagged queueing is enabled.  Write performance
320 		 * steadily drops off with more and more concurrent
321 		 * transactions.  Best sequential write performance with
322 		 * tagged queueing turned off and write caching turned on.
323 		 *
324 		 * PR:  kern/10398
325 		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
326 		 * Drive:  DCAS-34330 w/ "S65A" firmware.
327 		 *
328 		 * The drive with the problem had the "S65A" firmware
329 		 * revision, and has also been reported (by Stephen J.
330 		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
331 		 * firmware revision.
332 		 *
333 		 * Although no one has reported problems with the 2 gig
334 		 * version of the DCAS drive, the assumption is that it
335 		 * has the same problems as the 4 gig version.  Therefore
336 		 * this quirk entry disables tagged queueing for all
337 		 * DCAS drives.
338 		 */
339 		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
340 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
341 	},
342 	{
343 		/* Broken tagged queuing drive */
344 		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
345 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
346 	},
347 	{
348 		/* Broken tagged queuing drive */
349 		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
350 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
351 	},
352 	{
353 		/*
354 		 * Broken tagged queuing drive.
355 		 * Submitted by:
356 		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
357 		 * in PR kern/9535
358 		 */
359 		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
360 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
361 	},
362         {
363 		/*
364 		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
365 		 * 8MB/sec.)
366 		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
367 		 * Best performance with these drives is achieved with
368 		 * tagged queueing turned off, and write caching turned on.
369 		 */
370 		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
371 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
372         },
373         {
374 		/*
375 		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
376 		 * 8MB/sec.)
377 		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
378 		 * Best performance with these drives is achieved with
379 		 * tagged queueing turned off, and write caching turned on.
380 		 */
381 		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
382 		/*quirks*/0, /*mintags*/0, /*maxtags*/0
383         },
384 	{
385 		/*
386 		 * Doesn't handle queue full condition correctly,
387 		 * so we need to limit maxtags to what the device
388 		 * can handle instead of determining this automatically.
389 		 */
390 		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
391 		/*quirks*/0, /*mintags*/2, /*maxtags*/32
392 	},
393 	{
394 		/* Really only one LUN */
395 		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
396 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
397 	},
398 	{
399 		/* I can't believe we need a quirk for DPT volumes. */
400 		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
401 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
402 		/*mintags*/0, /*maxtags*/255
403 	},
404 	{
405 		/*
406 		 * Many Sony CDROM drives don't like multi-LUN probing.
407 		 */
408 		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
409 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
410 	},
411 	{
412 		/*
413 		 * This drive doesn't like multiple LUN probing.
414 		 * Submitted by:  Parag Patel <parag@cgt.com>
415 		 */
416 		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
417 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
418 	},
419 	{
420 		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
421 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
422 	},
423 	{
424 		/*
425 		 * The 8200 doesn't like multi-lun probing, and probably
426 		 * doesn't like serial number requests either.
427 		 */
428 		{
429 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
430 			"EXB-8200*", "*"
431 		},
432 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
433 	},
434 	{
435 		/*
436 		 * Let's try the same as above, but for a drive that says
437 		 * it's an IPL-6860 but is actually an EXB 8200.
438 		 */
439 		{
440 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
441 			"IPL-6860*", "*"
442 		},
443 		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
444 	},
445 	{
446 		/*
447 		 * These Hitachi drives don't like multi-lun probing.
448 		 * The PR submitter has a DK319H, but says that the Linux
449 		 * kernel has a similar work-around for the DK312 and DK314,
450 		 * so all DK31* drives are quirked here.
451 		 * PR:            misc/18793
452 		 * Submitted by:  Paul Haddad <paul@pth.com>
453 		 */
454 		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
455 		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
456 	},
457 	{
458 		/*
459 		 * This old revision of the TDC3600 is also SCSI-1, and
460 		 * hangs upon serial number probing.
461 		 */
462 		{
463 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
464 			" TDC 3600", "U07:"
465 		},
466 		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
467 	},
468 	{
469 		/*
470 		 * Would respond to all LUNs if asked for.
471 		 */
472 		{
473 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
474 			"CP150", "*"
475 		},
476 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
477 	},
478 	{
479 		/*
480 		 * Would respond to all LUNs if asked for.
481 		 */
482 		{
483 			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
484 			"96X2*", "*"
485 		},
486 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
487 	},
488 	{
489 		/* Submitted by: Matthew Dodd <winter@jurai.net> */
490 		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
491 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
492 	},
493 	{
494 		/* Submitted by: Matthew Dodd <winter@jurai.net> */
495 		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
496 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
497 	},
498 	{
499 		/* TeraSolutions special settings for TRC-22 RAID */
500 		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
501 		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
502 	},
503 	{
504 		/* Veritas Storage Appliance */
505 		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
506 		  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
507 	},
508 	{
509 		/*
510 		 * Would respond to all LUNs.  Device type and removable
511 		 * flag are jumper-selectable.
512 		 */
513 		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
514 		  "Tahiti 1", "*"
515 		},
516 		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
517 	},
518 	{
519 		/* Default tagged queuing parameters for all devices */
520 		{
521 		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
522 		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
523 		},
524 		/*quirks*/0, /*mintags*/2, /*maxtags*/255
525 	},
526 };
527 
528 static const int xpt_quirk_table_size =
529 	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
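
/*
 * How the table is consulted (sketch; see xpt_find_quirk() further down):
 * the device's inquiry data is run through cam_quirkmatch() against each
 * inq_pat in order and the first match wins.  The catch-all entry at the
 * end guarantees that every device at least picks up the default tag
 * limits.
 */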
530 
531 typedef enum {
532 	DM_RET_COPY		= 0x01,
533 	DM_RET_FLAG_MASK	= 0x0f,
534 	DM_RET_NONE		= 0x00,
535 	DM_RET_STOP		= 0x10,
536 	DM_RET_DESCEND		= 0x20,
537 	DM_RET_ERROR		= 0x30,
538 	DM_RET_ACTION_MASK	= 0xf0
539 } dev_match_ret;
540 
541 typedef enum {
542 	XPT_DEPTH_BUS,
543 	XPT_DEPTH_TARGET,
544 	XPT_DEPTH_DEVICE,
545 	XPT_DEPTH_PERIPH
546 } xpt_traverse_depth;
547 
548 struct xpt_traverse_config {
549 	xpt_traverse_depth	depth;
550 	void			*tr_func;
551 	void			*tr_arg;
552 };
553 
554 typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
555 typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
556 typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
557 typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
558 typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
559 
560 /* Transport layer configuration information */
561 static struct xpt_softc xsoftc;
562 
563 /* Queues for our software interrupt handler */
564 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
565 static cam_isrq_t cam_bioq;
566 static cam_isrq_t cam_netq;
567 
568 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
569 static SLIST_HEAD(,ccb_hdr) ccb_freeq;
570 static u_int xpt_max_ccbs;	/*
571 				 * Maximum size of ccb pool.  Modified as
572 				 * devices are added/removed or have their
573 				 * opening counts changed.
574 				 */
575 static u_int xpt_ccb_count;	/* Current count of allocated ccbs */
576 
577 struct cam_periph *xpt_periph;
578 
579 static periph_init_t xpt_periph_init;
580 
581 static periph_init_t probe_periph_init;
582 
583 static struct periph_driver xpt_driver =
584 {
585 	xpt_periph_init, "xpt",
586 	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
587 };
588 
589 static struct periph_driver probe_driver =
590 {
591 	probe_periph_init, "probe",
592 	TAILQ_HEAD_INITIALIZER(probe_driver.units)
593 };
594 
595 DATA_SET(periphdriver_set, xpt_driver);
596 DATA_SET(periphdriver_set, probe_driver);
597 
598 #define XPT_CDEV_MAJOR 104
599 
600 static d_open_t xptopen;
601 static d_close_t xptclose;
602 static d_ioctl_t xptioctl;
603 
604 static struct dev_ops xpt_ops = {
605 	{ "xpt", XPT_CDEV_MAJOR, 0 },
606 	.d_open = xptopen,
607 	.d_close = xptclose,
608 	.d_ioctl = xptioctl
609 };
610 
611 static struct intr_config_hook *xpt_config_hook;
612 
613 /* Registered busses */
614 static TAILQ_HEAD(,cam_eb) xpt_busses;
615 static u_int bus_generation;
616 
617 /* Storage for debugging datastructures */
618 #ifdef	CAMDEBUG
619 struct cam_path *cam_dpath;
620 u_int32_t cam_dflags;
621 u_int32_t cam_debug_delay;
622 #endif
623 
624 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
625 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
626 #endif
627 
628 /*
629  * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
630  * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
631  * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
632  */
633 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
634     || defined(CAM_DEBUG_LUN)
635 #ifdef CAMDEBUG
636 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
637     || !defined(CAM_DEBUG_LUN)
638 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
639         and CAM_DEBUG_LUN"
640 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
641 #else /* !CAMDEBUG */
642 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
643 #endif /* CAMDEBUG */
644 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
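
/*
 * A typical debugging setup in a kernel configuration file might look like
 * the following (sketch; the bus/target/lun values are examples only):
 *
 *	options 	CAMDEBUG
 *	options 	CAM_DEBUG_BUS=0
 *	options 	CAM_DEBUG_TARGET=1
 *	options 	CAM_DEBUG_LUN=0
 *	options 	CAM_DEBUG_FLAGS="CAM_DEBUG_INFO|CAM_DEBUG_CDB"
 */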
645 
646 /* Our boot-time initialization hook */
647 static void	xpt_init(void *);
648 SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);
649 
650 static cam_status	xpt_compile_path(struct cam_path *new_path,
651 					 struct cam_periph *perph,
652 					 path_id_t path_id,
653 					 target_id_t target_id,
654 					 lun_id_t lun_id);
655 
656 static void		xpt_release_path(struct cam_path *path);
657 
658 static void		xpt_async_bcast(struct async_list *async_head,
659 					u_int32_t async_code,
660 					struct cam_path *path,
661 					void *async_arg);
662 static void		xpt_dev_async(u_int32_t async_code,
663 				      struct cam_eb *bus,
664 				      struct cam_et *target,
665 				      struct cam_ed *device,
666 				      void *async_arg);
667 static path_id_t xptnextfreepathid(void);
668 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
669 static union ccb *xpt_get_ccb(struct cam_ed *device);
670 static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
671 				  u_int32_t new_priority);
672 static void	 xpt_run_dev_allocq(struct cam_eb *bus);
673 static void	 xpt_run_dev_sendq(struct cam_eb *bus);
674 static timeout_t xpt_release_devq_timeout;
675 static void	 xpt_release_bus(struct cam_eb *bus);
676 static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
677 					 int run_queue);
678 static struct cam_et*
679 		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
680 static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
681 static struct cam_ed*
682 		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
683 				  lun_id_t lun_id);
684 static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
685 				    struct cam_ed *device);
686 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
687 static struct cam_eb*
688 		 xpt_find_bus(path_id_t path_id);
689 static struct cam_et*
690 		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
691 static struct cam_ed*
692 		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
693 static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
694 static void	 xpt_scan_lun(struct cam_periph *periph,
695 			      struct cam_path *path, cam_flags flags,
696 			      union ccb *ccb);
697 static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
698 static xpt_busfunc_t	xptconfigbuscountfunc;
699 static xpt_busfunc_t	xptconfigfunc;
700 static void	 xpt_config(void *arg);
701 static xpt_devicefunc_t xptpassannouncefunc;
702 static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
703 static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
704 static void	 xptpoll(struct cam_sim *sim);
705 static inthand2_t swi_camnet;
706 static inthand2_t swi_cambio;
707 static void	 camisr(cam_isrq_t *queue);
708 #if 0
709 static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
710 static void	 xptasync(struct cam_periph *periph,
711 			  u_int32_t code, cam_path *path);
712 #endif
713 static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
714 				    int num_patterns, struct cam_eb *bus);
715 static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
716 				       int num_patterns, struct cam_ed *device);
717 static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
718 				       int num_patterns,
719 				       struct cam_periph *periph);
720 static xpt_busfunc_t	xptedtbusfunc;
721 static xpt_targetfunc_t	xptedttargetfunc;
722 static xpt_devicefunc_t	xptedtdevicefunc;
723 static xpt_periphfunc_t	xptedtperiphfunc;
724 static xpt_pdrvfunc_t	xptplistpdrvfunc;
725 static xpt_periphfunc_t	xptplistperiphfunc;
726 static int		xptedtmatch(struct ccb_dev_match *cdm);
727 static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
728 static int		xptbustraverse(struct cam_eb *start_bus,
729 				       xpt_busfunc_t *tr_func, void *arg);
730 static int		xpttargettraverse(struct cam_eb *bus,
731 					  struct cam_et *start_target,
732 					  xpt_targetfunc_t *tr_func, void *arg);
733 static int		xptdevicetraverse(struct cam_et *target,
734 					  struct cam_ed *start_device,
735 					  xpt_devicefunc_t *tr_func, void *arg);
736 static int		xptperiphtraverse(struct cam_ed *device,
737 					  struct cam_periph *start_periph,
738 					  xpt_periphfunc_t *tr_func, void *arg);
739 static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
740 					xpt_pdrvfunc_t *tr_func, void *arg);
741 static int		xptpdperiphtraverse(struct periph_driver **pdrv,
742 					    struct cam_periph *start_periph,
743 					    xpt_periphfunc_t *tr_func,
744 					    void *arg);
745 static xpt_busfunc_t	xptdefbusfunc;
746 static xpt_targetfunc_t	xptdeftargetfunc;
747 static xpt_devicefunc_t	xptdefdevicefunc;
748 static xpt_periphfunc_t	xptdefperiphfunc;
749 static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
750 #ifdef notusedyet
751 static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
752 					    void *arg);
753 #endif
754 static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
755 					    void *arg);
756 #ifdef notusedyet
757 static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
758 					    void *arg);
759 #endif
760 static xpt_devicefunc_t	xptsetasyncfunc;
761 static xpt_busfunc_t	xptsetasyncbusfunc;
762 static cam_status	xptregister(struct cam_periph *periph,
763 				    void *arg);
764 static cam_status	proberegister(struct cam_periph *periph,
765 				      void *arg);
766 static void	 probeschedule(struct cam_periph *probe_periph);
767 static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
768 static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
769 static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
770 static void	 probecleanup(struct cam_periph *periph);
771 static void	 xpt_find_quirk(struct cam_ed *device);
772 static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
773 					   struct cam_ed *device,
774 					   int async_update);
775 static void	 xpt_toggle_tags(struct cam_path *path);
776 static void	 xpt_start_tags(struct cam_path *path);
777 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
778 					    struct cam_ed *dev);
779 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
780 					   struct cam_ed *dev);
781 static __inline int periph_is_queued(struct cam_periph *periph);
782 static __inline int device_is_alloc_queued(struct cam_ed *device);
783 static __inline int device_is_send_queued(struct cam_ed *device);
784 static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
785 
786 static __inline int
787 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
788 {
789 	int retval;
790 
791 	if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
792 		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
793 			cam_ccbq_resize(&dev->ccbq,
794 					dev->ccbq.dev_openings
795 					+ dev->ccbq.dev_active);
796 			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
797 		}
798 		/*
799 		 * The priority of a device waiting for CCB resources
800 		 * is that of the highest priority peripheral driver
801 		 * enqueued.
802 		 */
803 		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
804 					  &dev->alloc_ccb_entry.pinfo,
805 					  CAMQ_GET_HEAD(&dev->drvq)->priority);
806 	} else {
807 		retval = 0;
808 	}
809 
810 	return (retval);
811 }
812 
813 static __inline int
814 xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
815 {
816 	int	retval;
817 
818 	if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
819 		/*
820 		 * The priority of a device waiting for controller
821 		 * resources is that of the highest priority CCB
822 		 * enqueued.
823 		 */
824 		retval =
825 		    xpt_schedule_dev(&bus->sim->devq->send_queue,
826 				     &dev->send_ccb_entry.pinfo,
827 				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
828 	} else {
829 		retval = 0;
830 	}
831 	return (retval);
832 }
833 
834 static __inline int
835 periph_is_queued(struct cam_periph *periph)
836 {
837 	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
838 }
839 
840 static __inline int
841 device_is_alloc_queued(struct cam_ed *device)
842 {
843 	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
844 }
845 
846 static __inline int
847 device_is_send_queued(struct cam_ed *device)
848 {
849 	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
850 }
851 
852 static __inline int
853 dev_allocq_is_runnable(struct cam_devq *devq)
854 {
855 	/*
856 	 * Have work to do.
857 	 * Have space to do more work.
858 	 * Allowed to do work.
859 	 */
860 	return ((devq->alloc_queue.qfrozen_cnt == 0)
861 	     && (devq->alloc_queue.entries > 0)
862 	     && (devq->alloc_openings > 0));
863 }
864 
865 static void
866 xpt_periph_init(void)
867 {
868 	dev_ops_add(&xpt_ops, 0, 0);
869 	make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
870 }
871 
872 static void
873 probe_periph_init(void)
874 {
875 }
876 
877 
878 static void
879 xptdone(struct cam_periph *periph, union ccb *done_ccb)
880 {
881 	/* Caller will release the CCB */
882 	wakeup(&done_ccb->ccb_h.cbfcnp);
883 }
884 
885 static int
886 xptopen(struct dev_open_args *ap)
887 {
888 	cdev_t dev = ap->a_head.a_dev;
889 	int unit;
890 
891 	unit = minor(dev) & 0xff;
892 
893 	/*
894 	 * Only allow read-write access.
895 	 */
896 	if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
897 		return(EPERM);
898 
899 	/*
900 	 * We don't allow nonblocking access.
901 	 */
902 	if ((ap->a_oflags & O_NONBLOCK) != 0) {
903 		kprintf("xpt%d: can't do nonblocking access\n", unit);
904 		return(ENODEV);
905 	}
906 
907 	/*
908 	 * We only have one transport layer right now.  If someone accesses
909 	 * us via something other than minor number 0, point out their
910 	 * mistake.
911 	 */
912 	if (unit != 0) {
913 		kprintf("xptopen: got invalid xpt unit %d\n", unit);
914 		return(ENXIO);
915 	}
916 
917 	/* Mark ourselves open */
918 	xsoftc.flags |= XPT_FLAG_OPEN;
919 
920 	return(0);
921 }
922 
923 static int
924 xptclose(struct dev_close_args *ap)
925 {
926 	cdev_t dev = ap->a_head.a_dev;
927 	int unit;
928 
929 	unit = minor(dev) & 0xff;
930 
931 	/*
932 	 * We only have one transport layer right now.  If someone accesses
933 	 * us via something other than minor number 0, point out their
934 	 * mistake.
935 	 */
936 	if (unit != 0) {
937 		kprintf("xptclose: got invalid xpt unit %d\n", unit);
938 		return(ENXIO);
939 	}
940 
941 	/* Mark ourselves closed */
942 	xsoftc.flags &= ~XPT_FLAG_OPEN;
943 
944 	return(0);
945 }
946 
947 static int
948 xptioctl(struct dev_ioctl_args *ap)
949 {
950 	cdev_t dev = ap->a_head.a_dev;
951 	int unit, error;
952 
953 	error = 0;
954 	unit = minor(dev) & 0xff;
955 
956 	/*
957 	 * We only have one transport layer right now.  If someone accesses
958 	 * us via something other than minor number 0, point out their
959 	 * mistake.
960 	 */
961 	if (unit != 0) {
962 		kprintf("xptioctl: got invalid xpt unit %d\n", unit);
963 		return(ENXIO);
964 	}
965 
966 	switch(ap->a_cmd) {
967 	/*
968 	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
969 	 * to accept CCB types that don't quite make sense to send through a
970 	 * passthrough driver.
971 	 */
972 	case CAMIOCOMMAND: {
973 		union ccb *ccb;
974 		union ccb *inccb;
975 
976 		inccb = (union ccb *)ap->a_data;
977 
978 		switch(inccb->ccb_h.func_code) {
979 		case XPT_SCAN_BUS:
980 		case XPT_RESET_BUS:
981 			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
982 			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
983 				error = EINVAL;
984 				break;
985 			}
986 			/* FALLTHROUGH */
987 		case XPT_PATH_INQ:
988 		case XPT_ENG_INQ:
989 		case XPT_SCAN_LUN:
990 
991 			ccb = xpt_alloc_ccb();
992 
993 			/*
994 			 * Create a path using the bus, target, and lun the
995 			 * user passed in.
996 			 */
997 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
998 					    inccb->ccb_h.path_id,
999 					    inccb->ccb_h.target_id,
1000 					    inccb->ccb_h.target_lun) !=
1001 					    CAM_REQ_CMP){
1002 				error = EINVAL;
1003 				xpt_free_ccb(ccb);
1004 				break;
1005 			}
1006 			/* Ensure all of our fields are correct */
1007 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1008 				      inccb->ccb_h.pinfo.priority);
1009 			xpt_merge_ccb(ccb, inccb);
1010 			ccb->ccb_h.cbfcnp = xptdone;
1011 			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1012 			bcopy(ccb, inccb, sizeof(union ccb));
1013 			xpt_free_path(ccb->ccb_h.path);
1014 			xpt_free_ccb(ccb);
1015 			break;
1016 
1017 		case XPT_DEBUG: {
1018 			union ccb ccb;
1019 
1020 			/*
1021 			 * This is an immediate CCB, so it's okay to
1022 			 * allocate it on the stack.
1023 			 */
1024 
1025 			/*
1026 			 * Create a path using the bus, target, and lun the
1027 			 * user passed in.
1028 			 */
1029 			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1030 					    inccb->ccb_h.path_id,
1031 					    inccb->ccb_h.target_id,
1032 					    inccb->ccb_h.target_lun) !=
1033 					    CAM_REQ_CMP){
1034 				error = EINVAL;
1035 				break;
1036 			}
1037 			/* Ensure all of our fields are correct */
1038 			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1039 				      inccb->ccb_h.pinfo.priority);
1040 			xpt_merge_ccb(&ccb, inccb);
1041 			ccb.ccb_h.cbfcnp = xptdone;
1042 			xpt_action(&ccb);
1043 			bcopy(&ccb, inccb, sizeof(union ccb));
1044 			xpt_free_path(ccb.ccb_h.path);
1045 			break;
1046 
1047 		}
1048 		case XPT_DEV_MATCH: {
1049 			struct cam_periph_map_info mapinfo;
1050 			struct cam_path *old_path;
1051 
1052 			/*
1053 			 * We can't deal with physical addresses for this
1054 			 * type of transaction.
1055 			 */
1056 			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1057 				error = EINVAL;
1058 				break;
1059 			}
1060 
1061 			/*
1062 			 * Save this in case the caller had it set to
1063 			 * something in particular.
1064 			 */
1065 			old_path = inccb->ccb_h.path;
1066 
1067 			/*
1068 			 * We really don't need a path for the matching
1069 			 * code.  The path is needed because of the
1070 			 * debugging statements in xpt_action().  They
1071 			 * assume that the CCB has a valid path.
1072 			 */
1073 			inccb->ccb_h.path = xpt_periph->path;
1074 
1075 			bzero(&mapinfo, sizeof(mapinfo));
1076 
1077 			/*
1078 			 * Map the pattern and match buffers into kernel
1079 			 * virtual address space.
1080 			 */
1081 			error = cam_periph_mapmem(inccb, &mapinfo);
1082 
1083 			if (error) {
1084 				inccb->ccb_h.path = old_path;
1085 				break;
1086 			}
1087 
1088 			/*
1089 			 * This is an immediate CCB, so we can send it on directly.
1090 			 */
1091 			xpt_action(inccb);
1092 
1093 			/*
1094 			 * Map the buffers back into user space.
1095 			 */
1096 			cam_periph_unmapmem(inccb, &mapinfo);
1097 
1098 			inccb->ccb_h.path = old_path;
1099 
1100 			error = 0;
1101 			break;
1102 		}
1103 		default:
1104 			error = ENOTSUP;
1105 			break;
1106 		}
1107 		break;
1108 	}
1109 	/*
1110 	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1111 	 * with the peripheral driver name and unit name filled in.  The other
1112 	 * fields don't really matter as input.  The passthrough driver name
1113 	 * ("pass"), and unit number are passed back in the ccb.  The current
1114 	 * device generation number, and the index into the device peripheral
1115 	 * driver list, and the status are also passed back.  Note that
1116 	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1117 	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
1118 	 * (or rather should be) impossible for the device peripheral driver
1119 	 * list to change since we look at the whole thing in one pass, and
1120 	 * we do it within a critical section.
1121 	 *
1122 	 */
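	/*
	 * Userland usage sketch (hypothetical values; roughly what libcam
	 * does when looking up the pass(4) instance for a device):
	 *
	 *	union ccb ccb;
	 *	int fd = open("/dev/xpt0", O_RDWR);
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strncpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 0;
	 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
	 *	    ccb.ccb_h.status == CAM_REQ_CMP)
	 *		use ccb.cgdl.periph_name and ccb.cgdl.unit_number;
	 *
	 * On success the returned name is "pass" and unit_number identifies
	 * the passthrough instance attached to da0 in this example.
	 */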
1123 	case CAMGETPASSTHRU: {
1124 		union ccb *ccb;
1125 		struct cam_periph *periph;
1126 		struct periph_driver **p_drv;
1127 		char   *name;
1128 		int unit;
1129 		int cur_generation;
1130 		int base_periph_found;
1131 		int splbreaknum;
1132 
1133 		ccb = (union ccb *)ap->a_data;
1134 		unit = ccb->cgdl.unit_number;
1135 		name = ccb->cgdl.periph_name;
1136 		/*
1137 		 * Every 100 devices, call splz() to check for pending interrupts
1138 		 * and give the software interrupt handler a chance to run.
1139 		 *
1140 		 * Most systems won't run into this check, but this should
1141 		 * avoid starvation in the software interrupt handler in
1142 		 * large systems.
1143 		 */
1144 		splbreaknum = 100;
1145 
1146 		ccb = (union ccb *)ap->a_data;
1147 
1148 		base_periph_found = 0;
1149 
1150 		/*
1151 		 * Sanity check -- make sure we don't get a null peripheral
1152 		 * driver name.
1153 		 */
1154 		if (*ccb->cgdl.periph_name == '\0') {
1155 			error = EINVAL;
1156 			break;
1157 		}
1158 
1159 		/* Keep the list from changing while we traverse it */
1160 		crit_enter();
1161 ptstartover:
1162 		cur_generation = xsoftc.generation;
1163 
1164 		/* first find our driver in the list of drivers */
1165 		SET_FOREACH(p_drv, periphdriver_set) {
1166 			if (strcmp((*p_drv)->driver_name, name) == 0)
1167 				break;
1168 		}
1169 
1170 		if (*p_drv == NULL) {
1171 			crit_exit();
1172 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1173 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1174 			*ccb->cgdl.periph_name = '\0';
1175 			ccb->cgdl.unit_number = 0;
1176 			error = ENOENT;
1177 			break;
1178 		}
1179 
1180 		/*
1181 		 * Run through every peripheral instance of this driver
1182 		 * and check to see whether it matches the unit passed
1183 		 * in by the user.  If it does, get out of the loops and
1184 		 * find the passthrough driver associated with that
1185 		 * peripheral driver.
1186 		 */
1187 		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
1188 		     periph = TAILQ_NEXT(periph, unit_links)) {
1189 
1190 			if (periph->unit_number == unit) {
1191 				break;
1192 			} else if (--splbreaknum == 0) {
1193 				splz();
1194 				splbreaknum = 100;
1195 				if (cur_generation != xsoftc.generation)
1196 				       goto ptstartover;
1197 			}
1198 		}
1199 		/*
1200 		 * If we found the peripheral driver that the user passed
1201 		 * in, go through all of the peripheral drivers for that
1202 		 * particular device and look for a passthrough driver.
1203 		 */
1204 		if (periph != NULL) {
1205 			struct cam_ed *device;
1206 			int i;
1207 
1208 			base_periph_found = 1;
1209 			device = periph->path->device;
1210 			for (i = 0, periph = device->periphs.slh_first;
1211 			     periph != NULL;
1212 			     periph = periph->periph_links.sle_next, i++) {
1213 				/*
1214 				 * Check to see whether we have a
1215 				 * passthrough device or not.
1216 				 */
1217 				if (strcmp(periph->periph_name, "pass") == 0) {
1218 					/*
1219 					 * Fill in the getdevlist fields.
1220 					 */
1221 					strcpy(ccb->cgdl.periph_name,
1222 					       periph->periph_name);
1223 					ccb->cgdl.unit_number =
1224 						periph->unit_number;
1225 					if (periph->periph_links.sle_next)
1226 						ccb->cgdl.status =
1227 							CAM_GDEVLIST_MORE_DEVS;
1228 					else
1229 						ccb->cgdl.status =
1230 						       CAM_GDEVLIST_LAST_DEVICE;
1231 					ccb->cgdl.generation =
1232 						device->generation;
1233 					ccb->cgdl.index = i;
1234 					/*
1235 					 * Fill in some CCB header fields
1236 					 * that the user may want.
1237 					 */
1238 					ccb->ccb_h.path_id =
1239 						periph->path->bus->path_id;
1240 					ccb->ccb_h.target_id =
1241 						periph->path->target->target_id;
1242 					ccb->ccb_h.target_lun =
1243 						periph->path->device->lun_id;
1244 					ccb->ccb_h.status = CAM_REQ_CMP;
1245 					break;
1246 				}
1247 			}
1248 		}
1249 
1250 		/*
1251 		 * If the periph is null here, one of two things has
1252 		 * happened.  The first possibility is that we couldn't
1253 		 * find the unit number of the particular peripheral driver
1254 		 * that the user is asking about.  e.g. the user asks for
1255 		 * the passthrough driver for "da11".  We find the list of
1256 		 * "da" peripherals all right, but there is no unit 11.
1257 		 * The other possibility is that we went through the list
1258 		 * of peripheral drivers attached to the device structure,
1259 		 * but didn't find one with the name "pass".  Either way,
1260 		 * we return ENOENT, since we couldn't find something.
1261 		 */
1262 		if (periph == NULL) {
1263 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1264 			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1265 			*ccb->cgdl.periph_name = '\0';
1266 			ccb->cgdl.unit_number = 0;
1267 			error = ENOENT;
1268 			/*
1269 			 * It is unfortunate that this is even necessary,
1270 			 * but there are many, many clueless users out there.
1271 			 * If this is true, the user is looking for the
1272 			 * passthrough driver, but doesn't have one in his
1273 			 * kernel.
1274 			 */
1275 			if (base_periph_found == 1) {
1276 				kprintf("xptioctl: pass driver is not in the "
1277 				       "kernel\n");
1278 				kprintf("xptioctl: put \"device pass0\" in "
1279 				       "your kernel config file\n");
1280 			}
1281 		}
1282 		crit_exit();
1283 		break;
1284 		}
1285 	default:
1286 		error = ENOTTY;
1287 		break;
1288 	}
1289 
1290 	return(error);
1291 }
1292 
1293 /* Functions accessed by the peripheral drivers */
1294 static void
1295 xpt_init(void *dummy)
1296 {
1297 	struct cam_sim *xpt_sim;
1298 	struct cam_path *path;
1299 	struct cam_devq *devq;
1300 	cam_status status;
1301 
1302 	TAILQ_INIT(&xpt_busses);
1303 	TAILQ_INIT(&cam_bioq);
1304 	TAILQ_INIT(&cam_netq);
1305 	SLIST_INIT(&ccb_freeq);
1306 	STAILQ_INIT(&highpowerq);
1307 
1308 	/*
1309 	 * The xpt layer is, itself, the equivalent of a SIM.
1310 	 * Allow 16 ccbs in the ccb pool for it.  This should
1311 	 * give decent parallelism when we probe busses and
1312 	 * perform other XPT functions.
1313 	 */
1314 	devq = cam_simq_alloc(16);
1315 	xpt_sim = cam_sim_alloc(xptaction,
1316 				xptpoll,
1317 				"xpt",
1318 				/*softc*/NULL,
1319 				/*unit*/0,
1320 				/*max_dev_transactions*/0,
1321 				/*max_tagged_dev_transactions*/0,
1322 				devq);
1323 	cam_simq_release(devq);
1324 	xpt_max_ccbs = 16;
1325 
1326 	xpt_bus_register(xpt_sim, /*bus #*/0);
1327 
1328 	/*
1329 	 * Looking at the XPT from the SIM layer, the XPT is
1330 	 * the equivalent of a peripheral driver.  Allocate
1331 	 * a peripheral driver entry for us.
1332 	 */
1333 	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1334 				      CAM_TARGET_WILDCARD,
1335 				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1336 		kprintf("xpt_init: xpt_create_path failed with status %#x,"
1337 		       " failing attach\n", status);
1338 		return;
1339 	}
1340 
1341 	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1342 			 path, NULL, 0, NULL);
1343 	xpt_free_path(path);
1344 
1345 	xpt_sim->softc = xpt_periph;
1346 
1347 	/*
1348 	 * Register a callback for when interrupts are enabled.
1349 	 */
1350 	xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
1351 				  M_TEMP, M_INTWAIT | M_ZERO);
1352 	xpt_config_hook->ich_func = xpt_config;
1353 	xpt_config_hook->ich_desc = "xpt";
1354 	xpt_config_hook->ich_order = 1000;
1355 	if (config_intrhook_establish(xpt_config_hook) != 0) {
1356 		kfree(xpt_config_hook, M_TEMP);
1357 		kprintf("xpt_init: config_intrhook_establish failed "
1358 		       "- failing attach\n");
1359 	}
1360 
1361 	/* Install our software interrupt handlers */
1362 	register_swi(SWI_CAMNET, swi_camnet, NULL, "swi_camnet", NULL);
1363 	register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
1364 }
1365 
1366 static cam_status
1367 xptregister(struct cam_periph *periph, void *arg)
1368 {
1369 	if (periph == NULL) {
1370 		kprintf("xptregister: periph was NULL!!\n");
1371 		return(CAM_REQ_CMP_ERR);
1372 	}
1373 
1374 	periph->softc = NULL;
1375 
1376 	xpt_periph = periph;
1377 
1378 	return(CAM_REQ_CMP);
1379 }
1380 
1381 int32_t
1382 xpt_add_periph(struct cam_periph *periph)
1383 {
1384 	struct cam_ed *device;
1385 	int32_t	 status;
1386 	struct periph_list *periph_head;
1387 
1388 	device = periph->path->device;
1389 
1390 	periph_head = &device->periphs;
1391 
1392 	status = CAM_REQ_CMP;
1393 
1394 	if (device != NULL) {
1395 		/*
1396 		 * Make room for this peripheral
1397 		 * so it will fit in the queue
1398 		 * when it's scheduled to run
1399 		 */
1400 		crit_enter();
1401 		status = camq_resize(&device->drvq,
1402 				     device->drvq.array_size + 1);
1403 
1404 		device->generation++;
1405 
1406 		SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1407 		crit_exit();
1408 	}
1409 
1410 	xsoftc.generation++;
1411 
1412 	return (status);
1413 }
1414 
1415 void
1416 xpt_remove_periph(struct cam_periph *periph)
1417 {
1418 	struct cam_ed *device;
1419 
1420 	device = periph->path->device;
1421 
1422 	if (device != NULL) {
1423 		struct periph_list *periph_head;
1424 
1425 		periph_head = &device->periphs;
1426 
1427 		/* Release the slot for this peripheral */
1428 		crit_enter();
1429 		camq_resize(&device->drvq, device->drvq.array_size - 1);
1430 
1431 		device->generation++;
1432 
1433 		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1434 		crit_exit();
1435 	}
1436 
1437 	xsoftc.generation++;
1438 
1439 }
1440 
1441 void
1442 xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1443 {
1444 	u_int mb;
1445 	struct cam_path *path;
1446 	struct ccb_trans_settings cts;
1447 
1448 	path = periph->path;
1449 	/*
1450 	 * To ensure that this is printed in one piece,
1451 	 * mask out CAM interrupts.
1452 	 */
1453 	crit_enter();
1454 	kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1455 	       periph->periph_name, periph->unit_number,
1456 	       path->bus->sim->sim_name,
1457 	       path->bus->sim->unit_number,
1458 	       path->bus->sim->bus_id,
1459 	       path->target->target_id,
1460 	       path->device->lun_id);
1461 	kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1462 	scsi_print_inquiry(&path->device->inq_data);
1463 	if ((bootverbose)
1464 	 && (path->device->serial_num_len > 0)) {
1465 		/* Don't wrap the screen - print only the first 60 chars */
1466 		kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1467 		       periph->unit_number, path->device->serial_num);
1468 	}
1469 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1470 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1471 	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
1472 	xpt_action((union ccb*)&cts);
1473 	if (cts.ccb_h.status == CAM_REQ_CMP) {
1474 		u_int speed;
1475 		u_int freq;
1476 
1477 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1478 		  && cts.sync_offset != 0) {
1479 			freq = scsi_calc_syncsrate(cts.sync_period);
1480 			speed = freq;
1481 		} else {
1482 			struct ccb_pathinq cpi;
1483 
1484 			/* Ask the SIM for its base transfer speed */
1485 			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1486 			cpi.ccb_h.func_code = XPT_PATH_INQ;
1487 			xpt_action((union ccb *)&cpi);
1488 
1489 			speed = cpi.base_transfer_speed;
1490 			freq = 0;
1491 		}
1492 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
1493 			speed *= (0x01 << cts.bus_width);
1494 		mb = speed / 1000;
1495 		if (mb > 0)
1496 			kprintf("%s%d: %d.%03dMB/s transfers",
1497 			       periph->periph_name, periph->unit_number,
1498 			       mb, speed % 1000);
1499 		else
1500 			kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1501 			       periph->unit_number, speed);
1502 		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1503 		 && cts.sync_offset != 0) {
1504 			kprintf(" (%d.%03dMHz, offset %d", freq / 1000,
1505 			       freq % 1000, cts.sync_offset);
1506 		}
1507 		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
1508 		 && cts.bus_width > 0) {
1509 			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1510 			 && cts.sync_offset != 0) {
1511 				kprintf(", ");
1512 			} else {
1513 				kprintf(" (");
1514 			}
1515 			kprintf("%dbit)", 8 * (0x01 << cts.bus_width));
1516 		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
1517 			&& cts.sync_offset != 0) {
1518 			kprintf(")");
1519 		}
1520 
1521 		if (path->device->inq_flags & SID_CmdQue
1522 		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1523 			kprintf(", Tagged Queueing Enabled");
1524 		}
1525 
1526 		kprintf("\n");
1527 	} else if (path->device->inq_flags & SID_CmdQue
1528    		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1529 		kprintf("%s%d: Tagged Queueing Enabled\n",
1530 		       periph->periph_name, periph->unit_number);
1531 	}
1532 
1533 	/*
1534 	 * We only want to print the caller's announce string if they've
1535 	 * passed one in.
1536 	 */
1537 	if (announce_string != NULL)
1538 		kprintf("%s%d: %s\n", periph->periph_name,
1539 		       periph->unit_number, announce_string);
1540 	crit_exit();
1541 }
1542 
1543 
1544 static dev_match_ret
1545 xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
1546 	    struct cam_eb *bus)
1547 {
1548 	dev_match_ret retval;
1549 	int i;
1550 
1551 	retval = DM_RET_NONE;
1552 
1553 	/*
1554 	 * If we aren't given something to match against, that's an error.
1555 	 */
1556 	if (bus == NULL)
1557 		return(DM_RET_ERROR);
1558 
1559 	/*
1560 	 * If there are no match entries, then this bus matches no
1561 	 * matter what.
1562 	 */
1563 	if ((patterns == NULL) || (num_patterns == 0))
1564 		return(DM_RET_DESCEND | DM_RET_COPY);
1565 
1566 	for (i = 0; i < num_patterns; i++) {
1567 		struct bus_match_pattern *cur_pattern;
1568 
1569 		/*
1570 		 * If the pattern in question isn't for a bus node, we
1571 		 * aren't interested.  However, we do indicate to the
1572 		 * calling routine that we should continue descending the
1573 		 * tree, since the user wants to match against lower-level
1574 		 * EDT elements.
1575 		 */
1576 		if (patterns[i].type != DEV_MATCH_BUS) {
1577 			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1578 				retval |= DM_RET_DESCEND;
1579 			continue;
1580 		}
1581 
1582 		cur_pattern = &patterns[i].pattern.bus_pattern;
1583 
1584 		/*
1585 		 * If they want to match any bus node, we give them any
1586 		 * bus node.
1587 		 */
1588 		if (cur_pattern->flags == BUS_MATCH_ANY) {
1589 			/* set the copy flag */
1590 			retval |= DM_RET_COPY;
1591 
1592 			/*
1593 			 * If we've already decided on an action, go ahead
1594 			 * and return.
1595 			 */
1596 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1597 				return(retval);
1598 		}
1599 
1600 		/*
1601 		 * Not sure why someone would do this...
1602 		 */
1603 		if (cur_pattern->flags == BUS_MATCH_NONE)
1604 			continue;
1605 
1606 		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1607 		 && (cur_pattern->path_id != bus->path_id))
1608 			continue;
1609 
1610 		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1611 		 && (cur_pattern->bus_id != bus->sim->bus_id))
1612 			continue;
1613 
1614 		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1615 		 && (cur_pattern->unit_number != bus->sim->unit_number))
1616 			continue;
1617 
1618 		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1619 		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1620 			     DEV_IDLEN) != 0))
1621 			continue;
1622 
1623 		/*
1624 		 * If we get to this point, the user definitely wants
1625 		 * information on this bus.  So tell the caller to copy the
1626 		 * data out.
1627 		 */
1628 		retval |= DM_RET_COPY;
1629 
1630 		/*
1631 		 * If the return action has been set to descend, then we
1632 		 * know that we've already seen a non-bus matching
1633 		 * expression, therefore we need to further descend the tree.
1634 		 * This won't change by continuing around the loop, so we
1635 		 * go ahead and return.  If we haven't seen a non-bus
1636 		 * matching expression, we keep going around the loop until
1637 		 * we exhaust the matching expressions.  We'll set the stop
1638 		 * flag once we fall out of the loop.
1639 		 */
1640 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1641 			return(retval);
1642 	}
1643 
1644 	/*
1645 	 * If the return action hasn't been set to descend yet, that means
1646 	 * we haven't seen anything other than bus matching patterns.  So
1647 	 * tell the caller to stop descending the tree -- the user doesn't
1648 	 * want to match against lower level tree elements.
1649 	 */
1650 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1651 		retval |= DM_RET_STOP;
1652 
1653 	return(retval);
1654 }
1655 
1656 static dev_match_ret
1657 xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
1658 	       struct cam_ed *device)
1659 {
1660 	dev_match_ret retval;
1661 	int i;
1662 
1663 	retval = DM_RET_NONE;
1664 
1665 	/*
1666 	 * If we aren't given something to match against, that's an error.
1667 	 */
1668 	if (device == NULL)
1669 		return(DM_RET_ERROR);
1670 
1671 	/*
1672 	 * If there are no match entries, then this device matches no
1673 	 * matter what.
1674 	 */
1675 	if ((patterns == NULL) || (num_patterns == 0))
1676 		return(DM_RET_DESCEND | DM_RET_COPY);
1677 
1678 	for (i = 0; i < num_patterns; i++) {
1679 		struct device_match_pattern *cur_pattern;
1680 
1681 		/*
1682 		 * If the pattern in question isn't for a device node, we
1683 		 * aren't interested.
1684 		 */
1685 		if (patterns[i].type != DEV_MATCH_DEVICE) {
1686 			if ((patterns[i].type == DEV_MATCH_PERIPH)
1687 			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1688 				retval |= DM_RET_DESCEND;
1689 			continue;
1690 		}
1691 
1692 		cur_pattern = &patterns[i].pattern.device_pattern;
1693 
1694 		/*
1695 		 * If they want to match any device node, we give them any
1696 		 * device node.
1697 		 */
1698 		if (cur_pattern->flags == DEV_MATCH_ANY) {
1699 			/* set the copy flag */
1700 			retval |= DM_RET_COPY;
1701 
1702 
1703 			/*
1704 			 * If we've already decided on an action, go ahead
1705 			 * and return.
1706 			 */
1707 			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1708 				return(retval);
1709 		}
1710 
1711 		/*
1712 		 * Not sure why someone would do this...
1713 		 */
1714 		if (cur_pattern->flags == DEV_MATCH_NONE)
1715 			continue;
1716 
1717 		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1718 		 && (cur_pattern->path_id != device->target->bus->path_id))
1719 			continue;
1720 
1721 		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1722 		 && (cur_pattern->target_id != device->target->target_id))
1723 			continue;
1724 
1725 		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1726 		 && (cur_pattern->target_lun != device->lun_id))
1727 			continue;
1728 
1729 		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1730 		 && (cam_quirkmatch((caddr_t)&device->inq_data,
1731 				    (caddr_t)&cur_pattern->inq_pat,
1732 				    1, sizeof(cur_pattern->inq_pat),
1733 				    scsi_static_inquiry_match) == NULL))
1734 			continue;
1735 
1736 		/*
1737 		 * If we get to this point, the user definitely wants
1738 		 * information on this device.  So tell the caller to copy
1739 		 * the data out.
1740 		 */
1741 		retval |= DM_RET_COPY;
1742 
1743 		/*
1744 		 * If the return action has been set to descend, then we
1745 		 * know that we've already seen a peripheral matching
1746 		 * expression, therefore we need to further descend the tree.
1747 		 * This won't change by continuing around the loop, so we
1748 		 * go ahead and return.  If we haven't seen a peripheral
1749 		 * matching expression, we keep going around the loop until
1750 		 * we exhaust the matching expressions.  We'll set the stop
1751 		 * flag once we fall out of the loop.
1752 		 */
1753 		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1754 			return(retval);
1755 	}
1756 
1757 	/*
1758 	 * If the return action hasn't been set to descend yet, that means
1759 	 * we haven't seen any peripheral matching patterns.  So tell the
1760 	 * caller to stop descending the tree -- the user doesn't want to
1761 	 * match against lower level tree elements.
1762 	 */
1763 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1764 		retval |= DM_RET_STOP;
1765 
1766 	return(retval);
1767 }
1768 
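/*
 * Illustrative sketch (not part of the driver): how a caller might fill
 * in a single device pattern for the matcher above to act on.  The field
 * names follow struct dev_match_pattern from cam_ccb.h; the target/lun
 * values are hypothetical.
 *
 *	struct dev_match_pattern pat;
 *
 *	bzero(&pat, sizeof(pat));
 *	pat.type = DEV_MATCH_DEVICE;
 *	pat.pattern.device_pattern.flags = DEV_MATCH_TARGET | DEV_MATCH_LUN;
 *	pat.pattern.device_pattern.target_id = 3;
 *	pat.pattern.device_pattern.target_lun = 0;
 *
 * With this pattern xptdevicematch() sets DM_RET_COPY only for the device
 * at target 3, lun 0, and adds DM_RET_STOP on the way out because no
 * peripheral patterns were supplied.
 */
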
1769 /*
1770  * Match a single peripheral against any number of match patterns.
1771  */
1772 static dev_match_ret
1773 xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
1774 	       struct cam_periph *periph)
1775 {
1776 	dev_match_ret retval;
1777 	int i;
1778 
1779 	/*
1780 	 * If we aren't given something to match against, that's an error.
1781 	 */
1782 	if (periph == NULL)
1783 		return(DM_RET_ERROR);
1784 
1785 	/*
1786 	 * If there are no match entries, then this peripheral matches no
1787 	 * matter what.
1788 	 */
1789 	if ((patterns == NULL) || (num_patterns == 0))
1790 		return(DM_RET_STOP | DM_RET_COPY);
1791 
1792 	/*
1793 	 * There aren't any nodes below a peripheral node, so there's no
1794 	 * reason to descend the tree any further.
1795 	 */
1796 	retval = DM_RET_STOP;
1797 
1798 	for (i = 0; i < num_patterns; i++) {
1799 		struct periph_match_pattern *cur_pattern;
1800 
1801 		/*
1802 		 * If the pattern in question isn't for a peripheral, we
1803 		 * aren't interested.
1804 		 */
1805 		if (patterns[i].type != DEV_MATCH_PERIPH)
1806 			continue;
1807 
1808 		cur_pattern = &patterns[i].pattern.periph_pattern;
1809 
1810 		/*
1811 		 * If they want to match on anything, then we will do so.
1812 		 */
1813 		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
1814 			/* set the copy flag */
1815 			retval |= DM_RET_COPY;
1816 
1817 			/*
1818 			 * We've already set the return action to stop,
1819 			 * since there are no nodes below peripherals in
1820 			 * the tree.
1821 			 */
1822 			return(retval);
1823 		}
1824 
1825 		/*
1826 		 * Not sure why someone would do this...
1827 		 */
1828 		if (cur_pattern->flags == PERIPH_MATCH_NONE)
1829 			continue;
1830 
1831 		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
1832 		 && (cur_pattern->path_id != periph->path->bus->path_id))
1833 			continue;
1834 
1835 		/*
1836 		 * For the target and lun ids, we have to make sure the
1837 		 * target and lun pointers aren't NULL.  The xpt peripheral
1838 		 * has a wildcard target and device.
1839 		 */
1840 		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
1841 		 && ((periph->path->target == NULL)
1842 		 ||(cur_pattern->target_id != periph->path->target->target_id)))
1843 			continue;
1844 
1845 		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
1846 		 && ((periph->path->device == NULL)
1847 		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
1848 			continue;
1849 
1850 		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
1851 		 && (cur_pattern->unit_number != periph->unit_number))
1852 			continue;
1853 
1854 		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
1855 		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
1856 			     DEV_IDLEN) != 0))
1857 			continue;
1858 
1859 		/*
1860 		 * If we get to this point, the user definitely wants
1861 		 * information on this peripheral.  So tell the caller to
1862 		 * copy the data out.
1863 		 */
1864 		retval |= DM_RET_COPY;
1865 
1866 		/*
1867 		 * The return action has already been set to stop, since
1868 		 * peripherals don't have any nodes below them in the EDT.
1869 		 */
1870 		return(retval);
1871 	}
1872 
1873 	/*
1874 	 * If we get to this point, the peripheral that was passed in
1875 	 * doesn't match any of the patterns.
1876 	 */
1877 	return(retval);
1878 }
1879 
1880 static int
1881 xptedtbusfunc(struct cam_eb *bus, void *arg)
1882 {
1883 	struct ccb_dev_match *cdm;
1884 	dev_match_ret retval;
1885 
1886 	cdm = (struct ccb_dev_match *)arg;
1887 
1888 	/*
1889 	 * If our position is for something deeper in the tree, that means
1890 	 * that we've already seen this node.  So, we keep going down.
1891 	 */
1892 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1893 	 && (cdm->pos.cookie.bus == bus)
1894 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1895 	 && (cdm->pos.cookie.target != NULL))
1896 		retval = DM_RET_DESCEND;
1897 	else
1898 		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
1899 
1900 	/*
1901 	 * If we got an error, bail out of the search.
1902 	 */
1903 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
1904 		cdm->status = CAM_DEV_MATCH_ERROR;
1905 		return(0);
1906 	}
1907 
1908 	/*
1909 	 * If the copy flag is set, copy this bus out.
1910 	 */
1911 	if (retval & DM_RET_COPY) {
1912 		int spaceleft, j;
1913 
1914 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
1915 			sizeof(struct dev_match_result));
1916 
1917 		/*
1918 		 * If we don't have enough space to put in another
1919 		 * match result, save our position and tell the
1920 		 * user there are more devices to check.
1921 		 */
1922 		if (spaceleft < sizeof(struct dev_match_result)) {
1923 			bzero(&cdm->pos, sizeof(cdm->pos));
1924 			cdm->pos.position_type =
1925 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
1926 
1927 			cdm->pos.cookie.bus = bus;
1928 			cdm->pos.generations[CAM_BUS_GENERATION]=
1929 				bus_generation;
1930 			cdm->status = CAM_DEV_MATCH_MORE;
1931 			return(0);
1932 		}
1933 		j = cdm->num_matches;
1934 		cdm->num_matches++;
1935 		cdm->matches[j].type = DEV_MATCH_BUS;
1936 		cdm->matches[j].result.bus_result.path_id = bus->path_id;
1937 		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
1938 		cdm->matches[j].result.bus_result.unit_number =
1939 			bus->sim->unit_number;
1940 		strncpy(cdm->matches[j].result.bus_result.dev_name,
1941 			bus->sim->sim_name, DEV_IDLEN);
1942 	}
1943 
1944 	/*
1945 	 * If the user is only interested in busses, there's no
1946 	 * reason to descend to the next level in the tree.
1947 	 */
1948 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
1949 		return(1);
1950 
1951 	/*
1952 	 * If there is a target generation recorded, check it to
1953 	 * make sure the target list hasn't changed.
1954 	 */
1955 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1956 	 && (bus == cdm->pos.cookie.bus)
1957 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1958 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
1959 	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
1960 	     bus->generation)) {
1961 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1962 		return(0);
1963 	}
1964 
1965 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1966 	 && (cdm->pos.cookie.bus == bus)
1967 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1968 	 && (cdm->pos.cookie.target != NULL))
1969 		return(xpttargettraverse(bus,
1970 					(struct cam_et *)cdm->pos.cookie.target,
1971 					 xptedttargetfunc, arg));
1972 	else
1973 		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
1974 }
1975 
1976 static int
1977 xptedttargetfunc(struct cam_et *target, void *arg)
1978 {
1979 	struct ccb_dev_match *cdm;
1980 
1981 	cdm = (struct ccb_dev_match *)arg;
1982 
1983 	/*
1984 	 * If there is a device list generation recorded, check it to
1985 	 * make sure the device list hasn't changed.
1986 	 */
1987 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
1988 	 && (cdm->pos.cookie.bus == target->bus)
1989 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
1990 	 && (cdm->pos.cookie.target == target)
1991 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
1992 	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
1993 	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
1994 	     target->generation)) {
1995 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
1996 		return(0);
1997 	}
1998 
1999 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2000 	 && (cdm->pos.cookie.bus == target->bus)
2001 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2002 	 && (cdm->pos.cookie.target == target)
2003 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2004 	 && (cdm->pos.cookie.device != NULL))
2005 		return(xptdevicetraverse(target,
2006 					(struct cam_ed *)cdm->pos.cookie.device,
2007 					 xptedtdevicefunc, arg));
2008 	else
2009 		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2010 }
2011 
2012 static int
2013 xptedtdevicefunc(struct cam_ed *device, void *arg)
2014 {
2015 
2016 	struct ccb_dev_match *cdm;
2017 	dev_match_ret retval;
2018 
2019 	cdm = (struct ccb_dev_match *)arg;
2020 
2021 	/*
2022 	 * If our position is for something deeper in the tree, that means
2023 	 * that we've already seen this node.  So, we keep going down.
2024 	 */
2025 	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2026 	 && (cdm->pos.cookie.device == device)
2027 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2028 	 && (cdm->pos.cookie.periph != NULL))
2029 		retval = DM_RET_DESCEND;
2030 	else
2031 		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2032 					device);
2033 
2034 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2035 		cdm->status = CAM_DEV_MATCH_ERROR;
2036 		return(0);
2037 	}
2038 
2039 	/*
2040 	 * If the copy flag is set, copy this device out.
2041 	 */
2042 	if (retval & DM_RET_COPY) {
2043 		int spaceleft, j;
2044 
2045 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2046 			sizeof(struct dev_match_result));
2047 
2048 		/*
2049 		 * If we don't have enough space to put in another
2050 		 * match result, save our position and tell the
2051 		 * user there are more devices to check.
2052 		 */
2053 		if (spaceleft < sizeof(struct dev_match_result)) {
2054 			bzero(&cdm->pos, sizeof(cdm->pos));
2055 			cdm->pos.position_type =
2056 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2057 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2058 
2059 			cdm->pos.cookie.bus = device->target->bus;
2060 			cdm->pos.generations[CAM_BUS_GENERATION]=
2061 				bus_generation;
2062 			cdm->pos.cookie.target = device->target;
2063 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2064 				device->target->bus->generation;
2065 			cdm->pos.cookie.device = device;
2066 			cdm->pos.generations[CAM_DEV_GENERATION] =
2067 				device->target->generation;
2068 			cdm->status = CAM_DEV_MATCH_MORE;
2069 			return(0);
2070 		}
2071 		j = cdm->num_matches;
2072 		cdm->num_matches++;
2073 		cdm->matches[j].type = DEV_MATCH_DEVICE;
2074 		cdm->matches[j].result.device_result.path_id =
2075 			device->target->bus->path_id;
2076 		cdm->matches[j].result.device_result.target_id =
2077 			device->target->target_id;
2078 		cdm->matches[j].result.device_result.target_lun =
2079 			device->lun_id;
2080 		bcopy(&device->inq_data,
2081 		      &cdm->matches[j].result.device_result.inq_data,
2082 		      sizeof(struct scsi_inquiry_data));
2083 
2084 		/* Let the user know whether this device is unconfigured */
2085 		if (device->flags & CAM_DEV_UNCONFIGURED)
2086 			cdm->matches[j].result.device_result.flags =
2087 				DEV_RESULT_UNCONFIGURED;
2088 		else
2089 			cdm->matches[j].result.device_result.flags =
2090 				DEV_RESULT_NOFLAG;
2091 	}
2092 
2093 	/*
2094 	 * If the user isn't interested in peripherals, don't descend
2095 	 * the tree any further.
2096 	 */
2097 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2098 		return(1);
2099 
2100 	/*
2101 	 * If there is a peripheral list generation recorded, make sure
2102 	 * it hasn't changed.
2103 	 */
2104 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2105 	 && (device->target->bus == cdm->pos.cookie.bus)
2106 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2107 	 && (device->target == cdm->pos.cookie.target)
2108 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2109 	 && (device == cdm->pos.cookie.device)
2110 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2111 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2112 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2113 	     device->generation)){
2114 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2115 		return(0);
2116 	}
2117 
2118 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2119 	 && (cdm->pos.cookie.bus == device->target->bus)
2120 	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2121 	 && (cdm->pos.cookie.target == device->target)
2122 	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2123 	 && (cdm->pos.cookie.device == device)
2124 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2125 	 && (cdm->pos.cookie.periph != NULL))
2126 		return(xptperiphtraverse(device,
2127 				(struct cam_periph *)cdm->pos.cookie.periph,
2128 				xptedtperiphfunc, arg));
2129 	else
2130 		return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2131 }
2132 
2133 static int
2134 xptedtperiphfunc(struct cam_periph *periph, void *arg)
2135 {
2136 	struct ccb_dev_match *cdm;
2137 	dev_match_ret retval;
2138 
2139 	cdm = (struct ccb_dev_match *)arg;
2140 
2141 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2142 
2143 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2144 		cdm->status = CAM_DEV_MATCH_ERROR;
2145 		return(0);
2146 	}
2147 
2148 	/*
2149 	 * If the copy flag is set, copy this peripheral out.
2150 	 */
2151 	if (retval & DM_RET_COPY) {
2152 		int spaceleft, j;
2153 
2154 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2155 			sizeof(struct dev_match_result));
2156 
2157 		/*
2158 		 * If we don't have enough space to put in another
2159 		 * match result, save our position and tell the
2160 		 * user there are more devices to check.
2161 		 */
2162 		if (spaceleft < sizeof(struct dev_match_result)) {
2163 			bzero(&cdm->pos, sizeof(cdm->pos));
2164 			cdm->pos.position_type =
2165 				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2166 				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2167 				CAM_DEV_POS_PERIPH;
2168 
2169 			cdm->pos.cookie.bus = periph->path->bus;
2170 			cdm->pos.generations[CAM_BUS_GENERATION]=
2171 				bus_generation;
2172 			cdm->pos.cookie.target = periph->path->target;
2173 			cdm->pos.generations[CAM_TARGET_GENERATION] =
2174 				periph->path->bus->generation;
2175 			cdm->pos.cookie.device = periph->path->device;
2176 			cdm->pos.generations[CAM_DEV_GENERATION] =
2177 				periph->path->target->generation;
2178 			cdm->pos.cookie.periph = periph;
2179 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2180 				periph->path->device->generation;
2181 			cdm->status = CAM_DEV_MATCH_MORE;
2182 			return(0);
2183 		}
2184 
2185 		j = cdm->num_matches;
2186 		cdm->num_matches++;
2187 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2188 		cdm->matches[j].result.periph_result.path_id =
2189 			periph->path->bus->path_id;
2190 		cdm->matches[j].result.periph_result.target_id =
2191 			periph->path->target->target_id;
2192 		cdm->matches[j].result.periph_result.target_lun =
2193 			periph->path->device->lun_id;
2194 		cdm->matches[j].result.periph_result.unit_number =
2195 			periph->unit_number;
2196 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2197 			periph->periph_name, DEV_IDLEN);
2198 	}
2199 
2200 	return(1);
2201 }
2202 
2203 static int
2204 xptedtmatch(struct ccb_dev_match *cdm)
2205 {
2206 	int ret;
2207 
2208 	cdm->num_matches = 0;
2209 
2210 	/*
2211 	 * Check the bus list generation.  If it has changed, the user
2212 	 * needs to reset everything and start over.
2213 	 */
2214 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2215 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2216 	 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) {
2217 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2218 		return(0);
2219 	}
2220 
2221 	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2222 	 && (cdm->pos.cookie.bus != NULL))
2223 		ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2224 				     xptedtbusfunc, cdm);
2225 	else
2226 		ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2227 
2228 	/*
2229 	 * If we get back 0, that means that we had to stop before fully
2230 	 * traversing the EDT.  It also means that one of the subroutines
2231 	 * has set the status field to the proper value.  If we get back 1,
2232 	 * we've fully traversed the EDT and copied out any matching entries.
2233 	 */
2234 	if (ret == 1)
2235 		cdm->status = CAM_DEV_MATCH_LAST;
2236 
2237 	return(ret);
2238 }
2239 
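/*
 * Illustrative sketch (not part of the driver): the continuation protocol
 * an XPT_DEV_MATCH consumer is expected to follow.  When a traversal
 * callback runs out of result buffer space it records its position in
 * cdm->pos and sets CAM_DEV_MATCH_MORE; the caller consumes the results
 * it has, leaves cdm->pos alone, and resubmits the same ccb to resume
 * where the traversal left off.  The consume() step is hypothetical.
 *
 *	do {
 *		xpt_action((union ccb *)cdm);
 *		consume(cdm->matches, cdm->num_matches);
 *	} while (cdm->ccb_h.status == CAM_REQ_CMP
 *	      && cdm->status == CAM_DEV_MATCH_MORE);
 */
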
2240 static int
2241 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2242 {
2243 	struct ccb_dev_match *cdm;
2244 
2245 	cdm = (struct ccb_dev_match *)arg;
2246 
2247 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2248 	 && (cdm->pos.cookie.pdrv == pdrv)
2249 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2250 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2251 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2252 	     (*pdrv)->generation)) {
2253 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2254 		return(0);
2255 	}
2256 
2257 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2258 	 && (cdm->pos.cookie.pdrv == pdrv)
2259 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2260 	 && (cdm->pos.cookie.periph != NULL))
2261 		return(xptpdperiphtraverse(pdrv,
2262 				(struct cam_periph *)cdm->pos.cookie.periph,
2263 				xptplistperiphfunc, arg));
2264 	else
2265 		return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2266 }
2267 
2268 static int
2269 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2270 {
2271 	struct ccb_dev_match *cdm;
2272 	dev_match_ret retval;
2273 
2274 	cdm = (struct ccb_dev_match *)arg;
2275 
2276 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2277 
2278 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2279 		cdm->status = CAM_DEV_MATCH_ERROR;
2280 		return(0);
2281 	}
2282 
2283 	/*
2284 	 * If the copy flag is set, copy this peripheral out.
2285 	 */
2286 	if (retval & DM_RET_COPY) {
2287 		int spaceleft, j;
2288 
2289 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2290 			sizeof(struct dev_match_result));
2291 
2292 		/*
2293 		 * If we don't have enough space to put in another
2294 		 * match result, save our position and tell the
2295 		 * user there are more devices to check.
2296 		 */
2297 		if (spaceleft < sizeof(struct dev_match_result)) {
2298 			struct periph_driver **pdrv;
2299 
2300 			pdrv = NULL;
2301 			bzero(&cdm->pos, sizeof(cdm->pos));
2302 			cdm->pos.position_type =
2303 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2304 				CAM_DEV_POS_PERIPH;
2305 
2306 			/*
2307 			 * This may look a bit nonsensical, but it is
2308 			 * actually quite logical.  There are very few
2309 			 * peripheral drivers, and bloating every peripheral
2310 			 * structure with a pointer back to its parent
2311 			 * peripheral driver linker set entry would cost
2312 			 * more in the long run than doing this quick lookup.
2313 			 */
2314 			SET_FOREACH(pdrv, periphdriver_set) {
2315 				if (strcmp((*pdrv)->driver_name,
2316 				    periph->periph_name) == 0)
2317 					break;
2318 			}
2319 
2320 			if (*pdrv == NULL) {
2321 				cdm->status = CAM_DEV_MATCH_ERROR;
2322 				return(0);
2323 			}
2324 
2325 			cdm->pos.cookie.pdrv = pdrv;
2326 			/*
2327 			 * The periph generation slot does double duty, as
2328 			 * does the periph pointer slot.  They are used for
2329 			 * both edt and pdrv lookups and positioning.
2330 			 */
2331 			cdm->pos.cookie.periph = periph;
2332 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2333 				(*pdrv)->generation;
2334 			cdm->status = CAM_DEV_MATCH_MORE;
2335 			return(0);
2336 		}
2337 
2338 		j = cdm->num_matches;
2339 		cdm->num_matches++;
2340 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2341 		cdm->matches[j].result.periph_result.path_id =
2342 			periph->path->bus->path_id;
2343 
2344 		/*
2345 		 * The transport layer peripheral doesn't have a target or
2346 		 * lun.
2347 		 */
2348 		if (periph->path->target)
2349 			cdm->matches[j].result.periph_result.target_id =
2350 				periph->path->target->target_id;
2351 		else
2352 			cdm->matches[j].result.periph_result.target_id = -1;
2353 
2354 		if (periph->path->device)
2355 			cdm->matches[j].result.periph_result.target_lun =
2356 				periph->path->device->lun_id;
2357 		else
2358 			cdm->matches[j].result.periph_result.target_lun = -1;
2359 
2360 		cdm->matches[j].result.periph_result.unit_number =
2361 			periph->unit_number;
2362 		strncpy(cdm->matches[j].result.periph_result.periph_name,
2363 			periph->periph_name, DEV_IDLEN);
2364 	}
2365 
2366 	return(1);
2367 }
2368 
2369 static int
2370 xptperiphlistmatch(struct ccb_dev_match *cdm)
2371 {
2372 	int ret;
2373 
2374 	cdm->num_matches = 0;
2375 
2376 	/*
2377 	 * At this point in the edt traversal function, we check the bus
2378 	 * list generation to make sure that no busses have been added or
2379 	 * removed since the user last sent a XPT_DEV_MATCH ccb through.
2380 	 * For the peripheral driver list traversal function, however, we
2381 	 * don't have to worry about new peripheral driver types coming or
2382 	 * going; they're in a linker set, and therefore can't change
2383 	 * without a recompile.
2384 	 */
2385 
2386 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2387 	 && (cdm->pos.cookie.pdrv != NULL))
2388 		ret = xptpdrvtraverse(
2389 				(struct periph_driver **)cdm->pos.cookie.pdrv,
2390 				xptplistpdrvfunc, cdm);
2391 	else
2392 		ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2393 
2394 	/*
2395 	 * If we get back 0, that means that we had to stop before fully
2396 	 * traversing the peripheral driver tree.  It also means that one of
2397 	 * the subroutines has set the status field to the proper value.  If
2398 	 * we get back 1, we've fully traversed the peripheral driver lists
2399 	 * and copied out any matching entries.
2400 	 */
2401 	if (ret == 1)
2402 		cdm->status = CAM_DEV_MATCH_LAST;
2403 
2404 	return(ret);
2405 }
2406 
2407 static int
2408 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2409 {
2410 	struct cam_eb *bus, *next_bus;
2411 	int retval;
2412 
2413 	retval = 1;
2414 
2415 	for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2416 	     bus != NULL;
2417 	     bus = next_bus) {
2418 		next_bus = TAILQ_NEXT(bus, links);
2419 
2420 		retval = tr_func(bus, arg);
2421 		if (retval == 0)
2422 			return(retval);
2423 	}
2424 
2425 	return(retval);
2426 }
2427 
2428 static int
2429 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2430 		  xpt_targetfunc_t *tr_func, void *arg)
2431 {
2432 	struct cam_et *target, *next_target;
2433 	int retval;
2434 
2435 	retval = 1;
2436 	for (target = (start_target ? start_target :
2437 		       TAILQ_FIRST(&bus->et_entries));
2438 	     target != NULL; target = next_target) {
2439 
2440 		next_target = TAILQ_NEXT(target, links);
2441 
2442 		retval = tr_func(target, arg);
2443 
2444 		if (retval == 0)
2445 			return(retval);
2446 	}
2447 
2448 	return(retval);
2449 }
2450 
2451 static int
2452 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2453 		  xpt_devicefunc_t *tr_func, void *arg)
2454 {
2455 	struct cam_ed *device, *next_device;
2456 	int retval;
2457 
2458 	retval = 1;
2459 	for (device = (start_device ? start_device :
2460 		       TAILQ_FIRST(&target->ed_entries));
2461 	     device != NULL;
2462 	     device = next_device) {
2463 
2464 		next_device = TAILQ_NEXT(device, links);
2465 
2466 		retval = tr_func(device, arg);
2467 
2468 		if (retval == 0)
2469 			return(retval);
2470 	}
2471 
2472 	return(retval);
2473 }
2474 
2475 static int
2476 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2477 		  xpt_periphfunc_t *tr_func, void *arg)
2478 {
2479 	struct cam_periph *periph, *next_periph;
2480 	int retval;
2481 
2482 	retval = 1;
2483 
2484 	for (periph = (start_periph ? start_periph :
2485 		       SLIST_FIRST(&device->periphs));
2486 	     periph != NULL;
2487 	     periph = next_periph) {
2488 
2489 		next_periph = SLIST_NEXT(periph, periph_links);
2490 
2491 		retval = tr_func(periph, arg);
2492 		if (retval == 0)
2493 			return(retval);
2494 	}
2495 
2496 	return(retval);
2497 }
2498 
2499 static int
2500 xptpdrvtraverse(struct periph_driver **start_pdrv,
2501 		xpt_pdrvfunc_t *tr_func, void *arg)
2502 {
2503 	struct periph_driver **pdrv;
2504 	int retval;
2505 
2506 	retval = 1;
2507 
2508 	/*
2509 	 * We don't traverse the peripheral driver list like we do the
2510 	 * other lists, because it is a linker set, and therefore cannot be
2511 	 * changed during runtime.  If the peripheral driver list is ever
2512 	 * re-done to be something other than a linker set (i.e. it can
2513 	 * change while the system is running), the list traversal should
2514 	 * be modified to work like the other traversal functions.
2515 	 */
2516 	SET_FOREACH(pdrv, periphdriver_set) {
2517 		if (start_pdrv == NULL || start_pdrv == pdrv) {
2518 			retval = tr_func(pdrv, arg);
2519 			if (retval == 0)
2520 				return(retval);
2521 			start_pdrv = NULL; /* traverse remainder */
2522 		}
2523 	}
2524 	return(retval);
2525 }
2526 
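/*
 * Illustrative sketch (not part of the driver): what an entry in
 * periphdriver_set looks like.  A peripheral driver declares a
 * struct periph_driver and registers it in the linker set, which is why
 * the list can only change with a recompile.  The "xx" driver below is
 * hypothetical; drivers of this era register with DATA_SET().
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *	DATA_SET(periphdriver_set, xxdriver);
 */
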
2527 static int
2528 xptpdperiphtraverse(struct periph_driver **pdrv,
2529 		    struct cam_periph *start_periph,
2530 		    xpt_periphfunc_t *tr_func, void *arg)
2531 {
2532 	struct cam_periph *periph, *next_periph;
2533 	int retval;
2534 
2535 	retval = 1;
2536 
2537 	for (periph = (start_periph ? start_periph :
2538 	     TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2539 	     periph = next_periph) {
2540 
2541 		next_periph = TAILQ_NEXT(periph, unit_links);
2542 
2543 		retval = tr_func(periph, arg);
2544 		if (retval == 0)
2545 			return(retval);
2546 	}
2547 	return(retval);
2548 }
2549 
2550 static int
2551 xptdefbusfunc(struct cam_eb *bus, void *arg)
2552 {
2553 	struct xpt_traverse_config *tr_config;
2554 
2555 	tr_config = (struct xpt_traverse_config *)arg;
2556 
2557 	if (tr_config->depth == XPT_DEPTH_BUS) {
2558 		xpt_busfunc_t *tr_func;
2559 
2560 		tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2561 
2562 		return(tr_func(bus, tr_config->tr_arg));
2563 	} else
2564 		return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2565 }
2566 
2567 static int
2568 xptdeftargetfunc(struct cam_et *target, void *arg)
2569 {
2570 	struct xpt_traverse_config *tr_config;
2571 
2572 	tr_config = (struct xpt_traverse_config *)arg;
2573 
2574 	if (tr_config->depth == XPT_DEPTH_TARGET) {
2575 		xpt_targetfunc_t *tr_func;
2576 
2577 		tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2578 
2579 		return(tr_func(target, tr_config->tr_arg));
2580 	} else
2581 		return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2582 }
2583 
2584 static int
2585 xptdefdevicefunc(struct cam_ed *device, void *arg)
2586 {
2587 	struct xpt_traverse_config *tr_config;
2588 
2589 	tr_config = (struct xpt_traverse_config *)arg;
2590 
2591 	if (tr_config->depth == XPT_DEPTH_DEVICE) {
2592 		xpt_devicefunc_t *tr_func;
2593 
2594 		tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2595 
2596 		return(tr_func(device, tr_config->tr_arg));
2597 	} else
2598 		return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2599 }
2600 
2601 static int
2602 xptdefperiphfunc(struct cam_periph *periph, void *arg)
2603 {
2604 	struct xpt_traverse_config *tr_config;
2605 	xpt_periphfunc_t *tr_func;
2606 
2607 	tr_config = (struct xpt_traverse_config *)arg;
2608 
2609 	tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2610 
2611 	/*
2612 	 * Unlike the other default functions, we don't check for depth
2613 	 * here.  The peripheral driver level is the last level in the EDT,
2614 	 * so if we're here, we should execute the function in question.
2615 	 */
2616 	return(tr_func(periph, tr_config->tr_arg));
2617 }
2618 
2619 /*
2620  * Execute the given function for every bus in the EDT.
2621  */
2622 static int
2623 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2624 {
2625 	struct xpt_traverse_config tr_config;
2626 
2627 	tr_config.depth = XPT_DEPTH_BUS;
2628 	tr_config.tr_func = tr_func;
2629 	tr_config.tr_arg = arg;
2630 
2631 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2632 }
2633 
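/*
 * Illustrative sketch (not part of the driver): a caller-supplied bus
 * function.  Traversal callbacks return 1 to keep going and 0 to stop
 * early, so a simple census of registered busses looks like this (the
 * names are hypothetical).
 *
 *	static int
 *	xxcountbus(struct cam_eb *bus, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);
 *	}
 *
 *	int nbusses = 0;
 *	xpt_for_all_busses(xxcountbus, &nbusses);
 */
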
2634 #ifdef notusedyet
2635 /*
2636  * Execute the given function for every target in the EDT.
2637  */
2638 static int
2639 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg)
2640 {
2641 	struct xpt_traverse_config tr_config;
2642 
2643 	tr_config.depth = XPT_DEPTH_TARGET;
2644 	tr_config.tr_func = tr_func;
2645 	tr_config.tr_arg = arg;
2646 
2647 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2648 }
2649 #endif /* notusedyet */
2650 
2651 /*
2652  * Execute the given function for every device in the EDT.
2653  */
2654 static int
2655 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2656 {
2657 	struct xpt_traverse_config tr_config;
2658 
2659 	tr_config.depth = XPT_DEPTH_DEVICE;
2660 	tr_config.tr_func = tr_func;
2661 	tr_config.tr_arg = arg;
2662 
2663 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2664 }
2665 
2666 #ifdef notusedyet
2667 /*
2668  * Execute the given function for every peripheral in the EDT.
2669  */
2670 static int
2671 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2672 {
2673 	struct xpt_traverse_config tr_config;
2674 
2675 	tr_config.depth = XPT_DEPTH_PERIPH;
2676 	tr_config.tr_func = tr_func;
2677 	tr_config.tr_arg = arg;
2678 
2679 	return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2680 }
2681 #endif /* notusedyet */
2682 
2683 static int
2684 xptsetasyncfunc(struct cam_ed *device, void *arg)
2685 {
2686 	struct cam_path path;
2687 	struct ccb_getdev cgd;
2688 	struct async_node *cur_entry;
2689 
2690 	cur_entry = (struct async_node *)arg;
2691 
2692 	/*
2693 	 * Don't report unconfigured devices (Wildcard devs,
2694 	 * devices only for target mode, device instances
2695 	 * that have been invalidated but are waiting for
2696 	 * their last reference count to be released).
2697 	 */
2698 	if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2699 		return (1);
2700 
2701 	xpt_compile_path(&path,
2702 			 NULL,
2703 			 device->target->bus->path_id,
2704 			 device->target->target_id,
2705 			 device->lun_id);
2706 	xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2707 	cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2708 	xpt_action((union ccb *)&cgd);
2709 	cur_entry->callback(cur_entry->callback_arg,
2710 			    AC_FOUND_DEVICE,
2711 			    &path, &cgd);
2712 	xpt_release_path(&path);
2713 
2714 	return(1);
2715 }
2716 
2717 static int
2718 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2719 {
2720 	struct cam_path path;
2721 	struct ccb_pathinq cpi;
2722 	struct async_node *cur_entry;
2723 
2724 	cur_entry = (struct async_node *)arg;
2725 
2726 	xpt_compile_path(&path, /*periph*/NULL,
2727 			 bus->sim->path_id,
2728 			 CAM_TARGET_WILDCARD,
2729 			 CAM_LUN_WILDCARD);
2730 	xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2731 	cpi.ccb_h.func_code = XPT_PATH_INQ;
2732 	xpt_action((union ccb *)&cpi);
2733 	cur_entry->callback(cur_entry->callback_arg,
2734 			    AC_PATH_REGISTERED,
2735 			    &path, &cpi);
2736 	xpt_release_path(&path);
2737 
2738 	return(1);
2739 }
2740 
2741 void
2742 xpt_action(union ccb *start_ccb)
2743 {
2744 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2745 
2746 	start_ccb->ccb_h.status = CAM_REQ_INPROG;
2747 
2748 	crit_enter();
2749 
2750 	switch (start_ccb->ccb_h.func_code) {
2751 	case XPT_SCSI_IO:
2752 	{
2753 #ifdef CAMDEBUG
2754 		char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2755 		struct cam_path *path;
2756 
2757 		path = start_ccb->ccb_h.path;
2758 #endif
2759 
2760 		/*
2761 		 * For the sake of compatibility with SCSI-1
2762 		 * devices that may not understand the identify
2763 		 * message, we include lun information in the
2764 		 * second byte of all commands.  SCSI-1 specifies
2765 		 * that luns are a 3 bit value and reserves only 3
2766 		 * bits for lun information in the CDB.  Later
2767 		 * revisions of the SCSI spec allow for more than 8
2768 		 * luns, but have deprecated lun information in the
2769 		 * CDB.  So, if the lun won't fit, we must omit it.
2770 		 *
2771 		 * Also be aware that during initial probing for devices,
2772 		 * the inquiry information is unknown but initialized to 0.
2773 		 * This means that this code will be exercised while probing
2774 		 * devices with an ANSI revision greater than 2.
2775 		 */
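		/*
		 * Worked example: for target lun 3 the statement below
		 * sets bits 7-5 of CDB byte 1, i.e.
		 * cdb_bytes[1] |= 3 << 5, which is 0x60.  Luns of 8 or
		 * above simply get no lun bits in the CDB.
		 */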
2776 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2777 		 && start_ccb->ccb_h.target_lun < 8
2778 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2779 
2780 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2781 			    start_ccb->ccb_h.target_lun << 5;
2782 		}
2783 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2784 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2785 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2786 			  	       &path->device->inq_data),
2787 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2788 					  cdb_str, sizeof(cdb_str))));
2789 		/* FALLTHROUGH */
2790 	}
2791 	case XPT_TARGET_IO:
2792 	case XPT_CONT_TARGET_IO:
2793 		start_ccb->csio.sense_resid = 0;
2794 		start_ccb->csio.resid = 0;
2795 		/* FALLTHROUGH */
2796 	case XPT_RESET_DEV:
2797 	case XPT_ENG_EXEC:
2798 	{
2799 		struct cam_path *path;
2800 		int runq;
2801 
2802 		path = start_ccb->ccb_h.path;
2803 
2804 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2805 		if (path->device->qfrozen_cnt == 0)
2806 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2807 		else
2808 			runq = 0;
2809 		if (runq != 0)
2810 			xpt_run_dev_sendq(path->bus);
2811 		break;
2812 	}
2813 	case XPT_SET_TRAN_SETTINGS:
2814 	{
2815 		xpt_set_transfer_settings(&start_ccb->cts,
2816 					  start_ccb->ccb_h.path->device,
2817 					  /*async_update*/FALSE);
2818 		break;
2819 	}
2820 	case XPT_CALC_GEOMETRY:
2821 	{
2822 		struct cam_sim *sim;
2823 
2824 		/* Filter out garbage */
2825 		if (start_ccb->ccg.block_size == 0
2826 		 || start_ccb->ccg.volume_size == 0) {
2827 			start_ccb->ccg.cylinders = 0;
2828 			start_ccb->ccg.heads = 0;
2829 			start_ccb->ccg.secs_per_track = 0;
2830 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2831 			break;
2832 		}
2833 		sim = start_ccb->ccb_h.path->bus->sim;
2834 		(*(sim->sim_action))(sim, start_ccb);
2835 		break;
2836 	}
2837 	case XPT_ABORT:
2838 	{
2839 		union ccb* abort_ccb;
2840 
2841 		abort_ccb = start_ccb->cab.abort_ccb;
2842 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2843 
2844 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2845 				struct cam_ccbq *ccbq;
2846 
2847 				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2848 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2849 				abort_ccb->ccb_h.status =
2850 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2851 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2852 				xpt_done(abort_ccb);
2853 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2854 				break;
2855 			}
2856 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2857 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2858 				/*
2859 				 * We've caught this ccb en route to
2860 				 * the SIM.  Flag it for abort and the
2861 				 * SIM will do so just before starting
2862 				 * real work on the CCB.
2863 				 */
2864 				abort_ccb->ccb_h.status =
2865 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2866 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2867 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2868 				break;
2869 			}
2870 		}
2871 		if (XPT_FC_IS_QUEUED(abort_ccb)
2872 		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2873 			/*
2874 			 * It's already completed but waiting
2875 			 * for our SWI to get to it.
2876 			 */
2877 			start_ccb->ccb_h.status = CAM_UA_ABORT;
2878 			break;
2879 		}
2880 		/*
2881 		 * If we weren't able to take care of the abort request
2882 		 * in the XPT, pass the request down to the SIM for processing.
2883 		 */
2884 		/* FALLTHROUGH */
2885 	}
2886 	case XPT_ACCEPT_TARGET_IO:
2887 	case XPT_EN_LUN:
2888 	case XPT_IMMED_NOTIFY:
2889 	case XPT_NOTIFY_ACK:
2890 	case XPT_GET_TRAN_SETTINGS:
2891 	case XPT_RESET_BUS:
2892 	{
2893 		struct cam_sim *sim;
2894 
2895 		sim = start_ccb->ccb_h.path->bus->sim;
2896 		(*(sim->sim_action))(sim, start_ccb);
2897 		break;
2898 	}
2899 	case XPT_PATH_INQ:
2900 	{
2901 		struct cam_sim *sim;
2902 
2903 		sim = start_ccb->ccb_h.path->bus->sim;
2904 		(*(sim->sim_action))(sim, start_ccb);
2905 		break;
2906 	}
2907 	case XPT_PATH_STATS:
2908 		start_ccb->cpis.last_reset =
2909 			start_ccb->ccb_h.path->bus->last_reset;
2910 		start_ccb->ccb_h.status = CAM_REQ_CMP;
2911 		break;
2912 	case XPT_GDEV_TYPE:
2913 	{
2914 		struct cam_ed *dev;
2915 
2916 		dev = start_ccb->ccb_h.path->device;
2917 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2918 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2919 		} else {
2920 			struct ccb_getdev *cgd;
2921 			struct cam_eb *bus;
2922 			struct cam_et *tar;
2923 
2924 			cgd = &start_ccb->cgd;
2925 			bus = cgd->ccb_h.path->bus;
2926 			tar = cgd->ccb_h.path->target;
2927 			cgd->inq_data = dev->inq_data;
2928 			cgd->ccb_h.status = CAM_REQ_CMP;
2929 			cgd->serial_num_len = dev->serial_num_len;
2930 			if ((dev->serial_num_len > 0)
2931 			 && (dev->serial_num != NULL))
2932 				bcopy(dev->serial_num, cgd->serial_num,
2933 				      dev->serial_num_len);
2934 		}
2935 		break;
2936 	}
2937 	case XPT_GDEV_STATS:
2938 	{
2939 		struct cam_ed *dev;
2940 
2941 		dev = start_ccb->ccb_h.path->device;
2942 		if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
2943 			start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2944 		} else {
2945 			struct ccb_getdevstats *cgds;
2946 			struct cam_eb *bus;
2947 			struct cam_et *tar;
2948 
2949 			cgds = &start_ccb->cgds;
2950 			bus = cgds->ccb_h.path->bus;
2951 			tar = cgds->ccb_h.path->target;
2952 			cgds->dev_openings = dev->ccbq.dev_openings;
2953 			cgds->dev_active = dev->ccbq.dev_active;
2954 			cgds->devq_openings = dev->ccbq.devq_openings;
2955 			cgds->devq_queued = dev->ccbq.queue.entries;
2956 			cgds->held = dev->ccbq.held;
2957 			cgds->last_reset = tar->last_reset;
2958 			cgds->maxtags = dev->quirk->maxtags;
2959 			cgds->mintags = dev->quirk->mintags;
2960 			if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
2961 				cgds->last_reset = bus->last_reset;
2962 			cgds->ccb_h.status = CAM_REQ_CMP;
2963 		}
2964 		break;
2965 	}
2966 	case XPT_GDEVLIST:
2967 	{
2968 		struct cam_periph	*nperiph;
2969 		struct periph_list	*periph_head;
2970 		struct ccb_getdevlist	*cgdl;
2971 		int			i;
2972 		struct cam_ed		*device;
2973 		int			found;
2974 
2975 
2976 		found = 0;
2977 
2978 		/*
2979 		 * Don't want anyone mucking with our data.
2980 		 */
2981 		device = start_ccb->ccb_h.path->device;
2982 		periph_head = &device->periphs;
2983 		cgdl = &start_ccb->cgdl;
2984 
2985 		/*
2986 		 * Check and see if the list has changed since the user
2987 		 * last requested a list member.  If so, tell them that the
2988 		 * list has changed, and therefore they need to start over
2989 		 * from the beginning.
2990 		 */
2991 		if ((cgdl->index != 0) &&
2992 		    (cgdl->generation != device->generation)) {
2993 			cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
2994 			break;
2995 		}
2996 
2997 		/*
2998 		 * Traverse the list of peripherals and attempt to find
2999 		 * the requested peripheral.
3000 		 */
3001 		for (nperiph = SLIST_FIRST(periph_head), i = 0;
3002 		     (nperiph != NULL) && (i <= cgdl->index);
3003 		     nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3004 			if (i == cgdl->index) {
3005 				strncpy(cgdl->periph_name,
3006 					nperiph->periph_name,
3007 					DEV_IDLEN);
3008 				cgdl->unit_number = nperiph->unit_number;
3009 				found = 1;
3010 			}
3011 		}
3012 		if (found == 0) {
3013 			cgdl->status = CAM_GDEVLIST_ERROR;
3014 			break;
3015 		}
3016 
3017 		if (nperiph == NULL)
3018 			cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3019 		else
3020 			cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3021 
3022 		cgdl->index++;
3023 		cgdl->generation = device->generation;
3024 
3025 		cgdl->ccb_h.status = CAM_REQ_CMP;
3026 		break;
3027 	}
3028 	case XPT_DEV_MATCH:
3029 	{
3030 		dev_pos_type position_type;
3031 		struct ccb_dev_match *cdm;
3032 		int ret;
3033 
3034 		cdm = &start_ccb->cdm;
3035 
3036 		/*
3037 		 * Prevent EDT changes while we traverse it.
3038 		 */
3039 		/*
3040 		 * There are two ways of getting at information in the EDT.
3041 		 * The first way is via the primary EDT tree.  It starts
3042 		 * with a list of busses, then a list of targets on a bus,
3043 		 * then devices/luns on a target, and then peripherals on a
3044 		 * device/lun.  The "other" way is by the peripheral driver
3045 		 * lists.  The peripheral driver lists are organized by
3046 		 * peripheral driver.  (obviously)  So it makes sense to
3047 		 * use the peripheral driver list if the user is looking
3048 		 * for something like "da1", or all "da" devices.  If the
3049 		 * user is looking for something on a particular bus/target
3050 		 * or lun, it's generally better to go through the EDT tree.
3051 		 */
3052 
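		/*
		 * Illustrative sketch (not part of the driver): a request
		 * for all units of one peripheral driver naturally takes
		 * the peripheral driver list route, since it supplies only
		 * a periph pattern (the "da" name below is just an example).
		 *
		 *	pat.type = DEV_MATCH_PERIPH;
		 *	pat.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
		 *	strncpy(pat.pattern.periph_pattern.periph_name, "da",
		 *		DEV_IDLEN);
		 *
		 * With no bus or device patterns present, the selection
		 * code below falls through to CAM_DEV_POS_PDRV.
		 */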
3053 		if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3054 			position_type = cdm->pos.position_type;
3055 		else {
3056 			int i;
3057 
3058 			position_type = CAM_DEV_POS_NONE;
3059 
3060 			for (i = 0; i < cdm->num_patterns; i++) {
3061 				if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3062 				 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3063 					position_type = CAM_DEV_POS_EDT;
3064 					break;
3065 				}
3066 			}
3067 
3068 			if (cdm->num_patterns == 0)
3069 				position_type = CAM_DEV_POS_EDT;
3070 			else if (position_type == CAM_DEV_POS_NONE)
3071 				position_type = CAM_DEV_POS_PDRV;
3072 		}
3073 
3074 		switch(position_type & CAM_DEV_POS_TYPEMASK) {
3075 		case CAM_DEV_POS_EDT:
3076 			ret = xptedtmatch(cdm);
3077 			break;
3078 		case CAM_DEV_POS_PDRV:
3079 			ret = xptperiphlistmatch(cdm);
3080 			break;
3081 		default:
3082 			cdm->status = CAM_DEV_MATCH_ERROR;
3083 			break;
3084 		}
3085 
3086 		if (cdm->status == CAM_DEV_MATCH_ERROR)
3087 			start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3088 		else
3089 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3090 
3091 		break;
3092 	}
3093 	case XPT_SASYNC_CB:
3094 	{
3095 		struct ccb_setasync *csa;
3096 		struct async_node *cur_entry;
3097 		struct async_list *async_head;
3098 		u_int32_t added;
3099 
3100 		csa = &start_ccb->csa;
3101 		added = csa->event_enable;
3102 		async_head = &csa->ccb_h.path->device->asyncs;
3103 
3104 		/*
3105 		 * If there is already an entry for us, simply
3106 		 * update it.
3107 		 */
3108 		cur_entry = SLIST_FIRST(async_head);
3109 		while (cur_entry != NULL) {
3110 			if ((cur_entry->callback_arg == csa->callback_arg)
3111 			 && (cur_entry->callback == csa->callback))
3112 				break;
3113 			cur_entry = SLIST_NEXT(cur_entry, links);
3114 		}
3115 
3116 		if (cur_entry != NULL) {
3117 		 	/*
3118 			 * If the request has no flags set,
3119 			 * remove the entry.
3120 			 */
3121 			added &= ~cur_entry->event_enable;
3122 			if (csa->event_enable == 0) {
3123 				SLIST_REMOVE(async_head, cur_entry,
3124 					     async_node, links);
3125 				csa->ccb_h.path->device->refcount--;
3126 				kfree(cur_entry, M_DEVBUF);
3127 			} else {
3128 				cur_entry->event_enable = csa->event_enable;
3129 			}
3130 		} else {
3131 			cur_entry = kmalloc(sizeof(*cur_entry),
3132 					    M_DEVBUF, M_INTWAIT);
3133 			cur_entry->event_enable = csa->event_enable;
3134 			cur_entry->callback_arg = csa->callback_arg;
3135 			cur_entry->callback = csa->callback;
3136 			SLIST_INSERT_HEAD(async_head, cur_entry, links);
3137 			csa->ccb_h.path->device->refcount++;
3138 		}
3139 
3140 		if ((added & AC_FOUND_DEVICE) != 0) {
3141 			/*
3142 			 * Get this peripheral up to date with all
3143 			 * the currently existing devices.
3144 			 */
3145 			xpt_for_all_devices(xptsetasyncfunc, cur_entry);
3146 		}
3147 		if ((added & AC_PATH_REGISTERED) != 0) {
3148 			/*
3149 			 * Get this peripheral up to date with all
3150 			 * the currently existing busses.
3151 			 */
3152 			xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
3153 		}
3154 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3155 		break;
3156 	}
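	/*
	 * Illustrative sketch (not part of the driver): how a peripheral
	 * driver typically registers for async events.  It fills in a
	 * ccb_setasync and pushes it through xpt_action(); clearing
	 * event_enable later with the same callback/callback_arg pair
	 * removes the entry again.  The xxasync callback and the periph
	 * argument are hypothetical.
	 *
	 *	struct ccb_setasync csa;
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	 *	csa.callback = xxasync;
	 *	csa.callback_arg = periph;
	 *	xpt_action((union ccb *)&csa);
	 */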
3157 	case XPT_REL_SIMQ:
3158 	{
3159 		struct ccb_relsim *crs;
3160 		struct cam_ed *dev;
3161 
3162 		crs = &start_ccb->crs;
3163 		dev = crs->ccb_h.path->device;
3164 		if (dev == NULL) {
3165 
3166 			crs->ccb_h.status = CAM_DEV_NOT_THERE;
3167 			break;
3168 		}
3169 
3170 		if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3171 
3172  			if ((dev->inq_data.flags & SID_CmdQue) != 0) {
3173 
3174 				/* Don't ever go below one opening */
3175 				if (crs->openings > 0) {
3176 					xpt_dev_ccbq_resize(crs->ccb_h.path,
3177 							    crs->openings);
3178 
3179 					if (bootverbose) {
3180 						xpt_print_path(crs->ccb_h.path);
3181 						kprintf("tagged openings "
3182 						       "now %d\n",
3183 						       crs->openings);
3184 					}
3185 				}
3186 			}
3187 		}
3188 
3189 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3190 
3191 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3192 
3193 				/*
3194 				 * Just extend the old timeout and decrement
3195 				 * the freeze count so that a single timeout
3196 				 * is sufficient for releasing the queue.
3197 				 */
3198 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3199 				callout_stop(&dev->c_handle);
3200 			} else {
3201 
3202 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3203 			}
3204 
3205 			callout_reset(&dev->c_handle,
3206 				      (crs->release_timeout * hz) / 1000,
3207 				      xpt_release_devq_timeout, dev);
3208 
3209 			dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3210 
3211 		}
3212 
3213 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3214 
3215 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3216 				/*
3217 				 * Decrement the freeze count so that a single
3218 				 * completion is still sufficient to unfreeze
3219 				 * the queue.
3220 				 */
3221 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3222 			} else {
3223 
3224 				dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3225 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3226 			}
3227 		}
3228 
3229 		if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3230 
3231 			if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3232 			 || (dev->ccbq.dev_active == 0)) {
3233 
3234 				start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3235 			} else {
3236 
3237 				dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3238 				start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3239 			}
3240 		}
3241 
3242 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3243 
3244 			xpt_release_devq(crs->ccb_h.path, /*count*/1,
3245 					 /*run_queue*/TRUE);
3246 		}
3247 		start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3248 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3249 		break;
3250 	}
3251 	case XPT_SCAN_BUS:
3252 		xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3253 		break;
3254 	case XPT_SCAN_LUN:
3255 		xpt_scan_lun(start_ccb->ccb_h.path->periph,
3256 			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
3257 			     start_ccb);
3258 		break;
3259 	case XPT_DEBUG: {
3260 #ifdef CAMDEBUG
3261 #ifdef CAM_DEBUG_DELAY
3262 		cam_debug_delay = CAM_DEBUG_DELAY;
3263 #endif
3264 		cam_dflags = start_ccb->cdbg.flags;
3265 		if (cam_dpath != NULL) {
3266 			xpt_free_path(cam_dpath);
3267 			cam_dpath = NULL;
3268 		}
3269 
3270 		if (cam_dflags != CAM_DEBUG_NONE) {
3271 			if (xpt_create_path(&cam_dpath, xpt_periph,
3272 					    start_ccb->ccb_h.path_id,
3273 					    start_ccb->ccb_h.target_id,
3274 					    start_ccb->ccb_h.target_lun) !=
3275 					    CAM_REQ_CMP) {
3276 				start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3277 				cam_dflags = CAM_DEBUG_NONE;
3278 			} else {
3279 				start_ccb->ccb_h.status = CAM_REQ_CMP;
3280 				xpt_print_path(cam_dpath);
3281 				kprintf("debugging flags now %x\n", cam_dflags);
3282 			}
3283 		} else {
3284 			cam_dpath = NULL;
3285 			start_ccb->ccb_h.status = CAM_REQ_CMP;
3286 		}
3287 #else /* !CAMDEBUG */
3288 		start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3289 #endif /* CAMDEBUG */
3290 		break;
3291 	}
3292 	case XPT_NOOP:
3293 		if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3294 			xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3295 		start_ccb->ccb_h.status = CAM_REQ_CMP;
3296 		break;
3297 	default:
3298 	case XPT_SDEV_TYPE:
3299 	case XPT_TERM_IO:
3300 	case XPT_ENG_INQ:
3301 		/* XXX Implement */
3302 		start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3303 		break;
3304 	}
3305 	crit_exit();
3306 }
3307 
3308 void
3309 xpt_polled_action(union ccb *start_ccb)
3310 {
3311 	u_int32_t timeout;
3312 	struct	  cam_sim *sim;
3313 	struct	  cam_devq *devq;
3314 	struct	  cam_ed *dev;
3315 
3316 	timeout = start_ccb->ccb_h.timeout;
3317 	sim = start_ccb->ccb_h.path->bus->sim;
3318 	devq = sim->devq;
3319 	dev = start_ccb->ccb_h.path->device;
3320 
3321 	crit_enter();
3322 
3323 	/*
3324 	 * Steal an opening so that no other queued requests
3325 	 * can get it before us while we simulate interrupts.
3326 	 */
3327 	dev->ccbq.devq_openings--;
3328 	dev->ccbq.dev_openings--;
3329 
3330 	while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3331 	   && (--timeout > 0)) {
3332 		DELAY(1000);
3333 		(*(sim->sim_poll))(sim);
3334 		swi_camnet(NULL, NULL);
3335 		swi_cambio(NULL, NULL);
3336 	}
3337 
3338 	dev->ccbq.devq_openings++;
3339 	dev->ccbq.dev_openings++;
3340 
3341 	if (timeout != 0) {
3342 		xpt_action(start_ccb);
3343 		while(--timeout > 0) {
3344 			(*(sim->sim_poll))(sim);
3345 			swi_camnet(NULL, NULL);
3346 			swi_cambio(NULL, NULL);
3347 			if ((start_ccb->ccb_h.status  & CAM_STATUS_MASK)
3348 			    != CAM_REQ_INPROG)
3349 				break;
3350 			DELAY(1000);
3351 		}
3352 		if (timeout == 0) {
3353 			/*
3354 			 * XXX Is it worth adding a sim_timeout entry
3355 			 * point so we can attempt recovery?  If
3356 			 * this is only used for dumps, I don't think
3357 			 * it is.
3358 			 */
3359 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3360 		}
3361 	} else {
3362 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3363 	}
3364 	crit_exit();
3365 }
3366 
3367 /*
3368  * Schedule a peripheral driver to receive a ccb when its
3369  * target device has space for more transactions.
3370  */
3371 void
3372 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3373 {
3374 	struct cam_ed *device;
3375 	int runq;
3376 
3377 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3378 	device = perph->path->device;
3379 	crit_enter();
3380 	if (periph_is_queued(perph)) {
3381 		/* Simply reorder based on new priority */
3382 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3383 			  ("   change priority to %d\n", new_priority));
3384 		if (new_priority < perph->pinfo.priority) {
3385 			camq_change_priority(&device->drvq,
3386 					     perph->pinfo.index,
3387 					     new_priority);
3388 		}
3389 		runq = 0;
3390 	} else {
3391 		/* New entry on the queue */
3392 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3393 			  ("   added periph to queue\n"));
3394 		perph->pinfo.priority = new_priority;
3395 		perph->pinfo.generation = ++device->drvq.generation;
3396 		camq_insert(&device->drvq, &perph->pinfo);
3397 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3398 	}
3399 	crit_exit();
3400 	if (runq != 0) {
3401 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3402 			  ("   calling xpt_run_devq\n"));
3403 		xpt_run_dev_allocq(perph->path->bus);
3404 	}
3405 }
3406 
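/*
 * Illustrative sketch (not part of the driver): the allocation handshake
 * as seen from a peripheral driver.  A periph that has work queued calls
 * xpt_schedule(); once the device's alloc queue is run (see
 * xpt_run_dev_allocq() below) the transport hands it a CCB by calling its
 * periph_start entry point.  The xx names are hypothetical.
 *
 *	static void
 *	xxstrategy(...)
 *	{
 *		... queue the request internally ...
 *		xpt_schedule(periph, 1);
 *	}
 *
 *	static void
 *	xxstart(struct cam_periph *periph, union ccb *start_ccb)
 *	{
 *		... fill in start_ccb and xpt_action() it ...
 *	}
 */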
3407 
3408 /*
3409  * Schedule a device to run on a given queue.
3410  * If the device was inserted as a new entry on the queue,
3411  * return 1 meaning the device queue should be run. If we
3412  * were already queued, implying someone else has already
3413  * started the queue, return 0 so the caller doesn't attempt
3414  * to run the queue.  Must be run in a critical section.
3415  */
3416 static int
3417 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3418 		 u_int32_t new_priority)
3419 {
3420 	int retval;
3421 	u_int32_t old_priority;
3422 
3423 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3424 
3425 	old_priority = pinfo->priority;
3426 
3427 	/*
3428 	 * Are we already queued?
3429 	 */
3430 	if (pinfo->index != CAM_UNQUEUED_INDEX) {
3431 		/* Simply reorder based on new priority */
3432 		if (new_priority < old_priority) {
3433 			camq_change_priority(queue, pinfo->index,
3434 					     new_priority);
3435 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3436 					("changed priority to %d\n",
3437 					 new_priority));
3438 		}
3439 		retval = 0;
3440 	} else {
3441 		/* New entry on the queue */
3442 		if (new_priority < old_priority)
3443 			pinfo->priority = new_priority;
3444 
3445 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3446 				("Inserting onto queue\n"));
3447 		pinfo->generation = ++queue->generation;
3448 		camq_insert(queue, pinfo);
3449 		retval = 1;
3450 	}
3451 	return (retval);
3452 }
3453 
3454 static void
3455 xpt_run_dev_allocq(struct cam_eb *bus)
3456 {
3457 	struct	cam_devq *devq;
3458 
3459 	if ((devq = bus->sim->devq) == NULL) {
3460 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3461 		return;
3462 	}
3463 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3464 
3465 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3466 			("   qfrozen_cnt == 0x%x, entries == %d, "
3467 			 "openings == %d, active == %d\n",
3468 			 devq->alloc_queue.qfrozen_cnt,
3469 			 devq->alloc_queue.entries,
3470 			 devq->alloc_openings,
3471 			 devq->alloc_active));
3472 
3473 	crit_enter();
3474 	devq->alloc_queue.qfrozen_cnt++;
3475 	while ((devq->alloc_queue.entries > 0)
3476 	    && (devq->alloc_openings > 0)
3477 	    && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3478 		struct	cam_ed_qinfo *qinfo;
3479 		struct	cam_ed *device;
3480 		union	ccb *work_ccb;
3481 		struct	cam_periph *drv;
3482 		struct	camq *drvq;
3483 
3484 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3485 							   CAMQ_HEAD);
3486 		device = qinfo->device;
3487 
3488 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3489 				("running device %p\n", device));
3490 
3491 		drvq = &device->drvq;
3492 
3493 #ifdef CAMDEBUG
3494 		if (drvq->entries <= 0) {
3495 			panic("xpt_run_dev_allocq: "
3496 			      "Device on queue without any work to do");
3497 		}
3498 #endif
3499 		if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3500 			devq->alloc_openings--;
3501 			devq->alloc_active++;
3502 			drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3503 			crit_exit();
3504 			xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3505 				      drv->pinfo.priority);
3506 			CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3507 					("calling periph start\n"));
3508 			drv->periph_start(drv, work_ccb);
3509 		} else {
3510 			/*
3511 			 * Malloc failure in alloc_ccb
3512 			 */
3513 			/*
3514 			 * XXX add us to a list to be run from free_ccb
3515 			 * if we don't have any ccbs active on this
3516 			 * device queue otherwise we may never get run
3517 			 * again.
3518 			 */
3519 			break;
3520 		}
3521 
3522 		/* Raise IPL for possible insertion and test at top of loop */
3523 		crit_enter();
3524 
3525 		if (drvq->entries > 0) {
3526 			/* We have more work.  Attempt to reschedule */
3527 			xpt_schedule_dev_allocq(bus, device);
3528 		}
3529 	}
3530 	devq->alloc_queue.qfrozen_cnt--;
3531 	crit_exit();
3532 }
3533 
3534 static void
3535 xpt_run_dev_sendq(struct cam_eb *bus)
3536 {
3537 	struct	cam_devq *devq;
3538 
3539 	if ((devq = bus->sim->devq) == NULL) {
3540 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3541 		return;
3542 	}
3543 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3544 
3545 	crit_enter();
3546 	devq->send_queue.qfrozen_cnt++;
3547 	while ((devq->send_queue.entries > 0)
3548 	    && (devq->send_openings > 0)) {
3549 		struct	cam_ed_qinfo *qinfo;
3550 		struct	cam_ed *device;
3551 		union ccb *work_ccb;
3552 		struct	cam_sim *sim;
3553 
3554 	    	if (devq->send_queue.qfrozen_cnt > 1) {
3555 			break;
3556 		}
3557 
3558 		qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3559 							   CAMQ_HEAD);
3560 		device = qinfo->device;
3561 
3562 		/*
3563 		 * If the device has been "frozen", don't attempt
3564 		 * to run it.
3565 		 */
3566 		if (device->qfrozen_cnt > 0) {
3567 			continue;
3568 		}
3569 
3570 		CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3571 				("running device %p\n", device));
3572 
3573 		work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3574 		if (work_ccb == NULL) {
3575 			kprintf("device on run queue with no ccbs???\n");
3576 			continue;
3577 		}
3578 
3579 		if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3580 
3581 		 	if (num_highpower <= 0) {
3582 				/*
3583 				 * We got a high power command, but we
3584 				 * don't have any available slots.  Freeze
3585 				 * the device queue until we have a slot
3586 				 * available.
3587 				 */
3588 				device->qfrozen_cnt++;
3589 				STAILQ_INSERT_TAIL(&highpowerq,
3590 						   &work_ccb->ccb_h,
3591 						   xpt_links.stqe);
3592 
3593 				continue;
3594 			} else {
3595 				/*
3596 				 * Consume a high power slot while
3597 				 * this ccb runs.
3598 				 */
3599 				num_highpower--;
3600 			}
3601 		}
3602 		devq->active_dev = device;
3603 		cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3604 
3605 		cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3606 
3607 		devq->send_openings--;
3608 		devq->send_active++;
3609 
3610 		if (device->ccbq.queue.entries > 0)
3611 			xpt_schedule_dev_sendq(bus, device);
3612 
3613 		if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3614 			/*
3615 			 * The client wants to freeze the queue
3616 			 * after this CCB is sent.
3617 			 */
3618 			device->qfrozen_cnt++;
3619 		}
3620 
3621 		/* In Target mode, the peripheral driver knows best... */
3622 		if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3623 			if ((device->inq_flags & SID_CmdQue) != 0
3624 			 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3625 				work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3626 			else
3627 				/*
3628 				 * Clear this in case of a retried CCB that
3629 				 * failed due to a rejected tag.
3630 				 */
3631 				work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3632 		}
3633 
3634 		/*
3635 		 * Device queues can be shared among multiple sim instances
3636 		 * that reside on different busses.  Use the SIM in the queue
3637 		 * CCB's path, rather than the one in the bus that was passed
3638 		 * into this function.
3639 		 */
3640 		sim = work_ccb->ccb_h.path->bus->sim;
3641 		(*(sim->sim_action))(sim, work_ccb);
3642 
3643 		devq->active_dev = NULL;
3644 		/* Raise IPL for possible insertion and test at top of loop */
3645 	}
3646 	devq->send_queue.qfrozen_cnt--;
3647 	crit_exit();
3648 }
3649 
3650 /*
3651  * Merge the request fields and payload of the slave ccb into the master
3652  * ccb, keeping the master ccb's transport-owned header fields constant.
3653  */
3654 void
3655 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3656 {
3657 	/*
3658 	 * Pull fields that are valid for peripheral drivers to set
3659 	 * into the master CCB along with the CCB "payload".
3660 	 */
3661 	master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3662 	master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3663 	master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3664 	master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3665 	bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3666 	      sizeof(union ccb) - sizeof(struct ccb_hdr));
3667 }
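/*
 * Minimal sketch of one way a caller might use xpt_merge_ccb() (hypothetical
 * names, not part of the original source).  A request is staged in a scratch
 * CCB and then merged into a CCB that the XPT has already set up, so that the
 * header fields owned by the transport (path, pinfo, links) survive the copy.
 */
#if 0
	union ccb *scratch_ccb = xpt_alloc_ccb();

	xpt_setup_ccb(&scratch_ccb->ccb_h, periph->path, /*priority*/1);
	scsi_test_unit_ready(&scratch_ccb->csio, /*retries*/1,
			     foodone /* hypothetical callback */,
			     MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE,
			     /*timeout*/60000);
	xpt_merge_ccb(start_ccb, scratch_ccb);	/* start_ccb keeps its header */
	xpt_free_ccb(scratch_ccb);
#endif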
3668 
3669 void
3670 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3671 {
3672 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3673 	callout_init(&ccb_h->timeout_ch);
3674 	ccb_h->pinfo.priority = priority;
3675 	ccb_h->path = path;
3676 	ccb_h->path_id = path->bus->path_id;
3677 	if (path->target)
3678 		ccb_h->target_id = path->target->target_id;
3679 	else
3680 		ccb_h->target_id = CAM_TARGET_WILDCARD;
3681 	if (path->device) {
3682 		ccb_h->target_lun = path->device->lun_id;
3683 		ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3684 	} else {
3685 		ccb_h->target_lun = CAM_TARGET_WILDCARD;
3686 	}
3687 	ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3688 	ccb_h->flags = 0;
3689 }
3690 
3691 /* Path manipulation functions */
3692 cam_status
3693 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3694 		path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3695 {
3696 	struct	   cam_path *path;
3697 	cam_status status;
3698 
3699 	path = kmalloc(sizeof(*path), M_DEVBUF, M_INTWAIT);
3700 	status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3701 	if (status != CAM_REQ_CMP) {
3702 		kfree(path, M_DEVBUF);
3703 		path = NULL;
3704 	}
3705 	*new_path_ptr = path;
3706 	return (status);
3707 }
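/*
 * Illustrative pairing for the allocation above (hypothetical caller, not
 * part of the original source).  xpt_create_path() only touches
 * *new_path_ptr on success, so the status must be checked before the path
 * is used; xpt_free_path() later drops the EDT references and frees the
 * storage allocated here.
 */
#if 0
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, path_id,
			    target_id, lun_id) != CAM_REQ_CMP)
		return;
	xpt_print_path(path);
	kprintf("path created\n");
	xpt_free_path(path);
#endif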
3708 
3709 static cam_status
3710 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3711 		 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3712 {
3713 	struct	     cam_eb *bus;
3714 	struct	     cam_et *target;
3715 	struct	     cam_ed *device;
3716 	cam_status   status;
3717 
3718 	status = CAM_REQ_CMP;	/* Completed without error */
3719 	target = NULL;		/* Wildcarded */
3720 	device = NULL;		/* Wildcarded */
3721 
3722 	/*
3723 	 * We will potentially modify the EDT, so block interrupts
3724 	 * that may attempt to create cam paths.
3725 	 */
3726 	crit_enter();
3727 	bus = xpt_find_bus(path_id);
3728 	if (bus == NULL) {
3729 		status = CAM_PATH_INVALID;
3730 	} else {
3731 		target = xpt_find_target(bus, target_id);
3732 		if (target == NULL) {
3733 			/* Create one */
3734 			struct cam_et *new_target;
3735 
3736 			new_target = xpt_alloc_target(bus, target_id);
3737 			if (new_target == NULL) {
3738 				status = CAM_RESRC_UNAVAIL;
3739 			} else {
3740 				target = new_target;
3741 			}
3742 		}
3743 		if (target != NULL) {
3744 			device = xpt_find_device(target, lun_id);
3745 			if (device == NULL) {
3746 				/* Create one */
3747 				struct cam_ed *new_device;
3748 
3749 				new_device = xpt_alloc_device(bus,
3750 							      target,
3751 							      lun_id);
3752 				if (new_device == NULL) {
3753 					status = CAM_RESRC_UNAVAIL;
3754 				} else {
3755 					device = new_device;
3756 				}
3757 			}
3758 		}
3759 	}
3760 	crit_exit();
3761 
3762 	/*
3763 	 * Only touch the user's data if we are successful.
3764 	 */
3765 	if (status == CAM_REQ_CMP) {
3766 		new_path->periph = perph;
3767 		new_path->bus = bus;
3768 		new_path->target = target;
3769 		new_path->device = device;
3770 		CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
3771 	} else {
3772 		if (device != NULL)
3773 			xpt_release_device(bus, target, device);
3774 		if (target != NULL)
3775 			xpt_release_target(bus, target);
3776 		if (bus != NULL)
3777 			xpt_release_bus(bus);
3778 	}
3779 	return (status);
3780 }
3781 
3782 static void
3783 xpt_release_path(struct cam_path *path)
3784 {
3785 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
3786 	if (path->device != NULL) {
3787 		xpt_release_device(path->bus, path->target, path->device);
3788 		path->device = NULL;
3789 	}
3790 	if (path->target != NULL) {
3791 		xpt_release_target(path->bus, path->target);
3792 		path->target = NULL;
3793 	}
3794 	if (path->bus != NULL) {
3795 		xpt_release_bus(path->bus);
3796 		path->bus = NULL;
3797 	}
3798 }
3799 
3800 void
3801 xpt_free_path(struct cam_path *path)
3802 {
3803 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
3804 	xpt_release_path(path);
3805 	kfree(path, M_DEVBUF);
3806 }
3807 
3808 
3809 /*
3810  * Return -1 for failure, 0 for exact match, 1 for match with wildcards
3811  * in path1, 2 for match with wildcards in path2.
3812  */
3813 int
3814 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
3815 {
3816 	int retval = 0;
3817 
3818 	if (path1->bus != path2->bus) {
3819 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
3820 			retval = 1;
3821 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
3822 			retval = 2;
3823 		else
3824 			return (-1);
3825 	}
3826 	if (path1->target != path2->target) {
3827 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
3828 			if (retval == 0)
3829 				retval = 1;
3830 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
3831 			retval = 2;
3832 		else
3833 			return (-1);
3834 	}
3835 	if (path1->device != path2->device) {
3836 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
3837 			if (retval == 0)
3838 				retval = 1;
3839 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
3840 			retval = 2;
3841 		else
3842 			return (-1);
3843 	}
3844 	return (retval);
3845 }
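/*
 * Sketch of how the return values above are typically interpreted
 * (hypothetical caller, not part of the original source): 0 is an exact
 * match, 1 means the match relied on wildcards in path1, 2 on wildcards
 * in path2, and -1 means the paths refer to different devices.
 */
#if 0
	switch (xpt_path_comp(async_path, periph->path)) {
	case -1:
		break;			/* different device, ignore event */
	case 0:				/* exact match */
	case 1:				/* async_path is wildcarded */
	case 2:				/* our own path is wildcarded */
		foo_handle_async();	/* hypothetical helper */
		break;
	}
#endif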
3846 
3847 void
3848 xpt_print_path(struct cam_path *path)
3849 {
3850 	if (path == NULL)
3851 		kprintf("(nopath): ");
3852 	else {
3853 		if (path->periph != NULL)
3854 			kprintf("(%s%d:", path->periph->periph_name,
3855 			       path->periph->unit_number);
3856 		else
3857 			kprintf("(noperiph:");
3858 
3859 		if (path->bus != NULL)
3860 			kprintf("%s%d:%d:", path->bus->sim->sim_name,
3861 			       path->bus->sim->unit_number,
3862 			       path->bus->sim->bus_id);
3863 		else
3864 			kprintf("nobus:");
3865 
3866 		if (path->target != NULL)
3867 			kprintf("%d:", path->target->target_id);
3868 		else
3869 			kprintf("X:");
3870 
3871 		if (path->device != NULL)
3872 			kprintf("%d): ", path->device->lun_id);
3873 		else
3874 			kprintf("X): ");
3875 	}
3876 }
3877 
3878 path_id_t
3879 xpt_path_path_id(struct cam_path *path)
3880 {
3881 	return(path->bus->path_id);
3882 }
3883 
3884 target_id_t
3885 xpt_path_target_id(struct cam_path *path)
3886 {
3887 	if (path->target != NULL)
3888 		return (path->target->target_id);
3889 	else
3890 		return (CAM_TARGET_WILDCARD);
3891 }
3892 
3893 lun_id_t
3894 xpt_path_lun_id(struct cam_path *path)
3895 {
3896 	if (path->device != NULL)
3897 		return (path->device->lun_id);
3898 	else
3899 		return (CAM_LUN_WILDCARD);
3900 }
3901 
3902 struct cam_sim *
3903 xpt_path_sim(struct cam_path *path)
3904 {
3905 	return (path->bus->sim);
3906 }
3907 
3908 struct cam_periph*
3909 xpt_path_periph(struct cam_path *path)
3910 {
3911 	return (path->periph);
3912 }
3913 
3914 /*
3915  * Release a CAM control block for the caller.  Remit the cost of the structure
3916  * to the device referenced by the path.  If the this device had no 'credits'
3917  * and peripheral drivers have registered async callbacks for this notification
3918  * call them now.
3919  */
3920 void
3921 xpt_release_ccb(union ccb *free_ccb)
3922 {
3923 	struct	 cam_path *path;
3924 	struct	 cam_ed *device;
3925 	struct	 cam_eb *bus;
3926 
3927 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3928 	path = free_ccb->ccb_h.path;
3929 	device = path->device;
3930 	bus = path->bus;
3931 	crit_enter();
3932 	cam_ccbq_release_opening(&device->ccbq);
3933 	if (xpt_ccb_count > xpt_max_ccbs) {
3934 		xpt_free_ccb(free_ccb);
3935 		xpt_ccb_count--;
3936 	} else {
3937 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
3938 	}
3939 	if (bus->sim->devq) {
3940 		bus->sim->devq->alloc_openings++;
3941 		bus->sim->devq->alloc_active--;
3942 	}
3943 	/* XXX Turn this into an inline function - xpt_run_device?? */
3944 	if ((device_is_alloc_queued(device) == 0)
3945 	 && (device->drvq.entries > 0)) {
3946 		xpt_schedule_dev_allocq(bus, device);
3947 	}
3948 	crit_exit();
3949 	if (bus->sim->devq && dev_allocq_is_runnable(bus->sim->devq))
3950 		xpt_run_dev_allocq(bus);
3951 }
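/*
 * Minimal sketch of the usual pairing for xpt_release_ccb() (hypothetical
 * peripheral completion routine, not part of the original source).  The
 * CCB's "credit" is remitted to the device here, which may make the
 * allocation queue runnable again and re-enter periph_start for any work
 * still queued on the device.
 */
#if 0
static void
foodone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* ... interpret done_ccb->ccb_h.status ... */
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, /*priority*/1);	/* if more work is pending */
}
#endif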
3952 
3953 /* Functions accessed by SIM drivers */
3954 
3955 /*
3956  * A sim structure, listing the SIM entry points and instance
3957  * identification info, is passed to xpt_bus_register to hook the SIM
3958  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
3959  * for this new bus and places it in the array of busses and assigns
3960  * it a path_id.  The path_id may be influenced by "hard wiring"
3961  * information specified by the user.  Once interrupt services are
3962  * available, the bus will be probed.
3963  */
3964 int32_t
3965 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
3966 {
3967 	struct cam_eb *new_bus;
3968 	struct cam_eb *old_bus;
3969 	struct ccb_pathinq cpi;
3970 
3971 	sim->bus_id = bus;
3972 	new_bus = kmalloc(sizeof(*new_bus), M_DEVBUF, M_INTWAIT);
3973 
3974 	if (strcmp(sim->sim_name, "xpt") != 0) {
3975 		sim->path_id =
3976 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
3977 	}
3978 
3979 	TAILQ_INIT(&new_bus->et_entries);
3980 	new_bus->path_id = sim->path_id;
3981 	new_bus->sim = sim;
3982 	++sim->refcount;
3983 	timevalclear(&new_bus->last_reset);
3984 	new_bus->flags = 0;
3985 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
3986 	new_bus->generation = 0;
3987 	crit_enter();
3988 	old_bus = TAILQ_FIRST(&xpt_busses);
3989 	while (old_bus != NULL
3990 	    && old_bus->path_id < new_bus->path_id)
3991 		old_bus = TAILQ_NEXT(old_bus, links);
3992 	if (old_bus != NULL)
3993 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
3994 	else
3995 		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
3996 	bus_generation++;
3997 	crit_exit();
3998 
3999 	/* Notify interested parties */
4000 	if (sim->path_id != CAM_XPT_PATH_ID) {
4001 		struct cam_path path;
4002 
4003 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4004 			         CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4005 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4006 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4007 		xpt_action((union ccb *)&cpi);
4008 		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
4009 		xpt_release_path(&path);
4010 	}
4011 	return (CAM_SUCCESS);
4012 }
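/*
 * Hedged sketch of the registration sequence described above, as a SIM
 * attach routine might perform it (hypothetical driver names, not part of
 * the original source; the cam_sim_alloc()/cam_sim_free() argument lists
 * are assumptions and cam_sim.h is authoritative).
 */
#if 0
	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc, unit,
			    /*max_dev_transactions*/1,
			    /*max_tagged_dev_transactions*/32, devq);
	if (sim == NULL)
		return (ENOMEM);
	if (xpt_bus_register(sim, /*bus*/0) != CAM_SUCCESS) {
		cam_sim_free(sim);
		return (ENXIO);
	}
#endif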
4013 
4014 /*
4015  * Deregister a bus.  We must clean out all transactions pending on the bus.
4016  * This routine is typically called prior to cam_sim_free() (e.g. see
4017  * dev/usbmisc/umass/umass.c)
4018  */
4019 int32_t
4020 xpt_bus_deregister(path_id_t pathid)
4021 {
4022 	struct cam_path bus_path;
4023 	cam_status status;
4024 
4025 	status = xpt_compile_path(&bus_path, NULL, pathid,
4026 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4027 	if (status != CAM_REQ_CMP)
4028 		return (status);
4029 
4030 	/*
4031 	 * This should clear out all pending requests and timeouts, but
4032 	 * the ccbs may be queued to a software interrupt.
4033 	 *
4034 	 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4035 	 * and it really ought to.
4036 	 */
4037 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4038 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4039 
4040 	/* make sure all responses have been processed */
4041 	camisr(&cam_netq);
4042 	camisr(&cam_bioq);
4043 
4044 	/* Release the reference count held while registered. */
4045 	xpt_release_bus(bus_path.bus);
4046 	xpt_release_path(&bus_path);
4047 
4048 	return (CAM_REQ_CMP);
4049 }
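/*
 * Sketch of the teardown order implied by the comment above (hypothetical
 * detach routine, not part of the original source): deregister the bus
 * first so pending work is flushed through camisr(), then release the SIM
 * (cam_sim_free() prototype per cam_sim.h).
 */
#if 0
	xpt_bus_deregister(softc->sim->path_id);
	cam_sim_free(softc->sim);
#endif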
4050 
4051 static path_id_t
4052 xptnextfreepathid(void)
4053 {
4054 	struct cam_eb *bus;
4055 	path_id_t pathid;
4056 	char *strval;
4057 
4058 	pathid = 0;
4059 	bus = TAILQ_FIRST(&xpt_busses);
4060 retry:
4061 	/* Find an unoccupied pathid */
4062 	while (bus != NULL
4063 	    && bus->path_id <= pathid) {
4064 		if (bus->path_id == pathid)
4065 			pathid++;
4066 		bus = TAILQ_NEXT(bus, links);
4067 	}
4068 
4069 	/*
4070 	 * Ensure that this pathid is not reserved for
4071 	 * a bus that may be registered in the future.
4072 	 */
4073 	if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4074 		++pathid;
4075 		/* Start the search over */
4076 		goto retry;
4077 	}
4078 	return (pathid);
4079 }
4080 
4081 static path_id_t
4082 xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4083 {
4084 	path_id_t pathid;
4085 	int i, dunit, val;
4086 	char buf[32], *strval;
4087 
4088 	pathid = CAM_XPT_PATH_ID;
4089 	ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4090 	i = -1;
4091 	while ((i = resource_locate(i, "scbus")) != -1) {
4092 		dunit = resource_query_unit(i);
4093 		if (dunit < 0)		/* unwired?! */
4094 			continue;
4095 		if (resource_string_value("scbus", dunit, "at", &strval) != 0)
4096 			continue;
4097 		if (strcmp(buf, strval) != 0)
4098 			continue;
4099 		if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4100 			if (sim_bus == val) {
4101 				pathid = dunit;
4102 				break;
4103 			}
4104 		} else if (sim_bus == 0) {
4105 			/* Unspecified matches bus 0 */
4106 			pathid = dunit;
4107 			break;
4108 		} else {
4109 			kprintf("Ambiguous scbus configuration for %s%d "
4110 			       "bus %d, cannot wire down.  The kernel "
4111 			       "config entry for scbus%d should "
4112 			       "specify a controller bus.\n"
4113 			       "Scbus will be assigned dynamically.\n",
4114 			       sim_name, sim_unit, sim_bus, dunit);
4115 			break;
4116 		}
4117 	}
4118 
4119 	if (pathid == CAM_XPT_PATH_ID)
4120 		pathid = xptnextfreepathid();
4121 	return (pathid);
4122 }
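/*
 * Worked example of the "hard wiring" consulted above (illustrative; the
 * hint names follow from the resource_*_value() lookups in this function).
 * A kernel configuration entry of the form
 *
 *	device scbus1 at ahc0 bus 0
 *
 * makes resource_string_value("scbus", 1, "at", ...) return "ahc0" and
 * resource_int_value("scbus", 1, "bus", ...) return 0, so a SIM registering
 * as ahc0 bus 0 is assigned path_id 1 instead of the next free id from
 * xptnextfreepathid().
 */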
4123 
4124 void
4125 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4126 {
4127 	struct cam_eb *bus;
4128 	struct cam_et *target, *next_target;
4129 	struct cam_ed *device, *next_device;
4130 
4131 	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4132 
4133 	/*
4134 	 * Most async events come from a CAM interrupt context.  In
4135 	 * a few cases, the error recovery code at the peripheral layer,
4136 	 * which may run from our SWI or a process context, may signal
4137 	 * deferred events with a call to xpt_async. Ensure async
4138 	 * notifications are serialized by blocking cam interrupts.
4139 	 */
4140 	crit_enter();
4141 
4142 	bus = path->bus;
4143 
4144 	if (async_code == AC_BUS_RESET) {
4145 		/* Update our notion of when the last reset occurred */
4146 		microuptime(&bus->last_reset);
4147 	}
4148 
4149 	for (target = TAILQ_FIRST(&bus->et_entries);
4150 	     target != NULL;
4151 	     target = next_target) {
4152 
4153 		next_target = TAILQ_NEXT(target, links);
4154 
4155 		if (path->target != target
4156 		 && path->target->target_id != CAM_TARGET_WILDCARD
4157 		 && target->target_id != CAM_TARGET_WILDCARD)
4158 			continue;
4159 
4160 		if (async_code == AC_SENT_BDR) {
4161 			/* Update our notion of when the last reset occurred */
4162 			microuptime(&path->target->last_reset);
4163 		}
4164 
4165 		for (device = TAILQ_FIRST(&target->ed_entries);
4166 		     device != NULL;
4167 		     device = next_device) {
4168 
4169 			next_device = TAILQ_NEXT(device, links);
4170 
4171 			if (path->device != device
4172 			 && path->device->lun_id != CAM_LUN_WILDCARD
4173 			 && device->lun_id != CAM_LUN_WILDCARD)
4174 				continue;
4175 
4176 			xpt_dev_async(async_code, bus, target,
4177 				      device, async_arg);
4178 
4179 			xpt_async_bcast(&device->asyncs, async_code,
4180 					path, async_arg);
4181 		}
4182 	}
4183 
4184 	/*
4185 	 * If this wasn't a fully wildcarded async, tell all
4186 	 * clients that want all async events.
4187 	 */
4188 	if (bus != xpt_periph->path->bus)
4189 		xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4190 				path, async_arg);
4191 	crit_exit();
4192 }
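/*
 * Minimal sketch of how a SIM typically posts one of these events
 * (hypothetical interrupt-handler fragment, not part of the original
 * source).  A temporary wildcard path covers the whole bus for bus-wide
 * events such as AC_BUS_RESET.
 */
#if 0
	struct cam_path *path;

	if (xpt_create_path(&path, /*periph*/NULL, sim->path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP) {
		xpt_async(AC_BUS_RESET, path, NULL);
		xpt_free_path(path);
	}
#endif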
4193 
4194 static void
4195 xpt_async_bcast(struct async_list *async_head,
4196 		u_int32_t async_code,
4197 		struct cam_path *path, void *async_arg)
4198 {
4199 	struct async_node *cur_entry;
4200 
4201 	cur_entry = SLIST_FIRST(async_head);
4202 	while (cur_entry != NULL) {
4203 		struct async_node *next_entry;
4204 		/*
4205 		 * Grab the next list entry before we call the current
4206 		 * entry's callback.  This is because the callback function
4207 		 * can delete its async callback entry.
4208 		 */
4209 		next_entry = SLIST_NEXT(cur_entry, links);
4210 		if ((cur_entry->event_enable & async_code) != 0)
4211 			cur_entry->callback(cur_entry->callback_arg,
4212 					    async_code, path,
4213 					    async_arg);
4214 		cur_entry = next_entry;
4215 	}
4216 }
4217 
4218 /*
4219  * Handle any per-device event notifications that require action by the XPT.
4220  */
4221 static void
4222 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4223 	      struct cam_ed *device, void *async_arg)
4224 {
4225 	cam_status status;
4226 	struct cam_path newpath;
4227 
4228 	/*
4229 	 * We only need to handle events for real devices.
4230 	 */
4231 	if (target->target_id == CAM_TARGET_WILDCARD
4232 	 || device->lun_id == CAM_LUN_WILDCARD)
4233 		return;
4234 
4235 	/*
4236 	 * We need our own path with wildcards expanded to
4237 	 * handle certain types of events.
4238 	 */
4239 	if ((async_code == AC_SENT_BDR)
4240 	 || (async_code == AC_BUS_RESET)
4241 	 || (async_code == AC_INQ_CHANGED))
4242 		status = xpt_compile_path(&newpath, NULL,
4243 					  bus->path_id,
4244 					  target->target_id,
4245 					  device->lun_id);
4246 	else
4247 		status = CAM_REQ_CMP_ERR;
4248 
4249 	if (status == CAM_REQ_CMP) {
4250 
4251 		/*
4252 		 * Allow transfer negotiation to occur in a
4253 		 * tag free environment.
4254 		 * tag-free environment.
4255 		if (async_code == AC_SENT_BDR
4256 		 || async_code == AC_BUS_RESET)
4257 			xpt_toggle_tags(&newpath);
4258 
4259 		if (async_code == AC_INQ_CHANGED) {
4260 			/*
4261 			 * We've sent a start unit command, or
4262 			 * something similar, to a device that
4263 			 * may have caused its inquiry data to
4264 			 * change. So we re-scan the device to
4265 			 * refresh the inquiry data for it.
4266 			 */
4267 			xpt_scan_lun(newpath.periph, &newpath,
4268 				     CAM_EXPECT_INQ_CHANGE, NULL);
4269 		}
4270 		xpt_release_path(&newpath);
4271 	} else if (async_code == AC_LOST_DEVICE) {
4272 		/*
4273 		 * When we lose a device, its sim may be about to detach,
4274 		 * so we have to clear out all pending timeouts and
4275 		 * requests before that happens.  XXX it would be nice if
4276 		 * we could abort the requests pertaining to the device.
4277 		 */
4278 		xpt_release_devq_timeout(device);
4279 		if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4280 			device->flags |= CAM_DEV_UNCONFIGURED;
4281 			xpt_release_device(bus, target, device);
4282 		}
4283 	} else if (async_code == AC_TRANSFER_NEG) {
4284 		struct ccb_trans_settings *settings;
4285 
4286 		settings = (struct ccb_trans_settings *)async_arg;
4287 		xpt_set_transfer_settings(settings, device,
4288 					  /*async_update*/TRUE);
4289 	}
4290 }
4291 
4292 u_int32_t
4293 xpt_freeze_devq(struct cam_path *path, u_int count)
4294 {
4295 	struct ccb_hdr *ccbh;
4296 
4297 	crit_enter();
4298 	path->device->qfrozen_cnt += count;
4299 
4300 	/*
4301 	 * Mark the last CCB in the queue as needing
4302 	 * to be requeued if the driver hasn't
4303 	 * changed its state yet.  This fixes a race
4304 	 * where a ccb is just about to be queued to
4305 	 * a controller driver when its interrupt routine
4306 	 * freezes the queue.  To completely close the
4307 	 * hole, controller drivers must check to see
4308 	 * if a ccb's status is still CAM_REQ_INPROG
4309 	 * under critical section protection just before they queue
4310 	 * the CCB.  See ahc_action/ahc_freeze_devq for
4311 	 * an example.
4312 	 */
4313 	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4314 	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4315 		ccbh->status = CAM_REQUEUE_REQ;
4316 	crit_exit();
4317 	return (path->device->qfrozen_cnt);
4318 }
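/*
 * Sketch of the controller-driver side of the race described above
 * (hypothetical HBA driver fragment, not part of the original source).
 * Just before handing a CCB to the hardware, the driver re-checks the
 * status inside a critical section; if a freeze has already retagged it
 * as CAM_REQUEUE_REQ the CCB is completed instead of being queued.
 */
#if 0
	crit_enter();
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		crit_exit();
		xpt_done(ccb);		/* e.g. now CAM_REQUEUE_REQ */
		return;
	}
	/* ... queue the CCB to the controller ... */
	crit_exit();
#endif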
4319 
4320 u_int32_t
4321 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4322 {
4323 	if (sim->devq == NULL)
4324 		return(count);
4325 	sim->devq->send_queue.qfrozen_cnt += count;
4326 	if (sim->devq->active_dev != NULL) {
4327 		struct ccb_hdr *ccbh;
4328 
4329 		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4330 				  ccb_hdr_tailq);
4331 		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4332 			ccbh->status = CAM_REQUEUE_REQ;
4333 	}
4334 	return (sim->devq->send_queue.qfrozen_cnt);
4335 }
4336 
4337 /*
4338  * WARNING: most devices, especially USB/UMASS, may detach their sim early.
4339  * We ref-count the sim (and the bus only NULLs it out when the bus has been
4340  * freed, which is not the case here), but the device queue is also freed XXX
4341  * and we have to check that here.
4342  *
4343  * XXX fixme: could we simply not null-out the device queue via
4344  * cam_sim_free()?
4345  */
4346 static void
4347 xpt_release_devq_timeout(void *arg)
4348 {
4349 	struct cam_ed *device;
4350 
4351 	device = (struct cam_ed *)arg;
4352 
4353 	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4354 }
4355 
4356 void
4357 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4358 {
4359 	xpt_release_devq_device(path->device, count, run_queue);
4360 }
4361 
4362 static void
4363 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4364 {
4365 	int	rundevq;
4366 
4367 	rundevq = 0;
4368 	crit_enter();
4369 
4370 	if (dev->qfrozen_cnt > 0) {
4371 
4372 		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4373 		dev->qfrozen_cnt -= count;
4374 		if (dev->qfrozen_cnt == 0) {
4375 
4376 			/*
4377 			 * No longer need to wait for a successful
4378 			 * command completion.
4379 			 */
4380 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4381 
4382 			/*
4383 			 * Remove any timeouts that might be scheduled
4384 			 * to release this queue.
4385 			 */
4386 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4387 				callout_stop(&dev->c_handle);
4388 				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4389 			}
4390 
4391 			/*
4392 			 * Now that we are unfrozen schedule the
4393 			 * device so any pending transactions are
4394 			 * run.
4395 			 */
4396 			if ((dev->ccbq.queue.entries > 0)
4397 			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4398 			 && (run_queue != 0)) {
4399 				rundevq = 1;
4400 			}
4401 		}
4402 	}
4403 	if (rundevq != 0)
4404 		xpt_run_dev_sendq(dev->target->bus);
4405 	crit_exit();
4406 }
4407 
4408 void
4409 xpt_release_simq(struct cam_sim *sim, int run_queue)
4410 {
4411 	struct	camq *sendq;
4412 
4413 	if (sim->devq == NULL)
4414 		return;
4415 
4416 	sendq = &(sim->devq->send_queue);
4417 	crit_enter();
4418 
4419 	if (sendq->qfrozen_cnt > 0) {
4420 		sendq->qfrozen_cnt--;
4421 		if (sendq->qfrozen_cnt == 0) {
4422 			struct cam_eb *bus;
4423 
4424 			/*
4425 			 * If there is a timeout scheduled to release this
4426 			 * sim queue, remove it.  The queue frozen count is
4427 			 * already at 0.
4428 			 */
4429 			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4430 				callout_stop(&sim->c_handle);
4431 				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4432 			}
4433 			bus = xpt_find_bus(sim->path_id);
4434 			crit_exit();
4435 
4436 			if (run_queue) {
4437 				/*
4438 				 * Now that we are unfrozen run the send queue.
4439 				 */
4440 				xpt_run_dev_sendq(bus);
4441 			}
4442 			xpt_release_bus(bus);
4443 		} else {
4444 			crit_exit();
4445 		}
4446 	} else {
4447 		crit_exit();
4448 	}
4449 }
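/*
 * Sketch of the usual freeze/release pairing for the SIM queue
 * (hypothetical error-recovery fragment, not part of the original source).
 * Everything queued between the two calls stays parked on the send queue,
 * and run_queue == TRUE restarts it afterwards.
 */
#if 0
	xpt_freeze_simq(sim, /*count*/1);
	/* ... reset the controller, renegotiate transfer settings ... */
	xpt_release_simq(sim, /*run_queue*/TRUE);
#endif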
4450 
4451 void
4452 xpt_done(union ccb *done_ccb)
4453 {
4454 	crit_enter();
4455 
4456 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4457 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4458 		/*
4459 		 * Queue up any of the "non-immediate" type of ccbs
4460 		 * for handling by our SWI handler.
4461 		 */
4462 		switch (done_ccb->ccb_h.path->periph->type) {
4463 		case CAM_PERIPH_BIO:
4464 			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4465 					  sim_links.tqe);
4466 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4467 			setsoftcambio();
4468 			break;
4469 		case CAM_PERIPH_NET:
4470 			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4471 					  sim_links.tqe);
4472 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4473 			setsoftcamnet();
4474 			break;
4475 		}
4476 	}
4477 	crit_exit();
4478 }
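/*
 * Minimal sketch of a SIM completing a request (hypothetical sim_action()
 * or interrupt fragment, not part of the original source).  The SIM fills
 * in the status and hands the CCB back here; the peripheral's callback
 * then runs later from the software interrupt drained by camisr().
 */
#if 0
	ccb->ccb_h.status = CAM_REQ_CMP;
	if (queue_was_frozen)			/* hypothetical flag */
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	xpt_done(ccb);
#endif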
4479 
4480 union ccb *
4481 xpt_alloc_ccb(void)
4482 {
4483 	union ccb *new_ccb;
4484 
4485 	new_ccb = kmalloc(sizeof(*new_ccb), M_DEVBUF, M_INTWAIT);
4486 	return (new_ccb);
4487 }
4488 
4489 void
4490 xpt_free_ccb(union ccb *free_ccb)
4491 {
4492 	kfree(free_ccb, M_DEVBUF);
4493 }
4494 
4495 
4496 
4497 /* Private XPT functions */
4498 
4499 /*
4500  * Get a CAM control block for the caller. Charge the structure to the device
4501  * referenced by the path.  If the this device has no 'credits' then the
4502  * device already has the maximum number of outstanding operations under way
4503  * and we return NULL. If we don't have sufficient resources to allocate more
4504  * ccbs, we also return NULL.
4505  */
4506 static union ccb *
4507 xpt_get_ccb(struct cam_ed *device)
4508 {
4509 	union ccb *new_ccb;
4510 
4511 	crit_enter();
4512 	if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
4513 		new_ccb = kmalloc(sizeof(*new_ccb), M_DEVBUF, M_INTWAIT);
4514 		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
4515 				  xpt_links.sle);
4516 		xpt_ccb_count++;
4517 	}
4518 	cam_ccbq_take_opening(&device->ccbq);
4519 	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
4520 	crit_exit();
4521 	return (new_ccb);
4522 }
4523 
4524 static void
4525 xpt_release_bus(struct cam_eb *bus)
4526 {
4527 
4528 	crit_enter();
4529 	if (bus->refcount == 1) {
4530 		KKASSERT(TAILQ_FIRST(&bus->et_entries) == NULL);
4531 		TAILQ_REMOVE(&xpt_busses, bus, links);
4532 		if (bus->sim) {
4533 			cam_sim_release(bus->sim, 0);
4534 			bus->sim = NULL;
4535 		}
4536 		bus_generation++;
4537 		KKASSERT(bus->refcount == 1);
4538 		kfree(bus, M_DEVBUF);
4539 	} else {
4540 		--bus->refcount;
4541 	}
4542 	crit_exit();
4543 }
4544 
4545 static struct cam_et *
4546 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4547 {
4548 	struct cam_et *target;
4549 	struct cam_et *cur_target;
4550 
4551 	target = kmalloc(sizeof(*target), M_DEVBUF, M_INTWAIT);
4552 
4553 	TAILQ_INIT(&target->ed_entries);
4554 	target->bus = bus;
4555 	target->target_id = target_id;
4556 	target->refcount = 1;
4557 	target->generation = 0;
4558 	timevalclear(&target->last_reset);
4559 	/*
4560 	 * Hold a reference to our parent bus so it
4561 	 * will not go away before we do.
4562 	 */
4563 	bus->refcount++;
4564 
4565 	/* Insertion sort into our bus's target list */
4566 	cur_target = TAILQ_FIRST(&bus->et_entries);
4567 	while (cur_target != NULL && cur_target->target_id < target_id)
4568 		cur_target = TAILQ_NEXT(cur_target, links);
4569 
4570 	if (cur_target != NULL) {
4571 		TAILQ_INSERT_BEFORE(cur_target, target, links);
4572 	} else {
4573 		TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4574 	}
4575 	bus->generation++;
4576 	return (target);
4577 }
4578 
4579 static void
4580 xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4581 {
4582 	crit_enter();
4583 	if (target->refcount == 1) {
4584 		KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
4585 		TAILQ_REMOVE(&bus->et_entries, target, links);
4586 		bus->generation++;
4587 		xpt_release_bus(bus);
4588 		KKASSERT(target->refcount == 1);
4589 		kfree(target, M_DEVBUF);
4590 	} else {
4591 		--target->refcount;
4592 	}
4593 	crit_exit();
4594 }
4595 
4596 static struct cam_ed *
4597 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
4598 {
4599 	struct	   cam_ed *device;
4600 	struct	   cam_devq *devq;
4601 	cam_status status;
4602 
4603 	/* Make space for us in the device queue on our bus */
4604 	if (bus->sim->devq == NULL)
4605 		return(NULL);
4606 	devq = bus->sim->devq;
4607 	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
4608 
4609 	if (status != CAM_REQ_CMP) {
4610 		device = NULL;
4611 	} else {
4612 		device = kmalloc(sizeof(*device), M_DEVBUF, M_INTWAIT);
4613 	}
4614 
4615 	if (device != NULL) {
4616 		struct cam_ed *cur_device;
4617 
4618 		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
4619 		device->alloc_ccb_entry.device = device;
4620 		cam_init_pinfo(&device->send_ccb_entry.pinfo);
4621 		device->send_ccb_entry.device = device;
4622 		device->target = target;
4623 		device->lun_id = lun_id;
4624 		/* Initialize our queues */
4625 		if (camq_init(&device->drvq, 0) != 0) {
4626 			kfree(device, M_DEVBUF);
4627 			return (NULL);
4628 		}
4629 		if (cam_ccbq_init(&device->ccbq,
4630 				  bus->sim->max_dev_openings) != 0) {
4631 			camq_fini(&device->drvq);
4632 			kfree(device, M_DEVBUF);
4633 			return (NULL);
4634 		}
4635 		SLIST_INIT(&device->asyncs);
4636 		SLIST_INIT(&device->periphs);
4637 		device->generation = 0;
4638 		device->owner = NULL;
4639 		/*
4640 		 * Take the default quirk entry until we have inquiry
4641 		 * data and can determine a better quirk to use.
4642 		 */
4643 		device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
4644 		bzero(&device->inq_data, sizeof(device->inq_data));
4645 		device->inq_flags = 0;
4646 		device->queue_flags = 0;
4647 		device->serial_num = NULL;
4648 		device->serial_num_len = 0;
4649 		device->qfrozen_cnt = 0;
4650 		device->flags = CAM_DEV_UNCONFIGURED;
4651 		device->tag_delay_count = 0;
4652 		device->refcount = 1;
4653 		callout_init(&device->c_handle);
4654 
4655 		/*
4656 		 * Hold a reference to our parent target so it
4657 		 * will not go away before we do.
4658 		 */
4659 		target->refcount++;
4660 
4661 		/*
4662 		 * XXX should be limited by number of CCBs this bus can
4663 		 * do.
4664 		 */
4665 		xpt_max_ccbs += device->ccbq.devq_openings;
4666 		/* Insertion sort into our target's device list */
4667 		cur_device = TAILQ_FIRST(&target->ed_entries);
4668 		while (cur_device != NULL && cur_device->lun_id < lun_id)
4669 			cur_device = TAILQ_NEXT(cur_device, links);
4670 		if (cur_device != NULL) {
4671 			TAILQ_INSERT_BEFORE(cur_device, device, links);
4672 		} else {
4673 			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
4674 		}
4675 		target->generation++;
4676 	}
4677 	return (device);
4678 }
4679 
4680 static void
4681 xpt_reference_device(struct cam_ed *device)
4682 {
4683 	++device->refcount;
4684 }
4685 
4686 static void
4687 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
4688 		   struct cam_ed *device)
4689 {
4690 	struct cam_devq *devq;
4691 
4692 	crit_enter();
4693 	if (device->refcount == 1) {
4694 		KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
4695 
4696 		if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
4697 		 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
4698 			panic("Removing device while still queued for ccbs");
4699 
4700 		if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4701 			device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4702 			callout_stop(&device->c_handle);
4703 		}
4704 
4705 		TAILQ_REMOVE(&target->ed_entries, device, links);
4706 		target->generation++;
4707 		xpt_max_ccbs -= device->ccbq.devq_openings;
4708 		/* Release our slot in the devq */
4709 		devq = bus->sim->devq;
4710 		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
4711 		xpt_release_target(bus, target);
4712 		KKASSERT(device->refcount == 1);
4713 		kfree(device, M_DEVBUF);
4714 	} else {
4715 		--device->refcount;
4716 	}
4717 	crit_exit();
4718 }
4719 
4720 static u_int32_t
4721 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
4722 {
4723 	int	diff;
4724 	int	result;
4725 	struct	cam_ed *dev;
4726 
4727 	dev = path->device;
4728 
4729 	crit_enter();
4730 
4731 	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
4732 	result = cam_ccbq_resize(&dev->ccbq, newopenings);
4733 	if (result == CAM_REQ_CMP && (diff < 0)) {
4734 		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
4735 	}
4736 	/* Adjust the global limit */
4737 	xpt_max_ccbs += diff;
4738 	crit_exit();
4739 	return (result);
4740 }
4741 
4742 static struct cam_eb *
4743 xpt_find_bus(path_id_t path_id)
4744 {
4745 	struct cam_eb *bus;
4746 
4747 	for (bus = TAILQ_FIRST(&xpt_busses);
4748 	     bus != NULL;
4749 	     bus = TAILQ_NEXT(bus, links)) {
4750 		if (bus->path_id == path_id) {
4751 			bus->refcount++;
4752 			break;
4753 		}
4754 	}
4755 	return (bus);
4756 }
4757 
4758 static struct cam_et *
4759 xpt_find_target(struct cam_eb *bus, target_id_t	target_id)
4760 {
4761 	struct cam_et *target;
4762 
4763 	for (target = TAILQ_FIRST(&bus->et_entries);
4764 	     target != NULL;
4765 	     target = TAILQ_NEXT(target, links)) {
4766 		if (target->target_id == target_id) {
4767 			target->refcount++;
4768 			break;
4769 		}
4770 	}
4771 	return (target);
4772 }
4773 
4774 static struct cam_ed *
4775 xpt_find_device(struct cam_et *target, lun_id_t lun_id)
4776 {
4777 	struct cam_ed *device;
4778 
4779 	for (device = TAILQ_FIRST(&target->ed_entries);
4780 	     device != NULL;
4781 	     device = TAILQ_NEXT(device, links)) {
4782 		if (device->lun_id == lun_id) {
4783 			device->refcount++;
4784 			break;
4785 		}
4786 	}
4787 	return (device);
4788 }
4789 
4790 typedef struct {
4791 	union	ccb *request_ccb;
4792 	struct 	ccb_pathinq *cpi;
4793 	int	pending_count;
4794 } xpt_scan_bus_info;
4795 
4796 /*
4797  * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
4798  * As the scan progresses, xpt_scan_bus is used as the
4799  * completion callback function.
4800  */
4801 static void
4802 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
4803 {
4804 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
4805 		  ("xpt_scan_bus\n"));
4806 	switch (request_ccb->ccb_h.func_code) {
4807 	case XPT_SCAN_BUS:
4808 	{
4809 		xpt_scan_bus_info *scan_info;
4810 		union	ccb *work_ccb;
4811 		struct	cam_path *path;
4812 		u_int	i;
4813 		u_int	max_target;
4814 		u_int	initiator_id;
4815 
4816 		/* Find out the characteristics of the bus */
4817 		work_ccb = xpt_alloc_ccb();
4818 		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
4819 			      request_ccb->ccb_h.pinfo.priority);
4820 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
4821 		xpt_action(work_ccb);
4822 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
4823 			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
4824 			xpt_free_ccb(work_ccb);
4825 			xpt_done(request_ccb);
4826 			return;
4827 		}
4828 
4829 		if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
4830 			/*
4831 			 * Can't scan the bus on an adapter that
4832 			 * cannot perform the initiator role.
4833 			 */
4834 			request_ccb->ccb_h.status = CAM_REQ_CMP;
4835 			xpt_free_ccb(work_ccb);
4836 			xpt_done(request_ccb);
4837 			return;
4838 		}
4839 
4840 		/* Save some state for use while we probe for devices */
4841 		scan_info = (xpt_scan_bus_info *)
4842 		    kmalloc(sizeof(xpt_scan_bus_info), M_TEMP, M_INTWAIT);
4843 		scan_info->request_ccb = request_ccb;
4844 		scan_info->cpi = &work_ccb->cpi;
4845 
4846 		/* Cache on our stack so we can work asynchronously */
4847 		max_target = scan_info->cpi->max_target;
4848 		initiator_id = scan_info->cpi->initiator_id;
4849 
4850 		/*
4851 		 * Don't count the initiator if the
4852 		 * initiator is addressable.
4853 		 */
4854 		scan_info->pending_count = max_target + 1;
4855 		if (initiator_id <= max_target)
4856 			scan_info->pending_count--;
4857 
4858 		for (i = 0; i <= max_target; i++) {
4859 			cam_status status;
4860 		 	if (i == initiator_id)
4861 				continue;
4862 
4863 			status = xpt_create_path(&path, xpt_periph,
4864 						 request_ccb->ccb_h.path_id,
4865 						 i, 0);
4866 			if (status != CAM_REQ_CMP) {
4867 				kprintf("xpt_scan_bus: xpt_create_path failed"
4868 				       " with status %#x, bus scan halted\n",
4869 				       status);
4870 				break;
4871 			}
4872 			work_ccb = xpt_alloc_ccb();
4873 			xpt_setup_ccb(&work_ccb->ccb_h, path,
4874 				      request_ccb->ccb_h.pinfo.priority);
4875 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4876 			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4877 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
4878 			work_ccb->crcn.flags = request_ccb->crcn.flags;
4879 #if 0
4880 			kprintf("xpt_scan_bus: probing %d:%d:%d\n",
4881 				request_ccb->ccb_h.path_id, i, 0);
4882 #endif
4883 			xpt_action(work_ccb);
4884 		}
4885 		break;
4886 	}
4887 	case XPT_SCAN_LUN:
4888 	{
4889 		xpt_scan_bus_info *scan_info;
4890 		path_id_t path_id;
4891 		target_id_t target_id;
4892 		lun_id_t lun_id;
4893 
4894 		/* Reuse the same CCB to query if a device was really found */
4895 		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
4896 		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
4897 			      request_ccb->ccb_h.pinfo.priority);
4898 		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
4899 
4900 		path_id = request_ccb->ccb_h.path_id;
4901 		target_id = request_ccb->ccb_h.target_id;
4902 		lun_id = request_ccb->ccb_h.target_lun;
4903 		xpt_action(request_ccb);
4904 
4905 #if 0
4906 		kprintf("xpt_scan_bus: got back probe from %d:%d:%d\n",
4907 			path_id, target_id, lun_id);
4908 #endif
4909 
4910 		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
4911 			struct cam_ed *device;
4912 			struct cam_et *target;
4913 			int phl;
4914 
4915 			/*
4916 			 * If we already probed lun 0 successfully, or
4917 			 * we have additional configured luns on this
4918 			 * target that might have "gone away", go on to
4919 			 * the next lun.
4920 			 */
4921 			target = request_ccb->ccb_h.path->target;
4922 			/*
4923 			 * We may touch devices that we don't
4924 			 * hold references to, so ensure they
4925 			 * don't disappear out from under us.
4926 			 * The target above is referenced by the
4927 			 * path in the request ccb.
4928 			 */
4929 			phl = 0;
4930 			crit_enter();
4931 			device = TAILQ_FIRST(&target->ed_entries);
4932 			if (device != NULL) {
4933 				phl = device->quirk->quirks & CAM_QUIRK_HILUNS;
4934 				if (device->lun_id == 0)
4935 					device = TAILQ_NEXT(device, links);
4936 			}
4937 			crit_exit();
4938 			if ((lun_id != 0) || (device != NULL)) {
4939 				if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
4940 					lun_id++;
4941 			}
4942 		} else {
4943 			struct cam_ed *device;
4944 
4945 			device = request_ccb->ccb_h.path->device;
4946 
4947 			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
4948 				/* Try the next lun */
4949 				if (lun_id < (CAM_SCSI2_MAXLUN-1) ||
4950 				    (device->quirk->quirks & CAM_QUIRK_HILUNS))
4951 					lun_id++;
4952 			}
4953 		}
4954 
4955 		xpt_free_path(request_ccb->ccb_h.path);
4956 
4957 		/* Check Bounds */
4958 		if ((lun_id == request_ccb->ccb_h.target_lun)
4959 		 || lun_id > scan_info->cpi->max_lun) {
4960 			/* We're done */
4961 
4962 			xpt_free_ccb(request_ccb);
4963 			scan_info->pending_count--;
4964 			if (scan_info->pending_count == 0) {
4965 				xpt_free_ccb((union ccb *)scan_info->cpi);
4966 				request_ccb = scan_info->request_ccb;
4967 				kfree(scan_info, M_TEMP);
4968 				request_ccb->ccb_h.status = CAM_REQ_CMP;
4969 				xpt_done(request_ccb);
4970 			}
4971 		} else {
4972 			/* Try the next device */
4973 			struct cam_path *path;
4974 			cam_status status;
4975 
4976 			path = request_ccb->ccb_h.path;
4977 			status = xpt_create_path(&path, xpt_periph,
4978 						 path_id, target_id, lun_id);
4979 			if (status != CAM_REQ_CMP) {
4980 				kprintf("xpt_scan_bus: xpt_create_path failed "
4981 				       "with status %#x, halting LUN scan\n",
4982 			 	       status);
4983 				xpt_free_ccb(request_ccb);
4984 				scan_info->pending_count--;
4985 				if (scan_info->pending_count == 0) {
4986 					xpt_free_ccb(
4987 						(union ccb *)scan_info->cpi);
4988 					request_ccb = scan_info->request_ccb;
4989 					kfree(scan_info, M_TEMP);
4990 					request_ccb->ccb_h.status = CAM_REQ_CMP;
4991 					xpt_done(request_ccb);
4992 					break;
4993 				}
4994 			}
4995 			xpt_setup_ccb(&request_ccb->ccb_h, path,
4996 				      request_ccb->ccb_h.pinfo.priority);
4997 			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
4998 			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
4999 			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5000 			request_ccb->crcn.flags =
5001 				scan_info->request_ccb->crcn.flags;
5002 #if 0
5003 			xpt_print_path(path);
5004 			kprintf("xpt_scan bus probing\n");
5005 #endif
5006 			xpt_action(request_ccb);
5007 		}
5008 		break;
5009 	}
5010 	default:
5011 		break;
5012 	}
5013 }
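/*
 * Sketch of how a full bus scan is normally kicked off (hypothetical
 * caller, not part of the original source).  An XPT_SCAN_BUS CCB is queued
 * through xpt_action(); this function then re-enters itself as the
 * completion callback for each per-lun XPT_SCAN_LUN probe it issues.
 */
#if 0
	union ccb *scan_ccb = xpt_alloc_ccb();

	xpt_setup_ccb(&scan_ccb->ccb_h, path, /*priority*/5);
	scan_ccb->ccb_h.func_code = XPT_SCAN_BUS;
	scan_ccb->ccb_h.cbfcnp = fooscandone;	/* hypothetical callback */
	scan_ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action(scan_ccb);
#endif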
5014 
5015 typedef enum {
5016 	PROBE_TUR,
5017 	PROBE_INQUIRY,
5018 	PROBE_FULL_INQUIRY,
5019 	PROBE_MODE_SENSE,
5020 	PROBE_SERIAL_NUM,
5021 	PROBE_TUR_FOR_NEGOTIATION
5022 } probe_action;
5023 
5024 typedef enum {
5025 	PROBE_INQUIRY_CKSUM	= 0x01,
5026 	PROBE_SERIAL_CKSUM	= 0x02,
5027 	PROBE_NO_ANNOUNCE	= 0x04
5028 } probe_flags;
5029 
5030 typedef struct {
5031 	TAILQ_HEAD(, ccb_hdr) request_ccbs;
5032 	probe_action	action;
5033 	union ccb	saved_ccb;
5034 	probe_flags	flags;
5035 	MD5_CTX		context;
5036 	u_int8_t	digest[16];
5037 } probe_softc;
5038 
5039 static void
5040 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5041 	     cam_flags flags, union ccb *request_ccb)
5042 {
5043 	struct ccb_pathinq cpi;
5044 	cam_status status;
5045 	struct cam_path *new_path;
5046 	struct cam_periph *old_periph;
5047 
5048 	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5049 		  ("xpt_scan_lun\n"));
5050 
5051 	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5052 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5053 	xpt_action((union ccb *)&cpi);
5054 
5055 	if (cpi.ccb_h.status != CAM_REQ_CMP) {
5056 		if (request_ccb != NULL) {
5057 			request_ccb->ccb_h.status = cpi.ccb_h.status;
5058 			xpt_done(request_ccb);
5059 		}
5060 		return;
5061 	}
5062 
5063 	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5064 		/*
5065 		 * Can't scan the bus on an adapter that
5066 		 * cannot perform the initiator role.
5067 		 */
5068 		if (request_ccb != NULL) {
5069 			request_ccb->ccb_h.status = CAM_REQ_CMP;
5070 			xpt_done(request_ccb);
5071 		}
5072 		return;
5073 	}
5074 
5075 	if (request_ccb == NULL) {
5076 		request_ccb = kmalloc(sizeof(union ccb), M_TEMP, M_INTWAIT);
5077 		new_path = kmalloc(sizeof(*new_path), M_TEMP, M_INTWAIT);
5078 		status = xpt_compile_path(new_path, xpt_periph,
5079 					  path->bus->path_id,
5080 					  path->target->target_id,
5081 					  path->device->lun_id);
5082 
5083 		if (status != CAM_REQ_CMP) {
5084 			xpt_print_path(path);
5085 			kprintf("xpt_scan_lun: can't compile path, can't "
5086 			       "continue\n");
5087 			kfree(request_ccb, M_TEMP);
5088 			kfree(new_path, M_TEMP);
5089 			return;
5090 		}
5091 		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5092 		request_ccb->ccb_h.cbfcnp = xptscandone;
5093 		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5094 		request_ccb->crcn.flags = flags;
5095 	}
5096 
5097 	crit_enter();
5098 	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5099 		probe_softc *softc;
5100 
5101 		softc = (probe_softc *)old_periph->softc;
5102 		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5103 				  periph_links.tqe);
5104 	} else {
5105 		status = cam_periph_alloc(proberegister, NULL, probecleanup,
5106 					  probestart, "probe",
5107 					  CAM_PERIPH_BIO,
5108 					  request_ccb->ccb_h.path, NULL, 0,
5109 					  request_ccb);
5110 
5111 		if (status != CAM_REQ_CMP) {
5112 			xpt_print_path(path);
5113 			kprintf("xpt_scan_lun: cam_periph_alloc returned an "
5114 			       "error, can't continue probe\n");
5115 			request_ccb->ccb_h.status = status;
5116 			xpt_done(request_ccb);
5117 		}
5118 	}
5119 	crit_exit();
5120 }
5121 
5122 static void
5123 xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5124 {
5125 	xpt_release_path(done_ccb->ccb_h.path);
5126 	kfree(done_ccb->ccb_h.path, M_TEMP);
5127 	kfree(done_ccb, M_TEMP);
5128 }
5129 
5130 static cam_status
5131 proberegister(struct cam_periph *periph, void *arg)
5132 {
5133 	union ccb *request_ccb;	/* CCB representing the probe request */
5134 	probe_softc *softc;
5135 
5136 	request_ccb = (union ccb *)arg;
5137 	if (periph == NULL) {
5138 		kprintf("proberegister: periph was NULL!!\n");
5139 		return(CAM_REQ_CMP_ERR);
5140 	}
5141 
5142 	if (request_ccb == NULL) {
5143 		kprintf("proberegister: no probe CCB, "
5144 		       "can't register device\n");
5145 		return(CAM_REQ_CMP_ERR);
5146 	}
5147 
5148 	softc = kmalloc(sizeof(*softc), M_TEMP, M_INTWAIT | M_ZERO);
5149 	TAILQ_INIT(&softc->request_ccbs);
5150 	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5151 			  periph_links.tqe);
5152 	softc->flags = 0;
5153 	periph->softc = softc;
5154 	cam_periph_acquire(periph);
5155 	/*
5156 	 * Ensure we've waited at least a bus settle
5157 	 * delay before attempting to probe the device.
5158 	 * For HBAs that don't do bus resets, this won't make a difference.
5159 	 */
5160 	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5161 				      SCSI_DELAY);
5162 	probeschedule(periph);
5163 	return(CAM_REQ_CMP);
5164 }
5165 
5166 static void
5167 probeschedule(struct cam_periph *periph)
5168 {
5169 	struct ccb_pathinq cpi;
5170 	union ccb *ccb;
5171 	probe_softc *softc;
5172 
5173 	softc = (probe_softc *)periph->softc;
5174 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5175 
5176 	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5177 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5178 	xpt_action((union ccb *)&cpi);
5179 
5180 	/*
5181 	 * If a device has gone away and another device, or the same one,
5182 	 * is back in the same place, it should have a unit attention
5183 	 * condition pending.  It will not report the unit attention in
5184 	 * response to an inquiry, which may leave invalid transfer
5185 	 * negotiations in effect.  The TUR will reveal the unit attention
5186 	 * condition.  Only send the TUR for lun 0, since some devices
5187 	 * will get confused by commands other than inquiry to non-existent
5188  * luns.  If you think a device has gone away, start your scan from
5189  * lun 0.  This will ensure that any bogus transfer settings are
5190 	 * invalidated.
5191 	 *
5192 	 * If we haven't seen the device before and the controller supports
5193 	 * some kind of transfer negotiation, negotiate with the first
5194 	 * sent command if no bus reset was performed at startup.  This
5195 	 * ensures that the device is not confused by transfer negotiation
5196 	 * settings left over by loader or BIOS action.
5197 	 */
5198 	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5199 	 && (ccb->ccb_h.target_lun == 0)) {
5200 		softc->action = PROBE_TUR;
5201 	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5202 	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5203 		proberequestdefaultnegotiation(periph);
5204 		softc->action = PROBE_INQUIRY;
5205 	} else {
5206 		softc->action = PROBE_INQUIRY;
5207 	}
5208 
5209 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5210 		softc->flags |= PROBE_NO_ANNOUNCE;
5211 	else
5212 		softc->flags &= ~PROBE_NO_ANNOUNCE;
5213 
5214 	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5215 }
5216 
5217 static void
5218 probestart(struct cam_periph *periph, union ccb *start_ccb)
5219 {
5220 	/* Probe the device that our peripheral driver points to */
5221 	struct ccb_scsiio *csio;
5222 	probe_softc *softc;
5223 
5224 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5225 
5226 	softc = (probe_softc *)periph->softc;
5227 	csio = &start_ccb->csio;
5228 
5229 	switch (softc->action) {
5230 	case PROBE_TUR:
5231 	case PROBE_TUR_FOR_NEGOTIATION:
5232 	{
5233 		scsi_test_unit_ready(csio,
5234 				     /*retries*/4,
5235 				     probedone,
5236 				     MSG_SIMPLE_Q_TAG,
5237 				     SSD_FULL_SIZE,
5238 				     /*timeout*/60000);
5239 		break;
5240 	}
5241 	case PROBE_INQUIRY:
5242 	case PROBE_FULL_INQUIRY:
5243 	{
5244 		u_int inquiry_len;
5245 		struct scsi_inquiry_data *inq_buf;
5246 
5247 		inq_buf = &periph->path->device->inq_data;
5248 		/*
5249 		 * If the device is currently configured, we calculate an
5250 		 * MD5 checksum of the inquiry data, and if the serial number
5251 		 * length is greater than 0, add the serial number data
5252 		 * into the checksum as well.  Once the inquiry and the
5253 		 * serial number check finish, we attempt to figure out
5254 		 * whether we still have the same device.
5255 		 */
5256 		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5257 
5258 			MD5Init(&softc->context);
5259 			MD5Update(&softc->context, (unsigned char *)inq_buf,
5260 				  sizeof(struct scsi_inquiry_data));
5261 			softc->flags |= PROBE_INQUIRY_CKSUM;
5262 			if (periph->path->device->serial_num_len > 0) {
5263 				MD5Update(&softc->context,
5264 					  periph->path->device->serial_num,
5265 					  periph->path->device->serial_num_len);
5266 				softc->flags |= PROBE_SERIAL_CKSUM;
5267 			}
5268 			MD5Final(softc->digest, &softc->context);
5269 		}
5270 
5271 		if (softc->action == PROBE_INQUIRY)
5272 			inquiry_len = SHORT_INQUIRY_LENGTH;
5273 		else
5274 			inquiry_len = inq_buf->additional_length + 5;
5275 
5276 		scsi_inquiry(csio,
5277 			     /*retries*/4,
5278 			     probedone,
5279 			     MSG_SIMPLE_Q_TAG,
5280 			     (u_int8_t *)inq_buf,
5281 			     inquiry_len,
5282 			     /*evpd*/FALSE,
5283 			     /*page_code*/0,
5284 			     SSD_MIN_SIZE,
5285 			     /*timeout*/60 * 1000);
5286 		break;
5287 	}
5288 	case PROBE_MODE_SENSE:
5289 	{
5290 		void  *mode_buf;
5291 		int    mode_buf_len;
5292 
5293 		mode_buf_len = sizeof(struct scsi_mode_header_6)
5294 			     + sizeof(struct scsi_mode_blk_desc)
5295 			     + sizeof(struct scsi_control_page);
5296 		mode_buf = kmalloc(mode_buf_len, M_TEMP, M_INTWAIT);
5297 		scsi_mode_sense(csio,
5298 				/*retries*/4,
5299 				probedone,
5300 				MSG_SIMPLE_Q_TAG,
5301 				/*dbd*/FALSE,
5302 				SMS_PAGE_CTRL_CURRENT,
5303 				SMS_CONTROL_MODE_PAGE,
5304 				mode_buf,
5305 				mode_buf_len,
5306 				SSD_FULL_SIZE,
5307 				/*timeout*/60000);
5308 		break;
5309 	}
5310 	case PROBE_SERIAL_NUM:
5311 	{
5312 		struct scsi_vpd_unit_serial_number *serial_buf;
5313 		struct cam_ed* device;
5314 
5315 		serial_buf = NULL;
5316 		device = periph->path->device;
5317 		device->serial_num = NULL;
5318 		device->serial_num_len = 0;
5319 
5320 		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5321 			serial_buf = kmalloc(sizeof(*serial_buf), M_TEMP,
5322 					    M_INTWAIT | M_ZERO);
5323 			scsi_inquiry(csio,
5324 				     /*retries*/4,
5325 				     probedone,
5326 				     MSG_SIMPLE_Q_TAG,
5327 				     (u_int8_t *)serial_buf,
5328 				     sizeof(*serial_buf),
5329 				     /*evpd*/TRUE,
5330 				     SVPD_UNIT_SERIAL_NUMBER,
5331 				     SSD_MIN_SIZE,
5332 				     /*timeout*/60 * 1000);
5333 			break;
5334 		}
5335 		/*
5336 		 * We'll have to do without; let our probedone
5337 		 * routine finish up for us.
5338 		 */
5339 		start_ccb->csio.data_ptr = NULL;
5340 		probedone(periph, start_ccb);
5341 		return;
5342 	}
5343 	}
5344 	xpt_action(start_ccb);
5345 }
5346 
5347 static void
5348 proberequestdefaultnegotiation(struct cam_periph *periph)
5349 {
5350 	struct ccb_trans_settings cts;
5351 
5352 	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5353 	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5354 	cts.flags = CCB_TRANS_USER_SETTINGS;
5355 	xpt_action((union ccb *)&cts);
5356 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5357 	cts.flags &= ~CCB_TRANS_USER_SETTINGS;
5358 	cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
5359 	xpt_action((union ccb *)&cts);
5360 }
5361 
5362 static void
5363 probedone(struct cam_periph *periph, union ccb *done_ccb)
5364 {
5365 	probe_softc *softc;
5366 	struct cam_path *path;
5367 	u_int32_t  priority;
5368 
5369 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5370 
5371 	softc = (probe_softc *)periph->softc;
5372 	path = done_ccb->ccb_h.path;
5373 	priority = done_ccb->ccb_h.pinfo.priority;
5374 
5375 	switch (softc->action) {
5376 	case PROBE_TUR:
5377 	{
5378 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5379 
5380 			if (cam_periph_error(done_ccb, 0,
5381 					     SF_NO_PRINT, NULL) == ERESTART)
5382 				return;
5383 			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5384 				/* Don't wedge the queue */
5385 				xpt_release_devq(done_ccb->ccb_h.path,
5386 						 /*count*/1,
5387 						 /*run_queue*/TRUE);
5388 		}
5389 		softc->action = PROBE_INQUIRY;
5390 		xpt_release_ccb(done_ccb);
5391 		xpt_schedule(periph, priority);
5392 		return;
5393 	}
5394 	case PROBE_INQUIRY:
5395 	case PROBE_FULL_INQUIRY:
5396 	{
5397 		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5398 			struct scsi_inquiry_data *inq_buf;
5399 			u_int8_t periph_qual;
5400 
5401 			path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
5402 			inq_buf = &path->device->inq_data;
5403 
5404 			periph_qual = SID_QUAL(inq_buf);
5405 
5406 			switch(periph_qual) {
5407 			case SID_QUAL_LU_CONNECTED:
5408 			{
5409 				u_int8_t alen;
5410 
5411 				/*
5412 				 * We conservatively request only
5413 				 * SHORT_INQUIRY_LENGTH bytes of inquiry
5414 				 * information during our first try
5415 				 * at sending an INQUIRY. If the device
5416 				 * has more information to give,
5417 				 * perform a second request specifying
5418 				 * the amount of information the device
5419 				 * is willing to give.
5420 				 */
5421 				alen = inq_buf->additional_length;
5422 				if (softc->action == PROBE_INQUIRY
5423 				 && alen > (SHORT_INQUIRY_LENGTH - 5)) {
5424 					softc->action = PROBE_FULL_INQUIRY;
5425 					xpt_release_ccb(done_ccb);
5426 					xpt_schedule(periph, priority);
5427 					return;
5428 				}
5429 
5430 				xpt_find_quirk(path->device);
5431 
5432 				if ((inq_buf->flags & SID_CmdQue) != 0)
5433 					softc->action = PROBE_MODE_SENSE;
5434 				else
5435 					softc->action = PROBE_SERIAL_NUM;
5436 
5437 				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5438 				xpt_reference_device(path->device);
5439 
5440 				xpt_release_ccb(done_ccb);
5441 				xpt_schedule(periph, priority);
5442 				return;
5443 			}
5444 			default:
5445 				break;
5446 			}
5447 		} else if (cam_periph_error(done_ccb, 0,
5448 					    done_ccb->ccb_h.target_lun > 0
5449 					    ? SF_RETRY_UA|SF_QUIET_IR
5450 					    : SF_RETRY_UA,
5451 					    &softc->saved_ccb) == ERESTART) {
5452 			return;
5453 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5454 			/* Don't wedge the queue */
5455 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5456 					 /*run_queue*/TRUE);
5457 		}
5458 		/*
5459 		 * If we get to this point, we got an error status back
5460 		 * from the inquiry and the error status doesn't require
5461 		 * automatically retrying the command.  Therefore, the
5462 		 * inquiry failed.  If we had inquiry information before
5463 		 * for this device, but this latest inquiry command failed,
5464 		 * the device has probably gone away.  If this device isn't
5465 		 * already marked unconfigured, notify the peripheral
5466 		 * drivers that this device is no more.
5467 		 */
5468 		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5469 			/* Send the async notification. */
5470 			xpt_async(AC_LOST_DEVICE, path, NULL);
5471 		}
5472 
5473 		xpt_release_ccb(done_ccb);
5474 		break;
5475 	}
5476 	case PROBE_MODE_SENSE:
5477 	{
5478 		struct ccb_scsiio *csio;
5479 		struct scsi_mode_header_6 *mode_hdr;
5480 
5481 		csio = &done_ccb->csio;
5482 		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
5483 		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5484 			struct scsi_control_page *page;
5485 			u_int8_t *offset;
5486 
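			/*
			 * The control mode page follows the 6-byte mode
			 * header and any block descriptors the device
			 * returned; record its queueing flags.
			 */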
5487 			offset = ((u_int8_t *)&mode_hdr[1])
5488 			    + mode_hdr->blk_desc_len;
5489 			page = (struct scsi_control_page *)offset;
5490 			path->device->queue_flags = page->queue_flags;
5491 		} else if (cam_periph_error(done_ccb, 0,
5492 					    SF_RETRY_UA|SF_NO_PRINT,
5493 					    &softc->saved_ccb) == ERESTART) {
5494 			return;
5495 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5496 			/* Don't wedge the queue */
5497 			xpt_release_devq(done_ccb->ccb_h.path,
5498 					 /*count*/1, /*run_queue*/TRUE);
5499 		}
5500 		xpt_release_ccb(done_ccb);
5501 		kfree(mode_hdr, M_TEMP);
5502 		softc->action = PROBE_SERIAL_NUM;
5503 		xpt_schedule(periph, priority);
5504 		return;
5505 	}
5506 	case PROBE_SERIAL_NUM:
5507 	{
5508 		struct ccb_scsiio *csio;
5509 		struct scsi_vpd_unit_serial_number *serial_buf;
5510 		u_int32_t  priority;
5511 		int changed;
5512 		int have_serialnum;
5513 
5514 		changed = 1;
5515 		have_serialnum = 0;
5516 		csio = &done_ccb->csio;
5517 		priority = done_ccb->ccb_h.pinfo.priority;
5518 		serial_buf =
5519 		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
5520 
5521 		/* Clean up from previous instance of this device */
5522 		if (path->device->serial_num != NULL) {
5523 			kfree(path->device->serial_num, M_DEVBUF);
5524 			path->device->serial_num = NULL;
5525 			path->device->serial_num_len = 0;
5526 		}
5527 
5528 		if (serial_buf == NULL) {
5529 			/*
5530 			 * Don't process the command, as it was never sent.
5531 			 */
5532 		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
5533 			&& (serial_buf->length > 0)) {
5534 
5535 			have_serialnum = 1;
5536 			path->device->serial_num =
5537 				kmalloc((serial_buf->length + 1),
5538 				       M_DEVBUF, M_INTWAIT);
5539 			bcopy(serial_buf->serial_num,
5540 			      path->device->serial_num,
5541 			      serial_buf->length);
5542 			path->device->serial_num_len = serial_buf->length;
5543 			path->device->serial_num[serial_buf->length] = '\0';
5544 		} else if (cam_periph_error(done_ccb, 0,
5545 					    SF_RETRY_UA|SF_NO_PRINT,
5546 					    &softc->saved_ccb) == ERESTART) {
5547 			return;
5548 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5549 			/* Don't wedge the queue */
5550 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5551 					 /*run_queue*/TRUE);
5552 		}
5553 
5554 		/*
5555 		 * Let's see if we have seen this device before.
5556 		 */
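		/*
		 * Compare an MD5 digest of the fresh inquiry data (plus the
		 * serial number, if we got one) against the digest saved for
		 * this device; a mismatch means the unit behind this path
		 * has changed.
		 */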
5557 		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5558 			MD5_CTX context;
5559 			u_int8_t digest[16];
5560 
5561 			MD5Init(&context);
5562 
5563 			MD5Update(&context,
5564 				  (unsigned char *)&path->device->inq_data,
5565 				  sizeof(struct scsi_inquiry_data));
5566 
5567 			if (have_serialnum)
5568 				MD5Update(&context, serial_buf->serial_num,
5569 					  serial_buf->length);
5570 
5571 			MD5Final(digest, &context);
5572 			if (bcmp(softc->digest, digest, 16) == 0)
5573 				changed = 0;
5574 
5575 			/*
5576 			 * XXX Do we need to do a TUR in order to ensure
5577 			 *     that the device really hasn't changed???
5578 			 */
5579 			if ((changed != 0)
5580 			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5581 				xpt_async(AC_LOST_DEVICE, path, NULL);
5582 		}
5583 		if (serial_buf != NULL)
5584 			kfree(serial_buf, M_TEMP);
5585 
5586 		if (changed != 0) {
5587 			/*
5588 			 * We now have all the information
5589 			 * necessary to safely perform transfer
5590 			 * negotiation.  Controllers don't perform
5591 			 * any negotiation or tagged queuing until
5592 			 * after the first XPT_SET_TRAN_SETTINGS ccb is
5593 			 * received.  So, on a new device, just retrieve
5594 			 * the user settings, and set them as the current
5595 			 * settings to set the device up.
5596 			 */
5597 			proberequestdefaultnegotiation(periph);
5598 			xpt_release_ccb(done_ccb);
5599 
5600 			/*
5601 			 * Perform a TUR to allow the controller to
5602 			 * perform any necessary transfer negotiation.
5603 			 */
5604 			softc->action = PROBE_TUR_FOR_NEGOTIATION;
5605 			xpt_schedule(periph, priority);
5606 			return;
5607 		}
5608 		xpt_release_ccb(done_ccb);
5609 		break;
5610 	}
5611 	case PROBE_TUR_FOR_NEGOTIATION:
5612 		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5613 			/* Don't wedge the queue */
5614 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5615 					 /*run_queue*/TRUE);
5616 		}
5617 
5618 		path->device->flags &= ~CAM_DEV_UNCONFIGURED;
5619 		xpt_reference_device(path->device);
5620 
5621 		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
5622 			/* Inform the XPT that a new device has been found */
5623 			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5624 			xpt_action(done_ccb);
5625 
5626 			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
5627 		}
5628 		xpt_release_ccb(done_ccb);
5629 		break;
5630 	}
5631 	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5632 	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
5633 	done_ccb->ccb_h.status = CAM_REQ_CMP;
5634 	xpt_done(done_ccb);
5635 	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
5636 		cam_periph_invalidate(periph);
5637 		cam_periph_release(periph);
5638 	} else {
5639 		probeschedule(periph);
5640 	}
5641 }
5642 
5643 static void
5644 probecleanup(struct cam_periph *periph)
5645 {
5646 	kfree(periph->softc, M_TEMP);
5647 }
5648 
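/*
 * Match the device's inquiry data against the static quirk table.  The
 * table is expected to end with a wildcard entry, so a failed match can
 * only mean the table itself is broken.
 */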
5649 static void
5650 xpt_find_quirk(struct cam_ed *device)
5651 {
5652 	caddr_t	match;
5653 
5654 	match = cam_quirkmatch((caddr_t)&device->inq_data,
5655 			       (caddr_t)xpt_quirk_table,
5656 			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
5657 			       sizeof(*xpt_quirk_table), scsi_inquiry_match);
5658 
5659 	if (match == NULL)
5660 		panic("xpt_find_quirk: device didn't match wildcard entry!!");
5661 
5662 	device->quirk = (struct xpt_quirk_entry *)match;
5663 }
5664 
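/*
 * Validate and apply a set of transfer settings.  For non-async updates
 * the request is first sanity checked against the controller's
 * capabilities (XPT_PATH_INQ) and the device's inquiry data, transitions
 * into or out of tagged queueing are handled by freezing the device queue
 * and resizing its ccb queue, and the settings are then handed to the
 * SIM's action routine.
 */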
5665 static void
5666 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
5667 			  int async_update)
5668 {
5669 	struct	cam_sim *sim;
5670 	int	qfrozen;
5671 
5672 	sim = cts->ccb_h.path->bus->sim;
5673 	if (async_update == FALSE) {
5674 		struct	scsi_inquiry_data *inq_data;
5675 		struct	ccb_pathinq cpi;
5676 		struct	ccb_trans_settings cur_cts;
5677 
5678 		if (device == NULL) {
5679 			cts->ccb_h.status = CAM_PATH_INVALID;
5680 			xpt_done((union ccb *)cts);
5681 			return;
5682 		}
5683 
5684 		/*
5685 		 * Perform sanity checking against what the
5686 		 * controller and device can do.
5687 		 */
5688 		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
5689 		cpi.ccb_h.func_code = XPT_PATH_INQ;
5690 		xpt_action((union ccb *)&cpi);
5691 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
5692 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5693 		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
5694 		xpt_action((union ccb *)&cur_cts);
5695 		inq_data = &device->inq_data;
5696 
5697 		/* Fill in any gaps in what the user gave us */
5698 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
5699 			cts->sync_period = cur_cts.sync_period;
5700 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
5701 			cts->sync_offset = cur_cts.sync_offset;
5702 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
5703 			cts->bus_width = cur_cts.bus_width;
5704 		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
5705 			cts->flags &= ~CCB_TRANS_DISC_ENB;
5706 			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
5707 		}
5708 		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
5709 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5710 			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
5711 		}
5712 
5713 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
5714 		  && (inq_data->flags & SID_Sync) == 0)
5715 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
5716 		 || (cts->sync_offset == 0)
5717 		 || (cts->sync_period == 0)) {
5718 			/* Force async */
5719 			cts->sync_period = 0;
5720 			cts->sync_offset = 0;
5721 		} else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) {
5722 
5723 			if ((inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
5724 			 && cts->sync_period <= 0x9) {
5725 				/*
5726 				 * Don't allow DT transmission rates if the
5727 				 * device does not support it.
5728 				 */
5729 				cts->sync_period = 0xa;
5730 			}
5731 			if ((inq_data->spi3data & SID_SPI_IUS) == 0
5732 			 && cts->sync_period <= 0x8) {
5733 				/*
5734 				 * Don't allow PACE transmission rates
5735 				 * if the device does not support packetized
5736 				 * transfers.
5737 				 */
5738 				cts->sync_period = 0x9;
5739 			}
5740 		}
5741 
5742 		switch (cts->bus_width) {
5743 		case MSG_EXT_WDTR_BUS_32_BIT:
5744 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5745 			  || (inq_data->flags & SID_WBus32) != 0)
5746 			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
5747 				break;
5748 			/* Fall Through to 16-bit */
5749 		case MSG_EXT_WDTR_BUS_16_BIT:
5750 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
5751 			  || (inq_data->flags & SID_WBus16) != 0)
5752 			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
5753 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5754 				break;
5755 			}
5756 			/* Fall Through to 8-bit */
5757 		default: /* New bus width?? */
5758 		case MSG_EXT_WDTR_BUS_8_BIT:
5759 			/* All targets can do this */
5760 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5761 			break;
5762 		}
5763 
5764 		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
5765 			/*
5766 			 * Can't tag queue without disconnection.
5767 			 */
5768 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5769 			cts->valid |= CCB_TRANS_TQ_VALID;
5770 		}
5771 
5772 		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
5773 		 || (inq_data->flags & SID_CmdQue) == 0
5774 		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
5775 		 || (device->quirk->mintags == 0)) {
5776 			/*
5777 			 * Can't tag on hardware that doesn't support tags,
5778 			 * doesn't have them enabled, or has broken tag support.
5779 			 */
5780 			cts->flags &= ~CCB_TRANS_TAG_ENB;
5781 		}
5782 	}
5783 
5784 	qfrozen = FALSE;
5785 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
5786 		int device_tagenb;
5787 
5788 		/*
5789 		 * If we are transitioning from tags to no-tags or
5790 		 * vice-versa, we need to carefully freeze and restart
5791 		 * the queue so that we don't overlap tagged and non-tagged
5792 		 * commands.  We also temporarily stop tags if there is
5793 		 * a change in transfer negotiation settings to allow
5794 		 * "tag-less" negotiation.
5795 		 */
5796 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5797 		 || (device->inq_flags & SID_CmdQue) != 0)
5798 			device_tagenb = TRUE;
5799 		else
5800 			device_tagenb = FALSE;
5801 
5802 		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
5803 		  && device_tagenb == FALSE)
5804 		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
5805 		  && device_tagenb == TRUE)) {
5806 
5807 			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
5808 				/*
5809 				 * Delay change to use tags until after a
5810 				 * few commands have gone to this device so
5811 				 * the controller has time to perform transfer
5812 				 * negotiations without tagged messages getting
5813 				 * in the way.
5814 				 */
5815 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
5816 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
5817 			} else {
5818 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
5819 				qfrozen = TRUE;
5820 				device->inq_flags &= ~SID_CmdQue;
5821 				xpt_dev_ccbq_resize(cts->ccb_h.path,
5822 						    sim->max_dev_openings);
5823 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5824 				device->tag_delay_count = 0;
5825 			}
5826 		}
5827 	}
5828 
5829 	if (async_update == FALSE) {
5830 		/*
5831 		 * If we are currently performing tagged transactions to
5832 		 * this device and want to change its negotiation parameters,
5833 		 * go non-tagged for a bit to give the controller a chance to
5834 		 * negotiate unhampered by tag messages.
5835 		 */
5836 		if ((device->inq_flags & SID_CmdQue) != 0
5837 		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
5838 				   CCB_TRANS_SYNC_OFFSET_VALID|
5839 				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
5840 			xpt_toggle_tags(cts->ccb_h.path);
5841 
5842 		(*(sim->sim_action))(sim, (union ccb *)cts);
5843 	}
5844 
5845 	if (qfrozen) {
5846 		struct ccb_relsim crs;
5847 
5848 		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
5849 			      /*priority*/1);
5850 		crs.ccb_h.func_code = XPT_REL_SIMQ;
5851 		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5852 		crs.openings
5853 		    = crs.release_timeout
5854 		    = crs.qfrozen_cnt
5855 		    = 0;
5856 		xpt_action((union ccb *)&crs);
5857 	}
5858 }
5859 
5860 static void
5861 xpt_toggle_tags(struct cam_path *path)
5862 {
5863 	struct cam_ed *dev;
5864 
5865 	/*
5866 	 * Give controllers a chance to renegotiate
5867 	 * before starting tag operations.  We
5868 	 * "toggle" tagged queuing off then on
5869 	 * which causes the tag enable command delay
5870 	 * counter to come into effect.
5871 	 */
5872 	dev = path->device;
5873 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5874 	 || ((dev->inq_flags & SID_CmdQue) != 0
5875 	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
5876 		struct ccb_trans_settings cts;
5877 
5878 		xpt_setup_ccb(&cts.ccb_h, path, 1);
5879 		cts.flags = 0;
5880 		cts.valid = CCB_TRANS_TQ_VALID;
5881 		xpt_set_transfer_settings(&cts, path->device,
5882 					  /*async_update*/TRUE);
5883 		cts.flags = CCB_TRANS_TAG_ENB;
5884 		xpt_set_transfer_settings(&cts, path->device,
5885 					  /*async_update*/TRUE);
5886 	}
5887 }
5888 
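/*
 * Enable tagged queueing on a device, called once its tag delay count has
 * expired.  The device queue is frozen while the ccb queue is resized to
 * the tagged opening limit, then released with XPT_REL_SIMQ once it
 * empties.
 */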
5889 static void
5890 xpt_start_tags(struct cam_path *path)
5891 {
5892 	struct ccb_relsim crs;
5893 	struct cam_ed *device;
5894 	struct cam_sim *sim;
5895 	int    newopenings;
5896 
5897 	device = path->device;
5898 	sim = path->bus->sim;
5899 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
5900 	xpt_freeze_devq(path, /*count*/1);
5901 	device->inq_flags |= SID_CmdQue;
5902 	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
5903 	xpt_dev_ccbq_resize(path, newopenings);
5904 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
5905 	crs.ccb_h.func_code = XPT_REL_SIMQ;
5906 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
5907 	crs.openings
5908 	    = crs.release_timeout
5909 	    = crs.qfrozen_cnt
5910 	    = 0;
5911 	xpt_action((union ccb *)&crs);
5912 }
5913 
5914 static int busses_to_config;
5915 static int busses_to_reset;
5916 
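/*
 * Per-bus callback used during initial configuration: count the busses
 * that need configuring and, of those, the busses whose controllers can
 * negotiate transfers and thus warrant a settling bus reset.
 */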
5917 static int
5918 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
5919 {
5920 	if (bus->path_id != CAM_XPT_PATH_ID) {
5921 		struct cam_path path;
5922 		struct ccb_pathinq cpi;
5923 		int can_negotiate;
5924 
5925 		busses_to_config++;
5926 		xpt_compile_path(&path, NULL, bus->path_id,
5927 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
5928 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
5929 		cpi.ccb_h.func_code = XPT_PATH_INQ;
5930 		xpt_action((union ccb *)&cpi);
5931 		can_negotiate = cpi.hba_inquiry;
5932 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
5933 		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
5934 		 && can_negotiate)
5935 			busses_to_reset++;
5936 		xpt_release_path(&path);
5937 	}
5938 
5939 	return(1);
5940 }
5941 
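/*
 * Per-bus callback that starts configuration of a single bus: perform a
 * path inquiry and, if the controller allows bus resets and can negotiate
 * transfers, reset the bus before scanning it; otherwise proceed as if a
 * reset had already completed.
 */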
5942 static int
5943 xptconfigfunc(struct cam_eb *bus, void *arg)
5944 {
5945 	struct	cam_path *path;
5946 	union	ccb *work_ccb;
5947 
5948 	if (bus->path_id != CAM_XPT_PATH_ID) {
5949 		cam_status status;
5950 		int can_negotiate;
5951 
5952 		work_ccb = xpt_alloc_ccb();
5953 		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
5954 					      CAM_TARGET_WILDCARD,
5955 					      CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
5956 			kprintf("xptconfigfunc: xpt_create_path failed with "
5957 			       "status %#x for bus %d\n", status, bus->path_id);
5958 			kprintf("xptconfigfunc: halting bus configuration\n");
5959 			xpt_free_ccb(work_ccb);
5960 			busses_to_config--;
5961 			xpt_finishconfig(xpt_periph, NULL);
5962 			return(0);
5963 		}
5964 		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
5965 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5966 		xpt_action(work_ccb);
5967 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5968 			kprintf("xptconfigfunc: CPI failed on bus %d "
5969 			       "with status %d\n", bus->path_id,
5970 			       work_ccb->ccb_h.status);
5971 			xpt_finishconfig(xpt_periph, work_ccb);
5972 			return(1);
5973 		}
5974 
5975 		can_negotiate = work_ccb->cpi.hba_inquiry;
5976 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
5977 		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
5978 		 && (can_negotiate != 0)) {
5979 			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
5980 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
5981 			work_ccb->ccb_h.cbfcnp = NULL;
5982 			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
5983 				  ("Resetting Bus\n"));
5984 			xpt_action(work_ccb);
5985 			xpt_finishconfig(xpt_periph, work_ccb);
5986 		} else {
5987 			/* Act as though we performed a successful BUS RESET */
5988 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
5989 			xpt_finishconfig(xpt_periph, work_ccb);
5990 		}
5991 	}
5992 
5993 	return(1);
5994 }
5995 
5996 static void
5997 xpt_config(void *arg)
5998 {
5999 	/* Now that interrupts are enabled, go find our devices */
6000 
6001 #ifdef CAMDEBUG
6002 	/* Setup debugging flags and path */
6003 #ifdef CAM_DEBUG_FLAGS
6004 	cam_dflags = CAM_DEBUG_FLAGS;
6005 #else /* !CAM_DEBUG_FLAGS */
6006 	cam_dflags = CAM_DEBUG_NONE;
6007 #endif /* CAM_DEBUG_FLAGS */
6008 #ifdef CAM_DEBUG_BUS
6009 	if (cam_dflags != CAM_DEBUG_NONE) {
6010 		if (xpt_create_path(&cam_dpath, xpt_periph,
6011 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6012 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6013 			kprintf("xpt_config: xpt_create_path() failed for debug"
6014 			       " target %d:%d:%d, debugging disabled\n",
6015 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6016 			cam_dflags = CAM_DEBUG_NONE;
6017 		}
6018 	} else
6019 		cam_dpath = NULL;
6020 #else /* !CAM_DEBUG_BUS */
6021 	cam_dpath = NULL;
6022 #endif /* CAM_DEBUG_BUS */
6023 #endif /* CAMDEBUG */
6024 
6025 	/*
6026 	 * Scan all installed busses.
6027 	 */
6028 	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6029 
6030 	if (busses_to_config == 0) {
6031 		/* Call manually because we don't have any busses */
6032 		xpt_finishconfig(xpt_periph, NULL);
6033 	} else  {
6034 		if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
6035 			kprintf("Waiting %d seconds for SCSI "
6036 			       "devices to settle\n", SCSI_DELAY/1000);
6037 		}
6038 		xpt_for_all_busses(xptconfigfunc, NULL);
6039 	}
6040 }
6041 
6042 /*
6043  * If the given device only has one peripheral attached to it, and if that
6044  * peripheral is the passthrough driver, announce it.  This ensures that the
6045  * user sees some sort of announcement for every peripheral in their system.
6046  */
6047 static int
6048 xptpassannouncefunc(struct cam_ed *device, void *arg)
6049 {
6050 	struct cam_periph *periph;
6051 	int i;
6052 
6053 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6054 	     periph = SLIST_NEXT(periph, periph_links), i++);
6055 
6056 	periph = SLIST_FIRST(&device->periphs);
6057 	if ((i == 1)
6058 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
6059 		xpt_announce_periph(periph, NULL);
6060 
6061 	return(1);
6062 }
6063 
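/*
 * Completion handler for the bus reset and scan CCBs issued during initial
 * configuration.  A successful reset is converted into a bus scan; once the
 * last bus is done, the peripheral drivers are registered, passthrough-only
 * devices are announced, and the config intrhook is released so the boot
 * can continue.
 */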
6064 static void
6065 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6066 {
6067 	struct	periph_driver **p_drv;
6068 
6069 	if (done_ccb != NULL) {
6070 		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6071 			  ("xpt_finishconfig\n"));
6072 		switch(done_ccb->ccb_h.func_code) {
6073 		case XPT_RESET_BUS:
6074 			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6075 				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6076 				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6077 				xpt_action(done_ccb);
6078 				return;
6079 			}
6080 			/* FALLTHROUGH */
6081 		case XPT_SCAN_BUS:
6082 		default:
6083 			xpt_free_path(done_ccb->ccb_h.path);
6084 			busses_to_config--;
6085 			break;
6086 		}
6087 	}
6088 
6089 	if (busses_to_config == 0) {
6090 		/* Register all the peripheral drivers */
6091 		/* XXX This will have to change when we have loadable modules */
6092 		SET_FOREACH(p_drv, periphdriver_set) {
6093 			(*p_drv)->init();
6094 		}
6095 
6096 		/*
6097 		 * Check for devices with no "standard" peripheral driver
6098 		 * attached.  For any devices like that, announce the
6099 		 * passthrough driver so the user will see something.
6100 		 */
6101 		xpt_for_all_devices(xptpassannouncefunc, NULL);
6102 
6103 		/* Release our hook so that the boot can continue. */
6104 		config_intrhook_disestablish(xpt_config_hook);
6105 		kfree(xpt_config_hook, M_TEMP);
6106 		xpt_config_hook = NULL;
6107 	}
6108 	if (done_ccb != NULL)
6109 		xpt_free_ccb(done_ccb);
6110 }
6111 
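/*
 * Action routine for the transport layer's own SIM.  Only path inquiries
 * need real answers; every other function code is rejected as invalid.
 */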
6112 static void
6113 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6114 {
6115 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6116 
6117 	switch (work_ccb->ccb_h.func_code) {
6118 	/* Common cases first */
6119 	case XPT_PATH_INQ:		/* Path routing inquiry */
6120 	{
6121 		struct ccb_pathinq *cpi;
6122 
6123 		cpi = &work_ccb->cpi;
6124 		cpi->version_num = 1; /* XXX??? */
6125 		cpi->hba_inquiry = 0;
6126 		cpi->target_sprt = 0;
6127 		cpi->hba_misc = 0;
6128 		cpi->hba_eng_cnt = 0;
6129 		cpi->max_target = 0;
6130 		cpi->max_lun = 0;
6131 		cpi->initiator_id = 0;
6132 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6133 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
6134 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6135 		cpi->unit_number = sim->unit_number;
6136 		cpi->bus_id = sim->bus_id;
6137 		cpi->base_transfer_speed = 0;
6138 		cpi->ccb_h.status = CAM_REQ_CMP;
6139 		xpt_done(work_ccb);
6140 		break;
6141 	}
6142 	default:
6143 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
6144 		xpt_done(work_ccb);
6145 		break;
6146 	}
6147 }
6148 
6149 /*
6150  * The xpt as a "controller" has no interrupt sources, so polling
6151  * is a no-op.
6152  */
6153 static void
6154 xptpoll(struct cam_sim *sim)
6155 {
6156 }
6157 
6158 /*
6159  * Should only be called by the machine interrupt dispatch routines,
6160  * so put these prototypes here instead of in the header.
6161  */
6162 
6163 static void
6164 swi_camnet(void *arg, void *frame)
6165 {
6166 	camisr(&cam_netq);
6167 }
6168 
6169 static void
6170 swi_cambio(void *arg, void *frame)
6171 {
6172 	camisr(&cam_bioq);
6173 }
6174 
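/*
 * Drain completed CCBs from a software interrupt queue.  For each CCB we
 * update the high-power and device queue accounting, release any queues
 * the completion status asks us to, schedule further work from the
 * device's send queue, and finally call the peripheral driver's
 * completion callback.
 */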
6175 static void
6176 camisr(cam_isrq_t *queue)
6177 {
6178 	struct	ccb_hdr *ccb_h;
6179 
6180 	crit_enter();
6181 	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6182 		int	runq;
6183 
6184 		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6185 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6186 		splz();
6187 
6188 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6189 			  ("camisr\n"));
6190 
6191 		runq = FALSE;
6192 
6193 		if (ccb_h->flags & CAM_HIGH_POWER) {
6194 			struct highpowerlist	*hphead;
6195 			struct cam_ed		*device;
6196 			union ccb		*send_ccb;
6197 
6198 			hphead = &highpowerq;
6199 
6200 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6201 
6202 			/*
6203 			 * Increment the count since this command is done.
6204 			 */
6205 			num_highpower++;
6206 
6207 			/*
6208 			 * Any high powered commands queued up?
6209 			 */
6210 			if (send_ccb != NULL) {
6211 				device = send_ccb->ccb_h.path->device;
6212 
6213 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6214 
6215 				xpt_release_devq(send_ccb->ccb_h.path,
6216 						 /*count*/1, /*runqueue*/TRUE);
6217 			}
6218 		}
6219 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6220 			struct cam_ed *dev;
6221 
6222 			dev = ccb_h->path->device;
6223 
6224 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6225 
6226 			if (ccb_h->path->bus->sim->devq) {
6227 				ccb_h->path->bus->sim->devq->send_active--;
6228 				ccb_h->path->bus->sim->devq->send_openings++;
6229 			}
6230 
6231 			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6232 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6233 			  && (dev->ccbq.dev_active == 0))) {
6234 
6235 				xpt_release_devq(ccb_h->path, /*count*/1,
6236 						 /*run_queue*/TRUE);
6237 			}
6238 
6239 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6240 			 && (--dev->tag_delay_count == 0))
6241 				xpt_start_tags(ccb_h->path);
6242 
6243 			if ((dev->ccbq.queue.entries > 0)
6244 			 && (dev->qfrozen_cnt == 0)
6245 			 && (device_is_send_queued(dev) == 0)) {
6246 				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6247 							      dev);
6248 			}
6249 		}
6250 
6251 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
6252 			xpt_release_simq(ccb_h->path->bus->sim,
6253 					 /*run_queue*/TRUE);
6254 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
6255 			runq = FALSE;
6256 		}
6257 
6258 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6259 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
6260 			xpt_release_devq(ccb_h->path, /*count*/1,
6261 					 /*run_queue*/TRUE);
6262 			ccb_h->status &= ~CAM_DEV_QFRZN;
6263 		} else if (runq) {
6264 			xpt_run_dev_sendq(ccb_h->path->bus);
6265 		}
6266 
6267 		/* Call the peripheral driver's callback */
6268 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6269 	}
6270 	crit_exit();
6271 }
6272