xref: /dragonfly/sys/dev/raid/asr/asr.c (revision 67640b13)
1 /*-
2  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3  * Copyright (c) 2000-2001 Adaptec Corporation
4  * All rights reserved.
5  *
6  * TERMS AND CONDITIONS OF USE
7  *
8  * Redistribution and use in source form, with or without modification, are
9  * permitted provided that redistributions of source code must retain the
10  * above copyright notice, this list of conditions and the following disclaimer.
11  *
12  * This software is provided `as is' by Adaptec and any express or implied
13  * warranties, including, but not limited to, the implied warranties of
14  * merchantability and fitness for a particular purpose, are disclaimed. In no
15  * event shall Adaptec be liable for any direct, indirect, incidental, special,
16  * exemplary or consequential damages (including, but not limited to,
17  * procurement of substitute goods or services; loss of use, data, or profits;
18  * or business interruptions) however caused and on any theory of liability,
19  * whether in contract, strict liability, or tort (including negligence or
20  * otherwise) arising in any way out of the use of this driver software, even
21  * if advised of the possibility of such damage.
22  *
23  * SCSI I2O host adapter driver
24  *
25  *	V1.10 2004/05/05 scottl@freebsd.org
26  *		- Massive cleanup of the driver to remove dead code and
27  *		  non-conformant style.
28  *		- Removed most i386-specific code to make it more portable.
29  *		- Converted to the bus_space API.
30  *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31  *		- The 2000S and 2005S do not initialize on some machines,
32  *		  increased timeout to 255ms from 50ms for the StatusGet
33  *		  command.
34  *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35  *		- I knew this one was too good to be true. The error return
36  *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37  *		  to the bit masked status.
38  *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39  *		- The 2005S that was supported is affectionately called the
40  *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41  *		  16MB low-cost configuration, Firmware was forced to go
42  *		  to a Split BAR Firmware. This requires a separate IOP and
43  *		  Messaging base address.
44  *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45  *		- Handle support for 2005S Zero Channel RAID solution.
46  *		- System locked up if the Adapter locked up. Do not try
47  *		  to send other commands if the resetIOP command fails. The
48  *		  fail outstanding command discovery loop was flawed as the
49  *		  removal of the command from the list prevented discovering
50  *		  all the commands.
51  *		- Comment changes to clarify driver.
52  *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53  *		- We do not use the AC_FOUND_DEV event because of I2O.
54  *		  Removed asr_async.
55  *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56  *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57  *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58  *		  mode as this is confused with competitor adapters in run
59  *		  mode.
60  *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61  *		  to prevent operating system panic.
62  *		- moved default major number to 154 from 97.
63  *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64  *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65  *		  series that is visible, it's more of an internal code name.
66  *		  remove any visible references within reason for now.
67  *		- bus_ptr->LUN was not correctly zeroed when initially
68  *		  allocated causing a possible panic of the operating system
69  *		  during boot.
70  *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71  *		- Code always fails for ASR_getTid affecting performance.
72  *		- initiated a set of changes that resulted from a formal
73  *		  code inspection by Mark_Salyzyn@adaptec.com,
74  *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75  *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76  *		  Their findings were focussed on the LCT & TID handler, and
77  *		  all resulting changes were to improve code readability,
78  *		  consistency or have a positive effect on performance.
79  *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80  *		- Passthrough returned an incorrect error.
81  *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82  *		  on command completion.
83  *		- generate control device nodes using make_dev and delete_dev.
84  *		- Performance affected by TID caching reallocing.
85  *		- Made suggested changes by Justin_Gibbs@adaptec.com
86  *			- use splcam instead of splbio.
87  *			- use cam_imask instead of bio_imask.
88  *			- use u_int8_t instead of u_char.
89  *			- use u_int16_t instead of u_short.
90  *			- use u_int32_t instead of u_long where appropriate.
91  *			- use 64 bit context handler instead of 32 bit.
92  *			- create_ccb should only allocate the worst case
93  *			  requirements for the driver since CAM may evolve
94  *			  making union ccb much larger than needed here.
95  *			  renamed create_ccb to asr_alloc_ccb.
96  *			- go nutz justifying all debug prints as macros
97  *			  defined at the top and remove unsightly ifdefs.
98  *			- INLINE STATIC viewed as confusing. Historically
99  *			  utilized to affect code performance and debug
100  *			  issues in OS, Compiler or OEM specific situations.
101  *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102  *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103  *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104  *			changed variable name xs to ccb
105  *			changed struct scsi_link to struct cam_path
106  *			changed struct scsibus_data to struct cam_sim
107  *			stopped using fordriver for holding on to the TID
108  *			use proprietary packet creation instead of scsi_inquire
109  *			CAM layer sends synchronize commands.
110  *
111  * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
112  */
113 
114 #include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
115 #include <sys/kernel.h>
116 #include <sys/module.h>
117 #include <sys/systm.h>
118 #include <sys/malloc.h>
119 #include <sys/conf.h>
120 #include <sys/priv.h>
121 #include <sys/proc.h>
122 #include <sys/bus.h>
123 #include <sys/rman.h>
124 #include <sys/stat.h>
125 #include <sys/device.h>
126 #include <sys/thread2.h>
127 #include <sys/bus_dma.h>
128 
129 #include <bus/cam/cam.h>
130 #include <bus/cam/cam_ccb.h>
131 #include <bus/cam/cam_sim.h>
132 #include <bus/cam/cam_xpt_sim.h>
133 
134 #include <bus/cam/scsi/scsi_all.h>
135 #include <bus/cam/scsi/scsi_message.h>
136 
137 #include <vm/vm.h>
138 #include <vm/pmap.h>
139 
140 #include <machine/vmparam.h>
141 
142 #include <bus/pci/pcivar.h>
143 #include <bus/pci/pcireg.h>
144 
145 #define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
146 #define	KVTOPHYS(x) vtophys(x)
147 #include	<dev/raid/asr/dptalign.h>
148 #include	<dev/raid/asr/i2oexec.h>
149 #include	<dev/raid/asr/i2obscsi.h>
150 #include	<dev/raid/asr/i2odpt.h>
151 #include	<dev/raid/asr/i2oadptr.h>
152 
153 #include	<dev/raid/asr/sys_info.h>
154 
155 #define	ASR_VERSION	1
156 #define	ASR_REVISION	'1'
157 #define	ASR_SUBREVISION '0'
158 #define	ASR_MONTH	5
159 #define	ASR_DAY		5
160 #define	ASR_YEAR	(2004 - 1980)
161 
162 /*
163  *	Debug macros to reduce the unsightly ifdefs
164  */
165 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
166 static __inline void
167 debug_asr_message(PI2O_MESSAGE_FRAME message)
168 {
169 	u_int32_t * pointer = (u_int32_t *)message;
170 	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
171 	u_int32_t   counter = 0;
172 
173 	while (length--) {
174 		kprintf("%08lx%c", (u_long)*(pointer++),
175 		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
176 	}
177 }
178 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
179 
180 #ifdef DEBUG_ASR
181   /* Breaks on none STDC based compilers :-( */
182 #define debug_asr_printf(fmt,args...)	kprintf(fmt, ##args)
183 #define debug_asr_dump_message(message)	debug_asr_message(message)
184 #define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
185 #else /* DEBUG_ASR */
186 #define debug_asr_printf(fmt,args...)
187 #define debug_asr_dump_message(message)
188 #define debug_asr_print_path(ccb)
189 #endif /* DEBUG_ASR */
190 
191 /*
192  *	If DEBUG_ASR_CMD is defined:
193  *		0 - Display incoming SCSI commands
194  *		1 - add in a quick character before queueing.
195  *		2 - add in outgoing message frames.
196  */
197 #if (defined(DEBUG_ASR_CMD))
198 #define debug_asr_cmd_printf(fmt,args...)     kprintf(fmt,##args)
199 static __inline void
200 debug_asr_dump_ccb(union ccb *ccb)
201 {
202 	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
203 	int		len = ccb->csio.cdb_len;
204 
205 	while (len) {
206 		debug_asr_cmd_printf (" %02x", *(cp++));
207 		--len;
208 	}
209 }
210 #if (DEBUG_ASR_CMD > 0)
211 #define debug_asr_cmd1_printf		       debug_asr_cmd_printf
212 #else
213 #define debug_asr_cmd1_printf(fmt,args...)
214 #endif
215 #if (DEBUG_ASR_CMD > 1)
216 #define debug_asr_cmd2_printf			debug_asr_cmd_printf
217 #define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
218 #else
219 #define debug_asr_cmd2_printf(fmt,args...)
220 #define debug_asr_cmd2_dump_message(message)
221 #endif
222 #else /* DEBUG_ASR_CMD */
223 #define debug_asr_cmd_printf(fmt,args...)
224 #define debug_asr_dump_ccb(ccb)
225 #define debug_asr_cmd1_printf(fmt,args...)
226 #define debug_asr_cmd2_printf(fmt,args...)
227 #define debug_asr_cmd2_dump_message(message)
228 #endif /* DEBUG_ASR_CMD */
229 
230 #if (defined(DEBUG_ASR_USR_CMD))
231 #define debug_usr_cmd_printf(fmt,args...)   kprintf(fmt,##args)
232 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
233 #else /* DEBUG_ASR_USR_CMD */
234 #define debug_usr_cmd_printf(fmt,args...)
235 #define debug_usr_cmd_dump_message(message)
236 #endif /* DEBUG_ASR_USR_CMD */
237 
238 #ifdef ASR_IOCTL_COMPAT
239 #define	dsDescription_size 46	/* Snug as a bug in a rug */
240 #endif /* ASR_IOCTL_COMPAT */
241 
242 #include "dev/raid/asr/dptsig.h"
243 
/*
 * Driver signature block (dptsig.h).  asr_attach patches the OS version
 * digits inside the description string below to match the running system.
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
254 
255 /* Configuration Definitions */
256 
257 #define	SG_SIZE		 58	/* Scatter Gather list Size		 */
258 #define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
259 #define	MAX_LUN		 255	/* Maximum LUN Supported		 */
260 #define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
261 #define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
262 #define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
263 #define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
264 #define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
265 				/* Also serves as the minimum map for	 */
266 				/* the 2005S zero channel RAID product	 */
267 
268 /* I2O register set */
269 #define	I2O_REG_STATUS		0x30
270 #define	I2O_REG_MASK		0x34
271 #define	I2O_REG_TOFIFO		0x40
272 #define	I2O_REG_FROMFIFO	0x44
273 
274 #define	Mask_InterruptsDisabled	0x08
275 
276 /*
277  * A MIX of performance and space considerations for TID lookups
278  */
279 typedef u_int16_t tid_t;
280 
/* LUN -> I2O TID map for one target; allocated oversize on demand. */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];	/* actually `size' entries */
} lun2tid_t;
285 
/* Target -> per-LUN map for one bus; allocated oversize on demand. */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* actually `size' entries */
} target2lun_t;
290 
291 /*
292  *	To ensure that we only allocate and use the worst case ccb here, lets
293  *	make our own local ccb union. If asr_alloc_ccb is utilized for another
294  *	ccb type, ensure that you add the additional structures into our local
295  *	ccb union. To ensure strict type checking, we will utilize the local
296  *	ccb definition wherever possible.
297  */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;   /* SCSI I/O requests (the common case) */
	struct ccb_setasync csa;    /* async callback registration */
};
303 
/*
 * DMA-visible status area: `status' receives the ExecStatusGet reply
 * (ASR_getStatus); `rstatus' receives the IopReset status word
 * (ASR_resetIOP).
 */
struct Asr_status_mem {
	I2O_EXEC_STATUS_GET_REPLY	status;
	U32				rstatus;
};
308 
309 /**************************************************************************
310 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
311 **  Is Configured Into The System.  The Structure Supplies Configuration **
312 **  Information, Status Info, Queue Info And An Active CCB List Pointer. **
313 ***************************************************************************/
314 
typedef struct Asr_softc {
	device_t		ha_dev;
	u_int16_t		ha_irq;
	u_long			ha_Base;       /* base port for each board */
	bus_size_t		ha_blinkLED;   /* blink-LED code offset (see ASR_getBlinkLedCode) */
	bus_space_handle_t	ha_i2o_bhandle; /* I2O register window */
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle; /* message frame window */
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */

	bus_dma_tag_t		ha_parent_dmat;
	bus_dma_tag_t		ha_statusmem_dmat;
	bus_dmamap_t		ha_statusmem_dmamap;
	struct Asr_status_mem * ha_statusmem;
	u_int32_t		ha_rstatus_phys; /* phys addr of rstatus word */
	u_int32_t		ha_status_phys;	 /* phys addr of status reply */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
/* Shorthand accessors for the bytes of an LCT entry's IdentityTag. */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;   /* adapter reset state, one of: */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;       /* HBA list */
	struct cdev *ha_devt;		       /* control device node */
} Asr_softc_t;
374 
375 static Asr_softc_t *Asr_softc_list;
376 
377 /*
378  *	Prototypes of the routines we have in this object.
379  */
380 
381 /* I2O HDM interface */
382 static int	asr_probe(device_t dev);
383 static int	asr_attach(device_t dev);
384 
385 static d_ioctl_t asr_ioctl;
386 static d_open_t asr_open;
387 static d_close_t asr_close;
388 static int	asr_intr(Asr_softc_t *sc);
389 static void	asr_timeout(void *arg);
390 static int	ASR_init(Asr_softc_t *sc);
391 static int	ASR_acquireLct(Asr_softc_t *sc);
392 static int	ASR_acquireHrt(Asr_softc_t *sc);
393 static void	asr_action(struct cam_sim *sim, union ccb *ccb);
394 static void	asr_poll(struct cam_sim *sim);
395 static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
396 
397 /*
398  *	Here is the auto-probe structure used to nest our tests appropriately
399  *	during the startup phase of the operating system.
400  */
static device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	DEVMETHOD_END	/* terminator */
};
406 
static driver_t asr_driver = {
	"asr",			/* device name prefix */
	asr_methods,
	sizeof(Asr_softc_t)	/* per-instance softc size */
};
412 
413 static devclass_t asr_devclass;
414 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, NULL, NULL);
415 MODULE_VERSION(asr, 1);
416 MODULE_DEPEND(asr, pci, 1, 1, 1);
417 MODULE_DEPEND(asr, cam, 1, 1, 1);
418 
419 /*
420  * devsw for asr hba driver
421  *
422  * only ioctl is used. the sd driver provides all other access.
423  */
static struct dev_ops asr_ops = {
	{ "asr", 0, 0 },
	.d_open =	asr_open,
	.d_close =	asr_close,
	.d_ioctl =	asr_ioctl,	/* sole management interface (see above) */
};
430 
431 /* I2O support routines */
432 
/* Read the adapter's outbound (FROMFIFO) register. */
static __inline u_int32_t
asr_get_FromFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_FROMFIFO));
}
439 
/* Read the adapter's inbound (TOFIFO) register. */
static __inline u_int32_t
asr_get_ToFIFO(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_TOFIFO));
}
446 
/* Read the adapter's interrupt mask register. */
static __inline u_int32_t
asr_get_intr(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_MASK));
}
453 
/* Read the adapter's I2O status register. */
static __inline u_int32_t
asr_get_status(Asr_softc_t *sc)
{
	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
				 I2O_REG_STATUS));
}
460 
/* Write `val' to the adapter's outbound (FROMFIFO) register. */
static __inline void
asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
			  val);
}
467 
/* Write `val' to the adapter's inbound (TOFIFO) register. */
static __inline void
asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
			  val);
}
474 
/* Write `val' to the adapter's interrupt mask register. */
static __inline void
asr_set_intr(Asr_softc_t *sc, u_int32_t val)
{
	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
			  val);
}
481 
/* Copy `len' 32-bit words from `frame' to the adapter frame window at `offset'. */
static __inline void
asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
{
	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
				 offset, (u_int32_t *)frame, len);
}
488 
489 /*
490  *	Fill message with default.
491  */
492 static PI2O_MESSAGE_FRAME
493 ASR_fillMessage(void *Message, u_int16_t size)
494 {
495 	PI2O_MESSAGE_FRAME Message_Ptr;
496 
497 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
498 	bzero(Message_Ptr, size);
499 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
500 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
501 	  (size + sizeof(U32) - 1) >> 2);
502 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
503 	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
504 	return (Message_Ptr);
505 } /* ASR_fillMessage */
506 
507 #define	EMPTY_QUEUE (0xffffffff)
508 
509 static __inline U32
510 ASR_getMessage(Asr_softc_t *sc)
511 {
512 	U32	MessageOffset;
513 
514 	MessageOffset = asr_get_ToFIFO(sc);
515 	if (MessageOffset == EMPTY_QUEUE)
516 		MessageOffset = asr_get_ToFIFO(sc);
517 
518 	return (MessageOffset);
519 } /* ASR_getMessage */
520 
521 /* Issue a polled command */
/*
 *	Poll (up to ~15s) for a free inbound frame, copy `Message' into it
 *	and post it to the adapter.  On success interrupts are left DISABLED
 *	and the previous interrupt mask is returned so the caller can
 *	restore it; on timeout 0xffffffff is returned and nothing is posted.
 */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = 0xffffffff;
	U32	MessageOffset;
	u_int	Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the request into the adapter's inbound frame. */
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 *	Disable the Interrupts
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		asr_set_ToFIFO(sc, MessageOffset);
	}
	return (Mask);
} /* ASR_initiateCp */
550 
551 /*
552  *	Reset the adapter.
553  */
/*
 *	Returns the adapter's reset status word (non-zero) on success, or 0
 *	if the reset message could not be posted or never completed.
 */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
	U32			       * Reply_Ptr;
	U32				 Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status
	 */
	Reply_Ptr = &sc->ha_statusmem->rstatus;
	*Reply_Ptr = 0;
	/* The adapter DMAs its reset status word to ha_rstatus_phys. */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    sc->ha_rstatus_phys);
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	     0xffffffff) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		return(*Reply_Ptr);
	}
	/* No free inbound frame; the adapter may be wedged. */
	KASSERT(Old != 0xffffffff, ("Old == -1"));
	return (0);
} /* ASR_resetIOP */
599 
600 /*
 *	Get the current state of the adapter
602  */
/*
 *	Returns a pointer to the DMA'd status reply in ha_statusmem, or
 *	NULL if the message could not be posted or the reply timed out.
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	PI2O_EXEC_STATUS_GET_REPLY	buffer;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	/* The adapter DMAs its reply into ha_statusmem->status. */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    sc->ha_status_phys);
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	buffer = &sc->ha_statusmem->status;
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		/*
		 *	Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 255ms.
		 */
		u_int8_t Delay = 255;

		/* SyncByte turns non-zero once the reply has been DMA'd. */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */
654 
655 /*
656  *	Check if the device is a SCSI I2O HBA, and add it to the list.
657  */
658 
659 /*
 * Probe for an ASR controller.  If we find one, we will use it.
662  */
663 static int
664 asr_probe(device_t dev)
665 {
666 	u_int32_t id;
667 
668 	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
669 	if ((id == 0xA5011044) || (id == 0xA5111044)) {
670 		device_set_desc(dev, "Adaptec Caching SCSI RAID");
671 		return (BUS_PROBE_DEFAULT);
672 	}
673 	return (ENXIO);
674 } /* asr_probe */
675 
676 static __inline union asr_ccb *
677 asr_alloc_ccb(Asr_softc_t *sc)
678 {
679 	union asr_ccb *new_ccb;
680 
681 	if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
682 	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
683 		new_ccb->ccb_h.pinfo.priority = 1;
684 		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
685 		new_ccb->ccb_h.spriv_ptr0 = sc;
686 	}
687 	return (new_ccb);
688 } /* asr_alloc_ccb */
689 
/* Release a ccb obtained from asr_alloc_ccb(). */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	kfree(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
695 
696 /*
697  *	Print inquiry data `carefully'
698  */
699 static void
700 ASR_prstring(u_int8_t *s, int len)
701 {
702 	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
703 		kprintf ("%c", *(s++));
704 	}
705 } /* ASR_prstring */
706 
707 /*
708  *	Send a message synchronously and without Interrupt to a ccb.
709  */
/*
 *	Post `Message' with interrupts masked and busy-poll asr_intr() until
 *	the command leaves CAM_REQ_INPROG.  Returns the ccb's final status.
 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	crit_enter();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		/* No inbound frame available; ask the caller to requeue. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	crit_exit();

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
745 
746 /*
747  *	Send a message synchronously to an Asr_softc_t.
748  */
749 static int
750 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
751 {
752 	union asr_ccb	*ccb;
753 	int		status;
754 
755 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
756 		return (CAM_REQUEUE_REQ);
757 	}
758 
759 	status = ASR_queue_s (ccb, Message);
760 
761 	asr_free_ccb(ccb);
762 
763 	return (status);
764 } /* ASR_queue_c */
765 
766 /*
767  *	Add the specified ccb to the active queue
768  */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* six minutes */
		}
		/* Arm the per-ccb timeout (milliseconds -> ticks). */
		callout_reset(&ccb->ccb_h.timeout_ch,
		    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */
788 
789 /*
790  *	Remove the specified ccb from the active queue.
791  */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	/* Disarm the timeout before unlinking the ccb. */
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */
800 
801 /*
802  *	Fail all the active commands, so they get re-issued by the operating
803  *	system.
804  */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;

	crit_enter();
	/*
	 *	We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		/* Mark the command for requeue by the upper layers. */
		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal (pathless) command; wake its waiter. */
			wakeup (ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */
838 
839 /*
840  *	The following command causes the HBA to reset the specific bus
841  */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Find the bus-port LCT entry for `bus' and direct the reset at it. */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
867 
868 static __inline int
869 ASR_getBlinkLedCode(Asr_softc_t *sc)
870 {
871 	U8	blink;
872 
873 	if (sc == NULL)
874 		return (0);
875 
876 	blink = bus_space_read_1(sc->ha_frame_btag,
877 				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
878 	if (blink != 0xBC)
879 		return (0);
880 
881 	blink = bus_space_read_1(sc->ha_frame_btag,
882 				 sc->ha_frame_bhandle, sc->ha_blinkLED);
883 	return (blink);
884 } /* ASR_getBlinkCode */
885 
886 /*
 *	Determine the address of a TID lookup. Must be done at high priority
 *	since the address can be changed by other threads of execution.
 *
 *	Returns a NULL pointer if not indexable (but will attempt to generate
 *	an index if the `new_entry' flag is set to TRUE).
 *
 *	All addressable entries are guaranteed to be zero if never initialized.
894  */
/*
 *	Look up (and optionally create) the cached TID slot for a given
 * bus/target/lun triplet.
 *
 *	Returns a pointer to the tid_t entry inside the per-bus target
 * table and per-target lun table, growing those tables on demand when
 * new_entry is TRUE. Returns NULL when the triplet is out of range, or
 * when the entry does not exist and new_entry is FALSE.
 */
static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;
	lun2tid_t	*target_ptr;
	unsigned	new_size;

	/*
	 *	Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 *	See if there is an associated bus list.
	 *
	 *	for performance, allocate in size of BUS_CHUNK chunks.
	 *	BUS_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	/* Round the index up so the table holds new_size + 1 entries. */
	new_size = roundup2(target, BUS_CHUNK);
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 *
		 * NOTE(review): kmalloc with M_WAITOK cannot fail, so the
		 * NULL arm below appears unreachable -- confirm before
		 * relying on this path.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		kfree(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 *	We now have the bus list, lets get to the target list.
	 *	Since most systems have only *one* lun, we do not allocate
	 *	in chunks as above, here we allow one, then in chunk sizes.
	 *	TARGET_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = roundup2(lun, TARGET_CHUNK);
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		kfree(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 *	Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1024 
1025 /*
1026  *	Get a pre-existing TID relationship.
1027  *
1028  *	If the TID was never set, return (tid_t)-1.
1029  *
1030  *	should use mutex rather than spl.
1031  */
1032 static __inline tid_t
1033 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1034 {
1035 	tid_t	*tid_ptr;
1036 	tid_t	retval;
1037 
1038 	crit_enter();
1039 	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1040 	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1041 	 || (*tid_ptr == (tid_t)0)) {
1042 		crit_exit();
1043 		return ((tid_t)-1);
1044 	}
1045 	retval = *tid_ptr;
1046 	crit_exit();
1047 	return (retval);
1048 } /* ASR_getTid */
1049 
1050 /*
1051  *	Set a TID relationship.
1052  *
1053  *	If the TID was not set, return (tid_t)-1.
1054  *
1055  *	should use mutex rather than spl.
1056  */
1057 static __inline tid_t
1058 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1059 {
1060 	tid_t	*tid_ptr;
1061 
1062 	if (TID != (tid_t)-1) {
1063 		if (TID == 0) {
1064 			return ((tid_t)-1);
1065 		}
1066 		crit_enter();
1067 		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1068 		 == NULL) {
1069 			crit_exit();
1070 			return ((tid_t)-1);
1071 		}
1072 		*tid_ptr = TID;
1073 		crit_exit();
1074 	}
1075 	return (TID);
1076 } /* ASR_setTid */
1077 
1078 /*-------------------------------------------------------------------------*/
1079 /*		      Function ASR_rescan				   */
1080 /*-------------------------------------------------------------------------*/
1081 /* The Parameters Passed To This Function Are :				   */
1082 /*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1083 /*									   */
1084 /* This Function Will rescan the adapter and resynchronize any data	   */
1085 /*									   */
1086 /* Return : 0 For OK, Error Code Otherwise				   */
1087 /*-------------------------------------------------------------------------*/
1088 
1089 static int
1090 ASR_rescan(Asr_softc_t *sc)
1091 {
1092 	int bus;
1093 	int error;
1094 
1095 	/*
1096 	 * Re-acquire the LCT table and synchronize us to the adapter.
1097 	 */
1098 	if ((error = ASR_acquireLct(sc)) == 0) {
1099 		error = ASR_acquireHrt(sc);
1100 	}
1101 
1102 	if (error != 0) {
1103 		return error;
1104 	}
1105 
1106 	bus = sc->ha_MaxBus;
1107 	/* Reset all existing cached TID lookups */
1108 	do {
1109 		int target, event = 0;
1110 
1111 		/*
1112 		 *	Scan for all targets on this bus to see if they
1113 		 * got affected by the rescan.
1114 		 */
1115 		for (target = 0; target <= sc->ha_MaxId; ++target) {
1116 			int lun;
1117 
1118 			/* Stay away from the controller ID */
1119 			if (target == sc->ha_adapter_target[bus]) {
1120 				continue;
1121 			}
1122 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1123 				PI2O_LCT_ENTRY Device;
1124 				tid_t	       TID = (tid_t)-1;
1125 				tid_t	       LastTID;
1126 
1127 				/*
1128 				 * See if the cached TID changed. Search for
1129 				 * the device in our new LCT.
1130 				 */
1131 				for (Device = sc->ha_LCT->LCTEntry;
1132 				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1133 				   + I2O_LCT_getTableSize(sc->ha_LCT));
1134 				  ++Device) {
1135 					if ((Device->le_type != I2O_UNKNOWN)
1136 					 && (Device->le_bus == bus)
1137 					 && (Device->le_target == target)
1138 					 && (Device->le_lun == lun)
1139 					 && (I2O_LCT_ENTRY_getUserTID(Device)
1140 					  == 0xFFF)) {
1141 						TID = I2O_LCT_ENTRY_getLocalTID(
1142 						  Device);
1143 						break;
1144 					}
1145 				}
1146 				/*
1147 				 * Indicate to the OS that the label needs
1148 				 * to be recalculated, or that the specific
1149 				 * open device is no longer valid (Merde)
1150 				 * because the cached TID changed.
1151 				 */
1152 				LastTID = ASR_getTid (sc, bus, target, lun);
1153 				if (LastTID != TID) {
1154 					struct cam_path * path;
1155 
1156 					if (xpt_create_path(&path,
1157 					  /*periph*/NULL,
1158 					  cam_sim_path(sc->ha_sim[bus]),
1159 					  target, lun) != CAM_REQ_CMP) {
1160 						if (TID == (tid_t)-1) {
1161 							event |= AC_LOST_DEVICE;
1162 						} else {
1163 							event |= AC_INQ_CHANGED
1164 							       | AC_GETDEV_CHANGED;
1165 						}
1166 					} else {
1167 						if (TID == (tid_t)-1) {
1168 							xpt_async(
1169 							  AC_LOST_DEVICE,
1170 							  path, NULL);
1171 						} else if (LastTID == (tid_t)-1) {
1172 							struct ccb_getdev ccb;
1173 
1174 							xpt_setup_ccb(
1175 							  &(ccb.ccb_h),
1176 							  path, /*priority*/5);
1177 							xpt_async(
1178 							  AC_FOUND_DEVICE,
1179 							  path,
1180 							  &ccb);
1181 						} else {
1182 							xpt_async(
1183 							  AC_INQ_CHANGED,
1184 							  path, NULL);
1185 							xpt_async(
1186 							  AC_GETDEV_CHANGED,
1187 							  path, NULL);
1188 						}
1189 					}
1190 				}
1191 				/*
1192 				 *	We have the option of clearing the
1193 				 * cached TID for it to be rescanned, or to
1194 				 * set it now even if the device never got
1195 				 * accessed. We chose the later since we
1196 				 * currently do not use the condition that
1197 				 * the TID ever got cached.
1198 				 */
1199 				ASR_setTid (sc, bus, target, lun, TID);
1200 			}
1201 		}
1202 		/*
1203 		 *	The xpt layer can not handle multiple events at the
1204 		 * same call.
1205 		 */
1206 		if (event & AC_LOST_DEVICE) {
1207 			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1208 		}
1209 		if (event & AC_INQ_CHANGED) {
1210 			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1211 		}
1212 		if (event & AC_GETDEV_CHANGED) {
1213 			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1214 		}
1215 	} while (--bus >= 0);
1216 	return (error);
1217 } /* ASR_rescan */
1218 
1219 /*-------------------------------------------------------------------------*/
1220 /*		      Function ASR_reset				   */
1221 /*-------------------------------------------------------------------------*/
1222 /* The Parameters Passed To This Function Are :				   */
1223 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1224 /*									   */
1225 /* This Function Will reset the adapter and resynchronize any data	   */
1226 /*									   */
1227 /* Return : 0 For OK, Error Code Otherwise				   */
1228 /*-------------------------------------------------------------------------*/
1229 
1230 static int
1231 ASR_reset(Asr_softc_t *sc)
1232 {
1233 	int retVal;
1234 
1235 	crit_enter();
1236 	if ((sc->ha_in_reset == HA_IN_RESET)
1237 	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1238 		crit_exit();
1239 		return (EBUSY);
1240 	}
1241 	/*
1242 	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1243 	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1244 	 */
1245 	++(sc->ha_in_reset);
1246 	if (ASR_resetIOP(sc) == 0) {
1247 		debug_asr_printf ("ASR_resetIOP failed\n");
1248 		/*
1249 		 *	We really need to take this card off-line, easier said
1250 		 * than make sense. Better to keep retrying for now since if a
1251 		 * UART cable is connected the blinkLEDs the adapter is now in
1252 		 * a hard state requiring action from the monitor commands to
1253 		 * the HBA to continue. For debugging waiting forever is a
1254 		 * good thing. In a production system, however, one may wish
1255 		 * to instead take the card off-line ...
1256 		 */
1257 		/* Wait Forever */
1258 		while (ASR_resetIOP(sc) == 0);
1259 	}
1260 	retVal = ASR_init (sc);
1261 	crit_exit();
1262 	if (retVal != 0) {
1263 		debug_asr_printf ("ASR_init failed\n");
1264 		sc->ha_in_reset = HA_OFF_LINE;
1265 		return (ENXIO);
1266 	}
1267 	if (ASR_rescan (sc) != 0) {
1268 		debug_asr_printf ("ASR_rescan failed\n");
1269 	}
1270 	ASR_failActiveCommands (sc);
1271 	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1272 		kprintf ("asr%d: Brining adapter back on-line\n",
1273 		  sc->ha_path[0]
1274 		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1275 		    : 0);
1276 	}
1277 	sc->ha_in_reset = HA_OPERATIONAL;
1278 	return (0);
1279 } /* ASR_reset */
1280 
1281 /*
1282  *	Device timeout handler.
1283  */
/*
 *	Device timeout handler (callout); `arg' is the timed-out CCB.
 *
 *	Escalation policy: a blink LED code means the adapter locked up,
 * so reset it outright. A second timeout of the same CCB (status already
 * CAM_CMD_TIMEOUT) also forces an adapter reset. Otherwise try a SCSI
 * bus reset first, mark the CCB and re-arm the timeout so that a repeat
 * escalates to the full adapter reset.
 */
static void
asr_timeout(void *arg)
{
	union asr_ccb	*ccb = (union asr_ccb *)arg;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 *	Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		return;
	}
	/*
	 *	Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	crit_enter();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Adapter init failed; retry the reset later. */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		crit_exit();
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      asr_timeout, ccb);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	crit_exit();
} /* asr_timeout */
1334 
1335 /*
1336  * send a message asynchronously
1337  */
1338 static int
1339 ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1340 {
1341 	U32		MessageOffset;
1342 	union asr_ccb	*ccb;
1343 
1344 	debug_asr_printf("Host Command Dump:\n");
1345 	debug_asr_dump_message(Message);
1346 
1347 	ccb = (union asr_ccb *)(long)
1348 	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1349 
1350 	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1351 		asr_set_frame(sc, Message, MessageOffset,
1352 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1353 		if (ccb) {
1354 			ASR_ccbAdd (sc, ccb);
1355 		}
1356 		/* Post the command */
1357 		asr_set_ToFIFO(sc, MessageOffset);
1358 	} else {
1359 		if (ASR_getBlinkLedCode(sc)) {
1360 			/*
1361 			 *	Unlikely we can do anything if we can't grab a
1362 			 * message frame :-(, but lets give it a try.
1363 			 */
1364 			(void)ASR_reset(sc);
1365 		}
1366 	}
1367 	return (MessageOffset);
1368 } /* ASR_queue */
1369 
1370 
/*
 *	Simple Scatter Gather elements.
 *
 *	SG(SGL, Index, Flags, Buffer, Size) fills simple SG element `Index'
 * of list `SGL': the byte count `Size', the flags `Flags' ORed with the
 * mandatory I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT, and the physical
 * address of `Buffer' (0 when Buffer is NULL).
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
1382 
1383 /*
1384  *	Retrieve Parameter Group.
1385  */
/*
 *	Retrieve parameter group `Group' from the device addressed by
 * `TID' via a synchronous I2O UTIL_PARAMS_GET message. The reply lands
 * in the caller-supplied `Buffer' (BufferSize bytes, zeroed here).
 *
 *	Returns a pointer to the Info payload inside Buffer on success,
 * or NULL when the command failed or returned no results.
 */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	}				Message;
	struct Operations		*Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				*Buffer_Ptr;

	/* The operations list lives just past the message + 2 SG elements. */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* One operation: FIELD_GET of every field (0xFFFF) in `Group'. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */
1449 
1450 /*
1451  *	Acquire the LCT information.
1452  */
/*
 *	Two-phase LCT fetch: a first EXEC_LCT_NOTIFY with a stack-sized
 * buffer learns the real table size, then the table is re-fetched into
 * a freshly allocated sc->ha_LCT through a page-by-page SG list. Each
 * entry is then classified (le_type) and annotated with its bus/target/
 * lun via parameter-group queries.
 *
 *	Returns 0 on success, ENOMEM/EINVAL/ENODEV on failure.
 */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table, *TableP = &Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, TableP,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		kfree(sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		kfree(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list:
	 * walk the buffer, emitting one simple SG element per physically
	 * contiguous run, growing the message frame as elements are added.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: mark end of the SG list. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				kfree(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				kfree(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			/* Carry the partially built message into the
			 * larger allocation; `sg' keeps its offset. */
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			kfree(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		kfree(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 *	Classify every LCT entry. Port classes query controller
	 * info and then `continue' via the default label; the peripheral
	 * classes break out and are annotated from DPT device info below.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			} Buffer;
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1696 
1697 /*
1698  * Initialize a message frame.
1699  * We assume that the CDB has already been set up, so all we do here is
1700  * generate the Scatter Gather list.
1701  */
/*
 *	Build a PRIVATE_SCSI_SCB_EXECUTE message for `ccb' in `Message':
 * resolve (or look up from the LCT) the TID for the addressed device,
 * fill in the standard frame fields, copy the CDB, and construct the
 * scatter/gather list for the data transfer plus the trailing
 * request-sense buffer.
 *
 *	Returns Message on success, or NULL when no TID could be
 * resolved for the bus/target/lun.
 */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
{
	PI2O_MESSAGE_FRAME	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t		size, len;
	caddr_t			v;
	U32			MessageSize;
	int			next, span, base, rw;
	int			target = ccb->ccb_h.target_id;
	int			lun = ccb->ccb_h.target_lun;
	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t			TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	      sizeof(I2O_SG_ELEMENT)));

	/* Cache miss: search the LCT for a matching device and cache it. */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
					   Device->le_target, Device->le_lun,
					   TID);
				break;
			}
		}
	}
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 * One simple SG element per physically contiguous run, capped
	 * at SG_SIZE elements.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
1851 
/*
 *	Initialize the outbound (reply) FIFO: issue EXEC_OUTBOUND_INIT,
 * poll for its completion, allocate the reply message frames on the
 * first call, and post them all to the adapter's FromFIFO.
 *
 *	Returns the final init status reported by the adapter, or 0 when
 * the command could not be initiated.
 */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	}				Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	U32				*volatile Reply_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *  Reset the Reply Status. The adapter writes its progress into
	 * this U32 appended right after the message.
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
			    size; --size) {
				asr_set_FromFIFO(sc, addr);
				addr +=
				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
			}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
1931 
1932 /*
1933  *	Set the system table
1934  */
static int
ASR_setSysTab(Asr_softc_t *sc)
{
	/*
	 * Build and send an ExecSysTabSet message describing every asr
	 * controller registered in Asr_softc_list.  The SG list carries:
	 * the table header, one ha_SystemTable entry per controller, and
	 * two trailing zero-length elements (NOTE(review): presumably the
	 * optional memory/IO space descriptors of ExecSysTabSet — confirm
	 * against the I2O spec).  Returns the ASR_queue_c status, or
	 * ENOMEM on allocation failure.
	 */
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER	      SystemTable;
	Asr_softc_t		    * ha;
	PI2O_SGE_SIMPLE_ELEMENT	      sg;
	int			      retVal;

	/* NOTE(review): kmalloc with M_WAITOK does not return NULL on
	 * DragonFly; these NULL checks are defensive only. */
	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
		return (ENOMEM);
	}
	/* Count the registered controllers. */
	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* Frame size: base message minus the placeholder SG element, plus
	 * header + per-HA + two terminating simple elements (3 extra). */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == NULL) {
		kfree(SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	/* First element: the system table header. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One element per controller; END_OF_BUFFER on the last. */
	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two zero-length terminating elements. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	kfree(Message_Ptr, M_TEMP);
	kfree(SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
1995 
/*
 *	Fetch the Hardware Resource Table (HRT) from the IOP and use its
 * AdapterID fields to assign a bus number to each matching LCT entry,
 * updating sc->ha_MaxBus along the way.  Returns 0 on success or ENODEV
 * if the HRT request fails.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];	/* on-stack reply buffer */
	}				Hrt, *HrtP = &Hrt;
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  HrtP, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to what our on-stack buffer can actually hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Match each LCT entry by TID (low 12 bits of AdapterID)
		 * and record the bus number (AdapterID >> 16). */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2053 
2054 /*
2055  *	Enable the adapter.
2056  */
2057 static int
2058 ASR_enableSys(Asr_softc_t *sc)
2059 {
2060 	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2061 	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2062 
2063 	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2064 	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2065 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2066 	  I2O_EXEC_SYS_ENABLE);
2067 	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2068 } /* ASR_enableSys */
2069 
2070 /*
2071  *	Perform the stages necessary to initialize the adapter
2072  */
2073 static int
2074 ASR_init(Asr_softc_t *sc)
2075 {
2076 	return ((ASR_initOutBound(sc) == 0)
2077 	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2078 	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2079 } /* ASR_init */
2080 
2081 /*
2082  *	Send a Synchronize Cache command to the target device.
2083  */
static void
ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
{
	/*
	 * Send a SYNCHRONIZE CACHE (6-byte CDB) to the addressed device,
	 * used before shutdown/reset to flush the device's write cache.
	 */
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 *
	 * NOTE(review): the LIST_FIRST(...) != NULL test proceeds only
	 * when the ha_ccb list is non-empty, which appears to contradict
	 * the comment above unless ha_ccb tracks something other than
	 * outstanding commands — confirm the list's semantics.
	 */
	if ((sc != NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;

		Message_Ptr = &Message;
		/* Zero only the fixed part plus one simple SG element. */
		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);	/* SCSI-2 LUN in CDB byte 1 */

		/*
		 * NOTE(review): SCBFlags is set a second time here, now
		 * including XFER_FROM_DEVICE; this appears to supersede
		 * the earlier setSCBFlags call — confirm the setter macro
		 * assigns rather than ORs.
		 */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		/* Fire and forget; completion status is ignored. */
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2148 
2149 static void
2150 ASR_synchronize(Asr_softc_t *sc)
2151 {
2152 	int bus, target, lun;
2153 
2154 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2155 		for (target = 0; target <= sc->ha_MaxId; ++target) {
2156 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2157 				ASR_sync(sc,bus,target,lun);
2158 			}
2159 		}
2160 	}
2161 }
2162 
2163 /*
2164  *	Reset the HBA, targets and BUS.
2165  *		Currently this resets *all* the SCSI busses.
2166  */
2167 static __inline void
2168 asr_hbareset(Asr_softc_t *sc)
2169 {
2170 	ASR_synchronize(sc);
2171 	(void)ASR_reset(sc);
2172 } /* asr_hbareset */
2173 
2174 /*
2175  *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2176  * limit and a reduction in error checking (in the pre 4.0 case).
2177  */
static int
asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
{
	/*
	 * Locate and map the controller's memory BAR(s):
	 *   p = saved BAR contents, l = decoded BAR size, s = device/vendor
	 * (later subvendor) ID used for quirk detection.
	 * Returns 1 on success, 0 on failure.
	 */
	int		rid;
	u_int32_t	p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 * (bit 0 clear means memory space).
	 */
	for (rid = 0; rid < 4; rid++) {
		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
		if ((p & 1) == 0) {
			break;
		}
	}
	/*
	 *	Give up?  Fall back to BAR0 if no memory BAR was found.
	 */
	if (rid >= 4) {
		rid = 0;
	}
	rid = PCIR_BAR(rid);
	/*
	 * Standard PCI BAR sizing dance: save the BAR, write all-ones,
	 * read back the size mask (low 4 bits are type flags), negate to
	 * get the decoded size, then restore the original value.
	 */
	p = pci_read_config(dev, rid, sizeof(p));
	pci_write_config(dev, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
	pci_write_config(dev, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets its size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	p &= ~15;	/* strip BAR type flags to get the base address */
	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == NULL) {
		return (0);
	}
	sc->ha_Base = rman_get_start(sc->ha_mem_res);
	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);

	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		/* Messaging registers live in the next BAR; size and map
		 * it the same way. */
		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
			return (0);
		}
		p = pci_read_config(dev, rid, sizeof(p));
		pci_write_config(dev, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
		pci_write_config(dev, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == NULL) {
			return (0);
		}
		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
	} else {
		/* Single-BAR parts: message frames share the I2O mapping. */
		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
		sc->ha_frame_btag = sc->ha_i2o_btag;
	}
	return (1);
} /* asr_pci_map_mem */
2259 
2260 /*
2261  *	A simplified copy of the real pci_map_int with additional
2262  * registration requirements.
2263  */
2264 static int
2265 asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2266 {
2267 	int rid = 0;
2268 
2269 	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2270 	  RF_ACTIVE | RF_SHAREABLE);
2271 	if (sc->ha_irq_res == NULL) {
2272 		return (0);
2273 	}
2274 	if (bus_setup_intr(dev, sc->ha_irq_res, 0,
2275 	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
2276 		return (0);
2277 	}
2278 	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2279 	return (1);
2280 } /* asr_pci_map_int */
2281 
2282 static void
2283 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2284 {
2285 	Asr_softc_t *sc;
2286 
2287 	if (error)
2288 		return;
2289 
2290 	sc = (Asr_softc_t *)arg;
2291 
2292 	/* XXX
2293 	 * The status word can be at a 64-bit address, but the existing
2294 	 * accessor macros simply cannot manipulate 64-bit addresses.
2295 	 */
2296 	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2297 	    offsetof(struct Asr_status_mem, status);
2298 	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2299 	    offsetof(struct Asr_status_mem, rstatus);
2300 }
2301 
static int
asr_alloc_dma(Asr_softc_t *sc)
{
	/*
	 * Create the parent DMA tag, a child tag sized for the shared
	 * status-word memory, allocate that memory and load it so the
	 * asr_status_cb callback records the physical addresses.
	 * Returns 0 on success or ENOMEM (with all partial allocations
	 * unwound) on failure.
	 */
	device_t dev;

	dev = sc->ha_dev;

	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	/* Status memory must be one contiguous segment. */
	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sizeof(sc->ha_statusmem),/* maxsize */
			       1,			/* nsegments */
			       sizeof(sc->ha_statusmem),/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_statusmem_dmat)) {
		device_printf(dev, "Cannot allocate status DMA tag\n");
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
		device_printf(dev, "Cannot allocate status memory\n");
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}
	/* The callback stores the physical addresses into the softc. */
	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);

	return (0);
}
2350 
2351 static void
2352 asr_release_dma(Asr_softc_t *sc)
2353 {
2354 
2355 	if (sc->ha_rstatus_phys != 0)
2356 		bus_dmamap_unload(sc->ha_statusmem_dmat,
2357 		    sc->ha_statusmem_dmamap);
2358 	if (sc->ha_statusmem != NULL)
2359 		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
2360 		    sc->ha_statusmem_dmamap);
2361 	if (sc->ha_statusmem_dmat != NULL)
2362 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2363 	if (sc->ha_parent_dmat != NULL)
2364 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2365 }
2366 
2367 /*
2368  *	Attach the devices, and virtual devices to the driver list.
2369  */
2370 static int
2371 asr_attach(device_t dev)
2372 {
2373 	PI2O_EXEC_STATUS_GET_REPLY status;
2374 	PI2O_LCT_ENTRY		 Device;
2375 	Asr_softc_t		 *sc, **ha;
2376 	struct scsi_inquiry_data *iq;
2377 	int			 bus, size, unit;
2378 	int			 error;
2379 
2380 	sc = device_get_softc(dev);
2381 	unit = device_get_unit(dev);
2382 	sc->ha_dev = dev;
2383 
2384 	if (Asr_softc_list == NULL) {
2385 		/*
2386 		 *	Fixup the OS revision as saved in the dptsig for the
2387 		 *	engine (dptioctl.h) to pick up.
2388 		 */
2389 		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2390 	}
2391 	/*
2392 	 *	Initialize the software structure
2393 	 */
2394 	LIST_INIT(&(sc->ha_ccb));
2395 	/* Link us into the HA list */
2396 	for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next));
2397 		*(ha) = sc;
2398 
2399 	/*
2400 	 *	This is the real McCoy!
2401 	 */
2402 	if (!asr_pci_map_mem(dev, sc)) {
2403 		device_printf(dev, "could not map memory\n");
2404 		return(ENXIO);
2405 	}
2406 	/* Enable if not formerly enabled */
2407 	pci_write_config(dev, PCIR_COMMAND,
2408 	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2409 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2410 
2411 	sc->ha_pciBusNum = pci_get_bus(dev);
2412 	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2413 
2414 	if ((error = asr_alloc_dma(sc)) != 0)
2415 		return (error);
2416 
2417 	/* Check if the device is there? */
2418 	if (ASR_resetIOP(sc) == 0) {
2419 		device_printf(dev, "Cannot reset adapter\n");
2420 		asr_release_dma(sc);
2421 		return (EIO);
2422 	}
2423 	status = &sc->ha_statusmem->status;
2424 	if (ASR_getStatus(sc) == NULL) {
2425 		device_printf(dev, "could not initialize hardware\n");
2426 		asr_release_dma(sc);
2427 		return(ENODEV);
2428 	}
2429 	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2430 	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2431 	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2432 	sc->ha_SystemTable.IopState = status->IopState;
2433 	sc->ha_SystemTable.MessengerType = status->MessengerType;
2434 	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2435 	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2436 	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2437 
2438 	if (!asr_pci_map_int(dev, (void *)sc)) {
2439 		device_printf(dev, "could not map interrupt\n");
2440 		asr_release_dma(sc);
2441 		return(ENXIO);
2442 	}
2443 
2444 	/* Adjust the maximim inbound count */
2445 	if (((sc->ha_QueueSize =
2446 	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2447 	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2448 		sc->ha_QueueSize = MAX_INBOUND;
2449 	}
2450 
2451 	/* Adjust the maximum outbound count */
2452 	if (((sc->ha_Msgs_Count =
2453 	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2454 	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2455 		sc->ha_Msgs_Count = MAX_OUTBOUND;
2456 	}
2457 	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2458 		sc->ha_Msgs_Count = sc->ha_QueueSize;
2459 	}
2460 
2461 	/* Adjust the maximum SG size to adapter */
2462 	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2463 	    2)) > MAX_INBOUND_SIZE) {
2464 		size = MAX_INBOUND_SIZE;
2465 	}
2466 	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2467 	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2468 
2469 	/*
2470 	 *	Only do a bus/HBA reset on the first time through. On this
2471 	 * first time through, we do not send a flush to the devices.
2472 	 */
2473 	if (ASR_init(sc) == 0) {
2474 		struct BufferInfo {
2475 			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2476 			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2477 			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2478 		} Buffer;
2479 		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2480 #define FW_DEBUG_BLED_OFFSET 8
2481 
2482 		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2483 		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2484 		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2485 			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2486 			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2487 		}
2488 		if (ASR_acquireLct(sc) == 0) {
2489 			(void)ASR_acquireHrt(sc);
2490 		}
2491 	} else {
2492 		device_printf(dev, "failed to initialize\n");
2493 		asr_release_dma(sc);
2494 		return(ENXIO);
2495 	}
2496 	/*
2497 	 *	Add in additional probe responses for more channels. We
2498 	 * are reusing the variable `target' for a channel loop counter.
2499 	 * Done here because of we need both the acquireLct and
2500 	 * acquireHrt data.
2501 	 */
2502 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2503 	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2504 		if (Device->le_type == I2O_UNKNOWN) {
2505 			continue;
2506 		}
2507 		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2508 			if (Device->le_target > sc->ha_MaxId) {
2509 				sc->ha_MaxId = Device->le_target;
2510 			}
2511 			if (Device->le_lun > sc->ha_MaxLun) {
2512 				sc->ha_MaxLun = Device->le_lun;
2513 			}
2514 		}
2515 		if (((Device->le_type & I2O_PORT) != 0)
2516 		 && (Device->le_bus <= MAX_CHANNEL)) {
2517 			/* Do not increase MaxId for efficiency */
2518 			sc->ha_adapter_target[Device->le_bus] =
2519 			    Device->le_target;
2520 		}
2521 	}
2522 
2523 	/*
2524 	 *	Print the HBA model number as inquired from the card.
2525 	 */
2526 
2527 	device_printf(dev, " ");
2528 
2529 	if ((iq = (struct scsi_inquiry_data *)kmalloc(
2530 	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2531 	    NULL) {
2532 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2533 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2534 		int					posted = 0;
2535 
2536 		Message_Ptr = &Message;
2537 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2538 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2539 
2540 		I2O_MESSAGE_FRAME_setVersionOffset(
2541 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2542 		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2543 		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2544 		I2O_MESSAGE_FRAME_setMessageSize(
2545 		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2546 		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2547 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2548 		    sizeof(U32));
2549 		I2O_MESSAGE_FRAME_setInitiatorAddress(
2550 		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2551 		I2O_MESSAGE_FRAME_setFunction(
2552 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2553 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2554 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2555 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2556 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2557 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2558 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2559 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2560 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2561 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2562 		    DPT_ORGANIZATION_ID);
2563 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2564 		Message_Ptr->CDB[0] = INQUIRY;
2565 		Message_Ptr->CDB[4] =
2566 		    (unsigned char)sizeof(struct scsi_inquiry_data);
2567 		if (Message_Ptr->CDB[4] == 0) {
2568 			Message_Ptr->CDB[4] = 255;
2569 		}
2570 
2571 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2572 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2573 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2574 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2575 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2576 
2577 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2578 		  Message_Ptr, sizeof(struct scsi_inquiry_data));
2579 		SG(&(Message_Ptr->SGL), 0,
2580 		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2581 		  iq, sizeof(struct scsi_inquiry_data));
2582 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2583 
2584 		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2585 			kprintf (" ");
2586 			ASR_prstring (iq->vendor, 8);
2587 			++posted;
2588 		}
2589 		if (iq->product[0] && (iq->product[0] != ' ')) {
2590 			kprintf (" ");
2591 			ASR_prstring (iq->product, 16);
2592 			++posted;
2593 		}
2594 		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2595 			kprintf (" FW Rev. ");
2596 			ASR_prstring (iq->revision, 4);
2597 			++posted;
2598 		}
2599 		kfree(iq, M_TEMP);
2600 		if (posted) {
2601 			kprintf (",");
2602 		}
2603 	}
2604 	kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2605 	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2606 
2607 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2608 		struct cam_devq	  * devq;
2609 		int		    QueueSize = sc->ha_QueueSize;
2610 
2611 		if (QueueSize > MAX_INBOUND) {
2612 			QueueSize = MAX_INBOUND;
2613 		}
2614 
2615 		/*
2616 		 *	Create the device queue for our SIM(s).
2617 		 */
2618 		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2619 			continue;
2620 		}
2621 
2622 		/*
2623 		 *	Construct our first channel SIM entry
2624 		 */
2625 		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2626 						unit, &sim_mplock,
2627 						1, QueueSize, devq);
2628 		if (sc->ha_sim[bus] == NULL) {
2629 			continue;
2630 		}
2631 
2632 		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
2633 			cam_sim_free(sc->ha_sim[bus]);
2634 			sc->ha_sim[bus] = NULL;
2635 			continue;
2636 		}
2637 
2638 		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2639 		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2640 		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2641 			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2642 			cam_sim_free(sc->ha_sim[bus]);
2643 			sc->ha_sim[bus] = NULL;
2644 			continue;
2645 		}
2646 	}
2647 
2648 	/*
2649 	 *	Generate the device node information
2650 	 */
2651 	sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
2652 			       "asr%d", unit);
2653 	if (sc->ha_devt != NULL)
2654 		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2655 	sc->ha_devt->si_drv1 = sc;
2656 	return(0);
2657 } /* asr_attach */
2658 
static void
asr_poll(struct cam_sim *sim)
{
	/* Polled-mode entry: service completions via the interrupt path. */
	(void)asr_intr(cam_sim_softc(sim));
} /* asr_poll */
2664 
/*
 *	CAM action entry point: dispatch the CCB by function code.  Every
 * path completes the CCB with xpt_done() before returning.
 */
static void
asr_action(struct cam_sim *sim, union ccb  *ccb)
{
	struct Asr_softc *sc;

	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
			 ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash our softc in the CCB for the completion path. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		} Message;
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			kprintf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
				     cam_sim_bus(sim), ccb->ccb_h.target_id,
				     ccb->ccb_h.target_lun);
		debug_asr_dump_ccb(ccb);

		/* Translate the CCB into an I2O message and queue it; the
		 * CCB completes later from the interrupt path. */
		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf(" Q\n");
			break;
		}
		/*
		 *	We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts = &(ccb->cts);
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		/* Only user settings are reported; fixed wide/fast values. */
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			spi->sync_period = 6; /* 40MHz */
			spi->sync_offset = 15;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				   | CTS_SPI_VALID_SYNC_OFFSET
				   | CTS_SPI_VALID_BUS_WIDTH
				   | CTS_SPI_VALID_DISC;
			scsi->valid = CTS_SCSI_VALID_TQ;

			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		/* Standard BIOS-compatible geometry tiers by volume size. */
		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
                cpi->transport = XPORT_SPI;
                cpi->transport_version = 2;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
2872 
2873 /*
2874  * Handle processing of current CCB as pointed to by the Status.
2875  */
2876 static int
2877 asr_intr(Asr_softc_t *sc)
2878 {
2879 	int processed;
2880 
2881 	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2882 	    processed = 1) {
2883 		union asr_ccb			   *ccb;
2884 		u_int				    dsc;
2885 		U32				    ReplyOffset;
2886 		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2887 
2888 		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2889 		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
2890 			break;
2891 		}
2892 		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2893 		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2894 		/*
2895 		 * We do not need any (optional byteswapping) method access to
2896 		 * the Initiator context field.
2897 		 */
2898 		ccb = (union asr_ccb *)(long)
2899 		  I2O_MESSAGE_FRAME_getInitiatorContext64(
2900 		    &(Reply->StdReplyFrame.StdMessageFrame));
2901 		if (I2O_MESSAGE_FRAME_getMsgFlags(
2902 		  &(Reply->StdReplyFrame.StdMessageFrame))
2903 		  & I2O_MESSAGE_FLAGS_FAIL) {
2904 			I2O_UTIL_NOP_MESSAGE	Message;
2905 			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
2906 			U32			MessageOffset;
2907 
2908 			MessageOffset = (u_long)
2909 			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2910 			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2911 			/*
2912 			 *  Get the Original Message Frame's address, and get
2913 			 * it's Transaction Context into our space. (Currently
2914 			 * unused at original authorship, but better to be
2915 			 * safe than sorry). Straight copy means that we
2916 			 * need not concern ourselves with the (optional
2917 			 * byteswapping) method access.
2918 			 */
2919 			Reply->StdReplyFrame.TransactionContext =
2920 			    bus_space_read_4(sc->ha_frame_btag,
2921 			    sc->ha_frame_bhandle, MessageOffset +
2922 			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2923 			    TransactionContext));
2924 			/*
2925 			 *	For 64 bit machines, we need to reconstruct the
2926 			 * 64 bit context.
2927 			 */
2928 			ccb = (union asr_ccb *)(long)
2929 			  I2O_MESSAGE_FRAME_getInitiatorContext64(
2930 			    &(Reply->StdReplyFrame.StdMessageFrame));
2931 			/*
2932 			 * Unique error code for command failure.
2933 			 */
2934 			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2935 			  &(Reply->StdReplyFrame), (u_int16_t)-2);
2936 			/*
2937 			 *  Modify the message frame to contain a NOP and
2938 			 * re-issue it to the controller.
2939 			 */
2940 			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2941 			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2942 #if (I2O_UTIL_NOP != 0)
2943 				I2O_MESSAGE_FRAME_setFunction (
2944 				  &(Message_Ptr->StdMessageFrame),
2945 				  I2O_UTIL_NOP);
2946 #endif
2947 			/*
2948 			 *  Copy the packet out to the Original Message
2949 			 */
2950 			asr_set_frame(sc, Message_Ptr, MessageOffset,
2951 				      sizeof(I2O_UTIL_NOP_MESSAGE));
2952 			/*
2953 			 *  Issue the NOP
2954 			 */
2955 			asr_set_ToFIFO(sc, MessageOffset);
2956 		}
2957 
2958 		/*
2959 		 *	Asynchronous command with no return requirements,
2960 		 * and a generic handler for immunity against odd error
2961 		 * returns from the adapter.
2962 		 */
2963 		if (ccb == NULL) {
2964 			/*
2965 			 * Return Reply so that it can be used for the
2966 			 * next command
2967 			 */
2968 			asr_set_FromFIFO(sc, ReplyOffset);
2969 			continue;
2970 		}
2971 
2972 		/* Welease Wadjah! (and stop timeouts) */
2973 		ASR_ccbRemove (sc, ccb);
2974 
2975 		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
2976 		    &(Reply->StdReplyFrame));
2977 		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
2978 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2979 		switch (dsc) {
2980 
2981 		case I2O_SCSI_DSC_SUCCESS:
2982 			ccb->ccb_h.status |= CAM_REQ_CMP;
2983 			break;
2984 
2985 		case I2O_SCSI_DSC_CHECK_CONDITION:
2986 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
2987 			    CAM_AUTOSNS_VALID;
2988 			break;
2989 
2990 		case I2O_SCSI_DSC_BUSY:
2991 			/* FALLTHRU */
2992 		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
2993 			/* FALLTHRU */
2994 		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
2995 			/* FALLTHRU */
2996 		case I2O_SCSI_HBA_DSC_BUS_BUSY:
2997 			ccb->ccb_h.status |= CAM_SCSI_BUSY;
2998 			break;
2999 
3000 		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3001 			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3002 			break;
3003 
3004 		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3005 			/* FALLTHRU */
3006 		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3007 			/* FALLTHRU */
3008 		case I2O_SCSI_HBA_DSC_LUN_INVALID:
3009 			/* FALLTHRU */
3010 		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3011 			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3012 			break;
3013 
3014 		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3015 			/* FALLTHRU */
3016 		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3017 			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3018 			break;
3019 
3020 		default:
3021 			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3022 			break;
3023 		}
3024 		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3025 			ccb->csio.resid -=
3026 			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3027 			    Reply);
3028 		}
3029 
3030 		/* Sense data in reply packet */
3031 		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3032 			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3033 
3034 			if (size) {
3035 				if (size > sizeof(ccb->csio.sense_data)) {
3036 					size = sizeof(ccb->csio.sense_data);
3037 				}
3038 				if (size > I2O_SCSI_SENSE_DATA_SZ) {
3039 					size = I2O_SCSI_SENSE_DATA_SZ;
3040 				}
3041 				if ((ccb->csio.sense_len)
3042 				 && (size > ccb->csio.sense_len)) {
3043 					size = ccb->csio.sense_len;
3044 				}
3045 				if (size < ccb->csio.sense_len) {
3046 					ccb->csio.sense_resid =
3047 					    ccb->csio.sense_len - size;
3048 				} else {
3049 					ccb->csio.sense_resid = 0;
3050 				}
3051 				bzero(&(ccb->csio.sense_data),
3052 				    sizeof(ccb->csio.sense_data));
3053 				bcopy(Reply->SenseData,
3054 				      &(ccb->csio.sense_data), size);
3055 			}
3056 		}
3057 
3058 		/*
3059 		 * Return Reply so that it can be used for the next command
3060 		 * since we have no more need for it now
3061 		 */
3062 		asr_set_FromFIFO(sc, ReplyOffset);
3063 
3064 		if (ccb->ccb_h.path) {
3065 			xpt_done ((union ccb *)ccb);
3066 		} else {
3067 			wakeup (ccb);
3068 		}
3069 	}
3070 	return (processed);
3071 } /* asr_intr */
3072 
#undef QueueSize	/* Grrrr */
#undef SG_Size		/* Grrrr */

/*
 *	Meant to be included at the bottom of asr.c !!!
 */

/*
 *	Included here as hard coded. Done because other necessary include
 *	files utilize C++ comment structures which make them a nuisance to
 *	include here just to pick up these three typedefs.
 */
typedef U32   DPT_TAG_T;
typedef U32   DPT_MSG_T;
typedef U32   DPT_RTN_T;

#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
#include	"dev/raid/asr/osd_unix.h"

/* Map a control character device to its unit number. */
#define	asr_unit(dev)	  minor(dev)

/* Exclusive-open interlock for the control device (asr_open/asr_close). */
static u_int8_t ASR_ctlr_held;
3095 
3096 static int
3097 asr_open(struct dev_open_args *ap)
3098 {
3099 	cdev_t dev = ap->a_head.a_dev;
3100 	int		 error;
3101 
3102 	if (dev->si_drv1 == NULL) {
3103 		return (ENODEV);
3104 	}
3105 	crit_enter();
3106 	if (ASR_ctlr_held) {
3107 		error = EBUSY;
3108 	} else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
3109 		++ASR_ctlr_held;
3110 	}
3111 	crit_exit();
3112 	return (error);
3113 } /* asr_open */
3114 
3115 static int
3116 asr_close(struct dev_close_args *ap)
3117 {
3118 
3119 	ASR_ctlr_held = 0;
3120 	return (0);
3121 } /* asr_close */
3122 
3123 
3124 /*-------------------------------------------------------------------------*/
3125 /*		      Function ASR_queue_i				   */
3126 /*-------------------------------------------------------------------------*/
3127 /* The Parameters Passed To This Function Are :				   */
3128 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3129 /*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3130 /*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3131 /*									   */
3132 /* This Function Will Take The User Request Packet And Convert It To An	   */
3133 /* I2O MSG And Send It Off To The Adapter.				   */
3134 /*									   */
3135 /* Return : 0 For OK, Error Code Otherwise				   */
3136 /*-------------------------------------------------------------------------*/
3137 static int
3138 ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
3139 {
3140 	union asr_ccb				   * ccb;
3141 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
3142 	PI2O_MESSAGE_FRAME			     Message_Ptr;
3143 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
3144 	int					     MessageSizeInBytes;
3145 	int					     ReplySizeInBytes;
3146 	int					     error;
3147 	int					     s;
3148 	/* Scatter Gather buffer list */
3149 	struct ioctlSgList_S {
3150 		SLIST_ENTRY(ioctlSgList_S) link;
3151 		caddr_t			   UserSpace;
3152 		I2O_FLAGS_COUNT		   FlagsCount;
3153 		char			   KernelSpace[sizeof(long)];
3154 	}					   * elm;
3155 	/* Generates a `first' entry */
3156 	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3157 
3158 	if (ASR_getBlinkLedCode(sc)) {
3159 		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3160 		  ASR_getBlinkLedCode(sc));
3161 		return (EIO);
3162 	}
3163 	/* Copy in the message into a local allocation */
3164 	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
3165 	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3166 		debug_usr_cmd_printf (
3167 		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3168 		return (ENOMEM);
3169 	}
3170 	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3171 	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3172 		kfree(Message_Ptr, M_TEMP);
3173 		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3174 		return (error);
3175 	}
3176 	/* Acquire information to determine type of packet */
3177 	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3178 	/* The offset of the reply information within the user packet */
3179 	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3180 	  + MessageSizeInBytes);
3181 
3182 	/* Check if the message is a synchronous initialization command */
3183 	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3184 	kfree(Message_Ptr, M_TEMP);
3185 	switch (s) {
3186 
3187 	case I2O_EXEC_IOP_RESET:
3188 	{	U32 status;
3189 
3190 		status = ASR_resetIOP(sc);
3191 		ReplySizeInBytes = sizeof(status);
3192 		debug_usr_cmd_printf ("resetIOP done\n");
3193 		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3194 		  ReplySizeInBytes));
3195 	}
3196 
3197 	case I2O_EXEC_STATUS_GET:
3198 	{	PI2O_EXEC_STATUS_GET_REPLY status;
3199 
3200 		status = &sc->ha_statusmem->status;
3201 		if (ASR_getStatus(sc) == NULL) {
3202 			debug_usr_cmd_printf ("getStatus failed\n");
3203 			return (ENXIO);
3204 		}
3205 		ReplySizeInBytes = sizeof(status);
3206 		debug_usr_cmd_printf ("getStatus done\n");
3207 		return (copyout ((caddr_t)status, (caddr_t)Reply,
3208 		  ReplySizeInBytes));
3209 	}
3210 
3211 	case I2O_EXEC_OUTBOUND_INIT:
3212 	{	U32 status;
3213 
3214 		status = ASR_initOutBound(sc);
3215 		ReplySizeInBytes = sizeof(status);
3216 		debug_usr_cmd_printf ("intOutBound done\n");
3217 		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3218 		  ReplySizeInBytes));
3219 	}
3220 	}
3221 
3222 	/* Determine if the message size is valid */
3223 	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3224 	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3225 		debug_usr_cmd_printf ("Packet size %d incorrect\n",
3226 		  MessageSizeInBytes);
3227 		return (EINVAL);
3228 	}
3229 
3230 	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
3231 	  M_TEMP, M_WAITOK)) == NULL) {
3232 		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3233 		  MessageSizeInBytes);
3234 		return (ENOMEM);
3235 	}
3236 	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3237 	  MessageSizeInBytes)) != 0) {
3238 		kfree(Message_Ptr, M_TEMP);
3239 		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3240 		  MessageSizeInBytes, error);
3241 		return (error);
3242 	}
3243 
3244 	/* Check the size of the reply frame, and start constructing */
3245 
3246 	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3247 	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3248 		kfree(Message_Ptr, M_TEMP);
3249 		debug_usr_cmd_printf (
3250 		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3251 		return (ENOMEM);
3252 	}
3253 	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3254 	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3255 		kfree(Reply_Ptr, M_TEMP);
3256 		kfree(Message_Ptr, M_TEMP);
3257 		debug_usr_cmd_printf (
3258 		  "Failed to copy in reply frame, errno=%d\n",
3259 		  error);
3260 		return (error);
3261 	}
3262 	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3263 	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3264 	kfree(Reply_Ptr, M_TEMP);
3265 	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3266 		kfree(Message_Ptr, M_TEMP);
3267 		debug_usr_cmd_printf (
3268 		  "Failed to copy in reply frame[%d], errno=%d\n",
3269 		  ReplySizeInBytes, error);
3270 		return (EINVAL);
3271 	}
3272 
3273 	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3274 	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3275 	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3276 	  M_TEMP, M_WAITOK)) == NULL) {
3277 		kfree(Message_Ptr, M_TEMP);
3278 		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3279 		  ReplySizeInBytes);
3280 		return (ENOMEM);
3281 	}
3282 	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
3283 	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3284 	  = Message_Ptr->InitiatorContext;
3285 	Reply_Ptr->StdReplyFrame.TransactionContext
3286 	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3287 	I2O_MESSAGE_FRAME_setMsgFlags(
3288 	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3289 	  I2O_MESSAGE_FRAME_getMsgFlags(
3290 	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3291 	      | I2O_MESSAGE_FLAGS_REPLY);
3292 
3293 	/* Check if the message is a special case command */
3294 	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3295 	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3296 		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3297 		  Message_Ptr) & 0xF0) >> 2)) {
3298 			kfree(Message_Ptr, M_TEMP);
3299 			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3300 			  &(Reply_Ptr->StdReplyFrame),
3301 			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
3302 			I2O_MESSAGE_FRAME_setMessageSize(
3303 			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3304 			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3305 			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3306 			  ReplySizeInBytes);
3307 			kfree(Reply_Ptr, M_TEMP);
3308 			return (error);
3309 		}
3310 	}
3311 
3312 	/* Deal in the general case */
3313 	/* First allocate and optionally copy in each scatter gather element */
3314 	SLIST_INIT(&sgList);
3315 	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3316 		PI2O_SGE_SIMPLE_ELEMENT sg;
3317 
3318 		/*
3319 		 *	since this code is reused in several systems, code
3320 		 * efficiency is greater by using a shift operation rather
3321 		 * than a divide by sizeof(u_int32_t).
3322 		 */
3323 		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3324 		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3325 		    >> 2));
3326 		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3327 		  + MessageSizeInBytes)) {
3328 			caddr_t v;
3329 			int	len;
3330 
3331 			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3332 			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3333 				error = EINVAL;
3334 				break;
3335 			}
3336 			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3337 			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3338 			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3339 			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3340 				Message_Ptr) & 0xF0) >> 2)),
3341 			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3342 
3343 			if ((elm = (struct ioctlSgList_S *)kmalloc (
3344 			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3345 			  M_TEMP, M_WAITOK)) == NULL) {
3346 				debug_usr_cmd_printf (
3347 				  "Failed to allocate SG[%d]\n", len);
3348 				error = ENOMEM;
3349 				break;
3350 			}
3351 			SLIST_INSERT_HEAD(&sgList, elm, link);
3352 			elm->FlagsCount = sg->FlagsCount;
3353 			elm->UserSpace = (caddr_t)
3354 			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3355 			v = elm->KernelSpace;
3356 			/* Copy in outgoing data (DIR bit could be invalid) */
3357 			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3358 			  != 0) {
3359 				break;
3360 			}
3361 			/*
3362 			 *	If the buffer is not contiguous, lets
3363 			 * break up the scatter/gather entries.
3364 			 */
3365 			while ((len > 0)
3366 			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3367 			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3368 				int next, base, span;
3369 
3370 				span = 0;
3371 				next = base = KVTOPHYS(v);
3372 				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3373 				  base);
3374 
3375 				/* How far can we go physically contiguously */
3376 				while ((len > 0) && (base == next)) {
3377 					int size;
3378 
3379 					next = trunc_page(base) + PAGE_SIZE;
3380 					size = next - base;
3381 					if (size > len) {
3382 						size = len;
3383 					}
3384 					span += size;
3385 					v += size;
3386 					len -= size;
3387 					base = KVTOPHYS(v);
3388 				}
3389 
3390 				/* Construct the Flags */
3391 				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3392 				  span);
3393 				{
3394 					int flags = I2O_FLAGS_COUNT_getFlags(
3395 					  &(elm->FlagsCount));
3396 					/* Any remaining length? */
3397 					if (len > 0) {
3398 					    flags &=
3399 						~(I2O_SGL_FLAGS_END_OF_BUFFER
3400 						 | I2O_SGL_FLAGS_LAST_ELEMENT);
3401 					}
3402 					I2O_FLAGS_COUNT_setFlags(
3403 					  &(sg->FlagsCount), flags);
3404 				}
3405 
3406 				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3407 				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
3408 				    ((char *)Message_Ptr
3409 				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3410 					Message_Ptr) & 0xF0) >> 2)),
3411 				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3412 				  span);
3413 				if (len <= 0) {
3414 					break;
3415 				}
3416 
3417 				/*
3418 				 * Incrementing requires resizing of the
3419 				 * packet, and moving up the existing SG
3420 				 * elements.
3421 				 */
3422 				++sg;
3423 				MessageSizeInBytes += sizeof(*sg);
3424 				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3425 				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3426 				  + (sizeof(*sg) / sizeof(U32)));
3427 				{
3428 					PI2O_MESSAGE_FRAME NewMessage_Ptr;
3429 
3430 					if ((NewMessage_Ptr
3431 					  = (PI2O_MESSAGE_FRAME)
3432 					    kmalloc (MessageSizeInBytes,
3433 					     M_TEMP, M_WAITOK)) == NULL) {
3434 						debug_usr_cmd_printf (
3435 						  "Failed to acquire frame[%d] memory\n",
3436 						  MessageSizeInBytes);
3437 						error = ENOMEM;
3438 						break;
3439 					}
3440 					span = ((caddr_t)sg)
3441 					     - (caddr_t)Message_Ptr;
3442 					bcopy(Message_Ptr,NewMessage_Ptr, span);
3443 					bcopy((caddr_t)(sg-1),
3444 					  ((caddr_t)NewMessage_Ptr) + span,
3445 					  MessageSizeInBytes - span);
3446 					kfree(Message_Ptr, M_TEMP);
3447 					sg = (PI2O_SGE_SIMPLE_ELEMENT)
3448 					  (((caddr_t)NewMessage_Ptr) + span);
3449 					Message_Ptr = NewMessage_Ptr;
3450 				}
3451 			}
3452 			if ((error)
3453 			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3454 			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3455 				break;
3456 			}
3457 			++sg;
3458 		}
3459 		if (error) {
3460 			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3461 				SLIST_REMOVE_HEAD(&sgList, link);
3462 				kfree(elm, M_TEMP);
3463 			}
3464 			kfree(Reply_Ptr, M_TEMP);
3465 			kfree(Message_Ptr, M_TEMP);
3466 			return (error);
3467 		}
3468 	}
3469 
3470 	debug_usr_cmd_printf ("Inbound: ");
3471 	debug_usr_cmd_dump_message(Message_Ptr);
3472 
3473 	/* Send the command */
3474 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3475 		/* Free up in-kernel buffers */
3476 		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3477 			SLIST_REMOVE_HEAD(&sgList, link);
3478 			kfree(elm, M_TEMP);
3479 		}
3480 		kfree(Reply_Ptr, M_TEMP);
3481 		kfree(Message_Ptr, M_TEMP);
3482 		return (ENOMEM);
3483 	}
3484 
3485 	/*
3486 	 * We do not need any (optional byteswapping) method access to
3487 	 * the Initiator context field.
3488 	 */
3489 	I2O_MESSAGE_FRAME_setInitiatorContext64(
3490 	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3491 
3492 	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3493 
3494 	kfree(Message_Ptr, M_TEMP);
3495 
3496 	/*
3497 	 * Wait for the board to report a finished instruction.
3498 	 */
3499 	crit_enter();
3500 	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3501 		if (ASR_getBlinkLedCode(sc)) {
3502 			/* Reset Adapter */
3503 			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
3504 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3505 			  ASR_getBlinkLedCode(sc));
3506 			if (ASR_reset (sc) == ENXIO) {
3507 				/* Command Cleanup */
3508 				ASR_ccbRemove(sc, ccb);
3509 			}
3510 			crit_exit();
3511 			/* Free up in-kernel buffers */
3512 			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3513 				SLIST_REMOVE_HEAD(&sgList, link);
3514 				kfree(elm, M_TEMP);
3515 			}
3516 			kfree(Reply_Ptr, M_TEMP);
3517 			asr_free_ccb(ccb);
3518 			return (EIO);
3519 		}
3520 		/* Check every second for BlinkLed */
3521 		/* There is no PRICAM, but outwardly PRIBIO is functional */
3522 		tsleep(ccb, 0, "asr", hz);
3523 	}
3524 	crit_exit();
3525 
3526 	debug_usr_cmd_printf ("Outbound: ");
3527 	debug_usr_cmd_dump_message(Reply_Ptr);
3528 
3529 	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3530 	  &(Reply_Ptr->StdReplyFrame),
3531 	  (ccb->ccb_h.status != CAM_REQ_CMP));
3532 
3533 	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3534 	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3535 		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3536 		  ccb->csio.dxfer_len - ccb->csio.resid);
3537 	}
3538 	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3539 	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3540 	 - I2O_SCSI_SENSE_DATA_SZ))) {
3541 		int size = ReplySizeInBytes
3542 		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3543 		  - I2O_SCSI_SENSE_DATA_SZ;
3544 
3545 		if (size > sizeof(ccb->csio.sense_data)) {
3546 			size = sizeof(ccb->csio.sense_data);
3547 		}
3548 		if (size < ccb->csio.sense_len) {
3549 			ccb->csio.sense_resid = ccb->csio.sense_len - size;
3550 		} else {
3551 			ccb->csio.sense_resid = 0;
3552 		}
3553 		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
3554 		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
3555 		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3556 		    Reply_Ptr, size);
3557 	}
3558 
3559 	/* Free up in-kernel buffers */
3560 	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3561 		/* Copy out as necessary */
3562 		if ((error == 0)
3563 		/* DIR bit considered `valid', error due to ignorance works */
3564 		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3565 		  & I2O_SGL_FLAGS_DIR) == 0)) {
3566 			error = copyout((caddr_t)(elm->KernelSpace),
3567 			  elm->UserSpace,
3568 			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3569 		}
3570 		SLIST_REMOVE_HEAD(&sgList, link);
3571 		kfree(elm, M_TEMP);
3572 	}
3573 	if (error == 0) {
3574 	/* Copy reply frame to user space */
3575 		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
3576 				ReplySizeInBytes);
3577 	}
3578 	kfree(Reply_Ptr, M_TEMP);
3579 	asr_free_ccb(ccb);
3580 
3581 	return (error);
3582 } /* ASR_queue_i */
3583 
3584 /*----------------------------------------------------------------------*/
3585 /*			    Function asr_ioctl			       */
3586 /*----------------------------------------------------------------------*/
3587 /* The parameters passed to this function are :				*/
3588 /*     dev  : Device number.						*/
3589 /*     cmd  : Ioctl Command						*/
3590 /*     data : User Argument Passed In.					*/
3591 /*     flag : Mode Parameter						*/
3592 /*     proc : Process Parameter						*/
3593 /*									*/
3594 /* This function is the user interface into this adapter driver		*/
3595 /*									*/
3596 /* Return : zero if OK, error code if not				*/
3597 /*----------------------------------------------------------------------*/
3598 
3599 static int
3600 asr_ioctl(struct dev_ioctl_args *ap)
3601 {
3602 	cdev_t dev = ap->a_head.a_dev;
3603 	u_long cmd = ap->a_cmd;
3604 	caddr_t data = ap->a_data;
3605 	Asr_softc_t	*sc = dev->si_drv1;
3606 	int		i, error = 0;
3607 #ifdef ASR_IOCTL_COMPAT
3608 	int		j;
3609 #endif /* ASR_IOCTL_COMPAT */
3610 
3611 	if (sc == NULL)
3612 		return (EINVAL);
3613 
3614 	switch(cmd) {
3615 	case DPT_SIGNATURE:
3616 #ifdef ASR_IOCTL_COMPAT
3617 #if (dsDescription_size != 50)
3618 	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3619 #endif
3620 		if (cmd & 0xFFFF0000) {
3621 			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3622 			return (0);
3623 		}
3624 	/* Traditional version of the ioctl interface */
3625 	case DPT_SIGNATURE & 0x0000FFFF:
3626 #endif
3627 		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3628 				sizeof(dpt_sig_S)));
3629 
3630 	/* Traditional version of the ioctl interface */
3631 	case DPT_CTRLINFO & 0x0000FFFF:
3632 	case DPT_CTRLINFO: {
3633 		struct {
3634 			u_int16_t length;
3635 			u_int16_t drvrHBAnum;
3636 			u_int32_t baseAddr;
3637 			u_int16_t blinkState;
3638 			u_int8_t  pciBusNum;
3639 			u_int8_t  pciDeviceNum;
3640 			u_int16_t hbaFlags;
3641 			u_int16_t Interrupt;
3642 			u_int32_t reserved1;
3643 			u_int32_t reserved2;
3644 			u_int32_t reserved3;
3645 		} CtlrInfo;
3646 
3647 		bzero(&CtlrInfo, sizeof(CtlrInfo));
3648 		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3649 		CtlrInfo.drvrHBAnum = asr_unit(dev);
3650 		CtlrInfo.baseAddr = sc->ha_Base;
3651 		i = ASR_getBlinkLedCode (sc);
3652 		if (i == -1)
3653 			i = 0;
3654 
3655 		CtlrInfo.blinkState = i;
3656 		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3657 		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3658 #define	FLG_OSD_PCI_VALID 0x0001
3659 #define	FLG_OSD_DMA	  0x0002
3660 #define	FLG_OSD_I2O	  0x0004
3661 		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3662 		CtlrInfo.Interrupt = sc->ha_irq;
3663 #ifdef ASR_IOCTL_COMPAT
3664 		if (cmd & 0xffff0000)
3665 			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3666 		else
3667 #endif /* ASR_IOCTL_COMPAT */
3668 		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3669 	}	return (error);
3670 
3671 	/* Traditional version of the ioctl interface */
3672 	case DPT_SYSINFO & 0x0000FFFF:
3673 	case DPT_SYSINFO: {
3674 		sysInfo_S	Info;
3675 #ifdef ASR_IOCTL_COMPAT
3676 		char	      * cp;
3677 		/* Kernel Specific ptok `hack' */
3678 #define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3679 
3680 		bzero(&Info, sizeof(Info));
3681 
3682 		/* Appears I am the only person in the Kernel doing this */
3683 		outb (0x70, 0x12);
3684 		i = inb(0x71);
3685 		j = i >> 4;
3686 		if (i == 0x0f) {
3687 			outb (0x70, 0x19);
3688 			j = inb (0x71);
3689 		}
3690 		Info.drive0CMOS = j;
3691 
3692 		j = i & 0x0f;
3693 		if (i == 0x0f) {
3694 			outb (0x70, 0x1a);
3695 			j = inb (0x71);
3696 		}
3697 		Info.drive1CMOS = j;
3698 
3699 		Info.numDrives = *((char *)ptok(0x475));
3700 #else /* ASR_IOCTL_COMPAT */
3701 		bzero(&Info, sizeof(Info));
3702 #endif /* ASR_IOCTL_COMPAT */
3703 
3704 		Info.processorFamily = ASR_sig.dsProcessorFamily;
3705 		Info.osType = OS_BSDI_UNIX;
3706 		Info.osMajorVersion = osrelease[0] - '0';
3707 		Info.osMinorVersion = osrelease[2] - '0';
3708 		/* Info.osRevision = 0; */
3709 		/* Info.osSubRevision = 0; */
3710 		Info.busType = SI_PCI_BUS;
3711 		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3712 
3713 #ifdef ASR_IOCTL_COMPAT
3714 		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3715 		/* Go Out And Look For I2O SmartROM */
3716 		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3717 			int k;
3718 
3719 			cp = ptok(j);
3720 			if (*((unsigned short *)cp) != 0xAA55) {
3721 				continue;
3722 			}
3723 			j += (cp[2] * 512) - 2048;
3724 			if ((*((u_long *)(cp + 6))
3725 			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3726 			 || (*((u_long *)(cp + 10))
3727 			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3728 				continue;
3729 			}
3730 			cp += 0x24;
3731 			for (k = 0; k < 64; ++k) {
3732 				if (*((unsigned short *)cp)
3733 				 == (' ' + ('v' * 256))) {
3734 					break;
3735 				}
3736 			}
3737 			if (k < 64) {
3738 				Info.smartROMMajorVersion
3739 				    = *((unsigned char *)(cp += 4)) - '0';
3740 				Info.smartROMMinorVersion
3741 				    = *((unsigned char *)(cp += 2));
3742 				Info.smartROMRevision
3743 				    = *((unsigned char *)(++cp));
3744 				Info.flags |= SI_SmartROMverValid;
3745 				Info.flags &= ~SI_NO_SmartROM;
3746 				break;
3747 			}
3748 		}
3749 		/* Get The Conventional Memory Size From CMOS */
3750 		outb (0x70, 0x16);
3751 		j = inb (0x71);
3752 		j <<= 8;
3753 		outb (0x70, 0x15);
3754 		j |= inb(0x71);
3755 		Info.conventionalMemSize = j;
3756 
3757 		/* Get The Extended Memory Found At Power On From CMOS */
3758 		outb (0x70, 0x31);
3759 		j = inb (0x71);
3760 		j <<= 8;
3761 		outb (0x70, 0x30);
3762 		j |= inb(0x71);
3763 		Info.extendedMemSize = j;
3764 		Info.flags |= SI_MemorySizeValid;
3765 
3766 		/* Copy Out The Info Structure To The User */
3767 		if (cmd & 0xFFFF0000)
3768 			bcopy(&Info, data, sizeof(Info));
3769 		else
3770 #endif /* ASR_IOCTL_COMPAT */
3771 		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3772 		return (error); }
3773 
3774 		/* Get The BlinkLED State */
3775 	case DPT_BLINKLED:
3776 		i = ASR_getBlinkLedCode (sc);
3777 		if (i == -1)
3778 			i = 0;
3779 #ifdef ASR_IOCTL_COMPAT
3780 		if (cmd & 0xffff0000)
3781 			bcopy(&i, data, sizeof(i));
3782 		else
3783 #endif /* ASR_IOCTL_COMPAT */
3784 		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3785 		break;
3786 
3787 		/* Send an I2O command */
3788 	case I2OUSRCMD:
3789 		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3790 
3791 		/* Reset and re-initialize the adapter */
3792 	case I2ORESETCMD:
3793 		return (ASR_reset(sc));
3794 
3795 		/* Rescan the LCT table and resynchronize the information */
3796 	case I2ORESCANCMD:
3797 		return (ASR_rescan(sc));
3798 	}
3799 	return (EINVAL);
3800 } /* asr_ioctl */
3801