xref: /dragonfly/sys/dev/raid/asr/asr.c (revision 2b3f93ea)
1 /*-
2  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3  * Copyright (c) 2000-2001 Adaptec Corporation
4  * All rights reserved.
5  *
6  * TERMS AND CONDITIONS OF USE
7  *
8  * Redistribution and use in source form, with or without modification, are
9  * permitted provided that redistributions of source code must retain the
10  * above copyright notice, this list of conditions and the following disclaimer.
11  *
12  * This software is provided `as is' by Adaptec and any express or implied
13  * warranties, including, but not limited to, the implied warranties of
14  * merchantability and fitness for a particular purpose, are disclaimed. In no
15  * event shall Adaptec be liable for any direct, indirect, incidental, special,
16  * exemplary or consequential damages (including, but not limited to,
17  * procurement of substitute goods or services; loss of use, data, or profits;
18  * or business interruptions) however caused and on any theory of liability,
19  * whether in contract, strict liability, or tort (including negligence or
20  * otherwise) arising in any way out of the use of this driver software, even
21  * if advised of the possibility of such damage.
22  *
23  * SCSI I2O host adapter driver
24  *
25  *	V1.10 2004/05/05 scottl@freebsd.org
26  *		- Massive cleanup of the driver to remove dead code and
27  *		  non-conformant style.
28  *		- Removed most i386-specific code to make it more portable.
29  *		- Converted to the bus_space API.
30  *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31  *		- The 2000S and 2005S do not initialize on some machines,
32  *		  increased timeout to 255ms from 50ms for the StatusGet
33  *		  command.
34  *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35  *		- I knew this one was too good to be true. The error return
36  *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37  *		  to the bit masked status.
38  *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39  *		- The 2005S that was supported is affectionately called the
40  *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41  *		  16MB low-cost configuration, Firmware was forced to go
42  *		  to a Split BAR Firmware. This requires a separate IOP and
43  *		  Messaging base address.
44  *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45  *		- Handle support for 2005S Zero Channel RAID solution.
46  *		- System locked up if the Adapter locked up. Do not try
47  *		  to send other commands if the resetIOP command fails. The
48  *		  fail outstanding command discovery loop was flawed as the
49  *		  removal of the command from the list prevented discovering
50  *		  all the commands.
51  *		- Comment changes to clarify driver.
52  *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53  *		- We do not use the AC_FOUND_DEV event because of I2O.
54  *		  Removed asr_async.
55  *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56  *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57  *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58  *		  mode as this is confused with competitor adapters in run
59  *		  mode.
60  *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61  *		  to prevent operating system panic.
62  *		- moved default major number to 154 from 97.
63  *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64  *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65  *		  series that is visible, it's more of an internal code name.
66  *		  remove any visible references within reason for now.
67  *		- bus_ptr->LUN was not correctly zeroed when initially
68  *		  allocated causing a possible panic of the operating system
69  *		  during boot.
70  *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71  *		- Code always fails for ASR_getTid affecting performance.
72  *		- initiated a set of changes that resulted from a formal
73  *		  code inspection by Mark_Salyzyn@adaptec.com,
74  *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75  *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76  *		  Their findings were focussed on the LCT & TID handler, and
77  *		  all resulting changes were to improve code readability,
78  *		  consistency or have a positive effect on performance.
79  *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80  *		- Passthrough returned an incorrect error.
81  *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82  *		  on command completion.
83  *		- generate control device nodes using make_dev and delete_dev.
84  *		- Performance affected by TID caching reallocing.
85  *		- Made suggested changes by Justin_Gibbs@adaptec.com
86  *			- use splcam instead of splbio.
87  *			- use cam_imask instead of bio_imask.
88  *			- use u_int8_t instead of u_char.
89  *			- use u_int16_t instead of u_short.
90  *			- use u_int32_t instead of u_long where appropriate.
91  *			- use 64 bit context handler instead of 32 bit.
92  *			- create_ccb should only allocate the worst case
93  *			  requirements for the driver since CAM may evolve
94  *			  making union ccb much larger than needed here.
95  *			  renamed create_ccb to asr_alloc_ccb.
96  *			- go nutz justifying all debug prints as macros
97  *			  defined at the top and remove unsightly ifdefs.
98  *			- INLINE STATIC viewed as confusing. Historically
99  *			  utilized to affect code performance and debug
100  *			  issues in OS, Compiler or OEM specific situations.
101  *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102  *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103  *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104  *			changed variable name xs to ccb
105  *			changed struct scsi_link to struct cam_path
106  *			changed struct scsibus_data to struct cam_sim
107  *			stopped using fordriver for holding on to the TID
108  *			use proprietary packet creation instead of scsi_inquire
109  *			CAM layer sends synchronize commands.
110  *
111  * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
112  */
113 
114 #include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
115 #include <sys/kernel.h>
116 #include <sys/module.h>
117 #include <sys/systm.h>
118 #include <sys/malloc.h>
119 #include <sys/conf.h>
120 #include <sys/caps.h>
121 #include <sys/proc.h>
122 #include <sys/bus.h>
123 #include <sys/rman.h>
124 #include <sys/stat.h>
125 #include <sys/device.h>
126 #include <sys/thread2.h>
127 #include <sys/bus_dma.h>
128 
129 #include <bus/cam/cam.h>
130 #include <bus/cam/cam_ccb.h>
131 #include <bus/cam/cam_sim.h>
132 #include <bus/cam/cam_xpt_sim.h>
133 #include <bus/cam/cam_xpt_periph.h>
134 
135 #include <bus/cam/scsi/scsi_all.h>
136 #include <bus/cam/scsi/scsi_message.h>
137 
138 #include <vm/vm.h>
139 #include <vm/pmap.h>
140 
141 #include <machine/vmparam.h>
142 
143 #include <bus/pci/pcivar.h>
144 #include <bus/pci/pcireg.h>
145 
146 #define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
147 #define	KVTOPHYS(x) vtophys(x)
148 #include	<dev/raid/asr/dptalign.h>
149 #include	<dev/raid/asr/i2oexec.h>
150 #include	<dev/raid/asr/i2obscsi.h>
151 #include	<dev/raid/asr/i2odpt.h>
152 #include	<dev/raid/asr/i2oadptr.h>
153 
154 #include	<dev/raid/asr/sys_info.h>
155 
156 #define	ASR_VERSION	1
157 #define	ASR_REVISION	'1'
158 #define	ASR_SUBREVISION '0'
159 #define	ASR_MONTH	5
160 #define	ASR_DAY		5
161 #define	ASR_YEAR	(2004 - 1980)
162 
163 /*
164  *	Debug macros to reduce the unsightly ifdefs
165  */
166 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
167 static __inline void
168 debug_asr_message(PI2O_MESSAGE_FRAME message)
169 {
170 	u_int32_t * pointer = (u_int32_t *)message;
171 	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
172 	u_int32_t   counter = 0;
173 
174 	while (length--) {
175 		kprintf("%08lx%c", (u_long)*(pointer++),
176 		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
177 	}
178 }
179 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
180 
181 #ifdef DEBUG_ASR
182   /* Breaks on non-STDC-based compilers :-( */
183 #define debug_asr_printf(fmt,args...)	kprintf(fmt, ##args)
184 #define debug_asr_dump_message(message)	debug_asr_message(message)
185 #define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
186 #else /* DEBUG_ASR */
187 #define debug_asr_printf(fmt,args...)
188 #define debug_asr_dump_message(message)
189 #define debug_asr_print_path(ccb)
190 #endif /* DEBUG_ASR */
191 
192 /*
193  *	If DEBUG_ASR_CMD is defined:
194  *		0 - display incoming SCSI commands.
195  *		1 - also add a quick character trace before queueing.
196  *		2 - also dump outgoing message frames (usage sketch below).
197  */
198 #if (defined(DEBUG_ASR_CMD))
199 #define debug_asr_cmd_printf(fmt,args...)     kprintf(fmt,##args)
200 static __inline void
201 debug_asr_dump_ccb(union ccb *ccb)
202 {
203 	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
204 	int		len = ccb->csio.cdb_len;
205 
206 	while (len) {
207 		debug_asr_cmd_printf (" %02x", *(cp++));
208 		--len;
209 	}
210 }
211 #if (DEBUG_ASR_CMD > 0)
212 #define debug_asr_cmd1_printf		       debug_asr_cmd_printf
213 #else
214 #define debug_asr_cmd1_printf(fmt,args...)
215 #endif
216 #if (DEBUG_ASR_CMD > 1)
217 #define debug_asr_cmd2_printf			debug_asr_cmd_printf
218 #define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
219 #else
220 #define debug_asr_cmd2_printf(fmt,args...)
221 #define debug_asr_cmd2_dump_message(message)
222 #endif
223 #else /* DEBUG_ASR_CMD */
224 #define debug_asr_cmd_printf(fmt,args...)
225 #define debug_asr_dump_ccb(ccb)
226 #define debug_asr_cmd1_printf(fmt,args...)
227 #define debug_asr_cmd2_printf(fmt,args...)
228 #define debug_asr_cmd2_dump_message(message)
229 #endif /* DEBUG_ASR_CMD */
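
/*
 * Illustrative sketch only (not compiled in): with DEBUG_ASR_CMD defined,
 * a command could be traced before queueing roughly as below.  The names
 * `sim', `ccb' and `Message_Ptr' stand in for whatever the caller has in
 * scope.
 */
#if 0
	debug_asr_cmd_printf("(%d,%d,%d)", cam_sim_bus(sim),
	    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
	debug_asr_dump_ccb(ccb);		  /* level 0: CDB bytes	      */
	debug_asr_cmd1_printf(" q");		  /* level 1: quick marker    */
	debug_asr_cmd2_dump_message(Message_Ptr); /* level 2: outgoing frame */
	debug_asr_cmd_printf("\n");
#endif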
230 
231 #if (defined(DEBUG_ASR_USR_CMD))
232 #define debug_usr_cmd_printf(fmt,args...)   kprintf(fmt,##args)
233 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
234 #else /* DEBUG_ASR_USR_CMD */
235 #define debug_usr_cmd_printf(fmt,args...)
236 #define debug_usr_cmd_dump_message(message)
237 #endif /* DEBUG_ASR_USR_CMD */
238 
239 #ifdef ASR_IOCTL_COMPAT
240 #define	dsDescription_size 46	/* Snug as a bug in a rug */
241 #endif /* ASR_IOCTL_COMPAT */
242 
243 #include "dev/raid/asr/dptsig.h"
244 
245 static dpt_sig_S ASR_sig = {
246 	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
247 	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
248 	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
249 	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
250 	ASR_MONTH, ASR_DAY, ASR_YEAR,
251 /*	 01234567890123456789012345678901234567890123456789	< 50 chars */
252 	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
253 	/*		 ^^^^^ asr_attach alters these to match OS */
254 };
255 
256 /* Configuration Definitions */
257 
258 #define	SG_SIZE		 58	/* Scatter Gather list Size		 */
259 #define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
260 #define	MAX_LUN		 255	/* Maximum LUN Supported		 */
261 #define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
262 #define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
263 #define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
264 #define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
265 #define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
266 				/* Also serves as the minimum map for	 */
267 				/* the 2005S zero channel RAID product	 */
268 
269 /* I2O register set */
270 #define	I2O_REG_STATUS		0x30
271 #define	I2O_REG_MASK		0x34
272 #define	I2O_REG_TOFIFO		0x40
273 #define	I2O_REG_FROMFIFO	0x44
274 
275 #define	Mask_InterruptsDisabled	0x08
276 
277 /*
278  * A MIX of performance and space considerations for TID lookups
279  */
280 typedef u_int16_t tid_t;
281 
282 typedef struct {
283 	u_int32_t size;		/* up to MAX_LUN    */
284 	tid_t	  TID[1];
285 } lun2tid_t;
286 
287 typedef struct {
288 	u_int32_t   size;	/* up to MAX_TARGET */
289 	lun2tid_t * LUN[1];
290 } target2lun_t;
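
/*
 * Illustrative sketch only (not compiled in): a cached TID lookup walks
 * ha_targets[bus] -> LUN[target] -> TID[lun], where either level may still
 * be unallocated.  ASR_getTidAddress() below implements this walk and grows
 * the tables on demand; `sc', `bus', `target' and `lun' are stand-ins here.
 */
#if 0
	target2lun_t	*bus_ptr = sc->ha_targets[bus];
	lun2tid_t	*target_ptr = (bus_ptr != NULL)
				    ? bus_ptr->LUN[target] : NULL;
	tid_t		TID = (target_ptr != NULL)
			    ? target_ptr->TID[lun] : (tid_t)0;
#endif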
291 
292 /*
293  * Don't play games with the ccb any more, use the CAM ccb
294  */
295 #define asr_ccb ccb
296 
297 struct Asr_status_mem {
298 	I2O_EXEC_STATUS_GET_REPLY	status;
299 	U32				rstatus;
300 };
301 
302 /**************************************************************************
303 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
304 **  Is Configured Into The System.  The Structure Supplies Configuration **
305 **  Information, Status Info, Queue Info And An Active CCB List Pointer. **
306 ***************************************************************************/
307 
308 typedef struct Asr_softc {
309 	device_t		ha_dev;
310 	u_int16_t		ha_irq;
311 	u_long			ha_Base;       /* base port for each board */
312 	bus_size_t		ha_blinkLED;
313 	bus_space_handle_t	ha_i2o_bhandle;
314 	bus_space_tag_t		ha_i2o_btag;
315 	bus_space_handle_t	ha_frame_bhandle;
316 	bus_space_tag_t		ha_frame_btag;
317 	I2O_IOP_ENTRY		ha_SystemTable;
318 	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */
319 
320 	bus_dma_tag_t		ha_parent_dmat;
321 	bus_dma_tag_t		ha_statusmem_dmat;
322 	bus_dmamap_t		ha_statusmem_dmamap;
323 	struct Asr_status_mem * ha_statusmem;
324 	u_int32_t		ha_rstatus_phys;
325 	u_int32_t		ha_status_phys;
326 	struct cam_path	      * ha_path[MAX_CHANNEL+1];
327 	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
328 	struct resource	      * ha_mem_res;
329 	struct resource	      * ha_mes_res;
330 	struct resource	      * ha_irq_res;
331 	void		      * ha_intr;
332 	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
333 #define le_type	  IdentityTag[0]
334 #define I2O_BSA	    0x20
335 #define I2O_FCA	    0x40
336 #define I2O_SCSI    0x00
337 #define I2O_PORT    0x80
338 #define I2O_UNKNOWN 0x7F
339 #define le_bus	  IdentityTag[1]
340 #define le_target IdentityTag[2]
341 #define le_lun	  IdentityTag[3]
342 	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
343 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
344 	u_long			ha_Msgs_Phys;
345 
346 	u_int8_t		ha_in_reset;
347 #define HA_OPERATIONAL	    0
348 #define HA_IN_RESET	    1
349 #define HA_OFF_LINE	    2
350 #define HA_OFF_LINE_RECOVERY 3
351 	/* Configuration information */
352 	/* The target id maximums we take */
353 	u_int8_t		ha_MaxBus;     /* Maximum bus */
354 	u_int8_t		ha_MaxId;      /* Maximum target ID */
355 	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
356 	u_int8_t		ha_SgSize;     /* Max SG elements */
357 	u_int8_t		ha_pciBusNum;
358 	u_int8_t		ha_pciDeviceNum;
359 	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
360 	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
361 	u_int16_t		ha_Msgs_Count;
362 
363 	/* Links into other parents and HBAs */
364 	struct Asr_softc      * ha_next;       /* HBA list */
365 	struct cdev *ha_devt;
366 } Asr_softc_t;
367 
368 static Asr_softc_t *Asr_softc_list;
369 
370 /*
371  *	Prototypes of the routines we have in this object.
372  */
373 
374 /* I2O HDM interface */
375 static int	asr_probe(device_t dev);
376 static int	asr_attach(device_t dev);
377 
378 static d_ioctl_t asr_ioctl;
379 static d_open_t asr_open;
380 static d_close_t asr_close;
381 static int	asr_intr(Asr_softc_t *sc);
382 static void	asr_timeout(void *arg);
383 static int	ASR_init(Asr_softc_t *sc);
384 static int	ASR_acquireLct(Asr_softc_t *sc);
385 static int	ASR_acquireHrt(Asr_softc_t *sc);
386 static void	asr_action(struct cam_sim *sim, union ccb *ccb);
387 static void	asr_poll(struct cam_sim *sim);
388 static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
389 
390 /*
391  *	Here is the auto-probe structure used to nest our tests appropriately
392  *	during the startup phase of the operating system.
393  */
394 static device_method_t asr_methods[] = {
395 	DEVMETHOD(device_probe,	 asr_probe),
396 	DEVMETHOD(device_attach, asr_attach),
397 	DEVMETHOD_END
398 };
399 
400 static driver_t asr_driver = {
401 	"asr",
402 	asr_methods,
403 	sizeof(Asr_softc_t)
404 };
405 
406 static devclass_t asr_devclass;
407 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, NULL, NULL);
408 MODULE_VERSION(asr, 1);
409 MODULE_DEPEND(asr, pci, 1, 1, 1);
410 MODULE_DEPEND(asr, cam, 1, 1, 1);
411 
412 /*
413  * devsw for asr hba driver
414  *
415  * Only ioctl is used; the sd driver provides all other access.
416  */
417 static struct dev_ops asr_ops = {
418 	{ "asr", 0, 0 },
419 	.d_open =	asr_open,
420 	.d_close =	asr_close,
421 	.d_ioctl =	asr_ioctl,
422 };
423 
424 /* I2O support routines */
425 
426 static __inline u_int32_t
427 asr_get_FromFIFO(Asr_softc_t *sc)
428 {
429 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
430 				 I2O_REG_FROMFIFO));
431 }
432 
433 static __inline u_int32_t
434 asr_get_ToFIFO(Asr_softc_t *sc)
435 {
436 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
437 				 I2O_REG_TOFIFO));
438 }
439 
440 static __inline u_int32_t
441 asr_get_intr(Asr_softc_t *sc)
442 {
443 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
444 				 I2O_REG_MASK));
445 }
446 
447 static __inline u_int32_t
448 asr_get_status(Asr_softc_t *sc)
449 {
450 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
451 				 I2O_REG_STATUS));
452 }
453 
454 static __inline void
455 asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
456 {
457 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
458 			  val);
459 }
460 
461 static __inline void
462 asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
463 {
464 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
465 			  val);
466 }
467 
468 static __inline void
469 asr_set_intr(Asr_softc_t *sc, u_int32_t val)
470 {
471 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
472 			  val);
473 }
474 
475 static __inline void
476 asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
477 {
478 	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
479 				 offset, (u_int32_t *)frame, len);
480 }
481 
482 /*
483  *	Fill message with default.
484  */
485 static PI2O_MESSAGE_FRAME
486 ASR_fillMessage(void *Message, u_int16_t size)
487 {
488 	PI2O_MESSAGE_FRAME Message_Ptr;
489 
490 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
491 	bzero(Message_Ptr, size);
492 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
493 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
494 	  (size + sizeof(U32) - 1) >> 2);
495 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
496 	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
497 	return (Message_Ptr);
498 } /* ASR_fillMessage */
499 
500 #define	EMPTY_QUEUE (0xffffffff)
501 
502 static __inline U32
503 ASR_getMessage(Asr_softc_t *sc)
504 {
505 	U32	MessageOffset;
506 
507 	MessageOffset = asr_get_ToFIFO(sc);
508 	if (MessageOffset == EMPTY_QUEUE)
509 		MessageOffset = asr_get_ToFIFO(sc);
510 
511 	return (MessageOffset);
512 } /* ASR_getMessage */
513 
514 /* Issue a polled command */
515 static U32
516 ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
517 {
518 	U32	Mask = 0xffffffff;
519 	U32	MessageOffset;
520 	u_int	Delay = 1500;
521 
522 	/*
523 	 * ASR_initiateCp is only used for synchronous commands and is made
524 	 * more resilient to adapter delays, since commands like resetIOP
525 	 * can leave the adapter deaf for a little time.
526 	 */
527 	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
528 	 && (--Delay != 0)) {
529 		DELAY (10000);
530 	}
531 	if (MessageOffset != EMPTY_QUEUE) {
532 		asr_set_frame(sc, Message, MessageOffset,
533 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
534 		/*
535 		 *	Disable the Interrupts
536 		 */
537 		Mask = asr_get_intr(sc);
538 		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
539 		asr_set_ToFIFO(sc, MessageOffset);
540 	}
541 	return (Mask);
542 } /* ASR_initiateCp */
543 
544 /*
545  *	Reset the adapter.
546  */
547 static U32
548 ASR_resetIOP(Asr_softc_t *sc)
549 {
550 	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
551 	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
552 	U32			       * Reply_Ptr;
553 	U32				 Old;
554 
555 	/*
556 	 *  Build up our copy of the Message.
557 	 */
558 	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
559 	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
560 	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
561 	/*
562 	 *  Reset the Reply Status
563 	 */
564 	Reply_Ptr = &sc->ha_statusmem->rstatus;
565 	*Reply_Ptr = 0;
566 	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
567 	    sc->ha_rstatus_phys);
568 	/*
569 	 *	Send the Message out
570 	 */
571 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
572 	     0xffffffff) {
573 		/*
574 		 * Wait for a response (Poll); timeouts are dangerous if
575 		 * the card is truly responsive. We assume a response within 2s.
576 		 */
577 		u_int8_t Delay = 200;
578 
579 		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
580 			DELAY (10000);
581 		}
582 		/*
583 		 *	Re-enable the interrupts.
584 		 */
585 		asr_set_intr(sc, Old);
586 		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
587 		return(*Reply_Ptr);
588 	}
589 	KASSERT(Old != 0xffffffff, ("Old == -1"));
590 	return (0);
591 } /* ASR_resetIOP */
592 
593 /*
594  *	Get the current state of the adapter
595  */
596 static PI2O_EXEC_STATUS_GET_REPLY
597 ASR_getStatus(Asr_softc_t *sc)
598 {
599 	I2O_EXEC_STATUS_GET_MESSAGE	Message;
600 	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
601 	PI2O_EXEC_STATUS_GET_REPLY	buffer;
602 	U32				Old;
603 
604 	/*
605 	 *  Build up our copy of the Message.
606 	 */
607 	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
608 	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
609 	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
610 	    I2O_EXEC_STATUS_GET);
611 	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
612 	    sc->ha_status_phys);
613 	/* This one is a Byte Count */
614 	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
615 	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
616 	/*
617 	 *  Reset the Reply Status
618 	 */
619 	buffer = &sc->ha_statusmem->status;
620 	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
621 	/*
622 	 *	Send the Message out
623 	 */
624 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
625 	    0xffffffff) {
626 		/*
627 		 *	Wait for a response (Poll); timeouts are dangerous if
628 		 * the card is truly responsive. We assume a response within 255ms.
629 		 */
630 		u_int8_t Delay = 255;
631 
632 		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
633 			if (--Delay == 0) {
634 				buffer = NULL;
635 				break;
636 			}
637 			DELAY (1000);
638 		}
639 		/*
640 		 *	Re-enable the interrupts.
641 		 */
642 		asr_set_intr(sc, Old);
643 		return (buffer);
644 	}
645 	return (NULL);
646 } /* ASR_getStatus */
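
/*
 * Illustrative sketch only (not compiled in): a caller might bring the
 * adapter up by resetting the IOP and then polling for its status block,
 * bailing out if either step fails.
 */
#if 0
	PI2O_EXEC_STATUS_GET_REPLY status;

	if (ASR_resetIOP(sc) == 0)		  /* 0 means the reset failed */
		return (ENXIO);
	if ((status = ASR_getStatus(sc)) == NULL) /* no reply within 255ms   */
		return (ENXIO);
#endif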
647 
648 /*
649  *	Check if the device is a SCSI I2O HBA, and add it to the list.
650  */
651 
652 /*
653  * Probe for ASR controller.  If we find it, we will use it.
654  * virtual adapters.
655  */
656 static int
657 asr_probe(device_t dev)
658 {
659 	u_int32_t id;
660 
661 	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
662 	if ((id == 0xA5011044) || (id == 0xA5111044)) {
663 		device_set_desc(dev, "Adaptec Caching SCSI RAID");
664 		return (BUS_PROBE_DEFAULT);
665 	}
666 	return (ENXIO);
667 } /* asr_probe */
668 
669 static __inline union asr_ccb *
670 asr_alloc_ccb(Asr_softc_t *sc)
671 {
672 	union asr_ccb *new_ccb;
673 
674 	new_ccb = xpt_alloc_ccb();
675 	new_ccb->ccb_h.pinfo.priority = 1;
676 	new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
677 	new_ccb->ccb_h.spriv_ptr0 = sc;
678 
679 	return (new_ccb);
680 } /* asr_alloc_ccb */
681 
682 static __inline void
683 asr_free_ccb(union asr_ccb *free_ccb)
684 {
685 	xpt_free_ccb(&free_ccb->ccb_h);
686 } /* asr_free_ccb */
687 
688 /*
689  *	Print inquiry data `carefully'
690  */
691 static void
692 ASR_prstring(u_int8_t *s, int len)
693 {
694 	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
695 		kprintf ("%c", *(s++));
696 	}
697 } /* ASR_prstring */
698 
699 /*
700  *	Send a message synchronously, and without interrupts, on behalf of a ccb.
701  */
702 static int
703 ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
704 {
705 	U32		Mask;
706 	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
707 
708 	/*
709 	 * We do not need any (optional byteswapping) method access to
710 	 * the Initiator context field.
711 	 */
712 	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
713 
714 	/* Prevent interrupt service */
715 	crit_enter();
716 	Mask = asr_get_intr(sc);
717 	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
718 
719 	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
720 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
721 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
722 	}
723 
724 	/*
725 	 * Wait for this board to report a finished instruction.
726 	 */
727 	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
728 		(void)asr_intr (sc);
729 	}
730 
731 	/* Re-enable Interrupts */
732 	asr_set_intr(sc, Mask);
733 	crit_exit();
734 
735 	return (ccb->ccb_h.status);
736 } /* ASR_queue_s */
737 
738 /*
739  *	Send a message synchronously to an Asr_softc_t.
740  */
741 static int
742 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
743 {
744 	union asr_ccb	*ccb;
745 	int		status;
746 
747 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
748 		return (CAM_REQUEUE_REQ);
749 	}
750 
751 	status = ASR_queue_s (ccb, Message);
752 
753 	asr_free_ccb(ccb);
754 
755 	return (status);
756 } /* ASR_queue_c */
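
/*
 * Illustrative sketch only (not compiled in): callers typically build a
 * frame with ASR_fillMessage(), fill in the function code, TID and SG
 * elements, and hand it to ASR_queue_c(), comparing the result against
 * CAM_REQ_CMP rather than the bit-masked status.
 */
#if 0
	I2O_MESSAGE_FRAME Message;

	(void)ASR_fillMessage(&Message, sizeof(Message));
	/* ... set function code, target TID and SG elements here ... */
	if (ASR_queue_c(sc, &Message) != CAM_REQ_CMP)
		return (EIO);
#endif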
757 
758 /*
759  *	Add the specified ccb to the active queue
760  */
761 static __inline void
762 ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
763 {
764 	crit_enter();
765 	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
766 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
767 		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
768 			/*
769 			 * RAID systems can take considerable time to
770 			 * complete some commands, given the large cache
771 			 * flushes when switching from write-back to write-through.
772 			 */
773 			ccb->ccb_h.timeout = 6 * 60 * 1000;
774 		}
775 		callout_reset(ccb->ccb_h.timeout_ch,
776 			      (ccb->ccb_h.timeout * hz) / 1000,
777 			      asr_timeout, ccb);
778 	}
779 	crit_exit();
780 } /* ASR_ccbAdd */
781 
782 /*
783  *	Remove the specified ccb from the active queue.
784  */
785 static __inline void
786 ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
787 {
788 	crit_enter();
789 	callout_stop(ccb->ccb_h.timeout_ch);
790 	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
791 	crit_exit();
792 } /* ASR_ccbRemove */
793 
794 /*
795  *	Fail all the active commands, so they get re-issued by the operating
796  *	system.
797  */
798 static void
799 ASR_failActiveCommands(Asr_softc_t *sc)
800 {
801 	struct ccb_hdr	*ccb;
802 
803 	crit_enter();
804 	/*
805 	 *	We do not need to inform the CAM layer that we had a bus
806 	 * reset since we manage it on our own; this also prevents the
807 	 * SCSI_DELAY settling that would be required on other systems.
808 	 * The `SCSI_DELAY' has already been handled by the card via the
809 	 * acquisition of the LCT table while we are at CAM priority level.
810 	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
811 	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
812 	 *  }
813 	 */
814 	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
815 		ASR_ccbRemove (sc, (union asr_ccb *)ccb);
816 
817 		ccb->status &= ~CAM_STATUS_MASK;
818 		ccb->status |= CAM_REQUEUE_REQ;
819 		/* Nothing Transferred */
820 		((struct ccb_scsiio *)ccb)->resid
821 		  = ((struct ccb_scsiio *)ccb)->dxfer_len;
822 
823 		if (ccb->path) {
824 			xpt_done ((union ccb *)ccb);
825 		} else {
826 			wakeup (ccb);
827 		}
828 	}
829 	crit_exit();
830 } /* ASR_failActiveCommands */
831 
832 /*
833  *	The following command causes the HBA to reset the specified bus
834  */
835 static void
836 ASR_resetBus(Asr_softc_t *sc, int bus)
837 {
838 	I2O_HBA_BUS_RESET_MESSAGE	Message;
839 	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
840 	PI2O_LCT_ENTRY			Device;
841 
842 	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
843 	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
844 	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
845 	  I2O_HBA_BUS_RESET);
846 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
847 	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
848 	  ++Device) {
849 		if (((Device->le_type & I2O_PORT) != 0)
850 		 && (Device->le_bus == bus)) {
851 			I2O_MESSAGE_FRAME_setTargetAddress(
852 			  &Message_Ptr->StdMessageFrame,
853 			  I2O_LCT_ENTRY_getLocalTID(Device));
854 			/* Asynchronous command, with no expectations */
855 			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
856 			break;
857 		}
858 	}
859 } /* ASR_resetBus */
860 
861 static __inline int
862 ASR_getBlinkLedCode(Asr_softc_t *sc)
863 {
864 	U8	blink;
865 
866 	if (sc == NULL)
867 		return (0);
868 
869 	blink = bus_space_read_1(sc->ha_frame_btag,
870 				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
871 	if (blink != 0xBC)
872 		return (0);
873 
874 	blink = bus_space_read_1(sc->ha_frame_btag,
875 				 sc->ha_frame_bhandle, sc->ha_blinkLED);
876 	return (blink);
877 } /* ASR_getBlinkLedCode */
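
/*
 * Illustrative sketch only (not compiled in): a non-zero blink-LED code
 * generally means the adapter firmware has halted, and callers such as the
 * timeout handler below react by resetting the adapter.
 */
#if 0
	int code;

	if ((code = ASR_getBlinkLedCode(sc)) != 0) {
		kprintf("asr: Blink LED 0x%x, resetting adapter\n", code);
		(void)ASR_reset(sc);
	}
#endif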
878 
879 /*
880  *	Determine the address of a TID lookup. Must be done at high priority
881  *	since the address can be changed by other threads of execution.
882  *
883  *	Returns a NULL pointer if not indexable (but will attempt to generate
884  *	an index if the `new_entry' flag is set to TRUE).
885  *
886  *	All addressable entries are guaranteed to be zero if never initialized.
887  */
888 static tid_t *
889 ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
890 {
891 	target2lun_t	*bus_ptr;
892 	lun2tid_t	*target_ptr;
893 	unsigned	new_size;
894 
895 	/*
896 	 *	Validity checking of incoming parameters. More of a bound
897 	 * expansion limit than an issue with the code dealing with the
898 	 * values.
899 	 *
900 	 *	sc must be valid before it gets here, so that check could be
901 	 * dropped if speed were a critical issue.
902 	 */
903 	if ((sc == NULL)
904 	 || (bus > MAX_CHANNEL)
905 	 || (target > sc->ha_MaxId)
906 	 || (lun > sc->ha_MaxLun)) {
907 		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
908 		  (u_long)sc, bus, target, lun);
909 		return (NULL);
910 	}
911 	/*
912 	 *	See if there is an associated bus list.
913 	 *
914 	 *	For performance, allocate in chunks of BUS_CHUNK entries.
915 	 *	BUS_CHUNK must be a power of two. This is to reduce
916 	 *	fragmentation effects on the allocations.
917 	 */
918 #define BUS_CHUNK 8
919 	new_size = roundup2(target, BUS_CHUNK);
920 	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
921 		/*
922 		 *	Allocate a new structure?
923 		 *		Since the structure declares one element, the +1
924 		 *		needed for the size is already accounted for.
925 		 */
926 		if ((new_entry == FALSE)
927 		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
928 		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
929 		    M_TEMP, M_WAITOK | M_ZERO))
930 		   == NULL)) {
931 			debug_asr_printf("failed to allocate bus list\n");
932 			return (NULL);
933 		}
934 		bus_ptr->size = new_size + 1;
935 	} else if (bus_ptr->size <= new_size) {
936 		target2lun_t * new_bus_ptr;
937 
938 		/*
939 		 *	Reallocate a new structure?
940 		 *		Since the structure declares one element, the +1
941 		 *		needed for the size is already accounted for.
942 		 */
943 		if ((new_entry == FALSE)
944 		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
945 		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
946 		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
947 			debug_asr_printf("failed to reallocate bus list\n");
948 			return (NULL);
949 		}
950 		/*
951 		 *	Copy the whole thing, safer, simpler coding
952 		 * and not really performance critical at this point.
953 		 */
954 		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
955 		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
956 		sc->ha_targets[bus] = new_bus_ptr;
957 		kfree(bus_ptr, M_TEMP);
958 		bus_ptr = new_bus_ptr;
959 		bus_ptr->size = new_size + 1;
960 	}
961 	/*
962 	 *	We now have the bus list; let's get to the target list.
963 	 *	Since most systems have only *one* LUN, we do not allocate
964 	 *	in chunks as above; here we allow one, then grow in chunk sizes.
965 	 *	TARGET_CHUNK must be a power of two. This is to reduce
966 	 *	fragmentation effects on the allocations.
967 	 */
968 #define TARGET_CHUNK 8
969 	if ((new_size = lun) != 0) {
970 		new_size = roundup2(lun, TARGET_CHUNK);
971 	}
972 	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
973 		/*
974 		 *	Allocate a new structure?
975 		 *		Since the structure declares one element, the +1
976 		 *		needed for the size is already accounted for.
977 		 */
978 		if ((new_entry == FALSE)
979 		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
980 		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
981 		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
982 			debug_asr_printf("failed to allocate target list\n");
983 			return (NULL);
984 		}
985 		target_ptr->size = new_size + 1;
986 	} else if (target_ptr->size <= new_size) {
987 		lun2tid_t * new_target_ptr;
988 
989 		/*
990 		 *	Reallocate a new structure?
991 		 *		Since the structure declares one element, the +1
992 		 *		needed for the size is already accounted for.
993 		 */
994 		if ((new_entry == FALSE)
995 		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
996 		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
997 		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
998 			debug_asr_printf("failed to reallocate target list\n");
999 			return (NULL);
1000 		}
1001 		/*
1002 		 *	Copy the whole thing, safer, simpler coding
1003 		 * and not really performance critical at this point.
1004 		 */
1005 		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
1006 		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1007 		bus_ptr->LUN[target] = new_target_ptr;
1008 		kfree(target_ptr, M_TEMP);
1009 		target_ptr = new_target_ptr;
1010 		target_ptr->size = new_size + 1;
1011 	}
1012 	/*
1013 	 *	Now, acquire the TID address from the LUN indexed list.
1014 	 */
1015 	return (&(target_ptr->TID[lun]));
1016 } /* ASR_getTidAddress */
1017 
1018 /*
1019  *	Get a pre-existing TID relationship.
1020  *
1021  *	If the TID was never set, return (tid_t)-1.
1022  *
1023  *	should use mutex rather than spl.
1024  */
1025 static __inline tid_t
1026 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1027 {
1028 	tid_t	*tid_ptr;
1029 	tid_t	retval;
1030 
1031 	crit_enter();
1032 	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1033 	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1034 	 || (*tid_ptr == (tid_t)0)) {
1035 		crit_exit();
1036 		return ((tid_t)-1);
1037 	}
1038 	retval = *tid_ptr;
1039 	crit_exit();
1040 	return (retval);
1041 } /* ASR_getTid */
1042 
1043 /*
1044  *	Set a TID relationship.
1045  *
1046  *	If the TID was not set, return (tid_t)-1.
1047  *
1048  *	should use mutex rather than spl.
1049  */
1050 static __inline tid_t
1051 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1052 {
1053 	tid_t	*tid_ptr;
1054 
1055 	if (TID != (tid_t)-1) {
1056 		if (TID == 0) {
1057 			return ((tid_t)-1);
1058 		}
1059 		crit_enter();
1060 		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1061 		 == NULL) {
1062 			crit_exit();
1063 			return ((tid_t)-1);
1064 		}
1065 		*tid_ptr = TID;
1066 		crit_exit();
1067 	}
1068 	return (TID);
1069 } /* ASR_setTid */
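
/*
 * Illustrative sketch only (not compiled in): the usual pattern is to try
 * the cached TID first and, on a miss, walk the LCT for a matching
 * bus/target/lun and cache the result, much as ASR_init_message() does
 * further below.
 */
#if 0
	tid_t TID;

	if ((TID = ASR_getTid(sc, bus, target, lun)) == (tid_t)-1) {
		/* ... search sc->ha_LCT for the device to obtain TID ... */
		(void)ASR_setTid(sc, bus, target, lun, TID);
	}
#endif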
1070 
1071 /*-------------------------------------------------------------------------*/
1072 /*		      Function ASR_rescan				   */
1073 /*-------------------------------------------------------------------------*/
1074 /* The Parameters Passed To This Function Are :				   */
1075 /*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1076 /*									   */
1077 /* This Function Will rescan the adapter and resynchronize any data	   */
1078 /*									   */
1079 /* Return : 0 For OK, Error Code Otherwise				   */
1080 /*-------------------------------------------------------------------------*/
1081 
1082 static int
1083 ASR_rescan(Asr_softc_t *sc)
1084 {
1085 	int bus;
1086 	int error;
1087 
1088 	/*
1089 	 * Re-acquire the LCT table and synchronize us to the adapter.
1090 	 */
1091 	if ((error = ASR_acquireLct(sc)) == 0) {
1092 		error = ASR_acquireHrt(sc);
1093 	}
1094 
1095 	if (error != 0) {
1096 		return error;
1097 	}
1098 
1099 	bus = sc->ha_MaxBus;
1100 	/* Reset all existing cached TID lookups */
1101 	do {
1102 		int target, event = 0;
1103 
1104 		/*
1105 		 *	Scan for all targets on this bus to see if they
1106 		 * got affected by the rescan.
1107 		 */
1108 		for (target = 0; target <= sc->ha_MaxId; ++target) {
1109 			int lun;
1110 
1111 			/* Stay away from the controller ID */
1112 			if (target == sc->ha_adapter_target[bus]) {
1113 				continue;
1114 			}
1115 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1116 				PI2O_LCT_ENTRY Device;
1117 				tid_t	       TID = (tid_t)-1;
1118 				tid_t	       LastTID;
1119 
1120 				/*
1121 				 * See if the cached TID changed. Search for
1122 				 * the device in our new LCT.
1123 				 */
1124 				for (Device = sc->ha_LCT->LCTEntry;
1125 				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1126 				   + I2O_LCT_getTableSize(sc->ha_LCT));
1127 				  ++Device) {
1128 					if ((Device->le_type != I2O_UNKNOWN)
1129 					 && (Device->le_bus == bus)
1130 					 && (Device->le_target == target)
1131 					 && (Device->le_lun == lun)
1132 					 && (I2O_LCT_ENTRY_getUserTID(Device)
1133 					  == 0xFFF)) {
1134 						TID = I2O_LCT_ENTRY_getLocalTID(
1135 						  Device);
1136 						break;
1137 					}
1138 				}
1139 				/*
1140 				 * Indicate to the OS that the label needs
1141 				 * to be recalculated, or that the specific
1142 				 * open device is no longer valid (Merde)
1143 				 * because the cached TID changed.
1144 				 */
1145 				LastTID = ASR_getTid (sc, bus, target, lun);
1146 				if (LastTID != TID) {
1147 					struct cam_path * path;
1148 
1149 					if (xpt_create_path(&path,
1150 					  /*periph*/NULL,
1151 					  cam_sim_path(sc->ha_sim[bus]),
1152 					  target, lun) != CAM_REQ_CMP) {
1153 						if (TID == (tid_t)-1) {
1154 							event |= AC_LOST_DEVICE;
1155 						} else {
1156 							event |= AC_INQ_CHANGED
1157 							       | AC_GETDEV_CHANGED;
1158 						}
1159 					} else {
1160 						if (TID == (tid_t)-1) {
1161 							xpt_async(
1162 							  AC_LOST_DEVICE,
1163 							  path, NULL);
1164 						} else if (LastTID == (tid_t)-1) {
1165 							struct ccb_getdev *ccb;
1166 
1167 							ccb = &xpt_alloc_ccb()->cgd;
1168 
1169 							xpt_setup_ccb(
1170 							  &ccb->ccb_h,
1171 							  path, /*priority*/5);
1172 							xpt_async(
1173 							  AC_FOUND_DEVICE,
1174 							  path,
1175 							  ccb);
1176 							xpt_free_ccb(&ccb->ccb_h);
1177 						} else {
1178 							xpt_async(
1179 							  AC_INQ_CHANGED,
1180 							  path, NULL);
1181 							xpt_async(
1182 							  AC_GETDEV_CHANGED,
1183 							  path, NULL);
1184 						}
1185 					}
1186 				}
1187 				/*
1188 				 *	We have the option of clearing the
1189 				 * cached TID for it to be rescanned, or to
1190 				 * set it now even if the device never got
1191 				 * accessed. We chose the latter since we
1192 				 * currently do not use the condition that
1193 				 * the TID ever got cached.
1194 				 */
1195 				ASR_setTid (sc, bus, target, lun, TID);
1196 			}
1197 		}
1198 		/*
1199 		 *	The xpt layer cannot handle multiple events in the
1200 		 * same call.
1201 		 */
1202 		if (event & AC_LOST_DEVICE) {
1203 			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1204 		}
1205 		if (event & AC_INQ_CHANGED) {
1206 			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1207 		}
1208 		if (event & AC_GETDEV_CHANGED) {
1209 			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1210 		}
1211 	} while (--bus >= 0);
1212 	return (error);
1213 } /* ASR_rescan */
1214 
1215 /*-------------------------------------------------------------------------*/
1216 /*		      Function ASR_reset				   */
1217 /*-------------------------------------------------------------------------*/
1218 /* The Parameters Passed To This Function Are :				   */
1219 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1220 /*									   */
1221 /* This Function Will reset the adapter and resynchronize any data	   */
1222 /*									   */
1223 /* Return : None							   */
1224 /*-------------------------------------------------------------------------*/
1225 
1226 static int
1227 ASR_reset(Asr_softc_t *sc)
1228 {
1229 	int retVal;
1230 
1231 	crit_enter();
1232 	if ((sc->ha_in_reset == HA_IN_RESET)
1233 	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1234 		crit_exit();
1235 		return (EBUSY);
1236 	}
1237 	/*
1238 	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1239 	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1240 	 */
1241 	++(sc->ha_in_reset);
1242 	if (ASR_resetIOP(sc) == 0) {
1243 		debug_asr_printf ("ASR_resetIOP failed\n");
1244 		/*
1245 		 *	We really need to take this card off-line; that is easier
1246 		 * said than done. Better to keep retrying for now, since if a
1247 		 * UART cable is connected and the adapter is blinking its LEDs,
1248 		 * it is in a hard state requiring action via the monitor commands
1249 		 * to the HBA before it can continue. For debugging, waiting forever is a
1250 		 * good thing. In a production system, however, one may wish
1251 		 * to instead take the card off-line ...
1252 		 */
1253 		/* Wait Forever */
1254 		while (ASR_resetIOP(sc) == 0);
1255 	}
1256 	retVal = ASR_init (sc);
1257 	crit_exit();
1258 	if (retVal != 0) {
1259 		debug_asr_printf ("ASR_init failed\n");
1260 		sc->ha_in_reset = HA_OFF_LINE;
1261 		return (ENXIO);
1262 	}
1263 	if (ASR_rescan (sc) != 0) {
1264 		debug_asr_printf ("ASR_rescan failed\n");
1265 	}
1266 	ASR_failActiveCommands (sc);
1267 	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1268 		kprintf ("asr%d: Bringing adapter back on-line\n",
1269 		  sc->ha_path[0]
1270 		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1271 		    : 0);
1272 	}
1273 	sc->ha_in_reset = HA_OPERATIONAL;
1274 	return (0);
1275 } /* ASR_reset */
1276 
1277 /*
1278  *	Device timeout handler.
1279  */
1280 static void
1281 asr_timeout(void *arg)
1282 {
1283 	union asr_ccb	*ccb = (union asr_ccb *)arg;
1284 	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1285 	int		s;
1286 
1287 	debug_asr_print_path(ccb);
1288 	debug_asr_printf("timed out");
1289 
1290 	/*
1291 	 *	Check whether the adapter has locked up.
1292 	 */
1293 	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1294 		/* Reset Adapter */
1295 		kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
1296 		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1297 		if (ASR_reset (sc) == ENXIO) {
1298 			/* Try again later */
1299 			callout_reset(ccb->ccb_h.timeout_ch,
1300 				      (ccb->ccb_h.timeout * hz) / 1000,
1301 				      asr_timeout, ccb);
1302 		}
1303 		return;
1304 	}
1305 	/*
1306 	 *	Abort does not function on the ASR card!!! Walking away from
1307 	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1308 	 * our best bet, followed by a complete adapter reset if that fails.
1309 	 */
1310 	crit_enter();
1311 	/* Check if we already timed out once to raise the issue */
1312 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1313 		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1314 		if (ASR_reset (sc) == ENXIO) {
1315 			callout_reset(ccb->ccb_h.timeout_ch,
1316 				      (ccb->ccb_h.timeout * hz) / 1000,
1317 				      asr_timeout, ccb);
1318 		}
1319 		crit_exit();
1320 		return;
1321 	}
1322 	debug_asr_printf ("\nresetting bus\n");
1323 	/* If the BUS reset does not take, then an adapter reset is next! */
1324 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1325 	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1326 	callout_reset(ccb->ccb_h.timeout_ch,
1327 		      (ccb->ccb_h.timeout * hz) / 1000,
1328 		      asr_timeout, ccb);
1329 	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1330 	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1331 	crit_exit();
1332 } /* asr_timeout */
1333 
1334 /*
1335  * send a message asynchronously
1336  */
1337 static int
1338 ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1339 {
1340 	U32		MessageOffset;
1341 	union asr_ccb	*ccb;
1342 
1343 	debug_asr_printf("Host Command Dump:\n");
1344 	debug_asr_dump_message(Message);
1345 
1346 	ccb = (union asr_ccb *)(long)
1347 	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1348 
1349 	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1350 		asr_set_frame(sc, Message, MessageOffset,
1351 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1352 		if (ccb) {
1353 			ASR_ccbAdd (sc, ccb);
1354 		}
1355 		/* Post the command */
1356 		asr_set_ToFIFO(sc, MessageOffset);
1357 	} else {
1358 		if (ASR_getBlinkLedCode(sc)) {
1359 			/*
1360 			 *	Unlikely we can do anything if we can't grab a
1361 			 * message frame :-(, but let's give it a try.
1362 			 */
1363 			(void)ASR_reset(sc);
1364 		}
1365 	}
1366 	return (MessageOffset);
1367 } /* ASR_queue */
1368 
1369 
1370 /* Simple Scatter Gather elements */
1371 #define	SG(SGL,Index,Flags,Buffer,Size)				   \
1372 	I2O_FLAGS_COUNT_setCount(				   \
1373 	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1374 	  Size);						   \
1375 	I2O_FLAGS_COUNT_setFlags(				   \
1376 	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1377 	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
1378 	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
1379 	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
1380 	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
1381 
1382 /*
1383  *	Retrieve Parameter Group.
1384  */
1385 static void *
1386 ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
1387 	      unsigned BufferSize)
1388 {
1389 	struct paramGetMessage {
1390 		I2O_UTIL_PARAMS_GET_MESSAGE M;
1391 		char
1392 		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1393 		struct Operations {
1394 			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1395 			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1396 		}			     O;
1397 	}				Message;
1398 	struct Operations		*Operations_Ptr;
1399 	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
1400 	struct ParamBuffer {
1401 		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1402 		I2O_PARAM_READ_OPERATION_RESULT	    Read;
1403 		char				    Info[1];
1404 	}				*Buffer_Ptr;
1405 
1406 	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
1407 	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1408 	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1409 	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1410 	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1411 	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1412 	bzero(Operations_Ptr, sizeof(struct Operations));
1413 	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1414 	  &(Operations_Ptr->Header), 1);
1415 	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1416 	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1417 	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1418 	  &(Operations_Ptr->Template[0]), 0xFFFF);
1419 	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1420 	  &(Operations_Ptr->Template[0]), Group);
1421 	Buffer_Ptr = (struct ParamBuffer *)Buffer;
1422 	bzero(Buffer_Ptr, BufferSize);
1423 
1424 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1425 	  I2O_VERSION_11
1426 	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1427 	    / sizeof(U32)) << 4));
1428 	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1429 	  TID);
1430 	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1431 	  I2O_UTIL_PARAMS_GET);
1432 	/*
1433 	 *  Set up the buffers as scatter gather elements.
1434 	 */
1435 	SG(&(Message_Ptr->SGL), 0,
1436 	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1437 	  Operations_Ptr, sizeof(struct Operations));
1438 	SG(&(Message_Ptr->SGL), 1,
1439 	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1440 	  Buffer_Ptr, BufferSize);
1441 
1442 	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1443 	 && (Buffer_Ptr->Header.ResultCount)) {
1444 		return ((void *)(Buffer_Ptr->Info));
1445 	}
1446 	return (NULL);
1447 } /* ASR_getParams */
1448 
1449 /*
1450  *	Acquire the LCT information.
1451  */
1452 static int
1453 ASR_acquireLct(Asr_softc_t *sc)
1454 {
1455 	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
1456 	PI2O_SGE_SIMPLE_ELEMENT		sg;
1457 	int				MessageSizeInBytes;
1458 	caddr_t				v;
1459 	int				len;
1460 	I2O_LCT				Table, *TableP = &Table;
1461 	PI2O_LCT_ENTRY			Entry;
1462 
1463 	/*
1464 	 *	sc value assumed valid
1465 	 */
1466 	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1467 	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1468 	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
1469 	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
1470 		return (ENOMEM);
1471 	}
1472 	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
1473 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1474 	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1475 	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
1476 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1477 	    I2O_EXEC_LCT_NOTIFY);
1478 	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1479 	    I2O_CLASS_MATCH_ANYCLASS);
1480 	/*
1481 	 *	Query the LCT to determine the number of device entries
1482 	 * to reserve space for.
1483 	 */
1484 	SG(&(Message_Ptr->SGL), 0,
1485 	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, TableP,
1486 	  sizeof(I2O_LCT));
1487 	/*
1488 	 *	since this code is reused in several systems, code efficiency
1489 	 * is greater by using a shift operation rather than a divide by
1490 	 * sizeof(u_int32_t).
1491 	 */
1492 	I2O_LCT_setTableSize(&Table,
1493 	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1494 	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1495 	/*
1496 	 *	Determine the size of the LCT table.
1497 	 */
1498 	if (sc->ha_LCT) {
1499 		kfree(sc->ha_LCT, M_TEMP);
1500 	}
1501 	/*
1502 	 *	kmalloc only guarantees contiguous memory when less than a
1503 	 * page is requested. We must break the request up into an SG list ...
1504 	 */
1505 	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1506 	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1507 	 || (len > (128 * 1024))) {	/* Arbitrary */
1508 		kfree(Message_Ptr, M_TEMP);
1509 		return (EINVAL);
1510 	}
1511 	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
1512 		kfree(Message_Ptr, M_TEMP);
1513 		return (ENOMEM);
1514 	}
1515 	/*
1516 	 *	since this code is reused in several systems, code efficiency
1517 	 * is greater by using a shift operation rather than a divide by
1518 	 * sizeof(u_int32_t).
1519 	 */
1520 	I2O_LCT_setTableSize(sc->ha_LCT,
1521 	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1522 	/*
1523 	 *	Convert the access to the LCT table into a SG list.
1524 	 */
1525 	sg = Message_Ptr->SGL.u.Simple;
1526 	v = (caddr_t)(sc->ha_LCT);
1527 	for (;;) {
1528 		int next, base, span;
1529 
1530 		span = 0;
1531 		next = base = KVTOPHYS(v);
1532 		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1533 
1534 		/* How far can we go contiguously */
1535 		while ((len > 0) && (base == next)) {
1536 			int size;
1537 
1538 			next = trunc_page(base) + PAGE_SIZE;
1539 			size = next - base;
1540 			if (size > len) {
1541 				size = len;
1542 			}
1543 			span += size;
1544 			v += size;
1545 			len -= size;
1546 			base = KVTOPHYS(v);
1547 		}
1548 
1549 		/* Construct the Flags */
1550 		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1551 		{
1552 			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1553 			if (len <= 0) {
1554 				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1555 				    | I2O_SGL_FLAGS_LAST_ELEMENT
1556 				    | I2O_SGL_FLAGS_END_OF_BUFFER);
1557 			}
1558 			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1559 		}
1560 
1561 		if (len <= 0) {
1562 			break;
1563 		}
1564 
1565 		/*
1566 		 * Incrementing requires resizing of the packet.
1567 		 */
1568 		++sg;
1569 		MessageSizeInBytes += sizeof(*sg);
1570 		I2O_MESSAGE_FRAME_setMessageSize(
1571 		  &(Message_Ptr->StdMessageFrame),
1572 		  I2O_MESSAGE_FRAME_getMessageSize(
1573 		    &(Message_Ptr->StdMessageFrame))
1574 		  + (sizeof(*sg) / sizeof(U32)));
1575 		{
1576 			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1577 
1578 			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1579 			    kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
1580 			    == NULL) {
1581 				kfree(sc->ha_LCT, M_TEMP);
1582 				sc->ha_LCT = NULL;
1583 				kfree(Message_Ptr, M_TEMP);
1584 				return (ENOMEM);
1585 			}
1586 			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1587 			bcopy(Message_Ptr, NewMessage_Ptr, span);
1588 			kfree(Message_Ptr, M_TEMP);
1589 			sg = (PI2O_SGE_SIMPLE_ELEMENT)
1590 			  (((caddr_t)NewMessage_Ptr) + span);
1591 			Message_Ptr = NewMessage_Ptr;
1592 		}
1593 	}
1594 	{	int retval;
1595 
1596 		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1597 		kfree(Message_Ptr, M_TEMP);
1598 		if (retval != CAM_REQ_CMP) {
1599 			return (ENODEV);
1600 		}
1601 	}
1602 	/* If the LCT table grew, lets truncate accesses */
1603 	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1604 		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1605 	}
1606 	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1607 	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1608 	  ++Entry) {
1609 		Entry->le_type = I2O_UNKNOWN;
1610 		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1611 
1612 		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1613 			Entry->le_type = I2O_BSA;
1614 			break;
1615 
1616 		case I2O_CLASS_SCSI_PERIPHERAL:
1617 			Entry->le_type = I2O_SCSI;
1618 			break;
1619 
1620 		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1621 			Entry->le_type = I2O_FCA;
1622 			break;
1623 
1624 		case I2O_CLASS_BUS_ADAPTER_PORT:
1625 			Entry->le_type = I2O_PORT | I2O_SCSI;
1626 			/* FALLTHRU */
1627 		case I2O_CLASS_FIBRE_CHANNEL_PORT:
1628 			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1629 			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
1630 				Entry->le_type = I2O_PORT | I2O_FCA;
1631 			}
1632 		{	struct ControllerInfo {
1633 				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1634 				I2O_PARAM_READ_OPERATION_RESULT	    Read;
1635 				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1636 			} Buffer;
1637 			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1638 
1639 			Entry->le_bus = 0xff;
1640 			Entry->le_target = 0xff;
1641 			Entry->le_lun = 0xff;
1642 
1643 			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1644 			  ASR_getParams(sc,
1645 			    I2O_LCT_ENTRY_getLocalTID(Entry),
1646 			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1647 			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
1648 				continue;
1649 			}
1650 			Entry->le_target
1651 			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1652 			    Info);
1653 			Entry->le_lun = 0;
1654 		}	/* FALLTHRU */
1655 		default:
1656 			continue;
1657 		}
1658 		{	struct DeviceInfo {
1659 				I2O_PARAM_RESULTS_LIST_HEADER	Header;
1660 				I2O_PARAM_READ_OPERATION_RESULT Read;
1661 				I2O_DPT_DEVICE_INFO_SCALAR	Info;
1662 			} Buffer;
1663 			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;
1664 
1665 			Entry->le_bus = 0xff;
1666 			Entry->le_target = 0xff;
1667 			Entry->le_lun = 0xff;
1668 
1669 			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1670 			  ASR_getParams(sc,
1671 			    I2O_LCT_ENTRY_getLocalTID(Entry),
1672 			    I2O_DPT_DEVICE_INFO_GROUP_NO,
1673 			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
1674 				continue;
1675 			}
1676 			Entry->le_type
1677 			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1678 			Entry->le_bus
1679 			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1680 			if ((Entry->le_bus > sc->ha_MaxBus)
1681 			 && (Entry->le_bus <= MAX_CHANNEL)) {
1682 				sc->ha_MaxBus = Entry->le_bus;
1683 			}
1684 			Entry->le_target
1685 			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1686 			Entry->le_lun
1687 			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1688 		}
1689 	}
1690 	/*
1691 	 *	A zero return value indicates success.
1692 	 */
1693 	return (0);
1694 } /* ASR_acquireLct */
1695 
1696 /*
1697  * Initialize a message frame.
1698  * We assume that the CDB has already been set up, so all we do here is
1699  * generate the Scatter Gather list.
1700  */
1701 static PI2O_MESSAGE_FRAME
1702 ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
1703 {
1704 	PI2O_MESSAGE_FRAME	Message_Ptr;
1705 	PI2O_SGE_SIMPLE_ELEMENT sg;
1706 	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1707 	vm_size_t		size, len;
1708 	caddr_t			v;
1709 	U32			MessageSize;
1710 	int			next, span, base, rw;
1711 	int			target = ccb->ccb_h.target_id;
1712 	int			lun = ccb->ccb_h.target_lun;
1713 	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1714 	tid_t			TID;
1715 
1716 	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1717 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
1718 	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
1719 	      sizeof(I2O_SG_ELEMENT)));
1720 
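	/*
	 * No cached TID for this bus/target/lun yet: scan the LCT for a
	 * matching entry that has not been claimed (UserTID still 0xFFF)
	 * and remember its LocalTID for subsequent commands.
	 */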
1721 	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1722 		PI2O_LCT_ENTRY Device;
1723 
1724 		TID = 0;
1725 		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1726 		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
1727 		    ++Device) {
1728 			if ((Device->le_type != I2O_UNKNOWN)
1729 			 && (Device->le_bus == bus)
1730 			 && (Device->le_target == target)
1731 			 && (Device->le_lun == lun)
1732 			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1733 				TID = I2O_LCT_ENTRY_getLocalTID(Device);
1734 				ASR_setTid(sc, Device->le_bus,
1735 					   Device->le_target, Device->le_lun,
1736 					   TID);
1737 				break;
1738 			}
1739 		}
1740 	}
1741 	if (TID == (tid_t)0) {
1742 		return (NULL);
1743 	}
1744 	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1745 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1746 	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1747 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1748 	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1749 		/ sizeof(U32)) << 4));
1750 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1751 	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1752 	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1753 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1754 	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1755 	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1756 	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1757 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1758 	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1759 	    I2O_SCB_FLAG_ENABLE_DISCONNECT
1760 	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1761 	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1762 	/*
1763 	 * We do not need any (optional byteswapping) method access to
1764 	 * the Initiator & Transaction context field.
1765 	 */
1766 	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1767 
1768 	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1769 	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1770 	/*
1771 	 * copy the cdb over
1772 	 */
1773 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1774 	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1775 	bcopy(&(ccb->csio.cdb_io),
1776 	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
1777 	    ccb->csio.cdb_len);
1778 
1779 	/*
1780 	 * Given a buffer describing a transfer, set up a scatter/gather map
1781 	 * in a ccb to map that SCSI transfer.
1782 	 */
1783 
1784 	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1785 
1786 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1787 	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1788 	  (ccb->csio.dxfer_len)
1789 	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1790 		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1791 		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1792 		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1793 		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1794 		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1795 		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1796 		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1797 	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
1798 		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1799 		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1800 
1801 	/*
1802 	 * Given a transfer described by a `data', fill in the SG list.
1803 	 */
1804 	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1805 
1806 	len = ccb->csio.dxfer_len;
1807 	v = ccb->csio.data_ptr;
1808 	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
1809 	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1810 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1811 	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
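	/*
	 * Walk the virtually contiguous data buffer and emit one simple SG
	 * element per physically contiguous run: KVTOPHYS() is applied page
	 * by page and the current element is extended for as long as the
	 * physical addresses remain adjacent.
	 */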
1812 	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1813 	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1814 		span = 0;
1815 		next = base = KVTOPHYS(v);
1816 		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1817 
1818 		/* How far can we go contiguously */
1819 		while ((len > 0) && (base == next)) {
1820 			next = trunc_page(base) + PAGE_SIZE;
1821 			size = next - base;
1822 			if (size > len) {
1823 				size = len;
1824 			}
1825 			span += size;
1826 			v += size;
1827 			len -= size;
1828 			base = KVTOPHYS(v);
1829 		}
1830 
1831 		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1832 		if (len == 0) {
1833 			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1834 		}
1835 		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1836 		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1837 		++sg;
1838 		MessageSize += sizeof(*sg) / sizeof(U32);
1839 	}
1840 	/* We always do the request sense ... */
1841 	if ((span = ccb->csio.sense_len) == 0) {
1842 		span = sizeof(ccb->csio.sense_data);
1843 	}
1844 	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1845 	  &(ccb->csio.sense_data), span);
1846 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1847 	  MessageSize + (sizeof(*sg) / sizeof(U32)));
1848 	return (Message_Ptr);
1849 } /* ASR_init_message */
1850 
1851 /*
1852  *	Initialize the outbound reply FIFO.
1853  */
1854 static U32
1855 ASR_initOutBound(Asr_softc_t *sc)
1856 {
1857 	struct initOutBoundMessage {
1858 		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
1859 		U32			       R;
1860 	}				Message;
1861 	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
1862 	U32				*volatile Reply_Ptr;
1863 	U32				Old;
1864 
1865 	/*
1866 	 *  Build up our copy of the Message.
1867 	 */
1868 	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
1869 	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
1870 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1871 	  I2O_EXEC_OUTBOUND_INIT);
1872 	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
1873 	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
1874 	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
1875 	/*
1876 	 *  Reset the Reply Status
1877 	 */
1878 	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
1879 	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
1880 	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
1881 	  sizeof(U32));
1882 	/*
1883 	 *	Send the Message out
1884 	 */
1885 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
1886 	    0xffffffff) {
1887 		u_long size, addr;
1888 
1889 		/*
1890 		 *	Wait for a response (Poll).
1891 		 */
1892 		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
1893 		/*
1894 		 *	Re-enable the interrupts.
1895 		 */
1896 		asr_set_intr(sc, Old);
1897 		/*
1898 		 *	Populate the outbound table.
1899 		 */
1900 		if (sc->ha_Msgs == NULL) {
1901 
1902 			/* Allocate the reply frames */
1903 			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1904 			  * sc->ha_Msgs_Count;
1905 
1906 			/*
1907 			 *	contigmalloc only works reliably at
1908 			 * initialization time.
1909 			 */
1910 			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1911 			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
1912 			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
1913 				bzero(sc->ha_Msgs, size);
1914 				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
1915 			}
1916 		}
1917 
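		/*
		 * Every reply frame address pushed onto the free (outbound)
		 * FIFO becomes a slot the IOP can fill with a reply;
		 * asr_intr() later converts the offsets the IOP posts back
		 * into pointers within this array.
		 */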
1918 		/* Initialize the outbound FIFO */
1919 		if (sc->ha_Msgs != NULL)
1920 			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
1921 			    size; --size) {
1922 				asr_set_FromFIFO(sc, addr);
1923 				addr +=
1924 				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
1925 			}
1926 		return (*Reply_Ptr);
1927 	}
1928 	return (0);
1929 } /* ASR_initOutBound */
1930 
1931 /*
1932  *	Set the system table
1933  */
1934 static int
1935 ASR_setSysTab(Asr_softc_t *sc)
1936 {
1937 	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1938 	PI2O_SET_SYSTAB_HEADER	      SystemTable;
1939 	Asr_softc_t		    * ha;
1940 	PI2O_SGE_SIMPLE_ELEMENT	      sg;
1941 	int			      retVal;
1942 
1943 	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
1944 	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1945 		return (ENOMEM);
1946 	}
1947 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1948 		++SystemTable->NumberEntries;
1949 	}
1950 	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
1951 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1952 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1953 	  M_TEMP, M_WAITOK)) == NULL) {
1954 		kfree(SystemTable, M_TEMP);
1955 		return (ENOMEM);
1956 	}
1957 	(void)ASR_fillMessage((void *)Message_Ptr,
1958 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1959 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1960 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1961 	  (I2O_VERSION_11 +
1962 	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1963 			/ sizeof(U32)) << 4)));
1964 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1965 	  I2O_EXEC_SYS_TAB_SET);
1966 	/*
1967 	 *	Locate the start of the SG list from the message's version
1968 	 * offset field.
1969 	 *	Since this code is reused in several systems, code efficiency
1970 	 * is greater by using a shift operation rather than a divide by
1971 	 * sizeof(u_int32_t).
1972 	 */
1973 	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
1974 	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
1975 	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
1976 	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
1977 	++sg;
1978 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1979 		SG(sg, 0,
1980 		  ((ha->ha_next)
1981 		    ? (I2O_SGL_FLAGS_DIR)
1982 		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
1983 		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
1984 		++sg;
1985 	}
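	/*
	 * The two zero-length elements below terminate the SGL; presumably
	 * they stand in for the (empty) private memory and I/O space
	 * descriptors that ExecSysTabSet expects after the IOP entries.
	 */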
1986 	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1987 	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
1988 	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1989 	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1990 	kfree(Message_Ptr, M_TEMP);
1991 	kfree(SystemTable, M_TEMP);
1992 	return (retVal);
1993 } /* ASR_setSysTab */
1994 
1995 static int
1996 ASR_acquireHrt(Asr_softc_t *sc)
1997 {
1998 	I2O_EXEC_HRT_GET_MESSAGE	Message;
1999 	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
2000 	struct {
2001 		I2O_HRT	      Header;
2002 		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2003 	}				Hrt, *HrtP = &Hrt;
2004 	u_int8_t			NumberOfEntries;
2005 	PI2O_HRT_ENTRY			Entry;
2006 
2007 	bzero(&Hrt, sizeof (Hrt));
2008 	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
2009 	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2010 	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2011 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2012 	  (I2O_VERSION_11
2013 	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2014 		   / sizeof(U32)) << 4)));
2015 	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2016 	  I2O_EXEC_HRT_GET);
2017 
2018 	/*
2019 	 *  Set up the buffers as scatter gather elements.
2020 	 */
2021 	SG(&(Message_Ptr->SGL), 0,
2022 	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2023 	  HrtP, sizeof(Hrt));
2024 	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2025 		return (ENODEV);
2026 	}
2027 	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2028 	  > (MAX_CHANNEL + 1)) {
2029 		NumberOfEntries = MAX_CHANNEL + 1;
2030 	}
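	/*
	 * Each HRT entry's AdapterID carries the controller TID in its low
	 * 12 bits and, in this firmware's layout, the bus number in the
	 * upper bits; match it against the LCT so every device entry learns
	 * which physical channel it sits on.
	 */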
2031 	for (Entry = Hrt.Header.HRTEntry;
2032 	  NumberOfEntries != 0;
2033 	  ++Entry, --NumberOfEntries) {
2034 		PI2O_LCT_ENTRY Device;
2035 
2036 		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2037 		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2038 		  ++Device) {
2039 			if (I2O_LCT_ENTRY_getLocalTID(Device)
2040 			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2041 				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2042 				  Entry) >> 16;
2043 				if ((Device->le_bus > sc->ha_MaxBus)
2044 				 && (Device->le_bus <= MAX_CHANNEL)) {
2045 					sc->ha_MaxBus = Device->le_bus;
2046 				}
2047 			}
2048 		}
2049 	}
2050 	return (0);
2051 } /* ASR_acquireHrt */
2052 
2053 /*
2054  *	Enable the adapter.
2055  */
2056 static int
2057 ASR_enableSys(Asr_softc_t *sc)
2058 {
2059 	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2060 	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2061 
2062 	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2063 	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2064 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2065 	  I2O_EXEC_SYS_ENABLE);
2066 	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2067 } /* ASR_enableSys */
2068 
2069 /*
2070  *	Perform the stages necessary to initialize the adapter.
2071  */
2072 static int
2073 ASR_init(Asr_softc_t *sc)
2074 {
2075 	return ((ASR_initOutBound(sc) == 0)
2076 	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2077 	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2078 } /* ASR_init */
2079 
2080 /*
2081  *	Send a Synchronize Cache command to the target device.
2082  */
2083 static void
2084 ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
2085 {
2086 	tid_t TID;
2087 
2088 	/*
2089 	 * We will not synchronize the device when there are outstanding
2090 	 * commands issued by the OS (this is due to a locked up device,
2091 	 * as the OS normally would flush all outstanding commands before
2092 	 * issuing a shutdown or an adapter reset).
2093 	 */
2094 	if ((sc != NULL)
2095 	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2096 	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2097 	 && (TID != (tid_t)0)) {
2098 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2099 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2100 
2101 		Message_Ptr = &Message;
2102 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2103 		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2104 
2105 		I2O_MESSAGE_FRAME_setVersionOffset(
2106 		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2107 		  I2O_VERSION_11
2108 		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2109 		    - sizeof(I2O_SG_ELEMENT))
2110 			/ sizeof(U32)) << 4));
2111 		I2O_MESSAGE_FRAME_setMessageSize(
2112 		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2113 		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2114 		  - sizeof(I2O_SG_ELEMENT))
2115 			/ sizeof(U32));
2116 		I2O_MESSAGE_FRAME_setInitiatorAddress (
2117 		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2118 		I2O_MESSAGE_FRAME_setFunction(
2119 		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2120 		I2O_MESSAGE_FRAME_setTargetAddress(
2121 		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2122 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2123 		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2124 		  I2O_SCSI_SCB_EXEC);
2125 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2126 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2127 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2128 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2129 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2130 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2131 		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2132 		  DPT_ORGANIZATION_ID);
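		/*
		 * Build the SYNCHRONIZE CACHE CDB by hand, placing the LUN
		 * in bits 7:5 of byte 1 in the legacy SCSI-2 fashion.
		 */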
2133 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2134 		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2135 		Message_Ptr->CDB[1] = (lun << 5);
2136 
2137 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2138 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2139 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2140 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2141 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2142 
2143 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2144 
2145 	}
2146 }
2147 
2148 static void
2149 ASR_synchronize(Asr_softc_t *sc)
2150 {
2151 	int bus, target, lun;
2152 
2153 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2154 		for (target = 0; target <= sc->ha_MaxId; ++target) {
2155 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2156 				ASR_sync(sc,bus,target,lun);
2157 			}
2158 		}
2159 	}
2160 }
2161 
2162 /*
2163  *	Reset the HBA, targets and BUS.
2164  *		Currently this resets *all* the SCSI busses.
2165  */
2166 static __inline void
2167 asr_hbareset(Asr_softc_t *sc)
2168 {
2169 	ASR_synchronize(sc);
2170 	(void)ASR_reset(sc);
2171 } /* asr_hbareset */
2172 
2173 /*
2174  *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2175  * limit and a reduction in error checking (in the pre 4.0 case).
2176  */
2177 static int
2178 asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
2179 {
2180 	int		rid;
2181 	u_int32_t	p, l, s;
2182 
2183 	/*
2184 	 * The I2O specification says we must find the first *memory* mapped BAR
2185 	 */
2186 	for (rid = 0; rid < 4; rid++) {
2187 		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
2188 		if ((p & 1) == 0) {
2189 			break;
2190 		}
2191 	}
2192 	/*
2193 	 *	Give up?
2194 	 */
2195 	if (rid >= 4) {
2196 		rid = 0;
2197 	}
2198 	rid = PCIR_BAR(rid);
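	/*
	 * Standard BAR sizing: save the BAR, write all ones, read back the
	 * mask (ignoring the low four type bits) and negate it to obtain
	 * the window length, then restore the original value.
	 */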
2199 	p = pci_read_config(dev, rid, sizeof(p));
2200 	pci_write_config(dev, rid, -1, sizeof(p));
2201 	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2202 	pci_write_config(dev, rid, p, sizeof(p));
2203 	if (l > MAX_MAP) {
2204 		l = MAX_MAP;
2205 	}
2206 	/*
2207 	 * The 2005S Zero Channel RAID solution is not a perfect PCI
2208 	 * citizen. It asks for 4MB on BAR0 and 0MB on BAR1; once
2209 	 * enabled, it rewrites the size of BAR0 to 2MB, sets BAR1 to
2210 	 * BAR0+2MB and sets its size to 2MB. The IOP registers are
2211 	 * accessible via BAR0 and the messaging registers via BAR1.
2212 	 * This applies when the subdevice code is 50 to 59 decimal.
2213 	 */
2214 	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
2215 	if (s != 0xA5111044) {
2216 		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
2217 		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2218 		 && (ADPTDOMINATOR_SUB_ID_START <= s)
2219 		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2220 			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2221 		}
2222 	}
2223 	p &= ~15;
2224 	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2225 	  p, p + l, l, RF_ACTIVE);
2226 	if (sc->ha_mem_res == NULL) {
2227 		return (0);
2228 	}
2229 	sc->ha_Base = rman_get_start(sc->ha_mem_res);
2230 	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
2231 	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);
2232 
2233 	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2234 		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
2235 			return (0);
2236 		}
2237 		p = pci_read_config(dev, rid, sizeof(p));
2238 		pci_write_config(dev, rid, -1, sizeof(p));
2239 		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2240 		pci_write_config(dev, rid, p, sizeof(p));
2241 		if (l > MAX_MAP) {
2242 			l = MAX_MAP;
2243 		}
2244 		p &= ~15;
2245 		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2246 		  p, p + l, l, RF_ACTIVE);
2247 		if (sc->ha_mes_res == NULL) {
2248 			return (0);
2249 		}
2250 		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
2251 		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
2252 	} else {
2253 		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
2254 		sc->ha_frame_btag = sc->ha_i2o_btag;
2255 	}
2256 	return (1);
2257 } /* asr_pci_map_mem */
2258 
2259 /*
2260  *	A simplified copy of the real pci_map_int with additional
2261  * registration requirements.
2262  */
2263 static int
2264 asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2265 {
2266 	int rid = 0;
2267 
2268 	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2269 	  RF_ACTIVE | RF_SHAREABLE);
2270 	if (sc->ha_irq_res == NULL) {
2271 		return (0);
2272 	}
2273 	if (bus_setup_intr(dev, sc->ha_irq_res, 0,
2274 	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
2275 		return (0);
2276 	}
2277 	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2278 	return (1);
2279 } /* asr_pci_map_int */
2280 
2281 static void
2282 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2283 {
2284 	Asr_softc_t *sc;
2285 
2286 	if (error)
2287 		return;
2288 
2289 	sc = (Asr_softc_t *)arg;
2290 
2291 	/* XXX
2292 	 * The status word can be at a 64-bit address, but the existing
2293 	 * accessor macros simply cannot manipulate 64-bit addresses.
2294 	 */
2295 	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2296 	    offsetof(struct Asr_status_mem, status);
2297 	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2298 	    offsetof(struct Asr_status_mem, rstatus);
2299 }
2300 
2301 static int
2302 asr_alloc_dma(Asr_softc_t *sc)
2303 {
2304 	device_t dev;
2305 
2306 	dev = sc->ha_dev;
2307 
2308 	if (bus_dma_tag_create(NULL,			/* parent */
2309 			       1, 0,			/* algnmnt, boundary */
2310 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2311 			       BUS_SPACE_MAXADDR,	/* highaddr */
2312 			       NULL, NULL,		/* filter, filterarg */
2313 			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2314 			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
2315 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2316 			       0,			/* flags */
2317 			       &sc->ha_parent_dmat)) {
2318 		device_printf(dev, "Cannot allocate parent DMA tag\n");
2319 		return (ENOMEM);
2320 	}
2321 
2322 	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
2323 			       1, 0,			/* algnmnt, boundary */
2324 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2325 			       BUS_SPACE_MAXADDR,	/* highaddr */
2326 			       NULL, NULL,		/* filter, filterarg */
2327 			       sizeof(sc->ha_statusmem),/* maxsize */
2328 			       1,			/* nsegments */
2329 			       sizeof(sc->ha_statusmem),/* maxsegsize */
2330 			       0,			/* flags */
2331 			       &sc->ha_statusmem_dmat)) {
2332 		device_printf(dev, "Cannot allocate status DMA tag\n");
2333 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2334 		return (ENOMEM);
2335 	}
2336 
2337 	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
2338 	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
2339 		device_printf(dev, "Cannot allocate status memory\n");
2340 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2341 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2342 		return (ENOMEM);
2343 	}
2344 	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
2345 	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);
2346 
2347 	return (0);
2348 }
2349 
2350 static void
2351 asr_release_dma(Asr_softc_t *sc)
2352 {
2353 
2354 	if (sc->ha_rstatus_phys != 0)
2355 		bus_dmamap_unload(sc->ha_statusmem_dmat,
2356 		    sc->ha_statusmem_dmamap);
2357 	if (sc->ha_statusmem != NULL)
2358 		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
2359 		    sc->ha_statusmem_dmamap);
2360 	if (sc->ha_statusmem_dmat != NULL)
2361 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2362 	if (sc->ha_parent_dmat != NULL)
2363 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2364 }
2365 
2366 /*
2367  *	Attach the devices, and virtual devices to the driver list.
2368  */
2369 static int
2370 asr_attach(device_t dev)
2371 {
2372 	PI2O_EXEC_STATUS_GET_REPLY status;
2373 	PI2O_LCT_ENTRY		 Device;
2374 	Asr_softc_t		 *sc, **ha;
2375 	struct scsi_inquiry_data *iq;
2376 	int			 bus, size, unit;
2377 	int			 error;
2378 
2379 	sc = device_get_softc(dev);
2380 	unit = device_get_unit(dev);
2381 	sc->ha_dev = dev;
2382 
2383 	if (Asr_softc_list == NULL) {
2384 		/*
2385 		 *	Fixup the OS revision as saved in the dptsig for the
2386 		 *	engine (dptioctl.h) to pick up.
2387 		 */
2388 		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2389 	}
2390 	/*
2391 	 *	Initialize the software structure
2392 	 */
2393 	LIST_INIT(&(sc->ha_ccb));
2394 	/* Link us into the HA list */
2395 	for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next))
2396 		;
2397 	*(ha) = sc;
2398 
2399 	/*
2400 	 *	This is the real McCoy!
2401 	 */
2402 	if (!asr_pci_map_mem(dev, sc)) {
2403 		device_printf(dev, "could not map memory\n");
2404 		return(ENXIO);
2405 	}
2406 	/* Enable if not formerly enabled */
2407 	pci_write_config(dev, PCIR_COMMAND,
2408 	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2409 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2410 
2411 	sc->ha_pciBusNum = pci_get_bus(dev);
2412 	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2413 
2414 	if ((error = asr_alloc_dma(sc)) != 0)
2415 		return (error);
2416 
2417 	/* Check whether the device is there. */
2418 	if (ASR_resetIOP(sc) == 0) {
2419 		device_printf(dev, "Cannot reset adapter\n");
2420 		asr_release_dma(sc);
2421 		return (EIO);
2422 	}
2423 	status = &sc->ha_statusmem->status;
2424 	if (ASR_getStatus(sc) == NULL) {
2425 		device_printf(dev, "could not initialize hardware\n");
2426 		asr_release_dma(sc);
2427 		return(ENODEV);
2428 	}
2429 	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2430 	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2431 	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2432 	sc->ha_SystemTable.IopState = status->IopState;
2433 	sc->ha_SystemTable.MessengerType = status->MessengerType;
2434 	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2435 	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2436 	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2437 
2438 	if (!asr_pci_map_int(dev, (void *)sc)) {
2439 		device_printf(dev, "could not map interrupt\n");
2440 		asr_release_dma(sc);
2441 		return(ENXIO);
2442 	}
2443 
2444 	/* Adjust the maximum inbound count */
2445 	if (((sc->ha_QueueSize =
2446 	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2447 	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2448 		sc->ha_QueueSize = MAX_INBOUND;
2449 	}
2450 
2451 	/* Adjust the maximum outbound count */
2452 	if (((sc->ha_Msgs_Count =
2453 	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2454 	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2455 		sc->ha_Msgs_Count = MAX_OUTBOUND;
2456 	}
2457 	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2458 		sc->ha_Msgs_Count = sc->ha_QueueSize;
2459 	}
2460 
2461 	/* Adjust the maximum SG size to adapter */
2462 	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2463 	    2)) > MAX_INBOUND_SIZE) {
2464 		size = MAX_INBOUND_SIZE;
2465 	}
2466 	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2467 	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
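	/*
	 * ha_SgSize is the number of simple SG elements that fit in one
	 * inbound frame after the fixed SCSI SCB execute header; it bounds
	 * how fragmented a single request's data buffer may be.
	 */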
2468 
2469 	/*
2470 	 *	Only do a bus/HBA reset on the first time through. On this
2471 	 * first time through, we do not send a flush to the devices.
2472 	 */
2473 	if (ASR_init(sc) == 0) {
2474 		struct BufferInfo {
2475 			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2476 			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2477 			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2478 		} Buffer;
2479 		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2480 #define FW_DEBUG_BLED_OFFSET 8
2481 
2482 		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2483 		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2484 		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2485 			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2486 			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2487 		}
2488 		if (ASR_acquireLct(sc) == 0) {
2489 			(void)ASR_acquireHrt(sc);
2490 		}
2491 	} else {
2492 		device_printf(dev, "failed to initialize\n");
2493 		asr_release_dma(sc);
2494 		return(ENXIO);
2495 	}
2496 	/*
2497 	 *	Add in additional probe responses for more channels, and
2498 	 * record the adapter's own target ID on each channel.
2499 	 * Done here because we need both the acquireLct and
2500 	 * acquireHrt data.
2501 	 */
2502 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2503 	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2504 		if (Device->le_type == I2O_UNKNOWN) {
2505 			continue;
2506 		}
2507 		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2508 			if (Device->le_target > sc->ha_MaxId) {
2509 				sc->ha_MaxId = Device->le_target;
2510 			}
2511 			if (Device->le_lun > sc->ha_MaxLun) {
2512 				sc->ha_MaxLun = Device->le_lun;
2513 			}
2514 		}
2515 		if (((Device->le_type & I2O_PORT) != 0)
2516 		 && (Device->le_bus <= MAX_CHANNEL)) {
2517 			/* Do not increase MaxId for efficiency */
2518 			sc->ha_adapter_target[Device->le_bus] =
2519 			    Device->le_target;
2520 		}
2521 	}
2522 
2523 	/*
2524 	 *	Print the HBA model number as inquired from the card.
2525 	 */
2526 
2527 	device_printf(dev, " ");
2528 
2529 	if ((iq = (struct scsi_inquiry_data *)kmalloc(
2530 	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2531 	    NULL) {
2532 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2533 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2534 		int					posted = 0;
2535 
2536 		Message_Ptr = &Message;
2537 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2538 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2539 
2540 		I2O_MESSAGE_FRAME_setVersionOffset(
2541 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2542 		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2543 		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2544 		I2O_MESSAGE_FRAME_setMessageSize(
2545 		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2546 		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2547 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2548 		    sizeof(U32));
2549 		I2O_MESSAGE_FRAME_setInitiatorAddress(
2550 		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2551 		I2O_MESSAGE_FRAME_setFunction(
2552 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2553 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2554 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2555 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2556 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2557 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2558 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2559 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2560 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2561 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2562 		    DPT_ORGANIZATION_ID);
2563 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2564 		Message_Ptr->CDB[0] = INQUIRY;
2565 		Message_Ptr->CDB[4] =
2566 		    (unsigned char)sizeof(struct scsi_inquiry_data);
2567 		if (Message_Ptr->CDB[4] == 0) {
2568 			Message_Ptr->CDB[4] = 255;
2569 		}
2570 
2571 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2572 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2573 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2574 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2575 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2576 
2577 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2578 		  Message_Ptr, sizeof(struct scsi_inquiry_data));
2579 		SG(&(Message_Ptr->SGL), 0,
2580 		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2581 		  iq, sizeof(struct scsi_inquiry_data));
2582 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2583 
2584 		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2585 			kprintf (" ");
2586 			ASR_prstring (iq->vendor, 8);
2587 			++posted;
2588 		}
2589 		if (iq->product[0] && (iq->product[0] != ' ')) {
2590 			kprintf (" ");
2591 			ASR_prstring (iq->product, 16);
2592 			++posted;
2593 		}
2594 		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2595 			kprintf (" FW Rev. ");
2596 			ASR_prstring (iq->revision, 4);
2597 			++posted;
2598 		}
2599 		kfree(iq, M_TEMP);
2600 		if (posted) {
2601 			kprintf (",");
2602 		}
2603 	}
2604 	kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2605 	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2606 
2607 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2608 		struct cam_devq	  * devq;
2609 		int		    QueueSize = sc->ha_QueueSize;
2610 
2611 		if (QueueSize > MAX_INBOUND) {
2612 			QueueSize = MAX_INBOUND;
2613 		}
2614 
2615 		/*
2616 		 *	Create the device queue for our SIM(s).
2617 		 */
2618 		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2619 			continue;
2620 		}
2621 
2622 		/*
2623 		 *	Construct our first channel SIM entry
2624 		 */
2625 		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2626 						unit, &sim_mplock,
2627 						1, QueueSize, devq);
2628 		if (sc->ha_sim[bus] == NULL) {
2629 			continue;
2630 		}
2631 
2632 		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
2633 			cam_sim_free(sc->ha_sim[bus]);
2634 			sc->ha_sim[bus] = NULL;
2635 			continue;
2636 		}
2637 
2638 		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2639 		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2640 		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2641 			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2642 			cam_sim_free(sc->ha_sim[bus]);
2643 			sc->ha_sim[bus] = NULL;
2644 			continue;
2645 		}
2646 	}
2647 
2648 	/*
2649 	 *	Generate the device node information
2650 	 */
2651 	sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
2652 			       "asr%d", unit);
2653 	if (sc->ha_devt != NULL)
2654 		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2655 	sc->ha_devt->si_drv1 = sc;
2656 	return(0);
2657 } /* asr_attach */
2658 
2659 static void
2660 asr_poll(struct cam_sim *sim)
2661 {
2662 	asr_intr(cam_sim_softc(sim));
2663 } /* asr_poll */
2664 
2665 static void
2666 asr_action(struct cam_sim *sim, union ccb  *ccb)
2667 {
2668 	struct Asr_softc *sc;
2669 
2670 	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2671 			 ccb->ccb_h.func_code);
2672 
2673 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2674 
2675 	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2676 
2677 	switch (ccb->ccb_h.func_code) {
2678 
2679 	/* Common cases first */
2680 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2681 	{
2682 		struct Message {
2683 			char M[MAX_INBOUND_SIZE];
2684 		} Message;
2685 		PI2O_MESSAGE_FRAME   Message_Ptr;
2686 
2687 		/* Reject incoming commands while we are resetting the card */
2688 		if (sc->ha_in_reset != HA_OPERATIONAL) {
2689 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2690 			if (sc->ha_in_reset >= HA_OFF_LINE) {
2691 				/* HBA is now off-line */
2692 				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2693 			} else {
2694 				/* HBA currently resetting, try again later. */
2695 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2696 			}
2697 			debug_asr_cmd_printf (" e\n");
2698 			xpt_done(ccb);
2699 			debug_asr_cmd_printf (" q\n");
2700 			break;
2701 		}
2702 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2703 			kprintf(
2704 			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2705 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2706 			  ccb->csio.cdb_io.cdb_bytes[0],
2707 			  cam_sim_bus(sim),
2708 			  ccb->ccb_h.target_id,
2709 			  ccb->ccb_h.target_lun);
2710 		}
2711 		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2712 				     cam_sim_bus(sim), ccb->ccb_h.target_id,
2713 				     ccb->ccb_h.target_lun);
2714 		debug_asr_dump_ccb(ccb);
2715 
2716 		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2717 		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2718 			debug_asr_cmd2_printf ("TID=%x:\n",
2719 			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2720 			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2721 			debug_asr_cmd2_dump_message(Message_Ptr);
2722 			debug_asr_cmd1_printf (" q");
2723 
2724 			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2725 				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2726 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2727 				debug_asr_cmd_printf (" E\n");
2728 				xpt_done(ccb);
2729 			}
2730 			debug_asr_cmd_printf(" Q\n");
2731 			break;
2732 		}
2733 		/*
2734 		 *	We will get here if there is no valid TID for the device
2735 		 * referenced in the scsi command packet.
2736 		 */
2737 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2738 		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2739 		debug_asr_cmd_printf (" B\n");
2740 		xpt_done(ccb);
2741 		break;
2742 	}
2743 
2744 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
2745 		/* Reset HBA device ... */
2746 		asr_hbareset (sc);
2747 		ccb->ccb_h.status = CAM_REQ_CMP;
2748 		xpt_done(ccb);
2749 		break;
2750 
2751 	case XPT_ABORT:			/* Abort the specified CCB */
2752 		/* XXX Implement */
2753 		ccb->ccb_h.status = CAM_REQ_INVALID;
2754 		xpt_done(ccb);
2755 		break;
2756 
2757 	case XPT_SET_TRAN_SETTINGS:
2758 		/* XXX Implement */
2759 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2760 		xpt_done(ccb);
2761 		break;
2762 
2763 	case XPT_GET_TRAN_SETTINGS:
2764 	/* Get default/user set transfer settings for the target */
2765 	{
2766 		struct	ccb_trans_settings *cts = &(ccb->cts);
2767 		struct ccb_trans_settings_scsi *scsi =
2768 		    &cts->proto_specific.scsi;
2769 		struct ccb_trans_settings_spi *spi =
2770 		    &cts->xport_specific.spi;
2771 
2772 		if (cts->type == CTS_TYPE_USER_SETTINGS) {
2773 			cts->protocol = PROTO_SCSI;
2774 			cts->protocol_version = SCSI_REV_2;
2775 			cts->transport = XPORT_SPI;
2776 			cts->transport_version = 2;
2777 
2778 			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2779 			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2780 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2781 			spi->sync_period = 6; /* 40MHz */
2782 			spi->sync_offset = 15;
2783 			spi->valid = CTS_SPI_VALID_SYNC_RATE
2784 				   | CTS_SPI_VALID_SYNC_OFFSET
2785 				   | CTS_SPI_VALID_BUS_WIDTH
2786 				   | CTS_SPI_VALID_DISC;
2787 			scsi->valid = CTS_SCSI_VALID_TQ;
2788 
2789 			ccb->ccb_h.status = CAM_REQ_CMP;
2790 		} else {
2791 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2792 		}
2793 		xpt_done(ccb);
2794 		break;
2795 	}
2796 
2797 	case XPT_CALC_GEOMETRY:
2798 	{
2799 		struct	  ccb_calc_geometry *ccg;
2800 		u_int32_t size_mb;
2801 		u_int32_t secs_per_cylinder;
2802 
2803 		ccg = &(ccb->ccg);
2804 		size_mb = ccg->volume_size
2805 			/ ((1024L * 1024L) / ccg->block_size);
2806 
2807 		if (size_mb > 4096) {
2808 			ccg->heads = 255;
2809 			ccg->secs_per_track = 63;
2810 		} else if (size_mb > 2048) {
2811 			ccg->heads = 128;
2812 			ccg->secs_per_track = 63;
2813 		} else if (size_mb > 1024) {
2814 			ccg->heads = 65;
2815 			ccg->secs_per_track = 63;
2816 		} else {
2817 			ccg->heads = 64;
2818 			ccg->secs_per_track = 32;
2819 		}
2820 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2821 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2822 		ccb->ccb_h.status = CAM_REQ_CMP;
2823 		xpt_done(ccb);
2824 		break;
2825 	}
2826 
2827 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2828 		ASR_resetBus (sc, cam_sim_bus(sim));
2829 		ccb->ccb_h.status = CAM_REQ_CMP;
2830 		xpt_done(ccb);
2831 		break;
2832 
2833 	case XPT_TERM_IO:		/* Terminate the I/O process */
2834 		/* XXX Implement */
2835 		ccb->ccb_h.status = CAM_REQ_INVALID;
2836 		xpt_done(ccb);
2837 		break;
2838 
2839 	case XPT_PATH_INQ:		/* Path routing inquiry */
2840 	{
2841 		struct ccb_pathinq *cpi = &(ccb->cpi);
2842 
2843 		cpi->version_num = 1; /* XXX??? */
2844 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2845 		cpi->target_sprt = 0;
2846 		/* Not necessary to reset bus, done by HDM initialization */
2847 		cpi->hba_misc = PIM_NOBUSRESET;
2848 		cpi->hba_eng_cnt = 0;
2849 		cpi->max_target = sc->ha_MaxId;
2850 		cpi->max_lun = sc->ha_MaxLun;
2851 		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2852 		cpi->bus_id = cam_sim_bus(sim);
2853 		cpi->base_transfer_speed = 3300;
2854 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2855 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2856 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2857 		cpi->unit_number = cam_sim_unit(sim);
2858 		cpi->ccb_h.status = CAM_REQ_CMP;
2859 		cpi->transport = XPORT_SPI;
2860 		cpi->transport_version = 2;
2861 		cpi->protocol = PROTO_SCSI;
2862 		cpi->protocol_version = SCSI_REV_2;
2863 		xpt_done(ccb);
2864 		break;
2865 	}
2866 	default:
2867 		ccb->ccb_h.status = CAM_REQ_INVALID;
2868 		xpt_done(ccb);
2869 		break;
2870 	}
2871 } /* asr_action */
2872 
2873 /*
2874  * Process the replies posted to the outbound FIFO and complete their CCBs.
2875  */
2876 static int
2877 asr_intr(Asr_softc_t *sc)
2878 {
2879 	int processed;
2880 
2881 	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2882 	    processed = 1) {
2883 		union asr_ccb			   *ccb;
2884 		u_int				    dsc;
2885 		U32				    ReplyOffset;
2886 		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2887 
2888 		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2889 		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
2890 			break;
2891 		}
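		/*
		 * ReplyOffset is the bus address the IOP posted; convert it
		 * back into a pointer within the host-resident reply frame
		 * array set up by ASR_initOutBound().
		 */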
2892 		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2893 		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2894 		/*
2895 		 * We do not need any (optional byteswapping) method access to
2896 		 * the Initiator context field.
2897 		 */
2898 		ccb = (union asr_ccb *)(long)
2899 		  I2O_MESSAGE_FRAME_getInitiatorContext64(
2900 		    &(Reply->StdReplyFrame.StdMessageFrame));
2901 		if (I2O_MESSAGE_FRAME_getMsgFlags(
2902 		  &(Reply->StdReplyFrame.StdMessageFrame))
2903 		  & I2O_MESSAGE_FLAGS_FAIL) {
2904 			I2O_UTIL_NOP_MESSAGE	Message;
2905 			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
2906 			U32			MessageOffset;
2907 
2908 			MessageOffset = (u_long)
2909 			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2910 			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2911 			/*
2912 			 *  Get the Original Message Frame's address, and get
2913 			 * its Transaction Context into our space. (Currently
2914 			 * unused at original authorship, but better to be
2915 			 * safe than sorry). Straight copy means that we
2916 			 * need not concern ourselves with the (optional
2917 			 * byteswapping) method access.
2918 			 */
2919 			Reply->StdReplyFrame.TransactionContext =
2920 			    bus_space_read_4(sc->ha_frame_btag,
2921 			    sc->ha_frame_bhandle, MessageOffset +
2922 			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2923 			    TransactionContext));
2924 			/*
2925 			 *	For 64 bit machines, we need to reconstruct the
2926 			 * 64 bit context.
2927 			 */
2928 			ccb = (union asr_ccb *)(long)
2929 			  I2O_MESSAGE_FRAME_getInitiatorContext64(
2930 			    &(Reply->StdReplyFrame.StdMessageFrame));
2931 			/*
2932 			 * Unique error code for command failure.
2933 			 */
2934 			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2935 			  &(Reply->StdReplyFrame), (u_int16_t)-2);
2936 			/*
2937 			 *  Modify the message frame to contain a NOP and
2938 			 * re-issue it to the controller.
2939 			 */
2940 			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2941 			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2942 #if (I2O_UTIL_NOP != 0)
2943 				I2O_MESSAGE_FRAME_setFunction (
2944 				  &(Message_Ptr->StdMessageFrame),
2945 				  I2O_UTIL_NOP);
2946 #endif
2947 			/*
2948 			 *  Copy the packet out to the Original Message
2949 			 */
2950 			asr_set_frame(sc, Message_Ptr, MessageOffset,
2951 				      sizeof(I2O_UTIL_NOP_MESSAGE));
2952 			/*
2953 			 *  Issue the NOP
2954 			 */
2955 			asr_set_ToFIFO(sc, MessageOffset);
2956 		}
2957 
2958 		/*
2959 		 *	Asynchronous command with no return requirements,
2960 		 * and a generic handler for immunity against odd error
2961 		 * returns from the adapter.
2962 		 */
2963 		if (ccb == NULL) {
2964 			/*
2965 			 * Return Reply so that it can be used for the
2966 			 * next command
2967 			 */
2968 			asr_set_FromFIFO(sc, ReplyOffset);
2969 			continue;
2970 		}
2971 
2972 		/* Welease Wadjah! (and stop timeouts) */
2973 		ASR_ccbRemove (sc, ccb);
2974 
2975 		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
2976 		    &(Reply->StdReplyFrame));
2977 		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
2978 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2979 		switch (dsc) {
2980 
2981 		case I2O_SCSI_DSC_SUCCESS:
2982 			ccb->ccb_h.status |= CAM_REQ_CMP;
2983 			break;
2984 
2985 		case I2O_SCSI_DSC_CHECK_CONDITION:
2986 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
2987 			    CAM_AUTOSNS_VALID;
2988 			break;
2989 
2990 		case I2O_SCSI_DSC_BUSY:
2991 			/* FALLTHRU */
2992 		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
2993 			/* FALLTHRU */
2994 		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
2995 			/* FALLTHRU */
2996 		case I2O_SCSI_HBA_DSC_BUS_BUSY:
2997 			ccb->ccb_h.status |= CAM_SCSI_BUSY;
2998 			break;
2999 
3000 		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3001 			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3002 			break;
3003 
3004 		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3005 			/* FALLTHRU */
3006 		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3007 			/* FALLTHRU */
3008 		case I2O_SCSI_HBA_DSC_LUN_INVALID:
3009 			/* FALLTHRU */
3010 		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3011 			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3012 			break;
3013 
3014 		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3015 			/* FALLTHRU */
3016 		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3017 			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3018 			break;
3019 
3020 		default:
3021 			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3022 			break;
3023 		}
3024 		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3025 			ccb->csio.resid -=
3026 			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3027 			    Reply);
3028 		}
3029 
3030 		/* Sense data in reply packet */
3031 		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3032 			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3033 
3034 			if (size) {
3035 				if (size > sizeof(ccb->csio.sense_data)) {
3036 					size = sizeof(ccb->csio.sense_data);
3037 				}
3038 				if (size > I2O_SCSI_SENSE_DATA_SZ) {
3039 					size = I2O_SCSI_SENSE_DATA_SZ;
3040 				}
3041 				if ((ccb->csio.sense_len)
3042 				 && (size > ccb->csio.sense_len)) {
3043 					size = ccb->csio.sense_len;
3044 				}
3045 				if (size < ccb->csio.sense_len) {
3046 					ccb->csio.sense_resid =
3047 					    ccb->csio.sense_len - size;
3048 				} else {
3049 					ccb->csio.sense_resid = 0;
3050 				}
3051 				bzero(&(ccb->csio.sense_data),
3052 				    sizeof(ccb->csio.sense_data));
3053 				bcopy(Reply->SenseData,
3054 				      &(ccb->csio.sense_data), size);
3055 			}
3056 		}
3057 
3058 		/*
3059 		 * Return Reply so that it can be used for the next command
3060 		 * since we have no more need for it now
3061 		 */
3062 		asr_set_FromFIFO(sc, ReplyOffset);
3063 
3064 		if (ccb->ccb_h.path) {
3065 			xpt_done ((union ccb *)ccb);
3066 		} else {
3067 			wakeup (ccb);
3068 		}
3069 	}
3070 	return (processed);
3071 } /* asr_intr */
3072 
3073 #undef QueueSize	/* Grrrr */
3074 #undef SG_Size		/* Grrrr */
3075 
3076 /*
3077  *	Meant to be included at the bottom of asr.c !!!
3078  */
3079 
3080 /*
3081  *	Included here as hard coded. Done because other necessary include
3082  *	files utilize C++ comment structures which make them a nuisance to
3083  *	include here just to pick up these three typedefs.
3084  */
3085 typedef U32   DPT_TAG_T;
3086 typedef U32   DPT_MSG_T;
3087 typedef U32   DPT_RTN_T;
3088 
3089 #undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3090 #include	"dev/raid/asr/osd_unix.h"
3091 
3092 #define	asr_unit(dev)	  minor(dev)
3093 
3094 static u_int8_t ASR_ctlr_held;
3095 
3096 static int
3097 asr_open(struct dev_open_args *ap)
3098 {
3099 	cdev_t dev = ap->a_head.a_dev;
3100 	int		 error;
3101 
3102 	if (dev->si_drv1 == NULL) {
3103 		return (ENODEV);
3104 	}
3105 	crit_enter();
3106 	if (ASR_ctlr_held) {
3107 		error = EBUSY;
3108 	} else {
3109 		error = caps_priv_check(ap->a_cred, SYSCAP_RESTRICTEDROOT);
3110 		if (error == 0)
3111 			++ASR_ctlr_held;
3112 	}
3113 	crit_exit();
3114 	return (error);
3115 } /* asr_open */
3116 
3117 static int
3118 asr_close(struct dev_close_args *ap)
3119 {
3120 
3121 	ASR_ctlr_held = 0;
3122 	return (0);
3123 } /* asr_close */
3124 
3125 
3126 /*-------------------------------------------------------------------------*/
3127 /*		      Function ASR_queue_i				   */
3128 /*-------------------------------------------------------------------------*/
3129 /* The Parameters Passed To This Function Are :				   */
3130 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3131 /*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3132 /*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3133 /*									   */
3134 /* This Function Will Take The User Request Packet And Convert It To An	   */
3135 /* I2O MSG And Send It Off To The Adapter.				   */
3136 /*									   */
3137 /* Return : 0 For OK, Error Code Otherwise				   */
3138 /*-------------------------------------------------------------------------*/
3139 static int
3140 ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
3141 {
3142 	union asr_ccb				   * ccb;
3143 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
3144 	PI2O_MESSAGE_FRAME			     Message_Ptr;
3145 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
3146 	int					     MessageSizeInBytes;
3147 	int					     ReplySizeInBytes;
3148 	int					     error;
3149 	int					     s;
3150 	/* Scatter Gather buffer list */
3151 	struct ioctlSgList_S {
3152 		SLIST_ENTRY(ioctlSgList_S) link;
3153 		caddr_t			   UserSpace;
3154 		I2O_FLAGS_COUNT		   FlagsCount;
3155 		char			   KernelSpace[sizeof(long)];
3156 	}					   * elm;
3157 	/* Generates a `first' entry */
3158 	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3159 
3160 	if (ASR_getBlinkLedCode(sc)) {
3161 		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3162 		  ASR_getBlinkLedCode(sc));
3163 		return (EIO);
3164 	}
3165 	/* Copy in the message into a local allocation */
3166 	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
3167 	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3168 		debug_usr_cmd_printf (
3169 		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3170 		return (ENOMEM);
3171 	}
3172 	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3173 	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3174 		kfree(Message_Ptr, M_TEMP);
3175 		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3176 		return (error);
3177 	}
3178 	/* Acquire information to determine type of packet */
3179 	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3180 	/* The offset of the reply information within the user packet */
3181 	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3182 	  + MessageSizeInBytes);
3183 
3184 	/* Check if the message is a synchronous initialization command */
3185 	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3186 	kfree(Message_Ptr, M_TEMP);
3187 	switch (s) {
3188 
3189 	case I2O_EXEC_IOP_RESET:
3190 	{	U32 status;
3191 
3192 		status = ASR_resetIOP(sc);
3193 		ReplySizeInBytes = sizeof(status);
3194 		debug_usr_cmd_printf ("resetIOP done\n");
3195 		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3196 		  ReplySizeInBytes));
3197 	}
3198 
3199 	case I2O_EXEC_STATUS_GET:
3200 	{	PI2O_EXEC_STATUS_GET_REPLY status;
3201 
3202 		status = &sc->ha_statusmem->status;
3203 		if (ASR_getStatus(sc) == NULL) {
3204 			debug_usr_cmd_printf ("getStatus failed\n");
3205 			return (ENXIO);
3206 		}
3207 		ReplySizeInBytes = sizeof(status);
3208 		debug_usr_cmd_printf ("getStatus done\n");
3209 		return (copyout ((caddr_t)status, (caddr_t)Reply,
3210 		  ReplySizeInBytes));
3211 	}
3212 
3213 	case I2O_EXEC_OUTBOUND_INIT:
3214 	{	U32 status;
3215 
3216 		status = ASR_initOutBound(sc);
3217 		ReplySizeInBytes = sizeof(status);
3218 		debug_usr_cmd_printf ("initOutBound done\n");
3219 		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3220 		  ReplySizeInBytes));
3221 	}
3222 	}
3223 
3224 	/* Determine if the message size is valid */
3225 	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3226 	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3227 		debug_usr_cmd_printf ("Packet size %d incorrect\n",
3228 		  MessageSizeInBytes);
3229 		return (EINVAL);
3230 	}
3231 
3232 	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
3233 	  M_TEMP, M_WAITOK)) == NULL) {
3234 		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3235 		  MessageSizeInBytes);
3236 		return (ENOMEM);
3237 	}
3238 	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3239 	  MessageSizeInBytes)) != 0) {
3240 		kfree(Message_Ptr, M_TEMP);
3241 		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3242 		  MessageSizeInBytes, error);
3243 		return (error);
3244 	}
3245 
3246 	/* Check the size of the reply frame, and start constructing */
3247 
3248 	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3249 	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3250 		kfree(Message_Ptr, M_TEMP);
3251 		debug_usr_cmd_printf (
3252 		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3253 		return (ENOMEM);
3254 	}
3255 	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3256 	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3257 		kfree(Reply_Ptr, M_TEMP);
3258 		kfree(Message_Ptr, M_TEMP);
3259 		debug_usr_cmd_printf (
3260 		  "Failed to copy in reply frame, errno=%d\n",
3261 		  error);
3262 		return (error);
3263 	}
3264 	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3265 	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3266 	kfree(Reply_Ptr, M_TEMP);
3267 	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3268 		kfree(Message_Ptr, M_TEMP);
3269 		debug_usr_cmd_printf (
3270 		  "Reply frame size %d is too small\n",
3271 		  ReplySizeInBytes);
3272 		return (EINVAL);
3273 	}
3274 
3275 	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3276 	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3277 	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3278 	  M_TEMP, M_WAITOK)) == NULL) {
3279 		kfree(Message_Ptr, M_TEMP);
3280 		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3281 		  ReplySizeInBytes);
3282 		return (ENOMEM);
3283 	}
3284 	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
3285 	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3286 	  = Message_Ptr->InitiatorContext;
3287 	Reply_Ptr->StdReplyFrame.TransactionContext
3288 	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3289 	I2O_MESSAGE_FRAME_setMsgFlags(
3290 	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3291 	  I2O_MESSAGE_FRAME_getMsgFlags(
3292 	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3293 	      | I2O_MESSAGE_FLAGS_REPLY);
3294 
3295 	/* Check if the message is a special case command */
3296 	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3297 	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3298 		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3299 		  Message_Ptr) & 0xF0) >> 2)) {
3300 			kfree(Message_Ptr, M_TEMP);
3301 			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3302 			  &(Reply_Ptr->StdReplyFrame),
3303 			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
3304 			I2O_MESSAGE_FRAME_setMessageSize(
3305 			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3306 			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3307 			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3308 			  ReplySizeInBytes);
3309 			kfree(Reply_Ptr, M_TEMP);
3310 			return (error);
3311 		}
3312 	}
3313 
3314 	/* Deal in the general case */
3315 	/* First allocate and optionally copy in each scatter gather element */
3316 	SLIST_INIT(&sgList);
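	/*
	 * For each simple SG element in the user's message, allocate a
	 * kernel bounce buffer, copy the user data in (the DIR bit may be
	 * unreliable), and rewrite the element to describe physically
	 * contiguous runs of that buffer, growing the message frame when
	 * extra elements become necessary.
	 */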
3317 	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3318 		PI2O_SGE_SIMPLE_ELEMENT sg;
3319 
3320 		/*
3321 		 *	Since this code is reused in several systems, code
3322 		 * efficiency is greater by using a shift operation rather
3323 		 * than a divide by sizeof(u_int32_t).
3324 		 */
3325 		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3326 		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3327 		    >> 2));
3328 		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3329 		  + MessageSizeInBytes)) {
3330 			caddr_t v;
3331 			int	len;
3332 
3333 			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3334 			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3335 				error = EINVAL;
3336 				break;
3337 			}
3338 			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3339 			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3340 			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3341 			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3342 				Message_Ptr) & 0xF0) >> 2)),
3343 			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3344 
3345 			if ((elm = (struct ioctlSgList_S *)kmalloc (
3346 			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3347 			  M_TEMP, M_WAITOK)) == NULL) {
3348 				debug_usr_cmd_printf (
3349 				  "Failed to allocate SG[%d]\n", len);
3350 				error = ENOMEM;
3351 				break;
3352 			}
3353 			SLIST_INSERT_HEAD(&sgList, elm, link);
3354 			elm->FlagsCount = sg->FlagsCount;
3355 			elm->UserSpace = (caddr_t)
3356 			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3357 			v = elm->KernelSpace;
3358 			/* Copy in outgoing data (DIR bit could be invalid) */
3359 			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3360 			  != 0) {
3361 				break;
3362 			}
3363 			/*
3364 			 *	If the buffer is not physically contiguous,
3365 			 * break the scatter/gather entry up into more elements.
3366 			 */
3367 			while ((len > 0)
3368 			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3369 			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3370 				int next, base, span;
3371 
3372 				span = 0;
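				/*
				 * KVTOPHYS() translates the kernel virtual
				 * address into the physical address that the
				 * adapter will actually DMA to or from.
				 */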
3373 				next = base = KVTOPHYS(v);
3374 				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3375 				  base);
3376 
3377 				/* How far can we go while remaining physically contiguous? */
3378 				while ((len > 0) && (base == next)) {
3379 					int size;
3380 
3381 					next = trunc_page(base) + PAGE_SIZE;
3382 					size = next - base;
3383 					if (size > len) {
3384 						size = len;
3385 					}
3386 					span += size;
3387 					v += size;
3388 					len -= size;
3389 					base = KVTOPHYS(v);
3390 				}
3391 
3392 				/* Construct the Flags */
3393 				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3394 				  span);
3395 				{
3396 					int flags = I2O_FLAGS_COUNT_getFlags(
3397 					  &(elm->FlagsCount));
3398 					/* Any remaining length? */
3399 					if (len > 0) {
3400 					    flags &=
3401 						~(I2O_SGL_FLAGS_END_OF_BUFFER
3402 						 | I2O_SGL_FLAGS_LAST_ELEMENT);
3403 					}
3404 					I2O_FLAGS_COUNT_setFlags(
3405 					  &(sg->FlagsCount), flags);
3406 				}
3407 
3408 				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3409 				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
3410 				    ((char *)Message_Ptr
3411 				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3412 					Message_Ptr) & 0xF0) >> 2)),
3413 				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3414 				  span);
3415 				if (len <= 0) {
3416 					break;
3417 				}
3418 
3419 				/*
3420 				 * Incrementing requires resizing of the
3421 				 * packet, and moving up the existing SG
3422 				 * elements.
3423 				 */
3424 				++sg;
3425 				MessageSizeInBytes += sizeof(*sg);
3426 				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3427 				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3428 				  + (sizeof(*sg) / sizeof(U32)));
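				/*
				 * Grow the frame: allocate a larger copy,
				 * keep everything up to the new slot, then
				 * re-copy starting at the element just split
				 * so the remainder of the buffer gets its own
				 * SG element, and continue with the new frame.
				 */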
3429 				{
3430 					PI2O_MESSAGE_FRAME NewMessage_Ptr;
3431 
3432 					if ((NewMessage_Ptr
3433 					  = (PI2O_MESSAGE_FRAME)
3434 					    kmalloc (MessageSizeInBytes,
3435 					     M_TEMP, M_WAITOK)) == NULL) {
3436 						debug_usr_cmd_printf (
3437 						  "Failed to acquire frame[%d] memory\n",
3438 						  MessageSizeInBytes);
3439 						error = ENOMEM;
3440 						break;
3441 					}
3442 					span = ((caddr_t)sg)
3443 					     - (caddr_t)Message_Ptr;
3444 					bcopy(Message_Ptr,NewMessage_Ptr, span);
3445 					bcopy((caddr_t)(sg-1),
3446 					  ((caddr_t)NewMessage_Ptr) + span,
3447 					  MessageSizeInBytes - span);
3448 					kfree(Message_Ptr, M_TEMP);
3449 					sg = (PI2O_SGE_SIMPLE_ELEMENT)
3450 					  (((caddr_t)NewMessage_Ptr) + span);
3451 					Message_Ptr = NewMessage_Ptr;
3452 				}
3453 			}
3454 			if ((error)
3455 			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3456 			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3457 				break;
3458 			}
3459 			++sg;
3460 		}
3461 		if (error) {
3462 			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3463 				SLIST_REMOVE_HEAD(&sgList, link);
3464 				kfree(elm, M_TEMP);
3465 			}
3466 			kfree(Reply_Ptr, M_TEMP);
3467 			kfree(Message_Ptr, M_TEMP);
3468 			return (error);
3469 		}
3470 	}
3471 
3472 	debug_usr_cmd_printf ("Inbound: ");
3473 	debug_usr_cmd_dump_message(Message_Ptr);
3474 
3475 	/* Send the command */
3476 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3477 		/* Free up in-kernel buffers */
3478 		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3479 			SLIST_REMOVE_HEAD(&sgList, link);
3480 			kfree(elm, M_TEMP);
3481 		}
3482 		kfree(Reply_Ptr, M_TEMP);
3483 		kfree(Message_Ptr, M_TEMP);
3484 		return (ENOMEM);
3485 	}
3486 
3487 	/*
3488 	 * Byte swapping of the Initiator context field does not matter here;
3489 	 * the IOP hands the value back to us unmodified.
3490 	 */
3491 	I2O_MESSAGE_FRAME_setInitiatorContext64(
3492 	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3493 
3494 	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3495 
3496 	kfree(Message_Ptr, M_TEMP);
3497 
3498 	/*
3499 	 * Wait for the adapter to report that the command has completed.
3500 	 */
3501 	crit_enter();
3502 	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3503 		if (ASR_getBlinkLedCode(sc)) {
3504 			/* Reset Adapter */
3505 			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
3506 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3507 			  ASR_getBlinkLedCode(sc));
3508 			if (ASR_reset (sc) == ENXIO) {
3509 				/* Command Cleanup */
3510 				ASR_ccbRemove(sc, ccb);
3511 			}
3512 			crit_exit();
3513 			/* Free up in-kernel buffers */
3514 			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3515 				SLIST_REMOVE_HEAD(&sgList, link);
3516 				kfree(elm, M_TEMP);
3517 			}
3518 			kfree(Reply_Ptr, M_TEMP);
3519 			asr_free_ccb(ccb);
3520 			return (EIO);
3521 		}
3522 		/* Poll roughly once a second for a BlinkLED fault code */
3523 		/* (there is no PRICAM priority, but PRIBIO works well enough) */
3524 		tsleep(ccb, 0, "asr", hz);
3525 	}
3526 	crit_exit();
3527 
3528 	debug_usr_cmd_printf ("Outbound: ");
3529 	debug_usr_cmd_dump_message(Reply_Ptr);
3530 
3531 	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3532 	  &(Reply_Ptr->StdReplyFrame),
3533 	  (ccb->ccb_h.status != CAM_REQ_CMP));
3534 
3535 	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3536 	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3537 		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3538 		  ccb->csio.dxfer_len - ccb->csio.resid);
3539 	}
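	/*
	 * If autosense data is valid and the user's reply frame is large
	 * enough to hold it, hand back the sense bytes and the autosense
	 * transfer count as well.
	 */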
3540 	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3541 	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3542 	 - I2O_SCSI_SENSE_DATA_SZ))) {
3543 		int size = ReplySizeInBytes
3544 		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3545 		  - I2O_SCSI_SENSE_DATA_SZ;
3546 
3547 		if (size > sizeof(ccb->csio.sense_data)) {
3548 			size = sizeof(ccb->csio.sense_data);
3549 		}
3550 		if (size < ccb->csio.sense_len) {
3551 			ccb->csio.sense_resid = ccb->csio.sense_len - size;
3552 		} else {
3553 			ccb->csio.sense_resid = 0;
3554 		}
3555 		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
3556 		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
3557 		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3558 		    Reply_Ptr, size);
3559 	}
3560 
3561 	/* Free up in-kernel buffers */
3562 	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3563 		/* Copy out as necessary */
3564 		if ((error == 0)
3565 		/* DIR bit taken as valid; if it is wrong we merely copy unchanged data back out */
3566 		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3567 		  & I2O_SGL_FLAGS_DIR) == 0)) {
3568 			error = copyout((caddr_t)(elm->KernelSpace),
3569 			  elm->UserSpace,
3570 			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3571 		}
3572 		SLIST_REMOVE_HEAD(&sgList, link);
3573 		kfree(elm, M_TEMP);
3574 	}
3575 	if (error == 0) {
3576 		/* Copy reply frame to user space */
3577 		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
3578 				ReplySizeInBytes);
3579 	}
3580 	kfree(Reply_Ptr, M_TEMP);
3581 	asr_free_ccb(ccb);
3582 
3583 	return (error);
3584 } /* ASR_queue_i */
3585 
3586 /*----------------------------------------------------------------------*/
3587 /*			    Function asr_ioctl			       */
3588 /*----------------------------------------------------------------------*/
3589 /* The parameters arrive packed in a struct dev_ioctl_args:		*/
3590 /*     dev  : Device (ap->a_head.a_dev)					*/
3591 /*     cmd  : Ioctl command (ap->a_cmd)					*/
3592 /*     data : User argument passed in (ap->a_data)			*/
3593 /*     flag : Mode parameter (not used here)				*/
3594 /*     proc : Process parameter (not used here)				*/
3595 /*									*/
3596 /* This function is the user interface into this adapter driver.	*/
3597 /*									*/
3598 /* Return : zero if OK, an error code if not.				*/
3599 /*----------------------------------------------------------------------*/
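#if 0
/*
 * Illustrative userland sketch only; it is not part of the driver and is
 * never compiled (hence the #if 0).  The control node name /dev/asr0 and
 * the assumption that I2ORESCANCMD is encoded as an argument-less request
 * are NOT taken from this file -- adjust both to match the installed
 * headers (which define the I2ORESCANCMD request code) and device nodes.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	/* Open the adapter's control node (assumed name). */
	if ((fd = open("/dev/asr0", O_RDWR)) < 0) {
		perror("open");
		return (1);
	}
	/* Ask the driver to rescan the LCT and resynchronize its state. */
	if (ioctl(fd, I2ORESCANCMD, NULL) < 0)
		perror("I2ORESCANCMD");
	close(fd);
	return (0);
}
#endif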
3600 
3601 static int
3602 asr_ioctl(struct dev_ioctl_args *ap)
3603 {
3604 	cdev_t dev = ap->a_head.a_dev;
3605 	u_long cmd = ap->a_cmd;
3606 	caddr_t data = ap->a_data;
3607 	Asr_softc_t	*sc = dev->si_drv1;
3608 	int		i, error = 0;
3609 #ifdef ASR_IOCTL_COMPAT
3610 	int		j;
3611 #endif /* ASR_IOCTL_COMPAT */
3612 
3613 	if (sc == NULL)
3614 		return (EINVAL);
3615 
3616 	switch(cmd) {
3617 	case DPT_SIGNATURE:
3618 #ifdef ASR_IOCTL_COMPAT
3619 #if (dsDescription_size != 50)
3620 	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3621 #endif
3622 		if (cmd & 0xFFFF0000) {
3623 			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3624 			return (0);
3625 		}
3626 	/* Traditional version of the ioctl interface */
3627 	case DPT_SIGNATURE & 0x0000FFFF:
3628 #endif
3629 		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3630 				sizeof(dpt_sig_S)));
3631 
3632 	/* Traditional version of the ioctl interface */
3633 	case DPT_CTRLINFO & 0x0000FFFF:
3634 	case DPT_CTRLINFO: {
3635 		struct {
3636 			u_int16_t length;
3637 			u_int16_t drvrHBAnum;
3638 			u_int32_t baseAddr;
3639 			u_int16_t blinkState;
3640 			u_int8_t  pciBusNum;
3641 			u_int8_t  pciDeviceNum;
3642 			u_int16_t hbaFlags;
3643 			u_int16_t Interrupt;
3644 			u_int32_t reserved1;
3645 			u_int32_t reserved2;
3646 			u_int32_t reserved3;
3647 		} CtlrInfo;
3648 
3649 		bzero(&CtlrInfo, sizeof(CtlrInfo));
3650 		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3651 		CtlrInfo.drvrHBAnum = asr_unit(dev);
3652 		CtlrInfo.baseAddr = sc->ha_Base;
3653 		i = ASR_getBlinkLedCode (sc);
3654 		if (i == -1)
3655 			i = 0;
3656 
3657 		CtlrInfo.blinkState = i;
3658 		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3659 		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3660 #define	FLG_OSD_PCI_VALID 0x0001
3661 #define	FLG_OSD_DMA	  0x0002
3662 #define	FLG_OSD_I2O	  0x0004
3663 		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3664 		CtlrInfo.Interrupt = sc->ha_irq;
3665 #ifdef ASR_IOCTL_COMPAT
3666 		if (cmd & 0xffff0000)
3667 			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3668 		else
3669 #endif /* ASR_IOCTL_COMPAT */
3670 		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3671 	}	return (error);
3672 
3673 	/* Traditional version of the ioctl interface */
3674 	case DPT_SYSINFO & 0x0000FFFF:
3675 	case DPT_SYSINFO: {
3676 		sysInfo_S	Info;
3677 #ifdef ASR_IOCTL_COMPAT
3678 		char	      * cp;
3679 		/* Kernel Specific ptok `hack' */
3680 #define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
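		/*
		 * ptok() maps a low physical (real mode) address into the
		 * kernel's virtual space by adding KERNBASE; it is used
		 * below to peek at BIOS structures such as the hard disk
		 * count byte at 0x475.
		 */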
3681 
3682 		bzero(&Info, sizeof(Info));
3683 
3684 		/* Appears I am the only person in the Kernel doing this */
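		/*
		 * CMOS access pattern: write the register index to port
		 * 0x70, then read the value from port 0x71, i.e.
		 *	outb(0x70, idx); val = inb(0x71);
		 * Register 0x12 holds the two drive-type nibbles; registers
		 * 0x19/0x1a hold the extended drive types.
		 */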
3685 		outb (0x70, 0x12);
3686 		i = inb(0x71);
3687 		j = i >> 4;
3688 		if (i == 0x0f) {
3689 			outb (0x70, 0x19);
3690 			j = inb (0x71);
3691 		}
3692 		Info.drive0CMOS = j;
3693 
3694 		j = i & 0x0f;
3695 		if (i == 0x0f) {
3696 			outb (0x70, 0x1a);
3697 			j = inb (0x71);
3698 		}
3699 		Info.drive1CMOS = j;
3700 
3701 		Info.numDrives = *((char *)ptok(0x475));
3702 #else /* ASR_IOCTL_COMPAT */
3703 		bzero(&Info, sizeof(Info));
3704 #endif /* ASR_IOCTL_COMPAT */
3705 
3706 		Info.processorFamily = ASR_sig.dsProcessorFamily;
3707 		Info.osType = OS_BSDI_UNIX;
3708 		Info.osMajorVersion = osrelease[0] - '0';
3709 		Info.osMinorVersion = osrelease[2] - '0';
3710 		/* Info.osRevision = 0; */
3711 		/* Info.osSubRevision = 0; */
3712 		Info.busType = SI_PCI_BUS;
3713 		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3714 
3715 #ifdef ASR_IOCTL_COMPAT
3716 		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3717 		/* Go Out And Look For I2O SmartROM */
3718 		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3719 			int k;
3720 
3721 			cp = ptok(j);
3722 			if (*((unsigned short *)cp) != 0xAA55) {
3723 				continue;
3724 			}
3725 			j += (cp[2] * 512) - 2048;
3726 			if ((*((u_long *)(cp + 6))
3727 			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3728 			 || (*((u_long *)(cp + 10))
3729 			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3730 				continue;
3731 			}
3732 			cp += 0x24;
3733 			for (k = 0; k < 64; ++k) {
3734 				if (*((unsigned short *)cp)
3735 				 == (' ' + ('v' * 256))) {
3736 					break;
3737 				}
3738 			}
3739 			if (k < 64) {
3740 				Info.smartROMMajorVersion
3741 				    = *((unsigned char *)(cp += 4)) - '0';
3742 				Info.smartROMMinorVersion
3743 				    = *((unsigned char *)(cp += 2));
3744 				Info.smartROMRevision
3745 				    = *((unsigned char *)(++cp));
3746 				Info.flags |= SI_SmartROMverValid;
3747 				Info.flags &= ~SI_NO_SmartROM;
3748 				break;
3749 			}
3750 		}
3751 		/* Get The Conventional Memory Size From CMOS */
3752 		outb (0x70, 0x16);
3753 		j = inb (0x71);
3754 		j <<= 8;
3755 		outb (0x70, 0x15);
3756 		j |= inb(0x71);
3757 		Info.conventionalMemSize = j;
3758 
3759 		/* Get The Extended Memory Found At Power On From CMOS */
3760 		outb (0x70, 0x31);
3761 		j = inb (0x71);
3762 		j <<= 8;
3763 		outb (0x70, 0x30);
3764 		j |= inb(0x71);
3765 		Info.extendedMemSize = j;
3766 		Info.flags |= SI_MemorySizeValid;
3767 
3768 		/* Copy Out The Info Structure To The User */
3769 		if (cmd & 0xFFFF0000)
3770 			bcopy(&Info, data, sizeof(Info));
3771 		else
3772 #endif /* ASR_IOCTL_COMPAT */
3773 		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3774 		return (error); }
3775 
3776 		/* Get The BlinkLED State */
3777 	case DPT_BLINKLED:
3778 		i = ASR_getBlinkLedCode (sc);
3779 		if (i == -1)
3780 			i = 0;
3781 #ifdef ASR_IOCTL_COMPAT
3782 		if (cmd & 0xffff0000)
3783 			bcopy(&i, data, sizeof(i));
3784 		else
3785 #endif /* ASR_IOCTL_COMPAT */
3786 		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3787 		break;
3788 
3789 		/* Send an I2O command */
3790 	case I2OUSRCMD:
3791 		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3792 
3793 		/* Reset and re-initialize the adapter */
3794 	case I2ORESETCMD:
3795 		return (ASR_reset(sc));
3796 
3797 		/* Rescan the LCT table and resynchronize the information */
3798 	case I2ORESCANCMD:
3799 		return (ASR_rescan(sc));
3800 	}
3801 	return (EINVAL);
3802 } /* asr_ioctl */
3803