xref: /dragonfly/sys/dev/raid/asr/asr.c (revision f00eae14)
1 /*-
2  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3  * Copyright (c) 2000-2001 Adaptec Corporation
4  * All rights reserved.
5  *
6  * TERMS AND CONDITIONS OF USE
7  *
8  * Redistribution and use in source form, with or without modification, are
9  * permitted provided that redistributions of source code must retain the
10  * above copyright notice, this list of conditions and the following disclaimer.
11  *
12  * This software is provided `as is' by Adaptec and any express or implied
13  * warranties, including, but not limited to, the implied warranties of
14  * merchantability and fitness for a particular purpose, are disclaimed. In no
15  * event shall Adaptec be liable for any direct, indirect, incidental, special,
16  * exemplary or consequential damages (including, but not limited to,
17  * procurement of substitute goods or services; loss of use, data, or profits;
18  * or business interruptions) however caused and on any theory of liability,
19  * whether in contract, strict liability, or tort (including negligence or
20  * otherwise) arising in any way out of the use of this driver software, even
21  * if advised of the possibility of such damage.
22  *
23  * SCSI I2O host adapter driver
24  *
25  *	V1.10 2004/05/05 scottl@freebsd.org
26  *		- Massive cleanup of the driver to remove dead code and
27  *		  non-conformant style.
28  *		- Removed most i386-specific code to make it more portable.
29  *		- Converted to the bus_space API.
30  *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31  *		- The 2000S and 2005S do not initialize on some machines,
32  *		  increased timeout to 255ms from 50ms for the StatusGet
33  *		  command.
34  *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35  *		- I knew this one was too good to be true. The error return
36  *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37  *		  to the bit masked status.
38  *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39  *		- The 2005S that was supported is affectionately called the
40  *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41  *		  16MB low-cost configuration, Firmware was forced to go
42  *		  to a Split BAR Firmware. This requires a separate IOP and
43  *		  Messaging base address.
44  *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45  *		- Handle support for 2005S Zero Channel RAID solution.
46  *		- System locked up if the Adapter locked up. Do not try
47  *		  to send other commands if the resetIOP command fails. The
48  *		  fail outstanding command discovery loop was flawed as the
49  *		  removal of the command from the list prevented discovering
50  *		  all the commands.
51  *		- Comment changes to clarify driver.
52  *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53  *		- We do not use the AC_FOUND_DEV event because of I2O.
54  *		  Removed asr_async.
55  *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56  *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57  *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58  *		  mode as this is confused with competitor adapters in run
59  *		  mode.
60  *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61  *		  to prevent operating system panic.
62  *		- moved default major number to 154 from 97.
63  *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64  *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65  *		  series that is visible, it's more of an internal code name.
66  *		  remove any visible references within reason for now.
67  *		- bus_ptr->LUN was not correctly zeroed when initially
68  *		  allocated causing a possible panic of the operating system
69  *		  during boot.
70  *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71  *		- Code always fails for ASR_getTid affecting performance.
72  *		- initiated a set of changes that resulted from a formal
73  *		  code inspection by Mark_Salyzyn@adaptec.com,
74  *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75  *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76  *		  Their findings were focussed on the LCT & TID handler, and
77  *		  all resulting changes were to improve code readability,
78  *		  consistency or have a positive effect on performance.
79  *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80  *		- Passthrough returned an incorrect error.
81  *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82  *		  on command completion.
83  *		- generate control device nodes using make_dev and delete_dev.
84  *		- Performance affected by TID caching reallocing.
85  *		- Made suggested changes by Justin_Gibbs@adaptec.com
86  *			- use splcam instead of splbio.
87  *			- use cam_imask instead of bio_imask.
88  *			- use u_int8_t instead of u_char.
89  *			- use u_int16_t instead of u_short.
90  *			- use u_int32_t instead of u_long where appropriate.
91  *			- use 64 bit context handler instead of 32 bit.
92  *			- create_ccb should only allocate the worst case
93  *			  requirements for the driver since CAM may evolve
94  *			  making union ccb much larger than needed here.
95  *			  renamed create_ccb to asr_alloc_ccb.
96  *			- go nutz justifying all debug prints as macros
97  *			  defined at the top and remove unsightly ifdefs.
98  *			- INLINE STATIC viewed as confusing. Historically
99  *			  utilized to affect code performance and debug
100  *			  issues in OS, Compiler or OEM specific situations.
101  *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102  *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103  *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104  *			changed variable name xs to ccb
105  *			changed struct scsi_link to struct cam_path
106  *			changed struct scsibus_data to struct cam_sim
107  *			stopped using fordriver for holding on to the TID
108  *			use proprietary packet creation instead of scsi_inquire
109  *			CAM layer sends synchronize commands.
110  *
111  * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
112  */
113 
114 #include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
115 #include <sys/kernel.h>
116 #include <sys/module.h>
117 #include <sys/systm.h>
118 #include <sys/malloc.h>
119 #include <sys/conf.h>
120 #include <sys/priv.h>
121 #include <sys/proc.h>
122 #include <sys/bus.h>
123 #include <sys/rman.h>
124 #include <sys/stat.h>
125 #include <sys/device.h>
126 #include <sys/thread2.h>
127 #include <sys/bus_dma.h>
128 
129 #include <bus/cam/cam.h>
130 #include <bus/cam/cam_ccb.h>
131 #include <bus/cam/cam_sim.h>
132 #include <bus/cam/cam_xpt_sim.h>
133 
134 #include <bus/cam/scsi/scsi_all.h>
135 #include <bus/cam/scsi/scsi_message.h>
136 
137 #include <vm/vm.h>
138 #include <vm/pmap.h>
139 
140 #if defined(__i386__)
141 #include "opt_asr.h"
142 #include <machine/cputypes.h>
143 
144 #if defined(ASR_COMPAT)
145 #define ASR_IOCTL_COMPAT
146 #endif /* ASR_COMPAT */
147 #endif
148 #include <machine/vmparam.h>
149 
150 #include <bus/pci/pcivar.h>
151 #include <bus/pci/pcireg.h>
152 
153 #define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
154 #define	KVTOPHYS(x) vtophys(x)
155 #include	<dev/raid/asr/dptalign.h>
156 #include	<dev/raid/asr/i2oexec.h>
157 #include	<dev/raid/asr/i2obscsi.h>
158 #include	<dev/raid/asr/i2odpt.h>
159 #include	<dev/raid/asr/i2oadptr.h>
160 
161 #include	<dev/raid/asr/sys_info.h>
162 
163 #define	ASR_VERSION	1
164 #define	ASR_REVISION	'1'
165 #define	ASR_SUBREVISION '0'
166 #define	ASR_MONTH	5
167 #define	ASR_DAY		5
168 #define	ASR_YEAR	(2004 - 1980)
169 
170 /*
171  *	Debug macros to reduce the unsightly ifdefs
172  */
173 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
174 static __inline void
175 debug_asr_message(PI2O_MESSAGE_FRAME message)
176 {
177 	u_int32_t * pointer = (u_int32_t *)message;
178 	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
179 	u_int32_t   counter = 0;
180 
181 	while (length--) {
182 		kprintf("%08lx%c", (u_long)*(pointer++),
183 		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
184 	}
185 }
186 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
187 
188 #ifdef DEBUG_ASR
  /* Breaks on non-STDC-based compilers :-( */
190 #define debug_asr_printf(fmt,args...)	kprintf(fmt, ##args)
191 #define debug_asr_dump_message(message)	debug_asr_message(message)
192 #define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
193 #else /* DEBUG_ASR */
194 #define debug_asr_printf(fmt,args...)
195 #define debug_asr_dump_message(message)
196 #define debug_asr_print_path(ccb)
197 #endif /* DEBUG_ASR */
198 
199 /*
200  *	If DEBUG_ASR_CMD is defined:
201  *		0 - Display incoming SCSI commands
202  *		1 - add in a quick character before queueing.
203  *		2 - add in outgoing message frames.
204  */
205 #if (defined(DEBUG_ASR_CMD))
206 #define debug_asr_cmd_printf(fmt,args...)     kprintf(fmt,##args)
207 static __inline void
208 debug_asr_dump_ccb(union ccb *ccb)
209 {
210 	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
211 	int		len = ccb->csio.cdb_len;
212 
213 	while (len) {
214 		debug_asr_cmd_printf (" %02x", *(cp++));
215 		--len;
216 	}
217 }
218 #if (DEBUG_ASR_CMD > 0)
219 #define debug_asr_cmd1_printf		       debug_asr_cmd_printf
220 #else
221 #define debug_asr_cmd1_printf(fmt,args...)
222 #endif
223 #if (DEBUG_ASR_CMD > 1)
224 #define debug_asr_cmd2_printf			debug_asr_cmd_printf
225 #define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
226 #else
227 #define debug_asr_cmd2_printf(fmt,args...)
228 #define debug_asr_cmd2_dump_message(message)
229 #endif
230 #else /* DEBUG_ASR_CMD */
231 #define debug_asr_cmd_printf(fmt,args...)
232 #define debug_asr_dump_ccb(ccb)
233 #define debug_asr_cmd1_printf(fmt,args...)
234 #define debug_asr_cmd2_printf(fmt,args...)
235 #define debug_asr_cmd2_dump_message(message)
236 #endif /* DEBUG_ASR_CMD */
237 
238 #if (defined(DEBUG_ASR_USR_CMD))
239 #define debug_usr_cmd_printf(fmt,args...)   kprintf(fmt,##args)
240 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
241 #else /* DEBUG_ASR_USR_CMD */
242 #define debug_usr_cmd_printf(fmt,args...)
243 #define debug_usr_cmd_dump_message(message)
244 #endif /* DEBUG_ASR_USR_CMD */
245 
246 #ifdef ASR_IOCTL_COMPAT
247 #define	dsDescription_size 46	/* Snug as a bug in a rug */
248 #endif /* ASR_IOCTL_COMPAT */
249 
250 #include "dev/raid/asr/dptsig.h"
251 
/*
 * DPT driver signature block, used by DPT/Adaptec management tools to
 * identify this HBA driver (driver type, supported CPUs, version/date).
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
262 
263 /* Configuration Definitions */
264 
265 #define	SG_SIZE		 58	/* Scatter Gather list Size		 */
266 #define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
267 #define	MAX_LUN		 255	/* Maximum LUN Supported		 */
268 #define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
269 #define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
270 #define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
271 #define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
272 #define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
273 				/* Also serves as the minimum map for	 */
274 				/* the 2005S zero channel RAID product	 */
275 
276 /* I2O register set */
277 #define	I2O_REG_STATUS		0x30
278 #define	I2O_REG_MASK		0x34
279 #define	I2O_REG_TOFIFO		0x40
280 #define	I2O_REG_FROMFIFO	0x44
281 
282 #define	Mask_InterruptsDisabled	0x08
283 
284 /*
285  * A MIX of performance and space considerations for TID lookups
286  */
typedef u_int16_t tid_t;	/* I2O target ID */

/*
 * Per-target map of LUN -> TID.  TID[] is a variable-length trailing
 * array; the structure is over-allocated (see ASR_getTidAddress).
 */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];
} lun2tid_t;

/*
 * Per-bus map of target -> lun2tid_t table; LUN[] is likewise a
 * variable-length trailing array.
 */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
298 
299 /*
300  *	To ensure that we only allocate and use the worst case ccb here, lets
301  *	make our own local ccb union. If asr_alloc_ccb is utilized for another
302  *	ccb type, ensure that you add the additional structures into our local
303  *	ccb union. To ensure strict type checking, we will utilize the local
304  *	ccb definition wherever possible.
305  */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;
	struct ccb_setasync csa;
};

/*
 * DMA-visible scratch area shared with the IOP: STATUS_GET replies land
 * in `status' (ASR_getStatus) and the IOP_RESET status word in
 * `rstatus' (ASR_resetIOP).
 */
struct Asr_status_mem {
	I2O_EXEC_STATUS_GET_REPLY	status;
	U32				rstatus;
};
316 
317 /**************************************************************************
318 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
319 **  Is Configured Into The System.  The Structure Supplies Configuration **
320 **  Information, Status Info, Queue Info And An Active CCB List Pointer. **
321 ***************************************************************************/
322 
typedef struct Asr_softc {
	device_t		ha_dev;
	u_int16_t		ha_irq;
	u_long			ha_Base;       /* base port for each board */
	bus_size_t		ha_blinkLED;   /* frame-window offset of the
						* blink-LED (fault) code */
	bus_space_handle_t	ha_i2o_bhandle; /* I2O register window:
						 * STATUS/MASK/FIFO regs */
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle; /* message-frame window
						   * (asr_set_frame) */
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */

	bus_dma_tag_t		ha_parent_dmat;
	bus_dma_tag_t		ha_statusmem_dmat;
	bus_dmamap_t		ha_statusmem_dmamap;
	struct Asr_status_mem * ha_statusmem;  /* DMA-able status area */
	u_int32_t		ha_rstatus_phys; /* bus addr of ->rstatus */
	u_int32_t		ha_status_phys;	 /* bus addr of ->status  */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
/* Shorthand accessors into an LCT entry's IdentityTag bytes: */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;   /* one of the HA_* states: */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;       /* HBA list */
	struct cdev *ha_devt;
} Asr_softc_t;
382 
383 static Asr_softc_t *Asr_softc_list;
384 
385 /*
386  *	Prototypes of the routines we have in this object.
387  */
388 
389 /* I2O HDM interface */
390 static int	asr_probe(device_t dev);
391 static int	asr_attach(device_t dev);
392 
393 static d_ioctl_t asr_ioctl;
394 static d_open_t asr_open;
395 static d_close_t asr_close;
396 static int	asr_intr(Asr_softc_t *sc);
397 static void	asr_timeout(void *arg);
398 static int	ASR_init(Asr_softc_t *sc);
399 static int	ASR_acquireLct(Asr_softc_t *sc);
400 static int	ASR_acquireHrt(Asr_softc_t *sc);
401 static void	asr_action(struct cam_sim *sim, union ccb *ccb);
402 static void	asr_poll(struct cam_sim *sim);
403 static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
404 
405 /*
406  *	Here is the auto-probe structure used to nest our tests appropriately
407  *	during the startup phase of the operating system.
408  */
/* newbus glue: only probe and attach are implemented for this driver. */
static device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	DEVMETHOD_END
};

static driver_t asr_driver = {
	"asr",			/* device name prefix */
	asr_methods,
	sizeof(Asr_softc_t)	/* softc allocated per adapter */
};
420 
421 static devclass_t asr_devclass;
422 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
423 MODULE_VERSION(asr, 1);
424 MODULE_DEPEND(asr, pci, 1, 1, 1);
425 MODULE_DEPEND(asr, cam, 1, 1, 1);
426 
427 /*
428  * devsw for asr hba driver
429  *
430  * only ioctl is used. the sd driver provides all other access.
431  */
static struct dev_ops asr_ops = {
	{ "asr", 0, 0 },		/* driver name */
	.d_open =	asr_open,
	.d_close =	asr_close,
	.d_ioctl =	asr_ioctl,	/* all management goes via ioctl */
};
438 
439 /* I2O support routines */
440 
441 static __inline u_int32_t
442 asr_get_FromFIFO(Asr_softc_t *sc)
443 {
444 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
445 				 I2O_REG_FROMFIFO));
446 }
447 
448 static __inline u_int32_t
449 asr_get_ToFIFO(Asr_softc_t *sc)
450 {
451 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
452 				 I2O_REG_TOFIFO));
453 }
454 
455 static __inline u_int32_t
456 asr_get_intr(Asr_softc_t *sc)
457 {
458 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
459 				 I2O_REG_MASK));
460 }
461 
462 static __inline u_int32_t
463 asr_get_status(Asr_softc_t *sc)
464 {
465 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
466 				 I2O_REG_STATUS));
467 }
468 
469 static __inline void
470 asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
471 {
472 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
473 			  val);
474 }
475 
476 static __inline void
477 asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
478 {
479 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
480 			  val);
481 }
482 
483 static __inline void
484 asr_set_intr(Asr_softc_t *sc, u_int32_t val)
485 {
486 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
487 			  val);
488 }
489 
490 static __inline void
491 asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
492 {
493 	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
494 				 offset, (u_int32_t *)frame, len);
495 }
496 
497 /*
498  *	Fill message with default.
499  */
500 static PI2O_MESSAGE_FRAME
501 ASR_fillMessage(void *Message, u_int16_t size)
502 {
503 	PI2O_MESSAGE_FRAME Message_Ptr;
504 
505 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
506 	bzero(Message_Ptr, size);
507 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
508 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
509 	  (size + sizeof(U32) - 1) >> 2);
510 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
511 	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
512 	return (Message_Ptr);
513 } /* ASR_fillMessage */
514 
515 #define	EMPTY_QUEUE (0xffffffff)
516 
517 static __inline U32
518 ASR_getMessage(Asr_softc_t *sc)
519 {
520 	U32	MessageOffset;
521 
522 	MessageOffset = asr_get_ToFIFO(sc);
523 	if (MessageOffset == EMPTY_QUEUE)
524 		MessageOffset = asr_get_ToFIFO(sc);
525 
526 	return (MessageOffset);
527 } /* ASR_getMessage */
528 
529 /* Issue a polled command */
530 static U32
531 ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
532 {
533 	U32	Mask = 0xffffffff;
534 	U32	MessageOffset;
535 	u_int	Delay = 1500;
536 
537 	/*
538 	 * ASR_initiateCp is only used for synchronous commands and will
539 	 * be made more resiliant to adapter delays since commands like
540 	 * resetIOP can cause the adapter to be deaf for a little time.
541 	 */
542 	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
543 	 && (--Delay != 0)) {
544 		DELAY (10000);
545 	}
546 	if (MessageOffset != EMPTY_QUEUE) {
547 		asr_set_frame(sc, Message, MessageOffset,
548 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
549 		/*
550 		 *	Disable the Interrupts
551 		 */
552 		Mask = asr_get_intr(sc);
553 		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
554 		asr_set_ToFIFO(sc, MessageOffset);
555 	}
556 	return (Mask);
557 } /* ASR_initiateCp */
558 
559 /*
560  *	Reset the adapter.
561  */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
	U32			       * Reply_Ptr;
	U32				 Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status.  The IOP DMAs its reset status word
	 *  into ha_rstatus_phys, which maps to *Reply_Ptr below.
	 */
	Reply_Ptr = &sc->ha_statusmem->rstatus;
	*Reply_Ptr = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    sc->ha_rstatus_phys);
	/*
	 *	Send the Message out (0xffffffff from ASR_initiateCp means
	 *	no inbound frame could be claimed).
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	     0xffffffff) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts (mask was saved by
		 *	ASR_initiateCp).
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		return(*Reply_Ptr);
	}
	KASSERT(Old != 0xffffffff, ("Old == -1"));
	/* Could not even hand the reset message to the adapter. */
	return (0);
} /* ASR_resetIOP */
607 
/*
 *	Get the current state of the adapter.
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	PI2O_EXEC_STATUS_GET_REPLY	buffer;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	/* The adapter DMAs its reply into sc->ha_statusmem->status. */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    sc->ha_status_phys);
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	buffer = &sc->ha_statusmem->status;
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		/*
		 *	Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 50ms.
		 * Polling SyncByte through a volatile access so the read is
		 * not cached; presumably the IOP writes it last — see the
		 * I2O StatusGet description.
		 */
		u_int8_t Delay = 255;

		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				/* Timed out: report no status available. */
				buffer = NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */
662 
663 /*
664  *	Check if the device is a SCSI I2O HBA, and add it to the list.
665  */
666 
/*
 * Probe for an ASR controller.  If we find it, we will use it,
 * including its virtual adapters.
 */
671 static int
672 asr_probe(device_t dev)
673 {
674 	u_int32_t id;
675 
676 	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
677 	if ((id == 0xA5011044) || (id == 0xA5111044)) {
678 		device_set_desc(dev, "Adaptec Caching SCSI RAID");
679 		return (BUS_PROBE_DEFAULT);
680 	}
681 	return (ENXIO);
682 } /* asr_probe */
683 
684 static __inline union asr_ccb *
685 asr_alloc_ccb(Asr_softc_t *sc)
686 {
687 	union asr_ccb *new_ccb;
688 
689 	if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
690 	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
691 		new_ccb->ccb_h.pinfo.priority = 1;
692 		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
693 		new_ccb->ccb_h.spriv_ptr0 = sc;
694 	}
695 	return (new_ccb);
696 } /* asr_alloc_ccb */
697 
/* Release a ccb obtained from asr_alloc_ccb(). */
static __inline void
asr_free_ccb(union asr_ccb *free_ccb)
{
	kfree(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
703 
704 /*
705  *	Print inquiry data `carefully'
706  */
707 static void
708 ASR_prstring(u_int8_t *s, int len)
709 {
710 	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
711 		kprintf ("%c", *(s++));
712 	}
713 } /* ASR_prstring */
714 
715 /*
716  *	Send a message synchronously and without Interrupt to a ccb.
717  */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.  The ccb pointer is the 64-bit
	 * completion cookie.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	crit_enter();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		/* No inbound frame available: have CAM requeue the request. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 * Interrupts are masked, so the completion is reaped by calling
	 * asr_intr() directly (polled mode).
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	crit_exit();

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
753 
754 /*
755  *	Send a message synchronously to an Asr_softc_t.
756  */
757 static int
758 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
759 {
760 	union asr_ccb	*ccb;
761 	int		status;
762 
763 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
764 		return (CAM_REQUEUE_REQ);
765 	}
766 
767 	status = ASR_queue_s (ccb, Message);
768 
769 	asr_free_ccb(ccb);
770 
771 	return (status);
772 } /* ASR_queue_c */
773 
774 /*
775  *	Add the specified ccb to the active queue
776  */
/* Insert the ccb on the active list and arm its timeout callout. */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 min, ms */
		}
		/* ccb_h.timeout is in milliseconds; callout wants ticks. */
		callout_reset(&ccb->ccb_h.timeout_ch,
		    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */
796 
797 /*
798  *	Remove the specified ccb from the active queue.
799  */
/* Cancel the ccb's timeout and unlink it from the active list. */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */
808 
809 /*
810  *	Fail all the active commands, so they get re-issued by the operating
811  *	system.
812  */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;

	crit_enter();
	/*
	 *	We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		/* Mark for requeue so the OS re-issues the command. */
		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			/* CAM-originated: complete through xpt. */
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal synchronous waiter (ASR_queue_s path). */
			wakeup (ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */
846 
847 /*
848  *	The following command causes the HBA to reset the specific bus
849  */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/*
	 * Walk the LCT (table size is in 32-bit words) looking for the
	 * port entry that represents the requested bus.
	 */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
875 
876 static __inline int
877 ASR_getBlinkLedCode(Asr_softc_t *sc)
878 {
879 	U8	blink;
880 
881 	if (sc == NULL)
882 		return (0);
883 
884 	blink = bus_space_read_1(sc->ha_frame_btag,
885 				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
886 	if (blink != 0xBC)
887 		return (0);
888 
889 	blink = bus_space_read_1(sc->ha_frame_btag,
890 				 sc->ha_frame_bhandle, sc->ha_blinkLED);
891 	return (blink);
892 } /* ASR_getBlinkCode */
893 
894 /*
895  *	Determine the address of an TID lookup. Must be done at high priority
896  *	since the address can be changed by other threads of execution.
897  *
898  *	Returns NULL pointer if not indexible (but will attempt to generate
899  *	an index if `new_entry' flag is set to TRUE).
900  *
901  *	All addressible entries are to be guaranteed zero if never initialized.
902  */
static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;
	lun2tid_t	*target_ptr;
	unsigned	new_size;

	/*
	 *	Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 *	See if there is an associated bus list.
	 *
	 *	for performance, allocate in size of BUS_CHUNK chunks.
	 *	BUS_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	/* Round the required target index up to the next BUS_CHUNK multiple. */
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		/*
		 * NOTE(review): kmalloc with M_WAITOK should not return
		 * NULL, so the NULL comparison below looks defensive only —
		 * confirm against the kernel allocator contract.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		kfree(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 *	We now have the bus list, lets get to the target list.
	 *	Since most systems have only *one* lun, we do not allocate
	 *	in chunks as above, here we allow one, then in chunk sizes.
	 *	TARGET_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	/* lun == 0 needs no extra room; otherwise round up to a chunk. */
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		kfree(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 *	Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1032 
1033 /*
1034  *	Get a pre-existing TID relationship.
1035  *
1036  *	If the TID was never set, return (tid_t)-1.
1037  *
1038  *	should use mutex rather than spl.
1039  */
1040 static __inline tid_t
1041 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1042 {
1043 	tid_t	*tid_ptr;
1044 	tid_t	retval;
1045 
1046 	crit_enter();
1047 	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1048 	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1049 	 || (*tid_ptr == (tid_t)0)) {
1050 		crit_exit();
1051 		return ((tid_t)-1);
1052 	}
1053 	retval = *tid_ptr;
1054 	crit_exit();
1055 	return (retval);
1056 } /* ASR_getTid */
1057 
1058 /*
1059  *	Set a TID relationship.
1060  *
1061  *	If the TID was not set, return (tid_t)-1.
1062  *
1063  *	should use mutex rather than spl.
1064  */
1065 static __inline tid_t
1066 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1067 {
1068 	tid_t	*tid_ptr;
1069 
1070 	if (TID != (tid_t)-1) {
1071 		if (TID == 0) {
1072 			return ((tid_t)-1);
1073 		}
1074 		crit_enter();
1075 		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1076 		 == NULL) {
1077 			crit_exit();
1078 			return ((tid_t)-1);
1079 		}
1080 		*tid_ptr = TID;
1081 		crit_exit();
1082 	}
1083 	return (TID);
1084 } /* ASR_setTid */
1085 
1086 /*-------------------------------------------------------------------------*/
1087 /*		      Function ASR_rescan				   */
1088 /*-------------------------------------------------------------------------*/
1089 /* The Parameters Passed To This Function Are :				   */
1090 /*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1091 /*									   */
1092 /* This Function Will rescan the adapter and resynchronize any data	   */
1093 /*									   */
1094 /* Return : 0 For OK, Error Code Otherwise				   */
1095 /*-------------------------------------------------------------------------*/
1096 
static int
ASR_rescan(Asr_softc_t *sc)
{
	int bus;
	int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 *	Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t	       TID = (tid_t)-1;
				tid_t	       LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If a per-device path cannot be
					 * created, accumulate the event bits
					 * and deliver them bus-wide below.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						/*
						 * NOTE(review): `path' is not
						 * released with xpt_free_path()
						 * after the notifications —
						 * confirm whether this leaks a
						 * CAM path per changed device.
						 */
						if (TID == (tid_t)-1) {
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 *	We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 *	The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1226 
1227 /*-------------------------------------------------------------------------*/
1228 /*		      Function ASR_reset				   */
1229 /*-------------------------------------------------------------------------*/
1230 /* The Parameters Passed To This Function Are :				   */
1231 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1232 /*									   */
1233 /* This Function Will reset the adapter and resynchronize any data	   */
1234 /*									   */
1235 /* Return : None							   */
1236 /*-------------------------------------------------------------------------*/
1237 
1238 static int
1239 ASR_reset(Asr_softc_t *sc)
1240 {
1241 	int retVal;
1242 
1243 	crit_enter();
1244 	if ((sc->ha_in_reset == HA_IN_RESET)
1245 	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1246 		crit_exit();
1247 		return (EBUSY);
1248 	}
1249 	/*
1250 	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1251 	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1252 	 */
1253 	++(sc->ha_in_reset);
1254 	if (ASR_resetIOP(sc) == 0) {
1255 		debug_asr_printf ("ASR_resetIOP failed\n");
1256 		/*
1257 		 *	We really need to take this card off-line, easier said
1258 		 * than make sense. Better to keep retrying for now since if a
1259 		 * UART cable is connected the blinkLEDs the adapter is now in
1260 		 * a hard state requiring action from the monitor commands to
1261 		 * the HBA to continue. For debugging waiting forever is a
1262 		 * good thing. In a production system, however, one may wish
1263 		 * to instead take the card off-line ...
1264 		 */
1265 		/* Wait Forever */
1266 		while (ASR_resetIOP(sc) == 0);
1267 	}
1268 	retVal = ASR_init (sc);
1269 	crit_exit();
1270 	if (retVal != 0) {
1271 		debug_asr_printf ("ASR_init failed\n");
1272 		sc->ha_in_reset = HA_OFF_LINE;
1273 		return (ENXIO);
1274 	}
1275 	if (ASR_rescan (sc) != 0) {
1276 		debug_asr_printf ("ASR_rescan failed\n");
1277 	}
1278 	ASR_failActiveCommands (sc);
1279 	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1280 		kprintf ("asr%d: Brining adapter back on-line\n",
1281 		  sc->ha_path[0]
1282 		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1283 		    : 0);
1284 	}
1285 	sc->ha_in_reset = HA_OPERATIONAL;
1286 	return (0);
1287 } /* ASR_reset */
1288 
1289 /*
1290  *	Device timeout handler.
1291  */
static void
asr_timeout(void *arg)
{
	union asr_ccb	*ccb = (union asr_ccb *)arg;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 *	Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		return;
	}
	/*
	 *	Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	crit_enter();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		/* Second timeout on the same ccb: escalate to adapter reset. */
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Reset failed; re-arm to retry the reset later. */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		crit_exit();
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	/* Re-arm before the bus reset so a second expiry escalates above. */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      asr_timeout, ccb);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	crit_exit();
} /* asr_timeout */
1342 
1343 /*
1344  * send a message asynchronously
1345  */
1346 static int
1347 ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1348 {
1349 	U32		MessageOffset;
1350 	union asr_ccb	*ccb;
1351 
1352 	debug_asr_printf("Host Command Dump:\n");
1353 	debug_asr_dump_message(Message);
1354 
1355 	ccb = (union asr_ccb *)(long)
1356 	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1357 
1358 	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1359 		asr_set_frame(sc, Message, MessageOffset,
1360 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1361 		if (ccb) {
1362 			ASR_ccbAdd (sc, ccb);
1363 		}
1364 		/* Post the command */
1365 		asr_set_ToFIFO(sc, MessageOffset);
1366 	} else {
1367 		if (ASR_getBlinkLedCode(sc)) {
1368 			/*
1369 			 *	Unlikely we can do anything if we can't grab a
1370 			 * message frame :-(, but lets give it a try.
1371 			 */
1372 			(void)ASR_reset(sc);
1373 		}
1374 	}
1375 	return (MessageOffset);
1376 } /* ASR_queue */
1377 
1378 
/*
 * Initialize one Simple Scatter Gather element of `SGL' at `Index':
 * store the byte count, the flags (always marked as a SIMPLE address
 * element), and the physical address of `Buffer' (0 for NULL).
 *
 * NOTE: the arguments are expanded more than once, so they must be free
 * of side effects. Wrapped in do/while(0) so the macro behaves as a
 * single statement in every context (e.g. an unbraced if/else).
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
do {								   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  ((Buffer) == NULL) ? 0 : KVTOPHYS(Buffer));		   \
} while (0)
1390 
1391 /*
1392  *	Retrieve Parameter Group.
1393  */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		/* Pad for a second SG element beyond the embedded one. */
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	}				Message;
	struct Operations		*Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				*Buffer_Ptr;

	/* Build the UtilParamsGet frame on the stack. */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation requesting every field of `Group'. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	/* Results are only valid when the request completes CAM_REQ_CMP. */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */
1457 
1458 /*
1459  *	Acquire the LCT information.
1460  */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table, *TableP = &Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, TableP,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/* First pass: fetch only the header to learn the real table size. */
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		kfree(sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		kfree(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		/*
		 * NOTE(review): `next' and `base' hold physical addresses
		 * (KVTOPHYS) in plain ints — confirm KVTOPHYS results fit
		 * without truncation on 64-bit configurations.
		 */
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the frame by one SG element, preserving it. */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				kfree(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				kfree(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			kfree(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		/* Second pass: fetch the full LCT into sc->ha_LCT. */
		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		kfree(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify each LCT entry and record its bus/target/lun address. */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			} Buffer;
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number seen on the adapter. */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1704 
1705 /*
1706  * Initialize a message frame.
1707  * We assume that the CDB has already been set up, so all we do here is
1708  * generate the Scatter Gather list.
1709  */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
{
	PI2O_MESSAGE_FRAME	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t		size, len;
	caddr_t			v;
	U32			MessageSize;
	int			next, span, base, rw;
	int			target = ccb->ccb_h.target_id;
	int			lun = ccb->ccb_h.target_lun;
	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t			TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	      sizeof(I2O_SG_ELEMENT)));

	/* Cache miss: search the LCT for this (bus,target,lun) and cache it. */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
					   Device->le_target, Device->le_lun,
					   TID);
				break;
			}
		}
	}
	/* No such device known to the adapter. */
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	/*
	 * NOTE(review): if csio.dxfer_len is an unsigned type this KASSERT
	 * is vacuously true — confirm the field's type.
	 */
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/* Build SG elements over physically-contiguous runs, up to SG_SIZE. */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
1859 
1860 /*
1861  *	Reset the adapter.
1862  */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	}				Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	U32				*volatile Reply_Ptr;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *  Reset the Reply Status
	 */
	/* The reply status word lives just past the message (the `R' pad). */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 */
		/* NOTE(review): unbounded busy-wait; spins forever if the
		 * IOP never updates the reply status. */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != NULL)
			/* Hand every reply-frame physical address to the IOP. */
			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
			    size; --size) {
				asr_set_FromFIFO(sc, addr);
				addr +=
				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
			}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
1939 
1940 /*
1941  *	Set the system table
1942  */
1943 static int
1944 ASR_setSysTab(Asr_softc_t *sc)
1945 {
1946 	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1947 	PI2O_SET_SYSTAB_HEADER	      SystemTable;
1948 	Asr_softc_t		    * ha;
1949 	PI2O_SGE_SIMPLE_ELEMENT	      sg;
1950 	int			      retVal;
1951 
1952 	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
1953 	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1954 		return (ENOMEM);
1955 	}
1956 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1957 		++SystemTable->NumberEntries;
1958 	}
1959 	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
1960 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1961 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1962 	  M_TEMP, M_WAITOK)) == NULL) {
1963 		kfree(SystemTable, M_TEMP);
1964 		return (ENOMEM);
1965 	}
1966 	(void)ASR_fillMessage((void *)Message_Ptr,
1967 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1968 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1969 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1970 	  (I2O_VERSION_11 +
1971 	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1972 			/ sizeof(U32)) << 4)));
1973 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1974 	  I2O_EXEC_SYS_TAB_SET);
1975 	/*
1976 	 *	Call the LCT table to determine the number of device entries
1977 	 * to reserve space for.
1978 	 *	since this code is reused in several systems, code efficiency
1979 	 * is greater by using a shift operation rather than a divide by
1980 	 * sizeof(u_int32_t).
1981 	 */
1982 	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
1983 	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
1984 	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
1985 	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
1986 	++sg;
1987 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1988 		SG(sg, 0,
1989 		  ((ha->ha_next)
1990 		    ? (I2O_SGL_FLAGS_DIR)
1991 		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
1992 		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
1993 		++sg;
1994 	}
1995 	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1996 	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
1997 	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1998 	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1999 	kfree(Message_Ptr, M_TEMP);
2000 	kfree(SystemTable, M_TEMP);
2001 	return (retVal);
2002 } /* ASR_setSysTab */
2003 
/*
 *	Fetch the Hardware Resource Table (HRT) from the IOP and use it to
 * assign physical bus numbers to the LCT entries already held in
 * sc->ha_LCT, raising sc->ha_MaxBus as new busses are discovered.
 *
 *	Returns 0 on success, ENODEV when the HRT request fails.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}				Hrt, *HrtP = &Hrt;
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  HrtP, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to our local storage; we only allocated MAX_CHANNEL slots. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	/*
	 * Cross-reference each HRT entry with the LCT: a matching TID in the
	 * low 12 bits of the AdapterID identifies the device, and the high
	 * 16 bits carry the physical bus number.
	 */
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2061 
2062 /*
2063  *	Enable the adapter.
2064  */
2065 static int
2066 ASR_enableSys(Asr_softc_t *sc)
2067 {
2068 	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2069 	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2070 
2071 	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2072 	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2073 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2074 	  I2O_EXEC_SYS_ENABLE);
2075 	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2076 } /* ASR_enableSys */
2077 
2078 /*
2079  *	Perform the stages necessary to initialize the adapter
2080  */
2081 static int
2082 ASR_init(Asr_softc_t *sc)
2083 {
2084 	return ((ASR_initOutBound(sc) == 0)
2085 	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2086 	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2087 } /* ASR_init */
2088 
2089 /*
2090  *	Send a Synchronize Cache command to the target device.
2091  */
2092 static void
2093 ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
2094 {
2095 	tid_t TID;
2096 
2097 	/*
2098 	 * We will not synchronize the device when there are outstanding
2099 	 * commands issued by the OS (this is due to a locked up device,
2100 	 * as the OS normally would flush all outstanding commands before
2101 	 * issuing a shutdown or an adapter reset).
2102 	 */
2103 	if ((sc != NULL)
2104 	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2105 	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2106 	 && (TID != (tid_t)0)) {
2107 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2108 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2109 
2110 		Message_Ptr = &Message;
2111 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2112 		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2113 
2114 		I2O_MESSAGE_FRAME_setVersionOffset(
2115 		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2116 		  I2O_VERSION_11
2117 		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2118 		    - sizeof(I2O_SG_ELEMENT))
2119 			/ sizeof(U32)) << 4));
2120 		I2O_MESSAGE_FRAME_setMessageSize(
2121 		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2122 		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2123 		  - sizeof(I2O_SG_ELEMENT))
2124 			/ sizeof(U32));
2125 		I2O_MESSAGE_FRAME_setInitiatorAddress (
2126 		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2127 		I2O_MESSAGE_FRAME_setFunction(
2128 		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2129 		I2O_MESSAGE_FRAME_setTargetAddress(
2130 		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2131 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2132 		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2133 		  I2O_SCSI_SCB_EXEC);
2134 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2135 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2136 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2137 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2138 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2139 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2140 		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2141 		  DPT_ORGANIZATION_ID);
2142 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2143 		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2144 		Message_Ptr->CDB[1] = (lun << 5);
2145 
2146 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2147 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2148 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2149 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2150 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2151 
2152 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2153 
2154 	}
2155 }
2156 
2157 static void
2158 ASR_synchronize(Asr_softc_t *sc)
2159 {
2160 	int bus, target, lun;
2161 
2162 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2163 		for (target = 0; target <= sc->ha_MaxId; ++target) {
2164 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2165 				ASR_sync(sc,bus,target,lun);
2166 			}
2167 		}
2168 	}
2169 }
2170 
2171 /*
2172  *	Reset the HBA, targets and BUS.
2173  *		Currently this resets *all* the SCSI busses.
2174  */
2175 static __inline void
2176 asr_hbareset(Asr_softc_t *sc)
2177 {
2178 	ASR_synchronize(sc);
2179 	(void)ASR_reset(sc);
2180 } /* asr_hbareset */
2181 
2182 /*
2183  *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2184  * limit and a reduction in error checking (in the pre 4.0 case).
2185  */
2186 static int
2187 asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
2188 {
2189 	int		rid;
2190 	u_int32_t	p, l, s;
2191 
2192 	/*
2193 	 * I2O specification says we must find first *memory* mapped BAR
2194 	 */
2195 	for (rid = 0; rid < 4; rid++) {
2196 		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
2197 		if ((p & 1) == 0) {
2198 			break;
2199 		}
2200 	}
2201 	/*
2202 	 *	Give up?
2203 	 */
2204 	if (rid >= 4) {
2205 		rid = 0;
2206 	}
2207 	rid = PCIR_BAR(rid);
2208 	p = pci_read_config(dev, rid, sizeof(p));
2209 	pci_write_config(dev, rid, -1, sizeof(p));
2210 	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2211 	pci_write_config(dev, rid, p, sizeof(p));
2212 	if (l > MAX_MAP) {
2213 		l = MAX_MAP;
2214 	}
2215 	/*
2216 	 * The 2005S Zero Channel RAID solution is not a perfect PCI
2217 	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2218 	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2219 	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2220 	 * accessible via BAR0, the messaging registers are accessible
2221 	 * via BAR1. If the subdevice code is 50 to 59 decimal.
2222 	 */
2223 	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
2224 	if (s != 0xA5111044) {
2225 		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
2226 		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2227 		 && (ADPTDOMINATOR_SUB_ID_START <= s)
2228 		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2229 			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2230 		}
2231 	}
2232 	p &= ~15;
2233 	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2234 	  p, p + l, l, RF_ACTIVE);
2235 	if (sc->ha_mem_res == NULL) {
2236 		return (0);
2237 	}
2238 	sc->ha_Base = rman_get_start(sc->ha_mem_res);
2239 	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
2240 	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);
2241 
2242 	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2243 		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
2244 			return (0);
2245 		}
2246 		p = pci_read_config(dev, rid, sizeof(p));
2247 		pci_write_config(dev, rid, -1, sizeof(p));
2248 		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2249 		pci_write_config(dev, rid, p, sizeof(p));
2250 		if (l > MAX_MAP) {
2251 			l = MAX_MAP;
2252 		}
2253 		p &= ~15;
2254 		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2255 		  p, p + l, l, RF_ACTIVE);
2256 		if (sc->ha_mes_res == NULL) {
2257 			return (0);
2258 		}
2259 		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
2260 		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
2261 	} else {
2262 		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
2263 		sc->ha_frame_btag = sc->ha_i2o_btag;
2264 	}
2265 	return (1);
2266 } /* asr_pci_map_mem */
2267 
2268 /*
2269  *	A simplified copy of the real pci_map_int with additional
2270  * registration requirements.
2271  */
2272 static int
2273 asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2274 {
2275 	int rid = 0;
2276 
2277 	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2278 	  RF_ACTIVE | RF_SHAREABLE);
2279 	if (sc->ha_irq_res == NULL) {
2280 		return (0);
2281 	}
2282 	if (bus_setup_intr(dev, sc->ha_irq_res, 0,
2283 	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
2284 		return (0);
2285 	}
2286 	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2287 	return (1);
2288 } /* asr_pci_map_int */
2289 
2290 static void
2291 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2292 {
2293 	Asr_softc_t *sc;
2294 
2295 	if (error)
2296 		return;
2297 
2298 	sc = (Asr_softc_t *)arg;
2299 
2300 	/* XXX
2301 	 * The status word can be at a 64-bit address, but the existing
2302 	 * accessor macros simply cannot manipulate 64-bit addresses.
2303 	 */
2304 	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2305 	    offsetof(struct Asr_status_mem, status);
2306 	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2307 	    offsetof(struct Asr_status_mem, rstatus);
2308 }
2309 
2310 static int
2311 asr_alloc_dma(Asr_softc_t *sc)
2312 {
2313 	device_t dev;
2314 
2315 	dev = sc->ha_dev;
2316 
2317 	if (bus_dma_tag_create(NULL,			/* parent */
2318 			       1, 0,			/* algnmnt, boundary */
2319 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2320 			       BUS_SPACE_MAXADDR,	/* highaddr */
2321 			       NULL, NULL,		/* filter, filterarg */
2322 			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2323 			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
2324 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2325 			       0,			/* flags */
2326 			       &sc->ha_parent_dmat)) {
2327 		device_printf(dev, "Cannot allocate parent DMA tag\n");
2328 		return (ENOMEM);
2329 	}
2330 
2331 	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
2332 			       1, 0,			/* algnmnt, boundary */
2333 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2334 			       BUS_SPACE_MAXADDR,	/* highaddr */
2335 			       NULL, NULL,		/* filter, filterarg */
2336 			       sizeof(sc->ha_statusmem),/* maxsize */
2337 			       1,			/* nsegments */
2338 			       sizeof(sc->ha_statusmem),/* maxsegsize */
2339 			       0,			/* flags */
2340 			       &sc->ha_statusmem_dmat)) {
2341 		device_printf(dev, "Cannot allocate status DMA tag\n");
2342 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2343 		return (ENOMEM);
2344 	}
2345 
2346 	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
2347 	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
2348 		device_printf(dev, "Cannot allocate status memory\n");
2349 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2350 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2351 		return (ENOMEM);
2352 	}
2353 	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
2354 	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);
2355 
2356 	return (0);
2357 }
2358 
2359 static void
2360 asr_release_dma(Asr_softc_t *sc)
2361 {
2362 
2363 	if (sc->ha_rstatus_phys != 0)
2364 		bus_dmamap_unload(sc->ha_statusmem_dmat,
2365 		    sc->ha_statusmem_dmamap);
2366 	if (sc->ha_statusmem != NULL)
2367 		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
2368 		    sc->ha_statusmem_dmamap);
2369 	if (sc->ha_statusmem_dmat != NULL)
2370 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2371 	if (sc->ha_parent_dmat != NULL)
2372 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2373 }
2374 
2375 /*
2376  *	Attach the devices, and virtual devices to the driver list.
2377  */
2378 static int
2379 asr_attach(device_t dev)
2380 {
2381 	PI2O_EXEC_STATUS_GET_REPLY status;
2382 	PI2O_LCT_ENTRY		 Device;
2383 	Asr_softc_t		 *sc, **ha;
2384 	struct scsi_inquiry_data *iq;
2385 	int			 bus, size, unit;
2386 	int			 error;
2387 
2388 	sc = device_get_softc(dev);
2389 	unit = device_get_unit(dev);
2390 	sc->ha_dev = dev;
2391 
2392 	if (Asr_softc_list == NULL) {
2393 		/*
2394 		 *	Fixup the OS revision as saved in the dptsig for the
2395 		 *	engine (dptioctl.h) to pick up.
2396 		 */
2397 		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2398 	}
2399 	/*
2400 	 *	Initialize the software structure
2401 	 */
2402 	LIST_INIT(&(sc->ha_ccb));
2403 	/* Link us into the HA list */
2404 	for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next));
2405 		*(ha) = sc;
2406 
2407 	/*
2408 	 *	This is the real McCoy!
2409 	 */
2410 	if (!asr_pci_map_mem(dev, sc)) {
2411 		device_printf(dev, "could not map memory\n");
2412 		return(ENXIO);
2413 	}
2414 	/* Enable if not formerly enabled */
2415 	pci_write_config(dev, PCIR_COMMAND,
2416 	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2417 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2418 
2419 	sc->ha_pciBusNum = pci_get_bus(dev);
2420 	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2421 
2422 	if ((error = asr_alloc_dma(sc)) != 0)
2423 		return (error);
2424 
2425 	/* Check if the device is there? */
2426 	if (ASR_resetIOP(sc) == 0) {
2427 		device_printf(dev, "Cannot reset adapter\n");
2428 		asr_release_dma(sc);
2429 		return (EIO);
2430 	}
2431 	status = &sc->ha_statusmem->status;
2432 	if (ASR_getStatus(sc) == NULL) {
2433 		device_printf(dev, "could not initialize hardware\n");
2434 		asr_release_dma(sc);
2435 		return(ENODEV);
2436 	}
2437 	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2438 	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2439 	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2440 	sc->ha_SystemTable.IopState = status->IopState;
2441 	sc->ha_SystemTable.MessengerType = status->MessengerType;
2442 	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2443 	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2444 	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2445 
2446 	if (!asr_pci_map_int(dev, (void *)sc)) {
2447 		device_printf(dev, "could not map interrupt\n");
2448 		asr_release_dma(sc);
2449 		return(ENXIO);
2450 	}
2451 
2452 	/* Adjust the maximim inbound count */
2453 	if (((sc->ha_QueueSize =
2454 	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2455 	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2456 		sc->ha_QueueSize = MAX_INBOUND;
2457 	}
2458 
2459 	/* Adjust the maximum outbound count */
2460 	if (((sc->ha_Msgs_Count =
2461 	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2462 	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2463 		sc->ha_Msgs_Count = MAX_OUTBOUND;
2464 	}
2465 	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2466 		sc->ha_Msgs_Count = sc->ha_QueueSize;
2467 	}
2468 
2469 	/* Adjust the maximum SG size to adapter */
2470 	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2471 	    2)) > MAX_INBOUND_SIZE) {
2472 		size = MAX_INBOUND_SIZE;
2473 	}
2474 	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2475 	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2476 
2477 	/*
2478 	 *	Only do a bus/HBA reset on the first time through. On this
2479 	 * first time through, we do not send a flush to the devices.
2480 	 */
2481 	if (ASR_init(sc) == 0) {
2482 		struct BufferInfo {
2483 			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2484 			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2485 			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2486 		} Buffer;
2487 		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2488 #define FW_DEBUG_BLED_OFFSET 8
2489 
2490 		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2491 		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2492 		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2493 			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2494 			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2495 		}
2496 		if (ASR_acquireLct(sc) == 0) {
2497 			(void)ASR_acquireHrt(sc);
2498 		}
2499 	} else {
2500 		device_printf(dev, "failed to initialize\n");
2501 		asr_release_dma(sc);
2502 		return(ENXIO);
2503 	}
2504 	/*
2505 	 *	Add in additional probe responses for more channels. We
2506 	 * are reusing the variable `target' for a channel loop counter.
2507 	 * Done here because of we need both the acquireLct and
2508 	 * acquireHrt data.
2509 	 */
2510 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2511 	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2512 		if (Device->le_type == I2O_UNKNOWN) {
2513 			continue;
2514 		}
2515 		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2516 			if (Device->le_target > sc->ha_MaxId) {
2517 				sc->ha_MaxId = Device->le_target;
2518 			}
2519 			if (Device->le_lun > sc->ha_MaxLun) {
2520 				sc->ha_MaxLun = Device->le_lun;
2521 			}
2522 		}
2523 		if (((Device->le_type & I2O_PORT) != 0)
2524 		 && (Device->le_bus <= MAX_CHANNEL)) {
2525 			/* Do not increase MaxId for efficiency */
2526 			sc->ha_adapter_target[Device->le_bus] =
2527 			    Device->le_target;
2528 		}
2529 	}
2530 
2531 	/*
2532 	 *	Print the HBA model number as inquired from the card.
2533 	 */
2534 
2535 	device_printf(dev, " ");
2536 
2537 	if ((iq = (struct scsi_inquiry_data *)kmalloc(
2538 	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2539 	    NULL) {
2540 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2541 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2542 		int					posted = 0;
2543 
2544 		Message_Ptr = &Message;
2545 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2546 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2547 
2548 		I2O_MESSAGE_FRAME_setVersionOffset(
2549 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2550 		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2551 		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2552 		I2O_MESSAGE_FRAME_setMessageSize(
2553 		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2554 		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2555 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2556 		    sizeof(U32));
2557 		I2O_MESSAGE_FRAME_setInitiatorAddress(
2558 		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2559 		I2O_MESSAGE_FRAME_setFunction(
2560 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2561 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2562 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2563 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2564 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2565 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2566 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2567 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2568 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2569 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2570 		    DPT_ORGANIZATION_ID);
2571 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2572 		Message_Ptr->CDB[0] = INQUIRY;
2573 		Message_Ptr->CDB[4] =
2574 		    (unsigned char)sizeof(struct scsi_inquiry_data);
2575 		if (Message_Ptr->CDB[4] == 0) {
2576 			Message_Ptr->CDB[4] = 255;
2577 		}
2578 
2579 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2580 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2581 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2582 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2583 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2584 
2585 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2586 		  Message_Ptr, sizeof(struct scsi_inquiry_data));
2587 		SG(&(Message_Ptr->SGL), 0,
2588 		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2589 		  iq, sizeof(struct scsi_inquiry_data));
2590 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2591 
2592 		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2593 			kprintf (" ");
2594 			ASR_prstring (iq->vendor, 8);
2595 			++posted;
2596 		}
2597 		if (iq->product[0] && (iq->product[0] != ' ')) {
2598 			kprintf (" ");
2599 			ASR_prstring (iq->product, 16);
2600 			++posted;
2601 		}
2602 		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2603 			kprintf (" FW Rev. ");
2604 			ASR_prstring (iq->revision, 4);
2605 			++posted;
2606 		}
2607 		kfree(iq, M_TEMP);
2608 		if (posted) {
2609 			kprintf (",");
2610 		}
2611 	}
2612 	kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2613 	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2614 
2615 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2616 		struct cam_devq	  * devq;
2617 		int		    QueueSize = sc->ha_QueueSize;
2618 
2619 		if (QueueSize > MAX_INBOUND) {
2620 			QueueSize = MAX_INBOUND;
2621 		}
2622 
2623 		/*
2624 		 *	Create the device queue for our SIM(s).
2625 		 */
2626 		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2627 			continue;
2628 		}
2629 
2630 		/*
2631 		 *	Construct our first channel SIM entry
2632 		 */
2633 		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2634 						unit, &sim_mplock,
2635 						1, QueueSize, devq);
2636 		if (sc->ha_sim[bus] == NULL) {
2637 			continue;
2638 		}
2639 
2640 		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
2641 			cam_sim_free(sc->ha_sim[bus]);
2642 			sc->ha_sim[bus] = NULL;
2643 			continue;
2644 		}
2645 
2646 		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2647 		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2648 		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2649 			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2650 			cam_sim_free(sc->ha_sim[bus]);
2651 			sc->ha_sim[bus] = NULL;
2652 			continue;
2653 		}
2654 	}
2655 
2656 	/*
2657 	 *	Generate the device node information
2658 	 */
2659 	sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
2660 			       "asr%d", unit);
2661 	if (sc->ha_devt != NULL)
2662 		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2663 	sc->ha_devt->si_drv1 = sc;
2664 	return(0);
2665 } /* asr_attach */
2666 
/*
 *	CAM polled-mode entry point: service any pending completions
 * exactly as the interrupt handler would.
 */
static void
asr_poll(struct cam_sim *sim)
{
	asr_intr(cam_sim_softc(sim));
} /* asr_poll */
2672 
2673 static void
2674 asr_action(struct cam_sim *sim, union ccb  *ccb)
2675 {
2676 	struct Asr_softc *sc;
2677 
2678 	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2679 			 ccb->ccb_h.func_code);
2680 
2681 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2682 
2683 	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2684 
2685 	switch (ccb->ccb_h.func_code) {
2686 
2687 	/* Common cases first */
2688 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2689 	{
2690 		struct Message {
2691 			char M[MAX_INBOUND_SIZE];
2692 		} Message;
2693 		PI2O_MESSAGE_FRAME   Message_Ptr;
2694 
2695 		/* Reject incoming commands while we are resetting the card */
2696 		if (sc->ha_in_reset != HA_OPERATIONAL) {
2697 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2698 			if (sc->ha_in_reset >= HA_OFF_LINE) {
2699 				/* HBA is now off-line */
2700 				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2701 			} else {
2702 				/* HBA currently resetting, try again later. */
2703 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2704 			}
2705 			debug_asr_cmd_printf (" e\n");
2706 			xpt_done(ccb);
2707 			debug_asr_cmd_printf (" q\n");
2708 			break;
2709 		}
2710 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2711 			kprintf(
2712 			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2713 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2714 			  ccb->csio.cdb_io.cdb_bytes[0],
2715 			  cam_sim_bus(sim),
2716 			  ccb->ccb_h.target_id,
2717 			  ccb->ccb_h.target_lun);
2718 		}
2719 		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2720 				     cam_sim_bus(sim), ccb->ccb_h.target_id,
2721 				     ccb->ccb_h.target_lun);
2722 		debug_asr_dump_ccb(ccb);
2723 
2724 		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2725 		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2726 			debug_asr_cmd2_printf ("TID=%x:\n",
2727 			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2728 			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2729 			debug_asr_cmd2_dump_message(Message_Ptr);
2730 			debug_asr_cmd1_printf (" q");
2731 
2732 			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2733 				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2734 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2735 				debug_asr_cmd_printf (" E\n");
2736 				xpt_done(ccb);
2737 			}
2738 			debug_asr_cmd_printf(" Q\n");
2739 			break;
2740 		}
2741 		/*
2742 		 *	We will get here if there is no valid TID for the device
2743 		 * referenced in the scsi command packet.
2744 		 */
2745 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2746 		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2747 		debug_asr_cmd_printf (" B\n");
2748 		xpt_done(ccb);
2749 		break;
2750 	}
2751 
2752 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
2753 		/* Reset HBA device ... */
2754 		asr_hbareset (sc);
2755 		ccb->ccb_h.status = CAM_REQ_CMP;
2756 		xpt_done(ccb);
2757 		break;
2758 
2759 	case XPT_ABORT:			/* Abort the specified CCB */
2760 		/* XXX Implement */
2761 		ccb->ccb_h.status = CAM_REQ_INVALID;
2762 		xpt_done(ccb);
2763 		break;
2764 
2765 	case XPT_SET_TRAN_SETTINGS:
2766 		/* XXX Implement */
2767 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2768 		xpt_done(ccb);
2769 		break;
2770 
2771 	case XPT_GET_TRAN_SETTINGS:
2772 	/* Get default/user set transfer settings for the target */
2773 	{
2774 		struct	ccb_trans_settings *cts = &(ccb->cts);
2775 		struct ccb_trans_settings_scsi *scsi =
2776 		    &cts->proto_specific.scsi;
2777 		struct ccb_trans_settings_spi *spi =
2778 		    &cts->xport_specific.spi;
2779 
2780 		if (cts->type == CTS_TYPE_USER_SETTINGS) {
2781 			cts->protocol = PROTO_SCSI;
2782 			cts->protocol_version = SCSI_REV_2;
2783 			cts->transport = XPORT_SPI;
2784 			cts->transport_version = 2;
2785 
2786 			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2787 			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2788 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2789 			spi->sync_period = 6; /* 40MHz */
2790 			spi->sync_offset = 15;
2791 			spi->valid = CTS_SPI_VALID_SYNC_RATE
2792 				   | CTS_SPI_VALID_SYNC_OFFSET
2793 				   | CTS_SPI_VALID_BUS_WIDTH
2794 				   | CTS_SPI_VALID_DISC;
2795 			scsi->valid = CTS_SCSI_VALID_TQ;
2796 
2797 			ccb->ccb_h.status = CAM_REQ_CMP;
2798 		} else {
2799 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2800 		}
2801 		xpt_done(ccb);
2802 		break;
2803 	}
2804 
2805 	case XPT_CALC_GEOMETRY:
2806 	{
2807 		struct	  ccb_calc_geometry *ccg;
2808 		u_int32_t size_mb;
2809 		u_int32_t secs_per_cylinder;
2810 
2811 		ccg = &(ccb->ccg);
2812 		size_mb = ccg->volume_size
2813 			/ ((1024L * 1024L) / ccg->block_size);
2814 
2815 		if (size_mb > 4096) {
2816 			ccg->heads = 255;
2817 			ccg->secs_per_track = 63;
2818 		} else if (size_mb > 2048) {
2819 			ccg->heads = 128;
2820 			ccg->secs_per_track = 63;
2821 		} else if (size_mb > 1024) {
2822 			ccg->heads = 65;
2823 			ccg->secs_per_track = 63;
2824 		} else {
2825 			ccg->heads = 64;
2826 			ccg->secs_per_track = 32;
2827 		}
2828 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2829 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2830 		ccb->ccb_h.status = CAM_REQ_CMP;
2831 		xpt_done(ccb);
2832 		break;
2833 	}
2834 
2835 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2836 		ASR_resetBus (sc, cam_sim_bus(sim));
2837 		ccb->ccb_h.status = CAM_REQ_CMP;
2838 		xpt_done(ccb);
2839 		break;
2840 
2841 	case XPT_TERM_IO:		/* Terminate the I/O process */
2842 		/* XXX Implement */
2843 		ccb->ccb_h.status = CAM_REQ_INVALID;
2844 		xpt_done(ccb);
2845 		break;
2846 
2847 	case XPT_PATH_INQ:		/* Path routing inquiry */
2848 	{
2849 		struct ccb_pathinq *cpi = &(ccb->cpi);
2850 
2851 		cpi->version_num = 1; /* XXX??? */
2852 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2853 		cpi->target_sprt = 0;
2854 		/* Not necessary to reset bus, done by HDM initialization */
2855 		cpi->hba_misc = PIM_NOBUSRESET;
2856 		cpi->hba_eng_cnt = 0;
2857 		cpi->max_target = sc->ha_MaxId;
2858 		cpi->max_lun = sc->ha_MaxLun;
2859 		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2860 		cpi->bus_id = cam_sim_bus(sim);
2861 		cpi->base_transfer_speed = 3300;
2862 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2863 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2864 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2865 		cpi->unit_number = cam_sim_unit(sim);
2866 		cpi->ccb_h.status = CAM_REQ_CMP;
2867                 cpi->transport = XPORT_SPI;
2868                 cpi->transport_version = 2;
2869                 cpi->protocol = PROTO_SCSI;
2870                 cpi->protocol_version = SCSI_REV_2;
2871 		xpt_done(ccb);
2872 		break;
2873 	}
2874 	default:
2875 		ccb->ccb_h.status = CAM_REQ_INVALID;
2876 		xpt_done(ccb);
2877 		break;
2878 	}
2879 } /* asr_action */
2880 
2881 /*
2882  * Handle processing of current CCB as pointed to by the Status.
2883  */
2884 static int
2885 asr_intr(Asr_softc_t *sc)
2886 {
2887 	int processed;
2888 
2889 	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2890 	    processed = 1) {
2891 		union asr_ccb			   *ccb;
2892 		u_int				    dsc;
2893 		U32				    ReplyOffset;
2894 		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2895 
2896 		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2897 		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
2898 			break;
2899 		}
2900 		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2901 		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2902 		/*
2903 		 * We do not need any (optional byteswapping) method access to
2904 		 * the Initiator context field.
2905 		 */
2906 		ccb = (union asr_ccb *)(long)
2907 		  I2O_MESSAGE_FRAME_getInitiatorContext64(
2908 		    &(Reply->StdReplyFrame.StdMessageFrame));
2909 		if (I2O_MESSAGE_FRAME_getMsgFlags(
2910 		  &(Reply->StdReplyFrame.StdMessageFrame))
2911 		  & I2O_MESSAGE_FLAGS_FAIL) {
2912 			I2O_UTIL_NOP_MESSAGE	Message;
2913 			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
2914 			U32			MessageOffset;
2915 
2916 			MessageOffset = (u_long)
2917 			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2918 			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2919 			/*
2920 			 *  Get the Original Message Frame's address, and get
2921 			 * it's Transaction Context into our space. (Currently
2922 			 * unused at original authorship, but better to be
2923 			 * safe than sorry). Straight copy means that we
2924 			 * need not concern ourselves with the (optional
2925 			 * byteswapping) method access.
2926 			 */
2927 			Reply->StdReplyFrame.TransactionContext =
2928 			    bus_space_read_4(sc->ha_frame_btag,
2929 			    sc->ha_frame_bhandle, MessageOffset +
2930 			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2931 			    TransactionContext));
2932 			/*
2933 			 *	For 64 bit machines, we need to reconstruct the
2934 			 * 64 bit context.
2935 			 */
2936 			ccb = (union asr_ccb *)(long)
2937 			  I2O_MESSAGE_FRAME_getInitiatorContext64(
2938 			    &(Reply->StdReplyFrame.StdMessageFrame));
2939 			/*
2940 			 * Unique error code for command failure.
2941 			 */
2942 			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2943 			  &(Reply->StdReplyFrame), (u_int16_t)-2);
2944 			/*
2945 			 *  Modify the message frame to contain a NOP and
2946 			 * re-issue it to the controller.
2947 			 */
2948 			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2949 			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2950 #if (I2O_UTIL_NOP != 0)
2951 				I2O_MESSAGE_FRAME_setFunction (
2952 				  &(Message_Ptr->StdMessageFrame),
2953 				  I2O_UTIL_NOP);
2954 #endif
2955 			/*
2956 			 *  Copy the packet out to the Original Message
2957 			 */
2958 			asr_set_frame(sc, Message_Ptr, MessageOffset,
2959 				      sizeof(I2O_UTIL_NOP_MESSAGE));
2960 			/*
2961 			 *  Issue the NOP
2962 			 */
2963 			asr_set_ToFIFO(sc, MessageOffset);
2964 		}
2965 
2966 		/*
2967 		 *	Asynchronous command with no return requirements,
2968 		 * and a generic handler for immunity against odd error
2969 		 * returns from the adapter.
2970 		 */
2971 		if (ccb == NULL) {
2972 			/*
2973 			 * Return Reply so that it can be used for the
2974 			 * next command
2975 			 */
2976 			asr_set_FromFIFO(sc, ReplyOffset);
2977 			continue;
2978 		}
2979 
2980 		/* Welease Wadjah! (and stop timeouts) */
2981 		ASR_ccbRemove (sc, ccb);
2982 
2983 		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
2984 		    &(Reply->StdReplyFrame));
2985 		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
2986 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2987 		switch (dsc) {
2988 
2989 		case I2O_SCSI_DSC_SUCCESS:
2990 			ccb->ccb_h.status |= CAM_REQ_CMP;
2991 			break;
2992 
2993 		case I2O_SCSI_DSC_CHECK_CONDITION:
2994 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
2995 			    CAM_AUTOSNS_VALID;
2996 			break;
2997 
2998 		case I2O_SCSI_DSC_BUSY:
2999 			/* FALLTHRU */
3000 		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3001 			/* FALLTHRU */
3002 		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3003 			/* FALLTHRU */
3004 		case I2O_SCSI_HBA_DSC_BUS_BUSY:
3005 			ccb->ccb_h.status |= CAM_SCSI_BUSY;
3006 			break;
3007 
3008 		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3009 			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3010 			break;
3011 
3012 		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3013 			/* FALLTHRU */
3014 		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3015 			/* FALLTHRU */
3016 		case I2O_SCSI_HBA_DSC_LUN_INVALID:
3017 			/* FALLTHRU */
3018 		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3019 			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3020 			break;
3021 
3022 		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3023 			/* FALLTHRU */
3024 		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3025 			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3026 			break;
3027 
3028 		default:
3029 			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3030 			break;
3031 		}
3032 		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3033 			ccb->csio.resid -=
3034 			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3035 			    Reply);
3036 		}
3037 
3038 		/* Sense data in reply packet */
3039 		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3040 			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3041 
3042 			if (size) {
3043 				if (size > sizeof(ccb->csio.sense_data)) {
3044 					size = sizeof(ccb->csio.sense_data);
3045 				}
3046 				if (size > I2O_SCSI_SENSE_DATA_SZ) {
3047 					size = I2O_SCSI_SENSE_DATA_SZ;
3048 				}
3049 				if ((ccb->csio.sense_len)
3050 				 && (size > ccb->csio.sense_len)) {
3051 					size = ccb->csio.sense_len;
3052 				}
3053 				if (size < ccb->csio.sense_len) {
3054 					ccb->csio.sense_resid =
3055 					    ccb->csio.sense_len - size;
3056 				} else {
3057 					ccb->csio.sense_resid = 0;
3058 				}
3059 				bzero(&(ccb->csio.sense_data),
3060 				    sizeof(ccb->csio.sense_data));
3061 				bcopy(Reply->SenseData,
3062 				      &(ccb->csio.sense_data), size);
3063 			}
3064 		}
3065 
3066 		/*
3067 		 * Return Reply so that it can be used for the next command
3068 		 * since we have no more need for it now
3069 		 */
3070 		asr_set_FromFIFO(sc, ReplyOffset);
3071 
3072 		if (ccb->ccb_h.path) {
3073 			xpt_done ((union ccb *)ccb);
3074 		} else {
3075 			wakeup (ccb);
3076 		}
3077 	}
3078 	return (processed);
3079 } /* asr_intr */
3080 
#undef QueueSize	/* Grrrr */
#undef SG_Size		/* Grrrr */

/*
 *	Meant to be included at the bottom of asr.c !!!
 */

/*
 *	Included here as hard coded. Done because other necessary include
 *	files utilize C++ comment structures which make them a nuisance to
 *	include here just to pick up these three typedefs.
 */
typedef U32   DPT_TAG_T;
typedef U32   DPT_MSG_T;
typedef U32   DPT_RTN_T;

#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
#include	"dev/raid/asr/osd_unix.h"

#define	asr_unit(dev)	  minor(dev)

/*
 * Exclusive-open flag for the control device: set by asr_open() (root
 * only), cleared unconditionally by asr_close().  Shared across all
 * adapter instances -- only one DPT management session at a time.
 */
static u_int8_t ASR_ctlr_held;
3103 
3104 static int
3105 asr_open(struct dev_open_args *ap)
3106 {
3107 	cdev_t dev = ap->a_head.a_dev;
3108 	int		 error;
3109 
3110 	if (dev->si_drv1 == NULL) {
3111 		return (ENODEV);
3112 	}
3113 	crit_enter();
3114 	if (ASR_ctlr_held) {
3115 		error = EBUSY;
3116 	} else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
3117 		++ASR_ctlr_held;
3118 	}
3119 	crit_exit();
3120 	return (error);
3121 } /* asr_open */
3122 
3123 static int
3124 asr_close(struct dev_close_args *ap)
3125 {
3126 
3127 	ASR_ctlr_held = 0;
3128 	return (0);
3129 } /* asr_close */
3130 
3131 
3132 /*-------------------------------------------------------------------------*/
3133 /*		      Function ASR_queue_i				   */
3134 /*-------------------------------------------------------------------------*/
3135 /* The Parameters Passed To This Function Are :				   */
3136 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3137 /*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3138 /*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3139 /*									   */
3140 /* This Function Will Take The User Request Packet And Convert It To An	   */
3141 /* I2O MSG And Send It Off To The Adapter.				   */
3142 /*									   */
3143 /* Return : 0 For OK, Error Code Otherwise				   */
3144 /*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb				   * ccb;
	/* User-space address of the reply area within Packet (copyout only) */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
	PI2O_MESSAGE_FRAME			     Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
	int					     MessageSizeInBytes;
	int					     ReplySizeInBytes;
	int					     error;
	int					     s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t			   UserSpace;
		I2O_FLAGS_COUNT		   FlagsCount;
		char			   KernelSpace[sizeof(long)];
	}					   * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new work while the adapter is reporting a fault code */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	/* NOTE(review): kmalloc(M_WAITOK) does not return NULL on DragonFly;
	 * the NULL checks below are defensive only -- confirm before removal. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	kfree(Message_Ptr, M_TEMP);
	/*
	 * The three Executive-class functions below are handled inline and
	 * return a raw status word (or status frame) rather than a full
	 * I2O reply message.
	 */
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	PI2O_EXEC_STATUS_GET_REPLY status;

		status = &sc->ha_statusmem->status;
		if (ASR_getStatus(sc) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		/* NOTE(review): sizeof(status) is the size of the POINTER,
		 * not of the status frame -- verify the intended copyout
		 * length against the consumer of this ioctl. */
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-fetch the full message now that its declared size is known */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	kfree(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		kfree(Message_Ptr, M_TEMP);
		/* NOTE(review): `error' is 0 here (the copyin above
		 * succeeded), so the errno printed below is stale. */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate the working reply, at least a SCSI-error frame in size */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	/* Seed the reply with the request's contexts and the REPLY flag */
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			kfree(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			kfree(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int	len;

			/* Only simple-address SG elements are supported */
			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/*
			 * Shadow each user SG buffer with a kernel bounce
			 * buffer; the element's original flags and user
			 * address are kept for the copyout pass at the end.
			 */
			if ((elm = (struct ioctlSgList_S *)kmalloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 *	If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/* Grow the frame to fit the new SG
					 * element, preserving everything
					 * before and after the split point */
					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    kmalloc (MessageSizeInBytes,
					     M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					kfree(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind all bounce buffers on any SG failure */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			kfree(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			kfree(elm, M_TEMP);
		}
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	kfree(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	crit_enter();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			crit_exit();
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, 0, "asr", hz);
	}
	crit_exit();

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		if (size < ccb->csio.sense_len) {
			ccb->csio.sense_resid = ccb->csio.sense_len - size;
		} else {
			ccb->csio.sense_resid = 0;
		}
		/* NOTE(review): the bzero below clears the CCB sense buffer
		 * BEFORE it is copied out, so the reply's SenseData is always
		 * zeroed.  This is the inverse of the order in asr_intr()
		 * (zero destination, then copy) -- looks like a latent bug;
		 * confirm intent before changing. */
		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		kfree(elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
				ReplySizeInBytes);
	}
	kfree(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */
3591 
3592 /*----------------------------------------------------------------------*/
3593 /*			    Function asr_ioctl			       */
3594 /*----------------------------------------------------------------------*/
3595 /* The parameters passed to this function are :				*/
3596 /*     dev  : Device number.						*/
3597 /*     cmd  : Ioctl Command						*/
3598 /*     data : User Argument Passed In.					*/
3599 /*     flag : Mode Parameter						*/
3600 /*     proc : Process Parameter						*/
3601 /*									*/
3602 /* This function is the user interface into this adapter driver		*/
3603 /*									*/
3604 /* Return : zero if OK, error code if not				*/
3605 /*----------------------------------------------------------------------*/
3606 
3607 static int
3608 asr_ioctl(struct dev_ioctl_args *ap)
3609 {
3610 	cdev_t dev = ap->a_head.a_dev;
3611 	u_long cmd = ap->a_cmd;
3612 	caddr_t data = ap->a_data;
3613 	Asr_softc_t	*sc = dev->si_drv1;
3614 	int		i, error = 0;
3615 #ifdef ASR_IOCTL_COMPAT
3616 	int		j;
3617 #endif /* ASR_IOCTL_COMPAT */
3618 
3619 	if (sc == NULL)
3620 		return (EINVAL);
3621 
3622 	switch(cmd) {
3623 	case DPT_SIGNATURE:
3624 #ifdef ASR_IOCTL_COMPAT
3625 #if (dsDescription_size != 50)
3626 	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3627 #endif
3628 		if (cmd & 0xFFFF0000) {
3629 			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3630 			return (0);
3631 		}
3632 	/* Traditional version of the ioctl interface */
3633 	case DPT_SIGNATURE & 0x0000FFFF:
3634 #endif
3635 		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3636 				sizeof(dpt_sig_S)));
3637 
3638 	/* Traditional version of the ioctl interface */
3639 	case DPT_CTRLINFO & 0x0000FFFF:
3640 	case DPT_CTRLINFO: {
3641 		struct {
3642 			u_int16_t length;
3643 			u_int16_t drvrHBAnum;
3644 			u_int32_t baseAddr;
3645 			u_int16_t blinkState;
3646 			u_int8_t  pciBusNum;
3647 			u_int8_t  pciDeviceNum;
3648 			u_int16_t hbaFlags;
3649 			u_int16_t Interrupt;
3650 			u_int32_t reserved1;
3651 			u_int32_t reserved2;
3652 			u_int32_t reserved3;
3653 		} CtlrInfo;
3654 
3655 		bzero(&CtlrInfo, sizeof(CtlrInfo));
3656 		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3657 		CtlrInfo.drvrHBAnum = asr_unit(dev);
3658 		CtlrInfo.baseAddr = sc->ha_Base;
3659 		i = ASR_getBlinkLedCode (sc);
3660 		if (i == -1)
3661 			i = 0;
3662 
3663 		CtlrInfo.blinkState = i;
3664 		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3665 		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3666 #define	FLG_OSD_PCI_VALID 0x0001
3667 #define	FLG_OSD_DMA	  0x0002
3668 #define	FLG_OSD_I2O	  0x0004
3669 		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3670 		CtlrInfo.Interrupt = sc->ha_irq;
3671 #ifdef ASR_IOCTL_COMPAT
3672 		if (cmd & 0xffff0000)
3673 			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3674 		else
3675 #endif /* ASR_IOCTL_COMPAT */
3676 		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3677 	}	return (error);
3678 
3679 	/* Traditional version of the ioctl interface */
3680 	case DPT_SYSINFO & 0x0000FFFF:
3681 	case DPT_SYSINFO: {
3682 		sysInfo_S	Info;
3683 #ifdef ASR_IOCTL_COMPAT
3684 		char	      * cp;
3685 		/* Kernel Specific ptok `hack' */
3686 #define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3687 
3688 		bzero(&Info, sizeof(Info));
3689 
3690 		/* Appears I am the only person in the Kernel doing this */
3691 		outb (0x70, 0x12);
3692 		i = inb(0x71);
3693 		j = i >> 4;
3694 		if (i == 0x0f) {
3695 			outb (0x70, 0x19);
3696 			j = inb (0x71);
3697 		}
3698 		Info.drive0CMOS = j;
3699 
3700 		j = i & 0x0f;
3701 		if (i == 0x0f) {
3702 			outb (0x70, 0x1a);
3703 			j = inb (0x71);
3704 		}
3705 		Info.drive1CMOS = j;
3706 
3707 		Info.numDrives = *((char *)ptok(0x475));
3708 #else /* ASR_IOCTL_COMPAT */
3709 		bzero(&Info, sizeof(Info));
3710 #endif /* ASR_IOCTL_COMPAT */
3711 
3712 		Info.processorFamily = ASR_sig.dsProcessorFamily;
3713 #if defined(__i386__)
3714 		switch (cpu) {
3715 		case CPU_386SX: case CPU_386:
3716 			Info.processorType = PROC_386; break;
3717 		case CPU_486SX: case CPU_486:
3718 			Info.processorType = PROC_486; break;
3719 		case CPU_586:
3720 			Info.processorType = PROC_PENTIUM; break;
3721 		case CPU_686:
3722 			Info.processorType = PROC_SEXIUM; break;
3723 		}
3724 #endif
3725 
3726 		Info.osType = OS_BSDI_UNIX;
3727 		Info.osMajorVersion = osrelease[0] - '0';
3728 		Info.osMinorVersion = osrelease[2] - '0';
3729 		/* Info.osRevision = 0; */
3730 		/* Info.osSubRevision = 0; */
3731 		Info.busType = SI_PCI_BUS;
3732 		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3733 
3734 #ifdef ASR_IOCTL_COMPAT
3735 		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3736 		/* Go Out And Look For I2O SmartROM */
3737 		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3738 			int k;
3739 
3740 			cp = ptok(j);
3741 			if (*((unsigned short *)cp) != 0xAA55) {
3742 				continue;
3743 			}
3744 			j += (cp[2] * 512) - 2048;
3745 			if ((*((u_long *)(cp + 6))
3746 			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3747 			 || (*((u_long *)(cp + 10))
3748 			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3749 				continue;
3750 			}
3751 			cp += 0x24;
3752 			for (k = 0; k < 64; ++k) {
3753 				if (*((unsigned short *)cp)
3754 				 == (' ' + ('v' * 256))) {
3755 					break;
3756 				}
3757 			}
3758 			if (k < 64) {
3759 				Info.smartROMMajorVersion
3760 				    = *((unsigned char *)(cp += 4)) - '0';
3761 				Info.smartROMMinorVersion
3762 				    = *((unsigned char *)(cp += 2));
3763 				Info.smartROMRevision
3764 				    = *((unsigned char *)(++cp));
3765 				Info.flags |= SI_SmartROMverValid;
3766 				Info.flags &= ~SI_NO_SmartROM;
3767 				break;
3768 			}
3769 		}
3770 		/* Get The Conventional Memory Size From CMOS */
3771 		outb (0x70, 0x16);
3772 		j = inb (0x71);
3773 		j <<= 8;
3774 		outb (0x70, 0x15);
3775 		j |= inb(0x71);
3776 		Info.conventionalMemSize = j;
3777 
3778 		/* Get The Extended Memory Found At Power On From CMOS */
3779 		outb (0x70, 0x31);
3780 		j = inb (0x71);
3781 		j <<= 8;
3782 		outb (0x70, 0x30);
3783 		j |= inb(0x71);
3784 		Info.extendedMemSize = j;
3785 		Info.flags |= SI_MemorySizeValid;
3786 
3787 		/* Copy Out The Info Structure To The User */
3788 		if (cmd & 0xFFFF0000)
3789 			bcopy(&Info, data, sizeof(Info));
3790 		else
3791 #endif /* ASR_IOCTL_COMPAT */
3792 		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3793 		return (error); }
3794 
3795 		/* Get The BlinkLED State */
3796 	case DPT_BLINKLED:
3797 		i = ASR_getBlinkLedCode (sc);
3798 		if (i == -1)
3799 			i = 0;
3800 #ifdef ASR_IOCTL_COMPAT
3801 		if (cmd & 0xffff0000)
3802 			bcopy(&i, data, sizeof(i));
3803 		else
3804 #endif /* ASR_IOCTL_COMPAT */
3805 		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3806 		break;
3807 
3808 		/* Send an I2O command */
3809 	case I2OUSRCMD:
3810 		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3811 
3812 		/* Reset and re-initialize the adapter */
3813 	case I2ORESETCMD:
3814 		return (ASR_reset(sc));
3815 
3816 		/* Rescan the LCT table and resynchronize the information */
3817 	case I2ORESCANCMD:
3818 		return (ASR_rescan(sc));
3819 	}
3820 	return (EINVAL);
3821 } /* asr_ioctl */
3822