xref: /dragonfly/sys/dev/raid/asr/asr.c (revision cec957e9)
1 /*-
2  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3  * Copyright (c) 2000-2001 Adaptec Corporation
4  * All rights reserved.
5  *
6  * TERMS AND CONDITIONS OF USE
7  *
8  * Redistribution and use in source form, with or without modification, are
9  * permitted provided that redistributions of source code must retain the
10  * above copyright notice, this list of conditions and the following disclaimer.
11  *
12  * This software is provided `as is' by Adaptec and any express or implied
13  * warranties, including, but not limited to, the implied warranties of
14  * merchantability and fitness for a particular purpose, are disclaimed. In no
15  * event shall Adaptec be liable for any direct, indirect, incidental, special,
16  * exemplary or consequential damages (including, but not limited to,
17  * procurement of substitute goods or services; loss of use, data, or profits;
18  * or business interruptions) however caused and on any theory of liability,
19  * whether in contract, strict liability, or tort (including negligence or
20  * otherwise) arising in any way out of the use of this driver software, even
21  * if advised of the possibility of such damage.
22  *
23  * SCSI I2O host adapter driver
24  *
25  *	V1.10 2004/05/05 scottl@freebsd.org
26  *		- Massive cleanup of the driver to remove dead code and
27  *		  non-conformant style.
28  *		- Removed most i386-specific code to make it more portable.
29  *		- Converted to the bus_space API.
30  *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31  *		- The 2000S and 2005S do not initialize on some machines,
32  *		  increased timeout to 255ms from 50ms for the StatusGet
33  *		  command.
34  *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35  *		- I knew this one was too good to be true. The error return
36  *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37  *		  to the bit masked status.
38  *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39  *		- The 2005S that was supported is affectionately called the
40  *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41  *		  16MB low-cost configuration, Firmware was forced to go
42  *		  to a Split BAR Firmware. This requires a separate IOP and
43  *		  Messaging base address.
44  *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45  *		- Handle support for 2005S Zero Channel RAID solution.
46  *		- System locked up if the Adapter locked up. Do not try
47  *		  to send other commands if the resetIOP command fails. The
48  *		  fail outstanding command discovery loop was flawed as the
49  *		  removal of the command from the list prevented discovering
50  *		  all the commands.
51  *		- Comment changes to clarify driver.
52  *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53  *		- We do not use the AC_FOUND_DEV event because of I2O.
54  *		  Removed asr_async.
55  *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56  *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57  *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58  *		  mode as this is confused with competitor adapters in run
59  *		  mode.
60  *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61  *		  to prevent operating system panic.
62  *		- moved default major number to 154 from 97.
63  *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64  *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65  *		  series that is visible, it's more of an internal code name.
66  *		  remove any visible references within reason for now.
67  *		- bus_ptr->LUN was not correctly zeroed when initially
68  *		  allocated causing a possible panic of the operating system
69  *		  during boot.
70  *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71  *		- Code always fails for ASR_getTid affecting performance.
72  *		- initiated a set of changes that resulted from a formal
73  *		  code inspection by Mark_Salyzyn@adaptec.com,
74  *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75  *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76  *		  Their findings were focussed on the LCT & TID handler, and
77  *		  all resulting changes were to improve code readability,
78  *		  consistency or have a positive effect on performance.
79  *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80  *		- Passthrough returned an incorrect error.
81  *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82  *		  on command completion.
83  *		- generate control device nodes using make_dev and delete_dev.
84  *		- Performance affected by TID caching reallocing.
85  *		- Made suggested changes by Justin_Gibbs@adaptec.com
86  *			- use splcam instead of splbio.
87  *			- use cam_imask instead of bio_imask.
88  *			- use u_int8_t instead of u_char.
89  *			- use u_int16_t instead of u_short.
90  *			- use u_int32_t instead of u_long where appropriate.
91  *			- use 64 bit context handler instead of 32 bit.
92  *			- create_ccb should only allocate the worst case
93  *			  requirements for the driver since CAM may evolve
94  *			  making union ccb much larger than needed here.
95  *			  renamed create_ccb to asr_alloc_ccb.
96  *			- go nutz justifying all debug prints as macros
97  *			  defined at the top and remove unsightly ifdefs.
98  *			- INLINE STATIC viewed as confusing. Historically
99  *			  utilized to affect code performance and debug
100  *			  issues in OS, Compiler or OEM specific situations.
101  *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102  *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103  *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104  *			changed variable name xs to ccb
105  *			changed struct scsi_link to struct cam_path
106  *			changed struct scsibus_data to struct cam_sim
107  *			stopped using fordriver for holding on to the TID
108  *			use proprietary packet creation instead of scsi_inquire
109  *			CAM layer sends synchronize commands.
110  *
111  * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
112  */
113 
114 #include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
115 #include <sys/kernel.h>
116 #include <sys/module.h>
117 #include <sys/systm.h>
118 #include <sys/malloc.h>
119 #include <sys/conf.h>
120 #include <sys/priv.h>
121 #include <sys/proc.h>
122 #include <sys/bus.h>
123 #include <sys/rman.h>
124 #include <sys/stat.h>
125 #include <sys/device.h>
126 #include <sys/thread2.h>
127 #include <sys/bus_dma.h>
128 
129 #include <bus/cam/cam.h>
130 #include <bus/cam/cam_ccb.h>
131 #include <bus/cam/cam_sim.h>
132 #include <bus/cam/cam_xpt_sim.h>
133 #include <bus/cam/cam_xpt_periph.h>
134 
135 #include <bus/cam/scsi/scsi_all.h>
136 #include <bus/cam/scsi/scsi_message.h>
137 
138 #include <vm/vm.h>
139 #include <vm/pmap.h>
140 
141 #include <machine/vmparam.h>
142 
143 #include <bus/pci/pcivar.h>
144 #include <bus/pci/pcireg.h>
145 
146 #define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
147 #define	KVTOPHYS(x) vtophys(x)
148 #include	<dev/raid/asr/dptalign.h>
149 #include	<dev/raid/asr/i2oexec.h>
150 #include	<dev/raid/asr/i2obscsi.h>
151 #include	<dev/raid/asr/i2odpt.h>
152 #include	<dev/raid/asr/i2oadptr.h>
153 
154 #include	<dev/raid/asr/sys_info.h>
155 
156 #define	ASR_VERSION	1
157 #define	ASR_REVISION	'1'
158 #define	ASR_SUBREVISION '0'
159 #define	ASR_MONTH	5
160 #define	ASR_DAY		5
161 #define	ASR_YEAR	(2004 - 1980)
162 
163 /*
164  *	Debug macros to reduce the unsightly ifdefs
165  */
166 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
167 static __inline void
168 debug_asr_message(PI2O_MESSAGE_FRAME message)
169 {
170 	u_int32_t * pointer = (u_int32_t *)message;
171 	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
172 	u_int32_t   counter = 0;
173 
174 	while (length--) {
175 		kprintf("%08lx%c", (u_long)*(pointer++),
176 		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
177 	}
178 }
179 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
180 
181 #ifdef DEBUG_ASR
182   /* Breaks on non-STDC-based compilers :-( */
183 #define debug_asr_printf(fmt,args...)	kprintf(fmt, ##args)
184 #define debug_asr_dump_message(message)	debug_asr_message(message)
185 #define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
186 #else /* DEBUG_ASR */
187 #define debug_asr_printf(fmt,args...)
188 #define debug_asr_dump_message(message)
189 #define debug_asr_print_path(ccb)
190 #endif /* DEBUG_ASR */
191 
192 /*
193  *	If DEBUG_ASR_CMD is defined:
194  *		0 - Display incoming SCSI commands
195  *		1 - add in a quick character before queueing.
196  *		2 - add in outgoing message frames.
197  */
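/*
 *	For example (build-system specific, a sketch only): compiling the
 *	driver with -DDEBUG_ASR_CMD=2 enables the level-2 tracing described
 *	above, and -DDEBUG_ASR enables the generic debug_asr_printf output.
 */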
198 #if (defined(DEBUG_ASR_CMD))
199 #define debug_asr_cmd_printf(fmt,args...)     kprintf(fmt,##args)
200 static __inline void
201 debug_asr_dump_ccb(union ccb *ccb)
202 {
203 	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
204 	int		len = ccb->csio.cdb_len;
205 
206 	while (len) {
207 		debug_asr_cmd_printf (" %02x", *(cp++));
208 		--len;
209 	}
210 }
211 #if (DEBUG_ASR_CMD > 0)
212 #define debug_asr_cmd1_printf		       debug_asr_cmd_printf
213 #else
214 #define debug_asr_cmd1_printf(fmt,args...)
215 #endif
216 #if (DEBUG_ASR_CMD > 1)
217 #define debug_asr_cmd2_printf			debug_asr_cmd_printf
218 #define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
219 #else
220 #define debug_asr_cmd2_printf(fmt,args...)
221 #define debug_asr_cmd2_dump_message(message)
222 #endif
223 #else /* DEBUG_ASR_CMD */
224 #define debug_asr_cmd_printf(fmt,args...)
225 #define debug_asr_dump_ccb(ccb)
226 #define debug_asr_cmd1_printf(fmt,args...)
227 #define debug_asr_cmd2_printf(fmt,args...)
228 #define debug_asr_cmd2_dump_message(message)
229 #endif /* DEBUG_ASR_CMD */
230 
231 #if (defined(DEBUG_ASR_USR_CMD))
232 #define debug_usr_cmd_printf(fmt,args...)   kprintf(fmt,##args)
233 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
234 #else /* DEBUG_ASR_USR_CMD */
235 #define debug_usr_cmd_printf(fmt,args...)
236 #define debug_usr_cmd_dump_message(message)
237 #endif /* DEBUG_ASR_USR_CMD */
238 
239 #ifdef ASR_IOCTL_COMPAT
240 #define	dsDescription_size 46	/* Snug as a bug in a rug */
241 #endif /* ASR_IOCTL_COMPAT */
242 
243 #include "dev/raid/asr/dptsig.h"
244 
245 static dpt_sig_S ASR_sig = {
246 	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
247 	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
248 	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
249 	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
250 	ASR_MONTH, ASR_DAY, ASR_YEAR,
251 /*	 01234567890123456789012345678901234567890123456789	< 50 chars */
252 	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
253 	/*		 ^^^^^ asr_attach alters these to match OS */
254 };
255 
256 /* Configuration Definitions */
257 
258 #define	SG_SIZE		 58	/* Scatter Gather list Size		 */
259 #define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
260 #define	MAX_LUN		 255	/* Maximum LUN Supported		 */
261 #define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
262 #define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
263 #define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
264 #define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
265 #define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
266 				/* Also serves as the minimum map for	 */
267 				/* the 2005S zero channel RAID product	 */
268 
269 /* I2O register set */
270 #define	I2O_REG_STATUS		0x30
271 #define	I2O_REG_MASK		0x34
272 #define	I2O_REG_TOFIFO		0x40
273 #define	I2O_REG_FROMFIFO	0x44
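/*
 *	These offsets index the adapter's memory-mapped I2O messaging unit;
 *	they are accessed through ha_i2o_btag and ha_i2o_bhandle by the
 *	asr_get_ and asr_set_ helpers below.
 */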
274 
275 #define	Mask_InterruptsDisabled	0x08
276 
277 /*
278  * A MIX of performance and space considerations for TID lookups
279  */
280 typedef u_int16_t tid_t;
281 
282 typedef struct {
283 	u_int32_t size;		/* up to MAX_LUN    */
284 	tid_t	  TID[1];
285 } lun2tid_t;
286 
287 typedef struct {
288 	u_int32_t   size;	/* up to MAX_TARGET */
289 	lun2tid_t * LUN[1];
290 } target2lun_t;
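/*
 *	Lookups therefore walk sc->ha_targets[bus] -> LUN[target] -> TID[lun];
 *	both levels are grown on demand by ASR_getTidAddress() below.
 */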
291 
292 /*
293  *	To ensure that we only allocate and use the worst-case ccb here, let's
294  *	make our own local ccb union. If asr_alloc_ccb is utilized for another
295  *	ccb type, ensure that you add the additional structures into our local
296  *	ccb union. To ensure strict type checking, we will utilize the local
297  *	ccb definition wherever possible.
298  */
299 union asr_ccb {
300 	struct ccb_hdr	    ccb_h;  /* For convenience */
301 	struct ccb_scsiio   csio;
302 	struct ccb_setasync csa;
303 };
304 
305 struct Asr_status_mem {
306 	I2O_EXEC_STATUS_GET_REPLY	status;
307 	U32				rstatus;
308 };
309 
310 /**************************************************************************
311 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
312 **  Is Configured Into The System.  The Structure Supplies Configuration **
313 **  Information, Status Info, Queue Info And An Active CCB List Pointer. **
314 ***************************************************************************/
315 
316 typedef struct Asr_softc {
317 	device_t		ha_dev;
318 	u_int16_t		ha_irq;
319 	u_long			ha_Base;       /* base port for each board */
320 	bus_size_t		ha_blinkLED;
321 	bus_space_handle_t	ha_i2o_bhandle;
322 	bus_space_tag_t		ha_i2o_btag;
323 	bus_space_handle_t	ha_frame_bhandle;
324 	bus_space_tag_t		ha_frame_btag;
325 	I2O_IOP_ENTRY		ha_SystemTable;
326 	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */
327 
328 	bus_dma_tag_t		ha_parent_dmat;
329 	bus_dma_tag_t		ha_statusmem_dmat;
330 	bus_dmamap_t		ha_statusmem_dmamap;
331 	struct Asr_status_mem * ha_statusmem;
332 	u_int32_t		ha_rstatus_phys;
333 	u_int32_t		ha_status_phys;
334 	struct cam_path	      * ha_path[MAX_CHANNEL+1];
335 	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
336 	struct resource	      * ha_mem_res;
337 	struct resource	      * ha_mes_res;
338 	struct resource	      * ha_irq_res;
339 	void		      * ha_intr;
340 	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
341 #define le_type	  IdentityTag[0]
342 #define I2O_BSA	    0x20
343 #define I2O_FCA	    0x40
344 #define I2O_SCSI    0x00
345 #define I2O_PORT    0x80
346 #define I2O_UNKNOWN 0x7F
347 #define le_bus	  IdentityTag[1]
348 #define le_target IdentityTag[2]
349 #define le_lun	  IdentityTag[3]
350 	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
351 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
352 	u_long			ha_Msgs_Phys;
353 
354 	u_int8_t		ha_in_reset;
355 #define HA_OPERATIONAL	    0
356 #define HA_IN_RESET	    1
357 #define HA_OFF_LINE	    2
358 #define HA_OFF_LINE_RECOVERY 3
359 	/* Configuration information */
360 	/* The target id maximums we take */
361 	u_int8_t		ha_MaxBus;     /* Maximum bus */
362 	u_int8_t		ha_MaxId;      /* Maximum target ID */
363 	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
364 	u_int8_t		ha_SgSize;     /* Max SG elements */
365 	u_int8_t		ha_pciBusNum;
366 	u_int8_t		ha_pciDeviceNum;
367 	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
368 	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
369 	u_int16_t		ha_Msgs_Count;
370 
371 	/* Links into other parents and HBAs */
372 	struct Asr_softc      * ha_next;       /* HBA list */
373 	struct cdev *ha_devt;
374 } Asr_softc_t;
375 
376 static Asr_softc_t *Asr_softc_list;
377 
378 /*
379  *	Prototypes of the routines we have in this object.
380  */
381 
382 /* I2O HDM interface */
383 static int	asr_probe(device_t dev);
384 static int	asr_attach(device_t dev);
385 
386 static d_ioctl_t asr_ioctl;
387 static d_open_t asr_open;
388 static d_close_t asr_close;
389 static int	asr_intr(Asr_softc_t *sc);
390 static void	asr_timeout(void *arg);
391 static int	ASR_init(Asr_softc_t *sc);
392 static int	ASR_acquireLct(Asr_softc_t *sc);
393 static int	ASR_acquireHrt(Asr_softc_t *sc);
394 static void	asr_action(struct cam_sim *sim, union ccb *ccb);
395 static void	asr_poll(struct cam_sim *sim);
396 static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
397 
398 /*
399  *	Here is the auto-probe structure used to nest our tests appropriately
400  *	during the startup phase of the operating system.
401  */
402 static device_method_t asr_methods[] = {
403 	DEVMETHOD(device_probe,	 asr_probe),
404 	DEVMETHOD(device_attach, asr_attach),
405 	DEVMETHOD_END
406 };
407 
408 static driver_t asr_driver = {
409 	"asr",
410 	asr_methods,
411 	sizeof(Asr_softc_t)
412 };
413 
414 static devclass_t asr_devclass;
415 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, NULL, NULL);
416 MODULE_VERSION(asr, 1);
417 MODULE_DEPEND(asr, pci, 1, 1, 1);
418 MODULE_DEPEND(asr, cam, 1, 1, 1);
419 
420 /*
421  * devsw for asr hba driver
422  *
423  * only ioctl is used. the sd driver provides all other access.
424  */
425 static struct dev_ops asr_ops = {
426 	{ "asr", 0, 0 },
427 	.d_open =	asr_open,
428 	.d_close =	asr_close,
429 	.d_ioctl =	asr_ioctl,
430 };
431 
432 /* I2O support routines */
433 
434 static __inline u_int32_t
435 asr_get_FromFIFO(Asr_softc_t *sc)
436 {
437 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
438 				 I2O_REG_FROMFIFO));
439 }
440 
441 static __inline u_int32_t
442 asr_get_ToFIFO(Asr_softc_t *sc)
443 {
444 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
445 				 I2O_REG_TOFIFO));
446 }
447 
448 static __inline u_int32_t
449 asr_get_intr(Asr_softc_t *sc)
450 {
451 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
452 				 I2O_REG_MASK));
453 }
454 
455 static __inline u_int32_t
456 asr_get_status(Asr_softc_t *sc)
457 {
458 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
459 				 I2O_REG_STATUS));
460 }
461 
462 static __inline void
463 asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
464 {
465 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
466 			  val);
467 }
468 
469 static __inline void
470 asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
471 {
472 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
473 			  val);
474 }
475 
476 static __inline void
477 asr_set_intr(Asr_softc_t *sc, u_int32_t val)
478 {
479 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
480 			  val);
481 }
482 
483 static __inline void
484 asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
485 {
486 	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
487 				 offset, (u_int32_t *)frame, len);
488 }
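/*
 *	Note that asr_set_frame()'s `len' argument is a count of 32-bit words
 *	(it is passed straight through to bus_space_write_region_4()); callers
 *	hand it the value returned by I2O_MESSAGE_FRAME_getMessageSize().
 */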
489 
490 /*
491  *	Fill message with default.
492  */
493 static PI2O_MESSAGE_FRAME
494 ASR_fillMessage(void *Message, u_int16_t size)
495 {
496 	PI2O_MESSAGE_FRAME Message_Ptr;
497 
498 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
499 	bzero(Message_Ptr, size);
500 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
501 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
502 	  (size + sizeof(U32) - 1) >> 2);
503 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
504 	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
505 	return (Message_Ptr);
506 } /* ASR_fillMessage */
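/*
 *	Typical use (sketch, mirroring ASR_resetIOP below): stack-allocate the
 *	specific message type, size it here, then fill in the function code:
 *
 *		Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(
 *		  &Message, sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
 *		I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr,
 *		  I2O_EXEC_IOP_RESET);
 */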
507 
508 #define	EMPTY_QUEUE (0xffffffff)
509 
510 static __inline U32
511 ASR_getMessage(Asr_softc_t *sc)
512 {
513 	U32	MessageOffset;
514 
515 	MessageOffset = asr_get_ToFIFO(sc);
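	/*
	 * If the first read reports an empty queue, retry once before
	 * returning EMPTY_QUEUE to the caller.
	 */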
516 	if (MessageOffset == EMPTY_QUEUE)
517 		MessageOffset = asr_get_ToFIFO(sc);
518 
519 	return (MessageOffset);
520 } /* ASR_getMessage */
521 
522 /* Issue a polled command */
523 static U32
524 ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
525 {
526 	U32	Mask = 0xffffffff;
527 	U32	MessageOffset;
528 	u_int	Delay = 1500;
529 
530 	/*
531 	 * ASR_initiateCp is only used for synchronous commands and will
532 	 * be made more resilient to adapter delays since commands like
533 	 * resetIOP can cause the adapter to be deaf for a little time.
534 	 */
535 	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
536 	 && (--Delay != 0)) {
537 		DELAY (10000);
538 	}
539 	if (MessageOffset != EMPTY_QUEUE) {
540 		asr_set_frame(sc, Message, MessageOffset,
541 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
542 		/*
543 		 *	Disable the Interrupts
544 		 */
545 		Mask = asr_get_intr(sc);
546 		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
547 		asr_set_ToFIFO(sc, MessageOffset);
548 	}
549 	return (Mask);
550 } /* ASR_initiateCp */
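/*
 *	On success the return value is the previous interrupt mask (interrupts
 *	are left disabled) and the caller restores it with asr_set_intr(); a
 *	return of 0xffffffff means no inbound message frame could be obtained.
 */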
551 
552 /*
553  *	Reset the adapter.
554  */
555 static U32
556 ASR_resetIOP(Asr_softc_t *sc)
557 {
558 	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
559 	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
560 	U32			       * Reply_Ptr;
561 	U32				 Old;
562 
563 	/*
564 	 *  Build up our copy of the Message.
565 	 */
566 	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
567 	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
568 	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
569 	/*
570 	 *  Reset the Reply Status
571 	 */
572 	Reply_Ptr = &sc->ha_statusmem->rstatus;
573 	*Reply_Ptr = 0;
574 	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
575 	    sc->ha_rstatus_phys);
576 	/*
577 	 *	Send the Message out
578 	 */
579 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
580 	     0xffffffff) {
581 		/*
582 		 * Wait for a response (Poll), timeouts are dangerous if
583 		 * the card is truly responsive. We assume response in 2s.
584 		 */
585 		u_int8_t Delay = 200;
586 
587 		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
588 			DELAY (10000);
589 		}
590 		/*
591 		 *	Re-enable the interrupts.
592 		 */
593 		asr_set_intr(sc, Old);
594 		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
595 		return(*Reply_Ptr);
596 	}
597 	KASSERT(Old != 0xffffffff, ("Old == -1"));
598 	return (0);
599 } /* ASR_resetIOP */
600 
601 /*
602  *	Get the current state of the adapter
603  */
604 static PI2O_EXEC_STATUS_GET_REPLY
605 ASR_getStatus(Asr_softc_t *sc)
606 {
607 	I2O_EXEC_STATUS_GET_MESSAGE	Message;
608 	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
609 	PI2O_EXEC_STATUS_GET_REPLY	buffer;
610 	U32				Old;
611 
612 	/*
613 	 *  Build up our copy of the Message.
614 	 */
615 	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
616 	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
617 	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
618 	    I2O_EXEC_STATUS_GET);
619 	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
620 	    sc->ha_status_phys);
621 	/* This one is a Byte Count */
622 	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
623 	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
624 	/*
625 	 *  Reset the Reply Status
626 	 */
627 	buffer = &sc->ha_statusmem->status;
628 	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
629 	/*
630 	 *	Send the Message out
631 	 */
632 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
633 	    0xffffffff) {
634 		/*
635 		 *	Wait for a response (Poll), timeouts are dangerous if
636 		 * the card is truly responsive. We assume response in 50ms.
637 		 */
638 		u_int8_t Delay = 255;
639 
640 		while (*((volatile U8 *)&(buffer->SyncByte)) == 0) {
641 			if (--Delay == 0) {
642 				buffer = NULL;
643 				break;
644 			}
645 			DELAY (1000);
646 		}
647 		/*
648 		 *	Re-enable the interrupts.
649 		 */
650 		asr_set_intr(sc, Old);
651 		return (buffer);
652 	}
653 	return (NULL);
654 } /* ASR_getStatus */
655 
656 /*
657  *	Check if the device is a SCSI I2O HBA, and add it to the list.
658  */
659 
660 /*
661  * Probe for an ASR controller.  If we find one, we will
662  * use it.
663  */
664 static int
665 asr_probe(device_t dev)
666 {
667 	u_int32_t id;
668 
669 	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
670 	if ((id == 0xA5011044) || (id == 0xA5111044)) {
671 		device_set_desc(dev, "Adaptec Caching SCSI RAID");
672 		return (BUS_PROBE_DEFAULT);
673 	}
674 	return (ENXIO);
675 } /* asr_probe */
676 
677 static __inline union asr_ccb *
678 asr_alloc_ccb(Asr_softc_t *sc)
679 {
680 	union asr_ccb *new_ccb;
681 
682 	if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
683 	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
684 		new_ccb->ccb_h.pinfo.priority = 1;
685 		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
686 		new_ccb->ccb_h.spriv_ptr0 = sc;
687 	}
688 	return (new_ccb);
689 } /* asr_alloc_ccb */
690 
691 static __inline void
692 asr_free_ccb(union asr_ccb *free_ccb)
693 {
694 	kfree(free_ccb, M_DEVBUF);
695 } /* asr_free_ccb */
696 
697 /*
698  *	Print inquiry data `carefully'
699  */
700 static void
701 ASR_prstring(u_int8_t *s, int len)
702 {
703 	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
704 		kprintf ("%c", *(s++));
705 	}
706 } /* ASR_prstring */
707 
708 /*
709  *	Send a message synchronously and without Interrupt to a ccb.
710  */
711 static int
712 ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
713 {
714 	U32		Mask;
715 	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
716 
717 	/*
718 	 * We do not need any (optional byteswapping) method access to
719 	 * the Initiator context field.
720 	 */
721 	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
722 
723 	/* Prevent interrupt service */
724 	crit_enter();
725 	Mask = asr_get_intr(sc);
726 	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
727 
728 	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
729 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
730 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
731 	}
732 
733 	/*
734 	 * Wait for this board to report a finished instruction.
735 	 */
736 	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
737 		(void)asr_intr (sc);
738 	}
739 
740 	/* Re-enable Interrupts */
741 	asr_set_intr(sc, Mask);
742 	crit_exit();
743 
744 	return (ccb->ccb_h.status);
745 } /* ASR_queue_s */
746 
747 /*
748  *	Send a message synchronously to an Asr_softc_t.
749  */
750 static int
751 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
752 {
753 	union asr_ccb	*ccb;
754 	int		status;
755 
756 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
757 		return (CAM_REQUEUE_REQ);
758 	}
759 
760 	status = ASR_queue_s (ccb, Message);
761 
762 	asr_free_ccb(ccb);
763 
764 	return (status);
765 } /* ASR_queue_c */
766 
767 /*
768  *	Add the specified ccb to the active queue
769  */
770 static __inline void
771 ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
772 {
773 	crit_enter();
774 	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
775 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
776 		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
777 			/*
778 			 * RAID systems can take considerable time to
779 			 * complete some commands given the large cache
780 			 * flushes when switching from write-back to write-through.
781 			 */
782 			ccb->ccb_h.timeout = 6 * 60 * 1000;
783 		}
784 		callout_reset(ccb->ccb_h.timeout_ch,
785 			      (ccb->ccb_h.timeout * hz) / 1000,
786 			      asr_timeout, ccb);
787 	}
788 	crit_exit();
789 } /* ASR_ccbAdd */
790 
791 /*
792  *	Remove the specified ccb from the active queue.
793  */
794 static __inline void
795 ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
796 {
797 	crit_enter();
798 	callout_stop(ccb->ccb_h.timeout_ch);
799 	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
800 	crit_exit();
801 } /* ASR_ccbRemove */
802 
803 /*
804  *	Fail all the active commands, so they get re-issued by the operating
805  *	system.
806  */
807 static void
808 ASR_failActiveCommands(Asr_softc_t *sc)
809 {
810 	struct ccb_hdr	*ccb;
811 
812 	crit_enter();
813 	/*
814 	 *	We do not need to inform the CAM layer that we had a bus
815 	 * reset since we manage it on our own, this also prevents the
816 	 * SCSI_DELAY settling that would be required on other systems.
817 	 * The `SCSI_DELAY' has already been handled by the card via the
818 	 * acquisition of the LCT table while we are at CAM priority level.
819 	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
820 	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
821 	 *  }
822 	 */
823 	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
824 		ASR_ccbRemove (sc, (union asr_ccb *)ccb);
825 
826 		ccb->status &= ~CAM_STATUS_MASK;
827 		ccb->status |= CAM_REQUEUE_REQ;
828 		/* Nothing transferred */
829 		((struct ccb_scsiio *)ccb)->resid
830 		  = ((struct ccb_scsiio *)ccb)->dxfer_len;
831 
832 		if (ccb->path) {
833 			xpt_done ((union ccb *)ccb);
834 		} else {
835 			wakeup (ccb);
836 		}
837 	}
838 	crit_exit();
839 } /* ASR_failActiveCommands */
840 
841 /*
842  *	The following command causes the HBA to reset the specific bus
843  */
844 static void
845 ASR_resetBus(Asr_softc_t *sc, int bus)
846 {
847 	I2O_HBA_BUS_RESET_MESSAGE	Message;
848 	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
849 	PI2O_LCT_ENTRY			Device;
850 
851 	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
852 	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
853 	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
854 	  I2O_HBA_BUS_RESET);
855 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
856 	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
857 	  ++Device) {
858 		if (((Device->le_type & I2O_PORT) != 0)
859 		 && (Device->le_bus == bus)) {
860 			I2O_MESSAGE_FRAME_setTargetAddress(
861 			  &Message_Ptr->StdMessageFrame,
862 			  I2O_LCT_ENTRY_getLocalTID(Device));
863 			/* Asynchronous command, with no expectations */
864 			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
865 			break;
866 		}
867 	}
868 } /* ASR_resetBus */
869 
870 static __inline int
871 ASR_getBlinkLedCode(Asr_softc_t *sc)
872 {
873 	U8	blink;
874 
875 	if (sc == NULL)
876 		return (0);
877 
878 	blink = bus_space_read_1(sc->ha_frame_btag,
879 				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
880 	if (blink != 0xBC)
881 		return (0);
882 
883 	blink = bus_space_read_1(sc->ha_frame_btag,
884 				 sc->ha_frame_bhandle, sc->ha_blinkLED);
885 	return (blink);
886 } /* ASR_getBlinkLedCode */
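/*
 *	The byte at ha_blinkLED+1 reads 0xBC when a blink-LED code is valid;
 *	the code itself is then read from ha_blinkLED. A non-zero return is
 *	treated throughout the driver as an adapter lock-up (see asr_timeout
 *	and ASR_queue).
 */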
887 
888 /*
889  *	Determine the address of a TID lookup. Must be done at high priority
890  *	since the address can be changed by other threads of execution.
891  *
892  *	Returns a NULL pointer if not indexable (but will attempt to generate
893  *	an index if the `new_entry' flag is set to TRUE).
894  *
895  *	All addressable entries are guaranteed to be zero if never initialized.
896  */
897 static tid_t *
898 ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
899 {
900 	target2lun_t	*bus_ptr;
901 	lun2tid_t	*target_ptr;
902 	unsigned	new_size;
903 
904 	/*
905 	 *	Validity checking of incoming parameters. More of a bound
906 	 * expansion limit than an issue with the code dealing with the
907 	 * values.
908 	 *
909 	 *	sc must be valid before it gets here, so that check could be
910 	 * dropped if speed is a critical issue.
911 	 */
912 	if ((sc == NULL)
913 	 || (bus > MAX_CHANNEL)
914 	 || (target > sc->ha_MaxId)
915 	 || (lun > sc->ha_MaxLun)) {
916 		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
917 		  (u_long)sc, bus, target, lun);
918 		return (NULL);
919 	}
920 	/*
921 	 *	See if there is an associated bus list.
922 	 *
923 	 *	for performance, allocate in size of BUS_CHUNK chunks.
924 	 *	BUS_CHUNK must be a power of two. This is to reduce
925 	 *	fragmentation effects on the allocations.
926 	 */
927 #define BUS_CHUNK 8
928 	new_size = roundup2(target, BUS_CHUNK);
929 	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
930 		/*
931 		 *	Allocate a new structure?
932 		 *		Since one element in structure, the +1
933 		 *		needed for size has been abstracted.
934 		 */
935 		if ((new_entry == FALSE)
936 		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
937 		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
938 		    M_TEMP, M_WAITOK | M_ZERO))
939 		   == NULL)) {
940 			debug_asr_printf("failed to allocate bus list\n");
941 			return (NULL);
942 		}
943 		bus_ptr->size = new_size + 1;
944 	} else if (bus_ptr->size <= new_size) {
945 		target2lun_t * new_bus_ptr;
946 
947 		/*
948 		 *	Reallocate a new structure?
949 		 *		Since one element in structure, the +1
950 		 *		needed for size has been abstracted.
951 		 */
952 		if ((new_entry == FALSE)
953 		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
954 		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
955 		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
956 			debug_asr_printf("failed to reallocate bus list\n");
957 			return (NULL);
958 		}
959 		/*
960 		 *	Copy the whole thing, safer, simpler coding
961 		 * and not really performance critical at this point.
962 		 */
963 		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
964 		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
965 		sc->ha_targets[bus] = new_bus_ptr;
966 		kfree(bus_ptr, M_TEMP);
967 		bus_ptr = new_bus_ptr;
968 		bus_ptr->size = new_size + 1;
969 	}
970 	/*
971 	 *	We now have the bus list; let's get to the target list.
972 	 *	Since most systems have only *one* lun, we do not allocate
973 	 *	in chunks as above; here we allow one, then grow in chunk sizes.
974 	 *	TARGET_CHUNK must be a power of two. This is to reduce
975 	 *	fragmentation effects on the allocations.
976 	 */
977 #define TARGET_CHUNK 8
978 	if ((new_size = lun) != 0) {
979 		new_size = roundup2(lun, TARGET_CHUNK);
980 	}
981 	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
982 		/*
983 		 *	Allocate a new structure?
984 		 *		Since one element in structure, the +1
985 		 *		needed for size has been abstracted.
986 		 */
987 		if ((new_entry == FALSE)
988 		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
989 		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
990 		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
991 			debug_asr_printf("failed to allocate target list\n");
992 			return (NULL);
993 		}
994 		target_ptr->size = new_size + 1;
995 	} else if (target_ptr->size <= new_size) {
996 		lun2tid_t * new_target_ptr;
997 
998 		/*
999 		 *	Reallocate a new structure?
1000 		 *		Since one element in structure, the +1
1001 		 *		needed for size has been abstracted.
1002 		 */
1003 		if ((new_entry == FALSE)
1004 		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
1005 		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1006 		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1007 			debug_asr_printf("failed to reallocate target list\n");
1008 			return (NULL);
1009 		}
1010 		/*
1011 		 *	Copy the whole thing, safer, simpler coding
1012 		 * and not really performance critical at this point.
1013 		 */
1014 		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
1015 		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1016 		bus_ptr->LUN[target] = new_target_ptr;
1017 		kfree(target_ptr, M_TEMP);
1018 		target_ptr = new_target_ptr;
1019 		target_ptr->size = new_size + 1;
1020 	}
1021 	/*
1022 	 *	Now, acquire the TID address from the LUN indexed list.
1023 	 */
1024 	return (&(target_ptr->TID[lun]));
1025 } /* ASR_getTidAddress */
1026 
1027 /*
1028  *	Get a pre-existing TID relationship.
1029  *
1030  *	If the TID was never set, return (tid_t)-1.
1031  *
1032  *	should use mutex rather than spl.
1033  */
1034 static __inline tid_t
1035 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1036 {
1037 	tid_t	*tid_ptr;
1038 	tid_t	retval;
1039 
1040 	crit_enter();
1041 	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1042 	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1043 	 || (*tid_ptr == (tid_t)0)) {
1044 		crit_exit();
1045 		return ((tid_t)-1);
1046 	}
1047 	retval = *tid_ptr;
1048 	crit_exit();
1049 	return (retval);
1050 } /* ASR_getTid */
1051 
1052 /*
1053  *	Set a TID relationship.
1054  *
1055  *	If the TID was not set, return (tid_t)-1.
1056  *
1057  *	should use mutex rather than spl.
1058  */
1059 static __inline tid_t
1060 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1061 {
1062 	tid_t	*tid_ptr;
1063 
1064 	if (TID != (tid_t)-1) {
1065 		if (TID == 0) {
1066 			return ((tid_t)-1);
1067 		}
1068 		crit_enter();
1069 		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1070 		 == NULL) {
1071 			crit_exit();
1072 			return ((tid_t)-1);
1073 		}
1074 		*tid_ptr = TID;
1075 		crit_exit();
1076 	}
1077 	return (TID);
1078 } /* ASR_setTid */
1079 
1080 /*-------------------------------------------------------------------------*/
1081 /*		      Function ASR_rescan				   */
1082 /*-------------------------------------------------------------------------*/
1083 /* The Parameters Passed To This Function Are :				   */
1084 /*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1085 /*									   */
1086 /* This Function Will rescan the adapter and resynchronize any data	   */
1087 /*									   */
1088 /* Return : 0 For OK, Error Code Otherwise				   */
1089 /*-------------------------------------------------------------------------*/
1090 
1091 static int
1092 ASR_rescan(Asr_softc_t *sc)
1093 {
1094 	int bus;
1095 	int error;
1096 
1097 	/*
1098 	 * Re-acquire the LCT table and synchronize us to the adapter.
1099 	 */
1100 	if ((error = ASR_acquireLct(sc)) == 0) {
1101 		error = ASR_acquireHrt(sc);
1102 	}
1103 
1104 	if (error != 0) {
1105 		return error;
1106 	}
1107 
1108 	bus = sc->ha_MaxBus;
1109 	/* Reset all existing cached TID lookups */
1110 	do {
1111 		int target, event = 0;
1112 
1113 		/*
1114 		 *	Scan for all targets on this bus to see if they
1115 		 * got affected by the rescan.
1116 		 */
1117 		for (target = 0; target <= sc->ha_MaxId; ++target) {
1118 			int lun;
1119 
1120 			/* Stay away from the controller ID */
1121 			if (target == sc->ha_adapter_target[bus]) {
1122 				continue;
1123 			}
1124 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1125 				PI2O_LCT_ENTRY Device;
1126 				tid_t	       TID = (tid_t)-1;
1127 				tid_t	       LastTID;
1128 
1129 				/*
1130 				 * See if the cached TID changed. Search for
1131 				 * the device in our new LCT.
1132 				 */
1133 				for (Device = sc->ha_LCT->LCTEntry;
1134 				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1135 				   + I2O_LCT_getTableSize(sc->ha_LCT));
1136 				  ++Device) {
1137 					if ((Device->le_type != I2O_UNKNOWN)
1138 					 && (Device->le_bus == bus)
1139 					 && (Device->le_target == target)
1140 					 && (Device->le_lun == lun)
1141 					 && (I2O_LCT_ENTRY_getUserTID(Device)
1142 					  == 0xFFF)) {
1143 						TID = I2O_LCT_ENTRY_getLocalTID(
1144 						  Device);
1145 						break;
1146 					}
1147 				}
1148 				/*
1149 				 * Indicate to the OS that the label needs
1150 				 * to be recalculated, or that the specific
1151 				 * open device is no longer valid (Merde)
1152 				 * because the cached TID changed.
1153 				 */
1154 				LastTID = ASR_getTid (sc, bus, target, lun);
1155 				if (LastTID != TID) {
1156 					struct cam_path * path;
1157 
1158 					if (xpt_create_path(&path,
1159 					  /*periph*/NULL,
1160 					  cam_sim_path(sc->ha_sim[bus]),
1161 					  target, lun) != CAM_REQ_CMP) {
1162 						if (TID == (tid_t)-1) {
1163 							event |= AC_LOST_DEVICE;
1164 						} else {
1165 							event |= AC_INQ_CHANGED
1166 							       | AC_GETDEV_CHANGED;
1167 						}
1168 					} else {
1169 						if (TID == (tid_t)-1) {
1170 							xpt_async(
1171 							  AC_LOST_DEVICE,
1172 							  path, NULL);
1173 						} else if (LastTID == (tid_t)-1) {
1174 							struct ccb_getdev *ccb;
1175 
1176 							ccb = &xpt_alloc_ccb()->cgd;
1177 
1178 							xpt_setup_ccb(
1179 							  &ccb->ccb_h,
1180 							  path, /*priority*/5);
1181 							xpt_async(
1182 							  AC_FOUND_DEVICE,
1183 							  path,
1184 							  ccb);
1185 							xpt_free_ccb(&ccb->ccb_h);
1186 						} else {
1187 							xpt_async(
1188 							  AC_INQ_CHANGED,
1189 							  path, NULL);
1190 							xpt_async(
1191 							  AC_GETDEV_CHANGED,
1192 							  path, NULL);
1193 						}
1194 					}
1195 				}
1196 				/*
1197 				 *	We have the option of clearing the
1198 				 * cached TID for it to be rescanned, or to
1199 				 * set it now even if the device never got
1200 				 * accessed. We chose the latter since we
1201 				 * currently do not use the condition that
1202 				 * the TID ever got cached.
1203 				 */
1204 				ASR_setTid (sc, bus, target, lun, TID);
1205 			}
1206 		}
1207 		/*
1208 		 *	The xpt layer can not handle multiple events at the
1209 		 *	The xpt layer cannot handle multiple events in the
1210 		 */
1211 		if (event & AC_LOST_DEVICE) {
1212 			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1213 		}
1214 		if (event & AC_INQ_CHANGED) {
1215 			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1216 		}
1217 		if (event & AC_GETDEV_CHANGED) {
1218 			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1219 		}
1220 	} while (--bus >= 0);
1221 	return (error);
1222 } /* ASR_rescan */
1223 
1224 /*-------------------------------------------------------------------------*/
1225 /*		      Function ASR_reset				   */
1226 /*-------------------------------------------------------------------------*/
1227 /* The Parameters Passed To This Function Are :				   */
1228 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1229 /*									   */
1230 /* This Function Will reset the adapter and resynchronize any data	   */
1231 /*									   */
1232 /* Return : None							   */
1233 /*-------------------------------------------------------------------------*/
1234 
1235 static int
1236 ASR_reset(Asr_softc_t *sc)
1237 {
1238 	int retVal;
1239 
1240 	crit_enter();
1241 	if ((sc->ha_in_reset == HA_IN_RESET)
1242 	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1243 		crit_exit();
1244 		return (EBUSY);
1245 	}
1246 	/*
1247 	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1248 	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1249 	 */
1250 	++(sc->ha_in_reset);
1251 	if (ASR_resetIOP(sc) == 0) {
1252 		debug_asr_printf ("ASR_resetIOP failed\n");
1253 		/*
1254 		 *	We really need to take this card off-line; easier said
1255 		 * than done. Better to keep retrying for now: if a UART cable
1256 		 * is connected, the blink LEDs show that the adapter is in a
1257 		 * hard state requiring action via monitor commands to the HBA
1258 		 * before it can continue. For debugging, waiting forever is a
1259 		 * good thing. In a production system, however, one may wish
1260 		 * to take the card off-line instead ...
1261 		 */
1262 		/* Wait Forever */
1263 		while (ASR_resetIOP(sc) == 0);
1264 	}
1265 	retVal = ASR_init (sc);
1266 	crit_exit();
1267 	if (retVal != 0) {
1268 		debug_asr_printf ("ASR_init failed\n");
1269 		sc->ha_in_reset = HA_OFF_LINE;
1270 		return (ENXIO);
1271 	}
1272 	if (ASR_rescan (sc) != 0) {
1273 		debug_asr_printf ("ASR_rescan failed\n");
1274 	}
1275 	ASR_failActiveCommands (sc);
1276 	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1277 		kprintf ("asr%d: Bringing adapter back on-line\n",
1278 		  sc->ha_path[0]
1279 		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1280 		    : 0);
1281 	}
1282 	sc->ha_in_reset = HA_OPERATIONAL;
1283 	return (0);
1284 } /* ASR_reset */
1285 
1286 /*
1287  *	Device timeout handler.
1288  */
1289 static void
1290 asr_timeout(void *arg)
1291 {
1292 	union asr_ccb	*ccb = (union asr_ccb *)arg;
1293 	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1294 	int		s;
1295 
1296 	debug_asr_print_path(ccb);
1297 	debug_asr_printf("timed out");
1298 
1299 	/*
1300 	 *	Check if the adapter has locked up?
1301 	 *	Check whether the adapter has locked up.
1302 	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1303 		/* Reset Adapter */
1304 		kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
1305 		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1306 		if (ASR_reset (sc) == ENXIO) {
1307 			/* Try again later */
1308 			callout_reset(ccb->ccb_h.timeout_ch,
1309 				      (ccb->ccb_h.timeout * hz) / 1000,
1310 				      asr_timeout, ccb);
1311 		}
1312 		return;
1313 	}
1314 	/*
1315 	 *	Abort does not function on the ASR card!!! Walking away from
1316 	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1317 	 * our best bet, followed by a complete adapter reset if that fails.
1318 	 */
1319 	crit_enter();
1320 	/* Check if we already timed out once to raise the issue */
1321 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1322 		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1323 		if (ASR_reset (sc) == ENXIO) {
1324 			callout_reset(ccb->ccb_h.timeout_ch,
1325 				      (ccb->ccb_h.timeout * hz) / 1000,
1326 				      asr_timeout, ccb);
1327 		}
1328 		crit_exit();
1329 		return;
1330 	}
1331 	debug_asr_printf ("\nresetting bus\n");
1332 	/* If the BUS reset does not take, then an adapter reset is next! */
1333 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1334 	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1335 	callout_reset(ccb->ccb_h.timeout_ch,
1336 		      (ccb->ccb_h.timeout * hz) / 1000,
1337 		      asr_timeout, ccb);
1338 	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1339 	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1340 	crit_exit();
1341 } /* asr_timeout */
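/*
 *	Escalation summary: a blink-LED code forces an immediate adapter
 *	reset; the first ordinary timeout issues a bus reset, marks the ccb
 *	CAM_CMD_TIMEOUT and re-arms the timer; a second timeout on the same
 *	ccb reinitializes the whole adapter.
 */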
1342 
1343 /*
1344  * send a message asynchronously
1345  */
1346 static int
1347 ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1348 {
1349 	U32		MessageOffset;
1350 	union asr_ccb	*ccb;
1351 
1352 	debug_asr_printf("Host Command Dump:\n");
1353 	debug_asr_dump_message(Message);
1354 
1355 	ccb = (union asr_ccb *)(long)
1356 	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1357 
1358 	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1359 		asr_set_frame(sc, Message, MessageOffset,
1360 			      I2O_MESSAGE_FRAME_getMessageSize(Message));
1361 		if (ccb) {
1362 			ASR_ccbAdd (sc, ccb);
1363 		}
1364 		/* Post the command */
1365 		asr_set_ToFIFO(sc, MessageOffset);
1366 	} else {
1367 		if (ASR_getBlinkLedCode(sc)) {
1368 			/*
1369 			 *	Unlikely we can do anything if we can't grab a
1370 			 * message frame :-(, but let's give it a try.
1371 			 */
1372 			(void)ASR_reset(sc);
1373 		}
1374 	}
1375 	return (MessageOffset);
1376 } /* ASR_queue */
1377 
1378 
1379 /* Simple Scatter Gather elements */
1380 #define	SG(SGL,Index,Flags,Buffer,Size)				   \
1381 	I2O_FLAGS_COUNT_setCount(				   \
1382 	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1383 	  Size);						   \
1384 	I2O_FLAGS_COUNT_setFlags(				   \
1385 	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1386 	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
1387 	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
1388 	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
1389 	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
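/*
 *	Example use of SG() (taken from ASR_getParams below), where element 1
 *	terminates the chain:
 *
 *		SG(&(Message_Ptr->SGL), 1,
 *		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
 *		  Buffer_Ptr, BufferSize);
 */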
1390 
1391 /*
1392  *	Retrieve Parameter Group.
1393  */
1394 static void *
1395 ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
1396 	      unsigned BufferSize)
1397 {
1398 	struct paramGetMessage {
1399 		I2O_UTIL_PARAMS_GET_MESSAGE M;
1400 		char
1401 		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1402 		struct Operations {
1403 			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1404 			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1405 		}			     O;
1406 	}				Message;
1407 	struct Operations		*Operations_Ptr;
1408 	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
1409 	struct ParamBuffer {
1410 		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1411 		I2O_PARAM_READ_OPERATION_RESULT	    Read;
1412 		char				    Info[1];
1413 	}				*Buffer_Ptr;
1414 
1415 	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
1416 	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1417 	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1418 	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1419 	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1420 	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1421 	bzero(Operations_Ptr, sizeof(struct Operations));
1422 	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1423 	  &(Operations_Ptr->Header), 1);
1424 	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1425 	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1426 	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1427 	  &(Operations_Ptr->Template[0]), 0xFFFF);
1428 	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1429 	  &(Operations_Ptr->Template[0]), Group);
1430 	Buffer_Ptr = (struct ParamBuffer *)Buffer;
1431 	bzero(Buffer_Ptr, BufferSize);
1432 
1433 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1434 	  I2O_VERSION_11
1435 	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1436 	    / sizeof(U32)) << 4));
1437 	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1438 	  TID);
1439 	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1440 	  I2O_UTIL_PARAMS_GET);
1441 	/*
1442 	 *  Set up the buffers as scatter gather elements.
1443 	 */
1444 	SG(&(Message_Ptr->SGL), 0,
1445 	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1446 	  Operations_Ptr, sizeof(struct Operations));
1447 	SG(&(Message_Ptr->SGL), 1,
1448 	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1449 	  Buffer_Ptr, BufferSize);
1450 
1451 	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1452 	 && (Buffer_Ptr->Header.ResultCount)) {
1453 		return ((void *)(Buffer_Ptr->Info));
1454 	}
1455 	return (NULL);
1456 } /* ASR_getParams */
1457 
1458 /*
1459  *	Acquire the LCT information.
1460  */
1461 static int
1462 ASR_acquireLct(Asr_softc_t *sc)
1463 {
1464 	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
1465 	PI2O_SGE_SIMPLE_ELEMENT		sg;
1466 	int				MessageSizeInBytes;
1467 	caddr_t				v;
1468 	int				len;
1469 	I2O_LCT				Table, *TableP = &Table;
1470 	PI2O_LCT_ENTRY			Entry;
1471 
1472 	/*
1473 	 *	sc value assumed valid
1474 	 */
1475 	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1476 	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1477 	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
1478 	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
1479 		return (ENOMEM);
1480 	}
1481 	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
1482 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1483 	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1484 	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
1485 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1486 	    I2O_EXEC_LCT_NOTIFY);
1487 	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1488 	    I2O_CLASS_MATCH_ANYCLASS);
1489 	/*
1490 	 *	Call the LCT table to determine the number of device entries
1491 	 * to reserve space for.
1492 	 */
1493 	SG(&(Message_Ptr->SGL), 0,
1494 	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, TableP,
1495 	  sizeof(I2O_LCT));
1496 	/*
1497 	 *	since this code is reused in several systems, we use a shift
1498 	 * operation rather than a divide by sizeof(u_int32_t) for
1499 	 * efficiency.
1500 	 */
1501 	I2O_LCT_setTableSize(&Table,
1502 	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1503 	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1504 	/*
1505 	 *	Determine the size of the LCT table.
1506 	 */
1507 	if (sc->ha_LCT) {
1508 		kfree(sc->ha_LCT, M_TEMP);
1509 	}
1510 	/*
1511 	 *	malloc only generates contiguous memory when less than a
1512 	 * page is expected. We must break the request up into an SG list ...
1513 	 */
1514 	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1515 	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1516 	 || (len > (128 * 1024))) {	/* Arbitrary */
1517 		kfree(Message_Ptr, M_TEMP);
1518 		return (EINVAL);
1519 	}
1520 	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
1521 		kfree(Message_Ptr, M_TEMP);
1522 		return (ENOMEM);
1523 	}
1524 	/*
1525 	 *	since this code is reused in several systems, we use a shift
1526 	 * operation rather than a divide by sizeof(u_int32_t) for
1527 	 * efficiency.
1528 	 */
1529 	I2O_LCT_setTableSize(sc->ha_LCT,
1530 	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1531 	/*
1532 	 *	Convert the access to the LCT table into a SG list.
1533 	 */
1534 	sg = Message_Ptr->SGL.u.Simple;
1535 	v = (caddr_t)(sc->ha_LCT);
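	/*
	 *	Walk the buffer one physically contiguous span at a time
	 * (checked page by page via KVTOPHYS), emitting one simple SG
	 * element per span; the final element is flagged LAST_ELEMENT and
	 * END_OF_BUFFER, and the message frame is reallocated as it grows.
	 */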
1536 	for (;;) {
1537 		int next, base, span;
1538 
1539 		span = 0;
1540 		next = base = KVTOPHYS(v);
1541 		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1542 
1543 		/* How far can we go contiguously */
1544 		while ((len > 0) && (base == next)) {
1545 			int size;
1546 
1547 			next = trunc_page(base) + PAGE_SIZE;
1548 			size = next - base;
1549 			if (size > len) {
1550 				size = len;
1551 			}
1552 			span += size;
1553 			v += size;
1554 			len -= size;
1555 			base = KVTOPHYS(v);
1556 		}
1557 
1558 		/* Construct the Flags */
1559 		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1560 		{
1561 			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1562 			if (len <= 0) {
1563 				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1564 				    | I2O_SGL_FLAGS_LAST_ELEMENT
1565 				    | I2O_SGL_FLAGS_END_OF_BUFFER);
1566 			}
1567 			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1568 		}
1569 
1570 		if (len <= 0) {
1571 			break;
1572 		}
1573 
1574 		/*
1575 		 * Incrementing requires resizing of the packet.
1576 		 */
1577 		++sg;
1578 		MessageSizeInBytes += sizeof(*sg);
1579 		I2O_MESSAGE_FRAME_setMessageSize(
1580 		  &(Message_Ptr->StdMessageFrame),
1581 		  I2O_MESSAGE_FRAME_getMessageSize(
1582 		    &(Message_Ptr->StdMessageFrame))
1583 		  + (sizeof(*sg) / sizeof(U32)));
1584 		{
1585 			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1586 
1587 			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1588 			    kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
1589 			    == NULL) {
1590 				kfree(sc->ha_LCT, M_TEMP);
1591 				sc->ha_LCT = NULL;
1592 				kfree(Message_Ptr, M_TEMP);
1593 				return (ENOMEM);
1594 			}
1595 			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1596 			bcopy(Message_Ptr, NewMessage_Ptr, span);
1597 			kfree(Message_Ptr, M_TEMP);
1598 			sg = (PI2O_SGE_SIMPLE_ELEMENT)
1599 			  (((caddr_t)NewMessage_Ptr) + span);
1600 			Message_Ptr = NewMessage_Ptr;
1601 		}
1602 	}
1603 	{	int retval;
1604 
1605 		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1606 		kfree(Message_Ptr, M_TEMP);
1607 		if (retval != CAM_REQ_CMP) {
1608 			return (ENODEV);
1609 		}
1610 	}
1611 	/* If the LCT table grew, let's truncate accesses */
1612 	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1613 		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1614 	}
1615 	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1616 	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1617 	  ++Entry) {
1618 		Entry->le_type = I2O_UNKNOWN;
1619 		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1620 
1621 		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1622 			Entry->le_type = I2O_BSA;
1623 			break;
1624 
1625 		case I2O_CLASS_SCSI_PERIPHERAL:
1626 			Entry->le_type = I2O_SCSI;
1627 			break;
1628 
1629 		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1630 			Entry->le_type = I2O_FCA;
1631 			break;
1632 
1633 		case I2O_CLASS_BUS_ADAPTER_PORT:
1634 			Entry->le_type = I2O_PORT | I2O_SCSI;
1635 			/* FALLTHRU */
1636 		case I2O_CLASS_FIBRE_CHANNEL_PORT:
1637 			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1638 			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
1639 				Entry->le_type = I2O_PORT | I2O_FCA;
1640 			}
1641 		{	struct ControllerInfo {
1642 				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
1643 				I2O_PARAM_READ_OPERATION_RESULT	    Read;
1644 				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1645 			} Buffer;
1646 			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1647 
1648 			Entry->le_bus = 0xff;
1649 			Entry->le_target = 0xff;
1650 			Entry->le_lun = 0xff;
1651 
1652 			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1653 			  ASR_getParams(sc,
1654 			    I2O_LCT_ENTRY_getLocalTID(Entry),
1655 			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1656 			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
1657 				continue;
1658 			}
1659 			Entry->le_target
1660 			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1661 			    Info);
1662 			Entry->le_lun = 0;
1663 		}	/* FALLTHRU */
1664 		default:
1665 			continue;
1666 		}
1667 		{	struct DeviceInfo {
1668 				I2O_PARAM_RESULTS_LIST_HEADER	Header;
1669 				I2O_PARAM_READ_OPERATION_RESULT Read;
1670 				I2O_DPT_DEVICE_INFO_SCALAR	Info;
1671 			} Buffer;
1672 			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;
1673 
1674 			Entry->le_bus = 0xff;
1675 			Entry->le_target = 0xff;
1676 			Entry->le_lun = 0xff;
1677 
1678 			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1679 			  ASR_getParams(sc,
1680 			    I2O_LCT_ENTRY_getLocalTID(Entry),
1681 			    I2O_DPT_DEVICE_INFO_GROUP_NO,
1682 			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
1683 				continue;
1684 			}
1685 			Entry->le_type
1686 			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1687 			Entry->le_bus
1688 			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1689 			if ((Entry->le_bus > sc->ha_MaxBus)
1690 			 && (Entry->le_bus <= MAX_CHANNEL)) {
1691 				sc->ha_MaxBus = Entry->le_bus;
1692 			}
1693 			Entry->le_target
1694 			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1695 			Entry->le_lun
1696 			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1697 		}
1698 	}
1699 	/*
1700 	 *	A zero return value indicates success.
1701 	 */
1702 	return (0);
1703 } /* ASR_acquireLct */
1704 
1705 /*
1706  * Initialize a message frame.
1707  * We assume that the CDB has already been set up, so all we do here is
1708  * generate the Scatter Gather list.
1709  */
1710 static PI2O_MESSAGE_FRAME
1711 ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
1712 {
1713 	PI2O_MESSAGE_FRAME	Message_Ptr;
1714 	PI2O_SGE_SIMPLE_ELEMENT sg;
1715 	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1716 	vm_size_t		size, len;
1717 	caddr_t			v;
1718 	U32			MessageSize;
1719 	int			next, span, base, rw;
1720 	int			target = ccb->ccb_h.target_id;
1721 	int			lun = ccb->ccb_h.target_lun;
1722 	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1723 	tid_t			TID;
1724 
1725 	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1726 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
1727 	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
1728 	      sizeof(I2O_SG_ELEMENT)));
1729 
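	/*
	 *	Look up the cached TID for this bus/target/lun.  If it is not
	 * cached yet, scan the LCT for a matching, unclaimed entry
	 * (UserTID == 0xFFF) and remember its TID for subsequent commands.
	 */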
1730 	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1731 		PI2O_LCT_ENTRY Device;
1732 
1733 		TID = 0;
1734 		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1735 		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
1736 		    ++Device) {
1737 			if ((Device->le_type != I2O_UNKNOWN)
1738 			 && (Device->le_bus == bus)
1739 			 && (Device->le_target == target)
1740 			 && (Device->le_lun == lun)
1741 			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1742 				TID = I2O_LCT_ENTRY_getLocalTID(Device);
1743 				ASR_setTid(sc, Device->le_bus,
1744 					   Device->le_target, Device->le_lun,
1745 					   TID);
1746 				break;
1747 			}
1748 		}
1749 	}
1750 	if (TID == (tid_t)0) {
1751 		return (NULL);
1752 	}
1753 	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1754 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1755 	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1756 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1757 	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1758 		/ sizeof(U32)) << 4));
1759 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1760 	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1761 	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1762 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1763 	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1764 	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1765 	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1766 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1767 	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1768 	    I2O_SCB_FLAG_ENABLE_DISCONNECT
1769 	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1770 	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1771 	/*
1772 	 * We do not need any (optional byteswapping) method access to
1773 	 * the Initiator & Transaction context field.
1774 	 */
1775 	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1776 
1777 	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1778 	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1779 	/*
1780 	 * Copy the CDB over.
1781 	 */
1782 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1783 	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1784 	bcopy(&(ccb->csio.cdb_io),
1785 	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
1786 	    ccb->csio.cdb_len);
1787 
1788 	/*
1789 	 * Given a buffer describing a transfer, set up a scatter/gather map
1790 	 * in a ccb to map that SCSI transfer.
1791 	 */
1792 
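	/*
	 *	Derive the SG direction flag from the CAM direction bits:
	 * CAM_DIR_IN (device to host) leaves it clear, anything else marks
	 * the SG elements as host-to-device transfers.
	 */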
1793 	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1794 
1795 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1796 	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1797 	  (ccb->csio.dxfer_len)
1798 	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1799 		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1800 		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1801 		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1802 		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1803 		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
1804 		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1805 		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1806 	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
1807 		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1808 		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1809 
1810 	/*
1811 	 * Given a transfer described by `data', fill in the SG list.
1812 	 */
1813 	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1814 
1815 	len = ccb->csio.dxfer_len;
1816 	v = ccb->csio.data_ptr;
1817 	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
1818 	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1819 	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1820 	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
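	/*
	 *	Walk the kernel virtual buffer, coalescing physically
	 * contiguous pages into a single simple SG element each, until the
	 * transfer is fully described or the in-frame SG list (SG_SIZE
	 * elements) is exhausted.
	 */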
1821 	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1822 	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1823 		span = 0;
1824 		next = base = KVTOPHYS(v);
1825 		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1826 
1827 		/* How far can we go contiguously */
1828 		while ((len > 0) && (base == next)) {
1829 			next = trunc_page(base) + PAGE_SIZE;
1830 			size = next - base;
1831 			if (size > len) {
1832 				size = len;
1833 			}
1834 			span += size;
1835 			v += size;
1836 			len -= size;
1837 			base = KVTOPHYS(v);
1838 		}
1839 
1840 		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1841 		if (len == 0) {
1842 			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1843 		}
1844 		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1845 		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1846 		++sg;
1847 		MessageSize += sizeof(*sg) / sizeof(U32);
1848 	}
1849 	/* We always do the request sense ... */
1850 	if ((span = ccb->csio.sense_len) == 0) {
1851 		span = sizeof(ccb->csio.sense_data);
1852 	}
1853 	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1854 	  &(ccb->csio.sense_data), span);
1855 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1856 	  MessageSize + (sizeof(*sg) / sizeof(U32)));
1857 	return (Message_Ptr);
1858 } /* ASR_init_message */
1859 
1860 /*
1861  *	Initialize the outbound FIFO (reply message frames).
1862  */
1863 static U32
1864 ASR_initOutBound(Asr_softc_t *sc)
1865 {
1866 	struct initOutBoundMessage {
1867 		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
1868 		U32			       R;
1869 	}				Message;
1870 	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
1871 	U32				*volatile Reply_Ptr;
1872 	U32				Old;
1873 
1874 	/*
1875 	 *  Build up our copy of the Message.
1876 	 */
1877 	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
1878 	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
1879 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1880 	  I2O_EXEC_OUTBOUND_INIT);
1881 	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
1882 	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
1883 	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
1884 	/*
1885 	 *  Reset the Reply Status
1886 	 */
1887 	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
1888 	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
1889 	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
1890 	  sizeof(U32));
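	/*
	 *	Reply_Ptr is the extra U32 appended to our local message copy;
	 * the IOP reports the outbound-init state there through the SG
	 * element above, and the poll below spins until that state reaches
	 * at least I2O_EXEC_OUTBOUND_INIT_REJECTED.
	 */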
1891 	/*
1892 	 *	Send the Message out
1893 	 */
1894 	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
1895 	    0xffffffff) {
1896 		u_long size, addr;
1897 
1898 		/*
1899 		 *	Wait for a response (Poll).
1900 		 */
1901 		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
1902 		/*
1903 		 *	Re-enable the interrupts.
1904 		 */
1905 		asr_set_intr(sc, Old);
1906 		/*
1907 		 *	Populate the outbound table.
1908 		 */
1909 		if (sc->ha_Msgs == NULL) {
1910 
1911 			/* Allocate the reply frames */
1912 			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1913 			  * sc->ha_Msgs_Count;
1914 
1915 			/*
1916 			 *	contigmalloc only works reliably at
1917 			 * initialization time.
1918 			 */
1919 			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1920 			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
1921 			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
1922 				bzero(sc->ha_Msgs, size);
1923 				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
1924 			}
1925 		}
1926 
1927 		/* Initialize the outbound FIFO */
1928 		if (sc->ha_Msgs != NULL)
1929 			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
1930 			    size; --size) {
1931 				asr_set_FromFIFO(sc, addr);
1932 				addr +=
1933 				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
1934 			}
1935 		return (*Reply_Ptr);
1936 	}
1937 	return (0);
1938 } /* ASR_initOutBound */
1939 
1940 /*
1941  *	Set the system table
1942  */
1943 static int
1944 ASR_setSysTab(Asr_softc_t *sc)
1945 {
1946 	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1947 	PI2O_SET_SYSTAB_HEADER	      SystemTable;
1948 	Asr_softc_t		    * ha;
1949 	PI2O_SGE_SIMPLE_ELEMENT	      sg;
1950 	int			      retVal;
1951 
1952 	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
1953 	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1954 		return (ENOMEM);
1955 	}
1956 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1957 		++SystemTable->NumberEntries;
1958 	}
1959 	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
1960 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1961 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1962 	  M_TEMP, M_WAITOK)) == NULL) {
1963 		kfree(SystemTable, M_TEMP);
1964 		return (ENOMEM);
1965 	}
1966 	(void)ASR_fillMessage((void *)Message_Ptr,
1967 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1968 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1969 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1970 	  (I2O_VERSION_11 +
1971 	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1972 			/ sizeof(U32)) << 4)));
1973 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1974 	  I2O_EXEC_SYS_TAB_SET);
1975 	/*
1976 	 *	Consult the LCT table to determine the number of device
1977 	 * entries to reserve space for.
1978 	 *	Since this code is reused in several systems, a shift
1979 	 * operation is used here rather than a divide by
1980 	 * sizeof(u_int32_t), for efficiency.
1981 	 */
1982 	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
1983 	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
1984 	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
1985 	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
1986 	++sg;
1987 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1988 		SG(sg, 0,
1989 		  ((ha->ha_next)
1990 		    ? (I2O_SGL_FLAGS_DIR)
1991 		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
1992 		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
1993 		++sg;
1994 	}
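	/*
	 *	The two zero-length elements below stand in for the private
	 * memory and private I/O space descriptors that ExecSysTabSet also
	 * expects; we have nothing to supply for either.
	 */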
1995 	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1996 	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
1997 	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1998 	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1999 	kfree(Message_Ptr, M_TEMP);
2000 	kfree(SystemTable, M_TEMP);
2001 	return (retVal);
2002 } /* ASR_setSysTab */
2003 
2004 static int
2005 ASR_acquireHrt(Asr_softc_t *sc)
2006 {
2007 	I2O_EXEC_HRT_GET_MESSAGE	Message;
2008 	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
2009 	struct {
2010 		I2O_HRT	      Header;
2011 		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2012 	}				Hrt, *HrtP = &Hrt;
2013 	u_int8_t			NumberOfEntries;
2014 	PI2O_HRT_ENTRY			Entry;
2015 
2016 	bzero(&Hrt, sizeof (Hrt));
2017 	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
2018 	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2019 	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2020 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2021 	  (I2O_VERSION_11
2022 	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2023 		   / sizeof(U32)) << 4)));
2024 	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2025 	  I2O_EXEC_HRT_GET);
2026 
2027 	/*
2028 	 *  Set up the buffers as scatter gather elements.
2029 	 */
2030 	SG(&(Message_Ptr->SGL), 0,
2031 	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2032 	  HrtP, sizeof(Hrt));
2033 	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2034 		return (ENODEV);
2035 	}
2036 	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2037 	  > (MAX_CHANNEL + 1)) {
2038 		NumberOfEntries = MAX_CHANNEL + 1;
2039 	}
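	/*
	 *	Each HRT entry's AdapterID carries the controller TID in its
	 * low 12 bits and the bus number in bits 16 and up; use it to fill
	 * in the bus for the matching LCT entries.
	 */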
2040 	for (Entry = Hrt.Header.HRTEntry;
2041 	  NumberOfEntries != 0;
2042 	  ++Entry, --NumberOfEntries) {
2043 		PI2O_LCT_ENTRY Device;
2044 
2045 		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2046 		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2047 		  ++Device) {
2048 			if (I2O_LCT_ENTRY_getLocalTID(Device)
2049 			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2050 				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2051 				  Entry) >> 16;
2052 				if ((Device->le_bus > sc->ha_MaxBus)
2053 				 && (Device->le_bus <= MAX_CHANNEL)) {
2054 					sc->ha_MaxBus = Device->le_bus;
2055 				}
2056 			}
2057 		}
2058 	}
2059 	return (0);
2060 } /* ASR_acquireHrt */
2061 
2062 /*
2063  *	Enable the adapter.
2064  */
2065 static int
2066 ASR_enableSys(Asr_softc_t *sc)
2067 {
2068 	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2069 	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2070 
2071 	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2072 	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2073 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2074 	  I2O_EXEC_SYS_ENABLE);
2075 	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2076 } /* ASR_enableSys */
2077 
2078 /*
2079  *	Perform the stages necessary to initialize the adapter
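 *	Returns zero on success, non-zero if the outbound initialization,
 * system table set or system enable step fails.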
2080  */
2081 static int
2082 ASR_init(Asr_softc_t *sc)
2083 {
2084 	return ((ASR_initOutBound(sc) == 0)
2085 	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2086 	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2087 } /* ASR_init */
2088 
2089 /*
2090  *	Send a Synchronize Cache command to the target device.
2091  */
2092 static void
2093 ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
2094 {
2095 	tid_t TID;
2096 
2097 	/*
2098 	 * We will not synchronize the device when there are outstanding
2099 	 * commands issued by the OS (this is due to a locked up device,
2100 	 * as the OS normally would flush all outstanding commands before
2101 	 * issuing a shutdown or an adapter reset).
2102 	 */
2103 	if ((sc != NULL)
2104 	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2105 	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2106 	 && (TID != (tid_t)0)) {
2107 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2108 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2109 
2110 		Message_Ptr = &Message;
2111 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2112 		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2113 
2114 		I2O_MESSAGE_FRAME_setVersionOffset(
2115 		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2116 		  I2O_VERSION_11
2117 		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2118 		    - sizeof(I2O_SG_ELEMENT))
2119 			/ sizeof(U32)) << 4));
2120 		I2O_MESSAGE_FRAME_setMessageSize(
2121 		  (PI2O_MESSAGE_FRAME)Message_Ptr,
2122 		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2123 		  - sizeof(I2O_SG_ELEMENT))
2124 			/ sizeof(U32));
2125 		I2O_MESSAGE_FRAME_setInitiatorAddress (
2126 		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2127 		I2O_MESSAGE_FRAME_setFunction(
2128 		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2129 		I2O_MESSAGE_FRAME_setTargetAddress(
2130 		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2131 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2132 		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2133 		  I2O_SCSI_SCB_EXEC);
2134 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2135 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2136 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2137 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2138 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2139 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2140 		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2141 		  DPT_ORGANIZATION_ID);
2142 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2143 		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2144 		Message_Ptr->CDB[1] = (lun << 5);
2145 
2146 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2147 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2148 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2149 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2150 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2151 
2152 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2153 
2154 	}
2155 }
2156 
2157 static void
2158 ASR_synchronize(Asr_softc_t *sc)
2159 {
2160 	int bus, target, lun;
2161 
2162 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2163 		for (target = 0; target <= sc->ha_MaxId; ++target) {
2164 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2165 				ASR_sync(sc,bus,target,lun);
2166 			}
2167 		}
2168 	}
2169 }
2170 
2171 /*
2172  *	Reset the HBA, targets and BUS.
2173  *		Currently this resets *all* the SCSI busses.
2174  */
2175 static __inline void
2176 asr_hbareset(Asr_softc_t *sc)
2177 {
2178 	ASR_synchronize(sc);
2179 	(void)ASR_reset(sc);
2180 } /* asr_hbareset */
2181 
2182 /*
2183  *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2184  * limit and a reduction in error checking (in the pre 4.0 case).
2185  */
2186 static int
2187 asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
2188 {
2189 	int		rid;
2190 	u_int32_t	p, l, s;
2191 
2192 	/*
2193 	 * The I2O specification says we must use the first *memory*-mapped BAR.
2194 	 */
2195 	for (rid = 0; rid < 4; rid++) {
2196 		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
2197 		if ((p & 1) == 0) {
2198 			break;
2199 		}
2200 	}
2201 	/*
2202 	 *	No memory-mapped BAR was found; fall back to BAR 0.
2203 	 */
2204 	if (rid >= 4) {
2205 		rid = 0;
2206 	}
2207 	rid = PCIR_BAR(rid);
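	/*
	 *	Standard PCI BAR sizing: save the BAR, write all ones, read
	 * back the size mask (ignoring the low type bits), negate it to get
	 * the region length, then restore the original value.
	 */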
2208 	p = pci_read_config(dev, rid, sizeof(p));
2209 	pci_write_config(dev, rid, -1, sizeof(p));
2210 	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2211 	pci_write_config(dev, rid, p, sizeof(p));
2212 	if (l > MAX_MAP) {
2213 		l = MAX_MAP;
2214 	}
2215 	/*
2216 	 * The 2005S Zero Channel RAID solution is not a perfect PCI
2217 	 * citizen. It asks for 4MB on BAR0 and 0MB on BAR1; once
2218 	 * enabled, it rewrites the size of BAR0 to 2MB, sets BAR1 to
2219 	 * BAR0+2MB and sets its size to 2MB. The IOP registers are then
2220 	 * accessible via BAR0 and the messaging registers via BAR1.
2221 	 * This applies when the subdevice code is 50 to 59 decimal.
2222 	 */
2223 	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
2224 	if (s != 0xA5111044) {
2225 		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
2226 		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2227 		 && (ADPTDOMINATOR_SUB_ID_START <= s)
2228 		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2229 			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2230 		}
2231 	}
2232 	p &= ~15;
2233 	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2234 	  p, p + l, l, RF_ACTIVE);
2235 	if (sc->ha_mem_res == NULL) {
2236 		return (0);
2237 	}
2238 	sc->ha_Base = rman_get_start(sc->ha_mem_res);
2239 	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
2240 	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);
2241 
2242 	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2243 		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
2244 			return (0);
2245 		}
2246 		p = pci_read_config(dev, rid, sizeof(p));
2247 		pci_write_config(dev, rid, -1, sizeof(p));
2248 		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2249 		pci_write_config(dev, rid, p, sizeof(p));
2250 		if (l > MAX_MAP) {
2251 			l = MAX_MAP;
2252 		}
2253 		p &= ~15;
2254 		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2255 		  p, p + l, l, RF_ACTIVE);
2256 		if (sc->ha_mes_res == NULL) {
2257 			return (0);
2258 		}
2259 		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
2260 		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
2261 	} else {
2262 		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
2263 		sc->ha_frame_btag = sc->ha_i2o_btag;
2264 	}
2265 	return (1);
2266 } /* asr_pci_map_mem */
2267 
2268 /*
2269  *	A simplified copy of the real pci_map_int with additional
2270  * registration requirements.
2271  */
2272 static int
2273 asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2274 {
2275 	int rid = 0;
2276 
2277 	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2278 	  RF_ACTIVE | RF_SHAREABLE);
2279 	if (sc->ha_irq_res == NULL) {
2280 		return (0);
2281 	}
2282 	if (bus_setup_intr(dev, sc->ha_irq_res, 0,
2283 	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
2284 		return (0);
2285 	}
2286 	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2287 	return (1);
2288 } /* asr_pci_map_int */
2289 
2290 static void
2291 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2292 {
2293 	Asr_softc_t *sc;
2294 
2295 	if (error)
2296 		return;
2297 
2298 	sc = (Asr_softc_t *)arg;
2299 
2300 	/* XXX
2301 	 * The status word can be at a 64-bit address, but the existing
2302 	 * accessor macros simply cannot manipulate 64-bit addresses.
2303 	 */
2304 	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2305 	    offsetof(struct Asr_status_mem, status);
2306 	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2307 	    offsetof(struct Asr_status_mem, rstatus);
2308 }
2309 
2310 static int
2311 asr_alloc_dma(Asr_softc_t *sc)
2312 {
2313 	device_t dev;
2314 
2315 	dev = sc->ha_dev;
2316 
2317 	if (bus_dma_tag_create(NULL,			/* parent */
2318 			       1, 0,			/* algnmnt, boundary */
2319 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2320 			       BUS_SPACE_MAXADDR,	/* highaddr */
2321 			       NULL, NULL,		/* filter, filterarg */
2322 			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2323 			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
2324 			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2325 			       0,			/* flags */
2326 			       &sc->ha_parent_dmat)) {
2327 		device_printf(dev, "Cannot allocate parent DMA tag\n");
2328 		return (ENOMEM);
2329 	}
2330 
2331 	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
2332 			       1, 0,			/* algnmnt, boundary */
2333 			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2334 			       BUS_SPACE_MAXADDR,	/* highaddr */
2335 			       NULL, NULL,		/* filter, filterarg */
2336 			       sizeof(sc->ha_statusmem),/* maxsize */
2337 			       1,			/* nsegments */
2338 			       sizeof(sc->ha_statusmem),/* maxsegsize */
2339 			       0,			/* flags */
2340 			       &sc->ha_statusmem_dmat)) {
2341 		device_printf(dev, "Cannot allocate status DMA tag\n");
2342 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2343 		return (ENOMEM);
2344 	}
2345 
2346 	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
2347 	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
2348 		device_printf(dev, "Cannot allocate status memory\n");
2349 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2350 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2351 		return (ENOMEM);
2352 	}
2353 	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
2354 	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);
2355 
2356 	return (0);
2357 }
2358 
2359 static void
2360 asr_release_dma(Asr_softc_t *sc)
2361 {
2362 
2363 	if (sc->ha_rstatus_phys != 0)
2364 		bus_dmamap_unload(sc->ha_statusmem_dmat,
2365 		    sc->ha_statusmem_dmamap);
2366 	if (sc->ha_statusmem != NULL)
2367 		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
2368 		    sc->ha_statusmem_dmamap);
2369 	if (sc->ha_statusmem_dmat != NULL)
2370 		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2371 	if (sc->ha_parent_dmat != NULL)
2372 		bus_dma_tag_destroy(sc->ha_parent_dmat);
2373 }
2374 
2375 /*
2376  *	Attach the devices, and virtual devices to the driver list.
2377  */
2378 static int
2379 asr_attach(device_t dev)
2380 {
2381 	PI2O_EXEC_STATUS_GET_REPLY status;
2382 	PI2O_LCT_ENTRY		 Device;
2383 	Asr_softc_t		 *sc, **ha;
2384 	struct scsi_inquiry_data *iq;
2385 	int			 bus, size, unit;
2386 	int			 error;
2387 
2388 	sc = device_get_softc(dev);
2389 	unit = device_get_unit(dev);
2390 	sc->ha_dev = dev;
2391 
2392 	if (Asr_softc_list == NULL) {
2393 		/*
2394 		 *	Fixup the OS revision as saved in the dptsig for the
2395 		 *	engine (dptioctl.h) to pick up.
2396 		 */
2397 		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2398 	}
2399 	/*
2400 	 *	Initialize the software structure
2401 	 */
2402 	LIST_INIT(&(sc->ha_ccb));
2403 	/* Link us into the HA list */
2404 	for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next))
2405 		;
2406 	*(ha) = sc;
2407 
2408 	/*
2409 	 *	This is the real McCoy!
2410 	 */
2411 	if (!asr_pci_map_mem(dev, sc)) {
2412 		device_printf(dev, "could not map memory\n");
2413 		return(ENXIO);
2414 	}
2415 	/* Enable memory and bus mastering if not previously enabled */
2416 	pci_write_config(dev, PCIR_COMMAND,
2417 	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2418 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2419 
2420 	sc->ha_pciBusNum = pci_get_bus(dev);
2421 	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2422 
2423 	if ((error = asr_alloc_dma(sc)) != 0)
2424 		return (error);
2425 
2426 	/* Check that the device is there */
2427 	if (ASR_resetIOP(sc) == 0) {
2428 		device_printf(dev, "Cannot reset adapter\n");
2429 		asr_release_dma(sc);
2430 		return (EIO);
2431 	}
2432 	status = &sc->ha_statusmem->status;
2433 	if (ASR_getStatus(sc) == NULL) {
2434 		device_printf(dev, "could not initialize hardware\n");
2435 		asr_release_dma(sc);
2436 		return(ENODEV);
2437 	}
2438 	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2439 	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2440 	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2441 	sc->ha_SystemTable.IopState = status->IopState;
2442 	sc->ha_SystemTable.MessengerType = status->MessengerType;
2443 	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2444 	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2445 	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2446 
2447 	if (!asr_pci_map_int(dev, (void *)sc)) {
2448 		device_printf(dev, "could not map interrupt\n");
2449 		asr_release_dma(sc);
2450 		return(ENXIO);
2451 	}
2452 
2453 	/* Adjust the maximum inbound count */
2454 	if (((sc->ha_QueueSize =
2455 	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2456 	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2457 		sc->ha_QueueSize = MAX_INBOUND;
2458 	}
2459 
2460 	/* Adjust the maximum outbound count */
2461 	if (((sc->ha_Msgs_Count =
2462 	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2463 	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2464 		sc->ha_Msgs_Count = MAX_OUTBOUND;
2465 	}
2466 	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2467 		sc->ha_Msgs_Count = sc->ha_QueueSize;
2468 	}
2469 
2470 	/* Adjust the maximum SG size to match the adapter */
2471 	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2472 	    2)) > MAX_INBOUND_SIZE) {
2473 		size = MAX_INBOUND_SIZE;
2474 	}
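	/* Number of simple SG elements that fit in an inbound frame after
	 * the fixed part of the private SCSI execute message. */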
2475 	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2476 	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2477 
2478 	/*
2479 	 *	Only do a bus/HBA reset on the first time through; on that
2480 	 * first pass we do not send a flush to the devices.
2481 	 */
2482 	if (ASR_init(sc) == 0) {
2483 		struct BufferInfo {
2484 			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2485 			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2486 			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2487 		} Buffer;
2488 		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2489 #define FW_DEBUG_BLED_OFFSET 8
2490 
2491 		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2492 		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2493 		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2494 			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2495 			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2496 		}
2497 		if (ASR_acquireLct(sc) == 0) {
2498 			(void)ASR_acquireHrt(sc);
2499 		}
2500 	} else {
2501 		device_printf(dev, "failed to initialize\n");
2502 		asr_release_dma(sc);
2503 		return(ENXIO);
2504 	}
2505 	/*
2506 	 *	Add in additional probe responses for more channels. This
2507 	 * is done here because we need both the acquireLct and the
2508 	 * acquireHrt data before the per-channel information is
2509 	 * complete.
2510 	 */
2511 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2512 	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2513 		if (Device->le_type == I2O_UNKNOWN) {
2514 			continue;
2515 		}
2516 		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2517 			if (Device->le_target > sc->ha_MaxId) {
2518 				sc->ha_MaxId = Device->le_target;
2519 			}
2520 			if (Device->le_lun > sc->ha_MaxLun) {
2521 				sc->ha_MaxLun = Device->le_lun;
2522 			}
2523 		}
2524 		if (((Device->le_type & I2O_PORT) != 0)
2525 		 && (Device->le_bus <= MAX_CHANNEL)) {
2526 			/* Do not increase MaxId for efficiency */
2527 			sc->ha_adapter_target[Device->le_bus] =
2528 			    Device->le_target;
2529 		}
2530 	}
2531 
2532 	/*
2533 	 *	Print the HBA model number as inquired from the card.
2534 	 */
2535 
2536 	device_printf(dev, " ");
2537 
2538 	if ((iq = (struct scsi_inquiry_data *)kmalloc(
2539 	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2540 	    NULL) {
2541 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2542 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2543 		int					posted = 0;
2544 
2545 		Message_Ptr = &Message;
2546 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2547 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2548 
2549 		I2O_MESSAGE_FRAME_setVersionOffset(
2550 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2551 		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2552 		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2553 		I2O_MESSAGE_FRAME_setMessageSize(
2554 		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2555 		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2556 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2557 		    sizeof(U32));
2558 		I2O_MESSAGE_FRAME_setInitiatorAddress(
2559 		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2560 		I2O_MESSAGE_FRAME_setFunction(
2561 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2562 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2563 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2564 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2565 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2566 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2567 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2568 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2569 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2570 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2571 		    DPT_ORGANIZATION_ID);
2572 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2573 		Message_Ptr->CDB[0] = INQUIRY;
2574 		Message_Ptr->CDB[4] =
2575 		    (unsigned char)sizeof(struct scsi_inquiry_data);
2576 		if (Message_Ptr->CDB[4] == 0) {
2577 			Message_Ptr->CDB[4] = 255;
2578 		}
2579 
2580 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2581 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2582 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2583 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2584 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2585 
2586 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2587 		  Message_Ptr, sizeof(struct scsi_inquiry_data));
2588 		SG(&(Message_Ptr->SGL), 0,
2589 		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2590 		  iq, sizeof(struct scsi_inquiry_data));
2591 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2592 
2593 		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2594 			kprintf (" ");
2595 			ASR_prstring (iq->vendor, 8);
2596 			++posted;
2597 		}
2598 		if (iq->product[0] && (iq->product[0] != ' ')) {
2599 			kprintf (" ");
2600 			ASR_prstring (iq->product, 16);
2601 			++posted;
2602 		}
2603 		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2604 			kprintf (" FW Rev. ");
2605 			ASR_prstring (iq->revision, 4);
2606 			++posted;
2607 		}
2608 		kfree(iq, M_TEMP);
2609 		if (posted) {
2610 			kprintf (",");
2611 		}
2612 	}
2613 	kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2614 	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2615 
2616 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2617 		struct cam_devq	  * devq;
2618 		int		    QueueSize = sc->ha_QueueSize;
2619 
2620 		if (QueueSize > MAX_INBOUND) {
2621 			QueueSize = MAX_INBOUND;
2622 		}
2623 
2624 		/*
2625 		 *	Create the device queue for our SIM(s).
2626 		 */
2627 		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2628 			continue;
2629 		}
2630 
2631 		/*
2632 		 *	Construct our first channel SIM entry
2633 		 */
2634 		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2635 						unit, &sim_mplock,
2636 						1, QueueSize, devq);
2637 		if (sc->ha_sim[bus] == NULL) {
2638 			continue;
2639 		}
2640 
2641 		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
2642 			cam_sim_free(sc->ha_sim[bus]);
2643 			sc->ha_sim[bus] = NULL;
2644 			continue;
2645 		}
2646 
2647 		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2648 		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2649 		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2650 			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2651 			cam_sim_free(sc->ha_sim[bus]);
2652 			sc->ha_sim[bus] = NULL;
2653 			continue;
2654 		}
2655 	}
2656 
2657 	/*
2658 	 *	Generate the device node information
2659 	 */
2660 	sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
2661 			       "asr%d", unit);
2662 	if (sc->ha_devt != NULL)
2663 		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2664 	sc->ha_devt->si_drv1 = sc;
2665 	return(0);
2666 } /* asr_attach */
2667 
2668 static void
2669 asr_poll(struct cam_sim *sim)
2670 {
2671 	asr_intr(cam_sim_softc(sim));
2672 } /* asr_poll */
2673 
2674 static void
2675 asr_action(struct cam_sim *sim, union ccb  *ccb)
2676 {
2677 	struct Asr_softc *sc;
2678 
2679 	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2680 			 ccb->ccb_h.func_code);
2681 
2682 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2683 
2684 	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2685 
2686 	switch (ccb->ccb_h.func_code) {
2687 
2688 	/* Common cases first */
2689 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2690 	{
2691 		struct Message {
2692 			char M[MAX_INBOUND_SIZE];
2693 		} Message;
2694 		PI2O_MESSAGE_FRAME   Message_Ptr;
2695 
2696 		/* Reject incoming commands while we are resetting the card */
2697 		if (sc->ha_in_reset != HA_OPERATIONAL) {
2698 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2699 			if (sc->ha_in_reset >= HA_OFF_LINE) {
2700 				/* HBA is now off-line */
2701 				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2702 			} else {
2703 				/* HBA currently resetting, try again later. */
2704 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2705 			}
2706 			debug_asr_cmd_printf (" e\n");
2707 			xpt_done(ccb);
2708 			debug_asr_cmd_printf (" q\n");
2709 			break;
2710 		}
2711 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2712 			kprintf(
2713 			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2714 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2715 			  ccb->csio.cdb_io.cdb_bytes[0],
2716 			  cam_sim_bus(sim),
2717 			  ccb->ccb_h.target_id,
2718 			  ccb->ccb_h.target_lun);
2719 		}
2720 		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2721 				     cam_sim_bus(sim), ccb->ccb_h.target_id,
2722 				     ccb->ccb_h.target_lun);
2723 		debug_asr_dump_ccb(ccb);
2724 
2725 		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2726 		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2727 			debug_asr_cmd2_printf ("TID=%x:\n",
2728 			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2729 			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2730 			debug_asr_cmd2_dump_message(Message_Ptr);
2731 			debug_asr_cmd1_printf (" q");
2732 
2733 			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2734 				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2735 				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2736 				debug_asr_cmd_printf (" E\n");
2737 				xpt_done(ccb);
2738 			}
2739 			debug_asr_cmd_printf(" Q\n");
2740 			break;
2741 		}
2742 		/*
2743 		 *	We will get here if there is no valid TID for the device
2744 		 * referenced in the scsi command packet.
2745 		 */
2746 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2747 		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2748 		debug_asr_cmd_printf (" B\n");
2749 		xpt_done(ccb);
2750 		break;
2751 	}
2752 
2753 	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
2754 		/* Reset HBA device ... */
2755 		asr_hbareset (sc);
2756 		ccb->ccb_h.status = CAM_REQ_CMP;
2757 		xpt_done(ccb);
2758 		break;
2759 
2760 	case XPT_ABORT:			/* Abort the specified CCB */
2761 		/* XXX Implement */
2762 		ccb->ccb_h.status = CAM_REQ_INVALID;
2763 		xpt_done(ccb);
2764 		break;
2765 
2766 	case XPT_SET_TRAN_SETTINGS:
2767 		/* XXX Implement */
2768 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2769 		xpt_done(ccb);
2770 		break;
2771 
2772 	case XPT_GET_TRAN_SETTINGS:
2773 	/* Get default/user set transfer settings for the target */
2774 	{
2775 		struct	ccb_trans_settings *cts = &(ccb->cts);
2776 		struct ccb_trans_settings_scsi *scsi =
2777 		    &cts->proto_specific.scsi;
2778 		struct ccb_trans_settings_spi *spi =
2779 		    &cts->xport_specific.spi;
2780 
2781 		if (cts->type == CTS_TYPE_USER_SETTINGS) {
2782 			cts->protocol = PROTO_SCSI;
2783 			cts->protocol_version = SCSI_REV_2;
2784 			cts->transport = XPORT_SPI;
2785 			cts->transport_version = 2;
2786 
2787 			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2788 			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2789 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2790 			spi->sync_period = 6; /* 40MHz */
2791 			spi->sync_offset = 15;
2792 			spi->valid = CTS_SPI_VALID_SYNC_RATE
2793 				   | CTS_SPI_VALID_SYNC_OFFSET
2794 				   | CTS_SPI_VALID_BUS_WIDTH
2795 				   | CTS_SPI_VALID_DISC;
2796 			scsi->valid = CTS_SCSI_VALID_TQ;
2797 
2798 			ccb->ccb_h.status = CAM_REQ_CMP;
2799 		} else {
2800 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2801 		}
2802 		xpt_done(ccb);
2803 		break;
2804 	}
2805 
2806 	case XPT_CALC_GEOMETRY:
2807 	{
2808 		struct	  ccb_calc_geometry *ccg;
2809 		u_int32_t size_mb;
2810 		u_int32_t secs_per_cylinder;
2811 
2812 		ccg = &(ccb->ccg);
2813 		size_mb = ccg->volume_size
2814 			/ ((1024L * 1024L) / ccg->block_size);
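		/*
		 * Synthesize a CHS geometry, using extended translation
		 * (more heads) as the volume grows so the cylinder count
		 * stays reasonable.
		 */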
2815 
2816 		if (size_mb > 4096) {
2817 			ccg->heads = 255;
2818 			ccg->secs_per_track = 63;
2819 		} else if (size_mb > 2048) {
2820 			ccg->heads = 128;
2821 			ccg->secs_per_track = 63;
2822 		} else if (size_mb > 1024) {
2823 			ccg->heads = 65;
2824 			ccg->secs_per_track = 63;
2825 		} else {
2826 			ccg->heads = 64;
2827 			ccg->secs_per_track = 32;
2828 		}
2829 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2830 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2831 		ccb->ccb_h.status = CAM_REQ_CMP;
2832 		xpt_done(ccb);
2833 		break;
2834 	}
2835 
2836 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2837 		ASR_resetBus (sc, cam_sim_bus(sim));
2838 		ccb->ccb_h.status = CAM_REQ_CMP;
2839 		xpt_done(ccb);
2840 		break;
2841 
2842 	case XPT_TERM_IO:		/* Terminate the I/O process */
2843 		/* XXX Implement */
2844 		ccb->ccb_h.status = CAM_REQ_INVALID;
2845 		xpt_done(ccb);
2846 		break;
2847 
2848 	case XPT_PATH_INQ:		/* Path routing inquiry */
2849 	{
2850 		struct ccb_pathinq *cpi = &(ccb->cpi);
2851 
2852 		cpi->version_num = 1; /* XXX??? */
2853 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2854 		cpi->target_sprt = 0;
2855 		/* Not necessary to reset bus, done by HDM initialization */
2856 		cpi->hba_misc = PIM_NOBUSRESET;
2857 		cpi->hba_eng_cnt = 0;
2858 		cpi->max_target = sc->ha_MaxId;
2859 		cpi->max_lun = sc->ha_MaxLun;
2860 		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2861 		cpi->bus_id = cam_sim_bus(sim);
2862 		cpi->base_transfer_speed = 3300;
2863 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2864 		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2865 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2866 		cpi->unit_number = cam_sim_unit(sim);
2867 		cpi->ccb_h.status = CAM_REQ_CMP;
2868 		cpi->transport = XPORT_SPI;
2869 		cpi->transport_version = 2;
2870 		cpi->protocol = PROTO_SCSI;
2871 		cpi->protocol_version = SCSI_REV_2;
2872 		xpt_done(ccb);
2873 		break;
2874 	}
2875 	default:
2876 		ccb->ccb_h.status = CAM_REQ_INVALID;
2877 		xpt_done(ccb);
2878 		break;
2879 	}
2880 } /* asr_action */
2881 
2882 /*
2883  * Handle processing of current CCB as pointed to by the Status.
2884  */
2885 static int
2886 asr_intr(Asr_softc_t *sc)
2887 {
2888 	int processed;
2889 
2890 	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2891 	    processed = 1) {
2892 		union asr_ccb			   *ccb;
2893 		u_int				    dsc;
2894 		U32				    ReplyOffset;
2895 		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2896 
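		/* Read the reply FIFO, retrying once before treating it as empty. */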
2897 		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2898 		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
2899 			break;
2900 		}
2901 		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2902 		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2903 		/*
2904 		 * We do not need any (optional byteswapping) method access to
2905 		 * the Initiator context field.
2906 		 */
2907 		ccb = (union asr_ccb *)(long)
2908 		  I2O_MESSAGE_FRAME_getInitiatorContext64(
2909 		    &(Reply->StdReplyFrame.StdMessageFrame));
2910 		if (I2O_MESSAGE_FRAME_getMsgFlags(
2911 		  &(Reply->StdReplyFrame.StdMessageFrame))
2912 		  & I2O_MESSAGE_FLAGS_FAIL) {
2913 			I2O_UTIL_NOP_MESSAGE	Message;
2914 			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
2915 			U32			MessageOffset;
2916 
2917 			MessageOffset = (u_long)
2918 			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2919 			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2920 			/*
2921 			 *  Get the Original Message Frame's address, and copy
2922 			 * its Transaction Context into our space. (Currently
2923 			 * unused as originally written, but better to be
2924 			 * safe than sorry). Straight copy means that we
2925 			 * need not concern ourselves with the (optional
2926 			 * byteswapping) method access.
2927 			 */
2928 			Reply->StdReplyFrame.TransactionContext =
2929 			    bus_space_read_4(sc->ha_frame_btag,
2930 			    sc->ha_frame_bhandle, MessageOffset +
2931 			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2932 			    TransactionContext));
2933 			/*
2934 			 *	For 64 bit machines, we need to reconstruct the
2935 			 * 64 bit context.
2936 			 */
2937 			ccb = (union asr_ccb *)(long)
2938 			  I2O_MESSAGE_FRAME_getInitiatorContext64(
2939 			    &(Reply->StdReplyFrame.StdMessageFrame));
2940 			/*
2941 			 * Unique error code for command failure.
2942 			 */
2943 			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2944 			  &(Reply->StdReplyFrame), (u_int16_t)-2);
2945 			/*
2946 			 *  Modify the message frame to contain a NOP and
2947 			 * re-issue it to the controller.
2948 			 */
2949 			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2950 			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2951 #if (I2O_UTIL_NOP != 0)
2952 				I2O_MESSAGE_FRAME_setFunction (
2953 				  &(Message_Ptr->StdMessageFrame),
2954 				  I2O_UTIL_NOP);
2955 #endif
2956 			/*
2957 			 *  Copy the packet out to the Original Message
2958 			 */
2959 			asr_set_frame(sc, Message_Ptr, MessageOffset,
2960 				      sizeof(I2O_UTIL_NOP_MESSAGE));
2961 			/*
2962 			 *  Issue the NOP
2963 			 */
2964 			asr_set_ToFIFO(sc, MessageOffset);
2965 		}
2966 
2967 		/*
2968 		 *	Asynchronous command with no return requirements,
2969 		 * and a generic handler for immunity against odd error
2970 		 * returns from the adapter.
2971 		 */
2972 		if (ccb == NULL) {
2973 			/*
2974 			 * Return Reply so that it can be used for the
2975 			 * next command
2976 			 */
2977 			asr_set_FromFIFO(sc, ReplyOffset);
2978 			continue;
2979 		}
2980 
2981 		/* Welease Wadjah! (and stop timeouts) */
2982 		ASR_ccbRemove (sc, ccb);
2983 
2984 		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
2985 		    &(Reply->StdReplyFrame));
2986 		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
2987 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2988 		switch (dsc) {
2989 
2990 		case I2O_SCSI_DSC_SUCCESS:
2991 			ccb->ccb_h.status |= CAM_REQ_CMP;
2992 			break;
2993 
2994 		case I2O_SCSI_DSC_CHECK_CONDITION:
2995 			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
2996 			    CAM_AUTOSNS_VALID;
2997 			break;
2998 
2999 		case I2O_SCSI_DSC_BUSY:
3000 			/* FALLTHRU */
3001 		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3002 			/* FALLTHRU */
3003 		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3004 			/* FALLTHRU */
3005 		case I2O_SCSI_HBA_DSC_BUS_BUSY:
3006 			ccb->ccb_h.status |= CAM_SCSI_BUSY;
3007 			break;
3008 
3009 		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3010 			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3011 			break;
3012 
3013 		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3014 			/* FALLTHRU */
3015 		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3016 			/* FALLTHRU */
3017 		case I2O_SCSI_HBA_DSC_LUN_INVALID:
3018 			/* FALLTHRU */
3019 		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3020 			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3021 			break;
3022 
3023 		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3024 			/* FALLTHRU */
3025 		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3026 			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3027 			break;
3028 
3029 		default:
3030 			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3031 			break;
3032 		}
3033 		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3034 			ccb->csio.resid -=
3035 			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3036 			    Reply);
3037 		}
3038 
3039 		/* Sense data in reply packet */
3040 		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3041 			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3042 
3043 			if (size) {
3044 				if (size > sizeof(ccb->csio.sense_data)) {
3045 					size = sizeof(ccb->csio.sense_data);
3046 				}
3047 				if (size > I2O_SCSI_SENSE_DATA_SZ) {
3048 					size = I2O_SCSI_SENSE_DATA_SZ;
3049 				}
3050 				if ((ccb->csio.sense_len)
3051 				 && (size > ccb->csio.sense_len)) {
3052 					size = ccb->csio.sense_len;
3053 				}
3054 				if (size < ccb->csio.sense_len) {
3055 					ccb->csio.sense_resid =
3056 					    ccb->csio.sense_len - size;
3057 				} else {
3058 					ccb->csio.sense_resid = 0;
3059 				}
3060 				bzero(&(ccb->csio.sense_data),
3061 				    sizeof(ccb->csio.sense_data));
3062 				bcopy(Reply->SenseData,
3063 				      &(ccb->csio.sense_data), size);
3064 			}
3065 		}
3066 
3067 		/*
3068 		 * Return Reply so that it can be used for the next command
3069 		 * since we have no more need for it now
3070 		 */
3071 		asr_set_FromFIFO(sc, ReplyOffset);
3072 
3073 		if (ccb->ccb_h.path) {
3074 			xpt_done ((union ccb *)ccb);
3075 		} else {
3076 			wakeup (ccb);
3077 		}
3078 	}
3079 	return (processed);
3080 } /* asr_intr */
3081 
3082 #undef QueueSize	/* Grrrr */
3083 #undef SG_Size		/* Grrrr */
3084 
3085 /*
3086  *	Meant to be included at the bottom of asr.c !!!
3087  */
3088 
3089 /*
3090  *	Included here as hard-coded typedefs. Done because the other needed
3091  *	include files use C++ comment structures, which makes them a
3092  *	nuisance to include here just to pick up these three typedefs.
3093  */
3094 typedef U32   DPT_TAG_T;
3095 typedef U32   DPT_MSG_T;
3096 typedef U32   DPT_RTN_T;
3097 
3098 #undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3099 #include	"dev/raid/asr/osd_unix.h"
3100 
3101 #define	asr_unit(dev)	  minor(dev)
3102 
3103 static u_int8_t ASR_ctlr_held;
3104 
3105 static int
3106 asr_open(struct dev_open_args *ap)
3107 {
3108 	cdev_t dev = ap->a_head.a_dev;
3109 	int		 error;
3110 
3111 	if (dev->si_drv1 == NULL) {
3112 		return (ENODEV);
3113 	}
3114 	crit_enter();
3115 	if (ASR_ctlr_held) {
3116 		error = EBUSY;
3117 	} else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
3118 		++ASR_ctlr_held;
3119 	}
3120 	crit_exit();
3121 	return (error);
3122 } /* asr_open */
3123 
3124 static int
3125 asr_close(struct dev_close_args *ap)
3126 {
3127 
3128 	ASR_ctlr_held = 0;
3129 	return (0);
3130 } /* asr_close */
3131 
3132 
3133 /*-------------------------------------------------------------------------*/
3134 /*		      Function ASR_queue_i				   */
3135 /*-------------------------------------------------------------------------*/
3136 /* The Parameters Passed To This Function Are :				   */
3137 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3138 /*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3139 /*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3140 /*									   */
3141 /* This Function Will Take The User Request Packet And Convert It To An	   */
3142 /* I2O MSG And Send It Off To The Adapter.				   */
3143 /*									   */
3144 /* Return : 0 For OK, Error Code Otherwise				   */
3145 /*-------------------------------------------------------------------------*/
3146 static int
3147 ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
3148 {
3149 	union asr_ccb				   * ccb;
3150 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
3151 	PI2O_MESSAGE_FRAME			     Message_Ptr;
3152 	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
3153 	int					     MessageSizeInBytes;
3154 	int					     ReplySizeInBytes;
3155 	int					     error;
3156 	int					     s;
3157 	/* Scatter Gather buffer list */
3158 	struct ioctlSgList_S {
3159 		SLIST_ENTRY(ioctlSgList_S) link;
3160 		caddr_t			   UserSpace;
3161 		I2O_FLAGS_COUNT		   FlagsCount;
3162 		char			   KernelSpace[sizeof(long)];
3163 	}					   * elm;
3164 	/* Generates a `first' entry */
3165 	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3166 
3167 	if (ASR_getBlinkLedCode(sc)) {
3168 		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3169 		  ASR_getBlinkLedCode(sc));
3170 		return (EIO);
3171 	}
3172 	/* Copy in the message into a local allocation */
3173 	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
3174 	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3175 		debug_usr_cmd_printf (
3176 		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3177 		return (ENOMEM);
3178 	}
3179 	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3180 	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3181 		kfree(Message_Ptr, M_TEMP);
3182 		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3183 		return (error);
3184 	}
3185 	/* Acquire information to determine type of packet */
3186 	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3187 	/* The offset of the reply information within the user packet */
3188 	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3189 	  + MessageSizeInBytes);
3190 
3191 	/* Check if the message is a synchronous initialization command */
3192 	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3193 	kfree(Message_Ptr, M_TEMP);
3194 	switch (s) {
3195 
3196 	case I2O_EXEC_IOP_RESET:
3197 	{	U32 status;
3198 
3199 		status = ASR_resetIOP(sc);
3200 		ReplySizeInBytes = sizeof(status);
3201 		debug_usr_cmd_printf ("resetIOP done\n");
3202 		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3203 		  ReplySizeInBytes));
3204 	}
3205 
3206 	case I2O_EXEC_STATUS_GET:
3207 	{	PI2O_EXEC_STATUS_GET_REPLY status;
3208 
3209 		status = &sc->ha_statusmem->status;
3210 		if (ASR_getStatus(sc) == NULL) {
3211 			debug_usr_cmd_printf ("getStatus failed\n");
3212 			return (ENXIO);
3213 		}
3214 		ReplySizeInBytes = sizeof(status);
3215 		debug_usr_cmd_printf ("getStatus done\n");
3216 		return (copyout ((caddr_t)status, (caddr_t)Reply,
3217 		  ReplySizeInBytes));
3218 	}
3219 
3220 	case I2O_EXEC_OUTBOUND_INIT:
3221 	{	U32 status;
3222 
3223 		status = ASR_initOutBound(sc);
3224 		ReplySizeInBytes = sizeof(status);
3225 		debug_usr_cmd_printf ("initOutBound done\n");
3226 		return (copyout ((caddr_t)&status, (caddr_t)Reply,
3227 		  ReplySizeInBytes));
3228 	}
3229 	}
3230 
3231 	/* Determine if the message size is valid */
3232 	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3233 	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3234 		debug_usr_cmd_printf ("Packet size %d incorrect\n",
3235 		  MessageSizeInBytes);
3236 		return (EINVAL);
3237 	}
3238 
3239 	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
3240 	  M_TEMP, M_WAITOK)) == NULL) {
3241 		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3242 		  MessageSizeInBytes);
3243 		return (ENOMEM);
3244 	}
3245 	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3246 	  MessageSizeInBytes)) != 0) {
3247 		kfree(Message_Ptr, M_TEMP);
3248 		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3249 		  MessageSizeInBytes, error);
3250 		return (error);
3251 	}
3252 
3253 	/* Check the size of the reply frame, and start constructing */
3254 
3255 	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3256 	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3257 		kfree(Message_Ptr, M_TEMP);
3258 		debug_usr_cmd_printf (
3259 		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3260 		return (ENOMEM);
3261 	}
3262 	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3263 	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
3264 		kfree(Reply_Ptr, M_TEMP);
3265 		kfree(Message_Ptr, M_TEMP);
3266 		debug_usr_cmd_printf (
3267 		  "Failed to copy in reply frame, errno=%d\n",
3268 		  error);
3269 		return (error);
3270 	}
3271 	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3272 	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3273 	kfree(Reply_Ptr, M_TEMP);
3274 	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3275 		kfree(Message_Ptr, M_TEMP);
3276 		debug_usr_cmd_printf (
3277 		  "Failed to copy in reply frame[%d], errno=%d\n",
3278 		  ReplySizeInBytes, error);
3279 		return (EINVAL);
3280 	}
3281 
3282 	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3283 	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3284 	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3285 	  M_TEMP, M_WAITOK)) == NULL) {
3286 		kfree(Message_Ptr, M_TEMP);
3287 		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3288 		  ReplySizeInBytes);
3289 		return (ENOMEM);
3290 	}
3291 	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
3292 	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3293 	  = Message_Ptr->InitiatorContext;
3294 	Reply_Ptr->StdReplyFrame.TransactionContext
3295 	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3296 	I2O_MESSAGE_FRAME_setMsgFlags(
3297 	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3298 	  I2O_MESSAGE_FRAME_getMsgFlags(
3299 	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3300 	      | I2O_MESSAGE_FLAGS_REPLY);
3301 
3302 	/* Check if the message is a special case command */
3303 	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3304 	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3305 		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3306 		  Message_Ptr) & 0xF0) >> 2)) {
3307 			kfree(Message_Ptr, M_TEMP);
3308 			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3309 			  &(Reply_Ptr->StdReplyFrame),
3310 			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
3311 			I2O_MESSAGE_FRAME_setMessageSize(
3312 			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3313 			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3314 			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3315 			  ReplySizeInBytes);
3316 			kfree(Reply_Ptr, M_TEMP);
3317 			return (error);
3318 		}
3319 	}
3320 
3321 	/* Deal in the general case */
3322 	/* First allocate and optionally copy in each scatter gather element */
3323 	SLIST_INIT(&sgList);
3324 	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3325 		PI2O_SGE_SIMPLE_ELEMENT sg;
3326 
3327 		/*
3328 		 *	Since this code is reused in several systems, a shift
3329 		 * operation is used here rather than a divide by
3330 		 * sizeof(u_int32_t), for efficiency.
3331 		 */
3332 		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3333 		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3334 		    >> 2));
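		/*
		 * Walk every simple SG element in the inbound frame: copy
		 * the referenced user buffer into a kernel staging buffer,
		 * then rewrite the element(s) with kernel physical
		 * addresses, splitting at page boundaries as needed.
		 */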
3335 		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3336 		  + MessageSizeInBytes)) {
3337 			caddr_t v;
3338 			int	len;
3339 
3340 			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3341 			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3342 				error = EINVAL;
3343 				break;
3344 			}
3345 			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3346 			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3347 			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3348 			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3349 				Message_Ptr) & 0xF0) >> 2)),
3350 			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3351 
3352 			if ((elm = (struct ioctlSgList_S *)kmalloc (
3353 			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3354 			  M_TEMP, M_WAITOK)) == NULL) {
3355 				debug_usr_cmd_printf (
3356 				  "Failed to allocate SG[%d]\n", len);
3357 				error = ENOMEM;
3358 				break;
3359 			}
3360 			SLIST_INSERT_HEAD(&sgList, elm, link);
3361 			elm->FlagsCount = sg->FlagsCount;
3362 			elm->UserSpace = (caddr_t)
3363 			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3364 			v = elm->KernelSpace;
3365 			/* Copy in outgoing data (DIR bit could be invalid) */
3366 			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3367 			  != 0) {
3368 				break;
3369 			}
			/*
			 *	If the buffer is not physically contiguous,
			 * let's break up the scatter/gather entries.
			 */
3374 			while ((len > 0)
3375 			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3376 			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3377 				int next, base, span;
3378 
3379 				span = 0;
3380 				next = base = KVTOPHYS(v);
3381 				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3382 				  base);
3383 
3384 				/* How far can we go physically contiguously */
3385 				while ((len > 0) && (base == next)) {
3386 					int size;
3387 
3388 					next = trunc_page(base) + PAGE_SIZE;
3389 					size = next - base;
3390 					if (size > len) {
3391 						size = len;
3392 					}
3393 					span += size;
3394 					v += size;
3395 					len -= size;
3396 					base = KVTOPHYS(v);
3397 				}
3398 
3399 				/* Construct the Flags */
3400 				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3401 				  span);
3402 				{
3403 					int flags = I2O_FLAGS_COUNT_getFlags(
3404 					  &(elm->FlagsCount));
3405 					/* Any remaining length? */
3406 					if (len > 0) {
3407 					    flags &=
3408 						~(I2O_SGL_FLAGS_END_OF_BUFFER
3409 						 | I2O_SGL_FLAGS_LAST_ELEMENT);
3410 					}
3411 					I2O_FLAGS_COUNT_setFlags(
3412 					  &(sg->FlagsCount), flags);
3413 				}
3414 
3415 				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3416 				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
3417 				    ((char *)Message_Ptr
3418 				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
3419 					Message_Ptr) & 0xF0) >> 2)),
3420 				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3421 				  span);
3422 				if (len <= 0) {
3423 					break;
3424 				}
3425 
3426 				/*
3427 				 * Incrementing requires resizing of the
3428 				 * packet, and moving up the existing SG
3429 				 * elements.
3430 				 */
3431 				++sg;
3432 				MessageSizeInBytes += sizeof(*sg);
3433 				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3434 				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3435 				  + (sizeof(*sg) / sizeof(U32)));
3436 				{
3437 					PI2O_MESSAGE_FRAME NewMessage_Ptr;
3438 
3439 					if ((NewMessage_Ptr
3440 					  = (PI2O_MESSAGE_FRAME)
3441 					    kmalloc (MessageSizeInBytes,
3442 					     M_TEMP, M_WAITOK)) == NULL) {
3443 						debug_usr_cmd_printf (
3444 						  "Failed to acquire frame[%d] memory\n",
3445 						  MessageSizeInBytes);
3446 						error = ENOMEM;
3447 						break;
3448 					}
3449 					span = ((caddr_t)sg)
3450 					     - (caddr_t)Message_Ptr;
3451 					bcopy(Message_Ptr,NewMessage_Ptr, span);
3452 					bcopy((caddr_t)(sg-1),
3453 					  ((caddr_t)NewMessage_Ptr) + span,
3454 					  MessageSizeInBytes - span);
3455 					kfree(Message_Ptr, M_TEMP);
3456 					sg = (PI2O_SGE_SIMPLE_ELEMENT)
3457 					  (((caddr_t)NewMessage_Ptr) + span);
3458 					Message_Ptr = NewMessage_Ptr;
3459 				}
3460 			}
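			/*
			 * Stop on error or once the element marked
			 * LAST_ELEMENT has been processed.
			 */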
3461 			if ((error)
3462 			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3463 			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3464 				break;
3465 			}
3466 			++sg;
3467 		}
3468 		if (error) {
3469 			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3470 				SLIST_REMOVE_HEAD(&sgList, link);
3471 				kfree(elm, M_TEMP);
3472 			}
3473 			kfree(Reply_Ptr, M_TEMP);
3474 			kfree(Message_Ptr, M_TEMP);
3475 			return (error);
3476 		}
3477 	}
3478 
3479 	debug_usr_cmd_printf ("Inbound: ");
3480 	debug_usr_cmd_dump_message(Message_Ptr);
3481 
3482 	/* Send the command */
3483 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3484 		/* Free up in-kernel buffers */
3485 		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3486 			SLIST_REMOVE_HEAD(&sgList, link);
3487 			kfree(elm, M_TEMP);
3488 		}
3489 		kfree(Reply_Ptr, M_TEMP);
3490 		kfree(Message_Ptr, M_TEMP);
3491 		return (ENOMEM);
3492 	}
3493 
3494 	/*
	 * We do not need any of the (optionally byte-swapping) accessor
	 * methods for the Initiator Context field.
3497 	 */
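	/*
	 * Stash the ccb pointer in the Initiator Context so the reply can
	 * be matched back to this request when the adapter completes it.
	 */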
3498 	I2O_MESSAGE_FRAME_setInitiatorContext64(
3499 	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3500 
3501 	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3502 
3503 	kfree(Message_Ptr, M_TEMP);
3504 
3505 	/*
3506 	 * Wait for the board to report a finished instruction.
3507 	 */
3508 	crit_enter();
3509 	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3510 		if (ASR_getBlinkLedCode(sc)) {
3511 			/* Reset Adapter */
3512 			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
3513 			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3514 			  ASR_getBlinkLedCode(sc));
3515 			if (ASR_reset (sc) == ENXIO) {
3516 				/* Command Cleanup */
3517 				ASR_ccbRemove(sc, ccb);
3518 			}
3519 			crit_exit();
3520 			/* Free up in-kernel buffers */
3521 			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3522 				SLIST_REMOVE_HEAD(&sgList, link);
3523 				kfree(elm, M_TEMP);
3524 			}
3525 			kfree(Reply_Ptr, M_TEMP);
3526 			asr_free_ccb(ccb);
3527 			return (EIO);
3528 		}
3529 		/* Check every second for BlinkLed */
3530 		/* There is no PRICAM, but outwardly PRIBIO is functional */
3531 		tsleep(ccb, 0, "asr", hz);
3532 	}
3533 	crit_exit();
3534 
3535 	debug_usr_cmd_printf ("Outbound: ");
3536 	debug_usr_cmd_dump_message(Reply_Ptr);
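	/*
	 * Fold the CAM completion status, the data transfer count and any
	 * auto-sense data into the reply frame that will be copied out to
	 * the caller below.
	 */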
3537 
3538 	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3539 	  &(Reply_Ptr->StdReplyFrame),
3540 	  (ccb->ccb_h.status != CAM_REQ_CMP));
3541 
3542 	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3543 	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3544 		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3545 		  ccb->csio.dxfer_len - ccb->csio.resid);
3546 	}
3547 	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3548 	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3549 	 - I2O_SCSI_SENSE_DATA_SZ))) {
3550 		int size = ReplySizeInBytes
3551 		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3552 		  - I2O_SCSI_SENSE_DATA_SZ;
3553 
3554 		if (size > sizeof(ccb->csio.sense_data)) {
3555 			size = sizeof(ccb->csio.sense_data);
3556 		}
3557 		if (size < ccb->csio.sense_len) {
3558 			ccb->csio.sense_resid = ccb->csio.sense_len - size;
3559 		} else {
3560 			ccb->csio.sense_resid = 0;
3561 		}
		/* Copy the sense data out before scrubbing the CCB's copy */
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
3564 		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3565 		    Reply_Ptr, size);
3566 	}
3567 
3568 	/* Free up in-kernel buffers */
3569 	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3570 		/* Copy out as necessary */
3571 		if ((error == 0)
		/* Treat the DIR bit as valid; at worst we copy out needlessly */
3573 		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3574 		  & I2O_SGL_FLAGS_DIR) == 0)) {
3575 			error = copyout((caddr_t)(elm->KernelSpace),
3576 			  elm->UserSpace,
3577 			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3578 		}
3579 		SLIST_REMOVE_HEAD(&sgList, link);
3580 		kfree(elm, M_TEMP);
3581 	}
3582 	if (error == 0) {
		/* Copy reply frame to user space */
3584 		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
3585 				ReplySizeInBytes);
3586 	}
3587 	kfree(Reply_Ptr, M_TEMP);
3588 	asr_free_ccb(ccb);
3589 
3590 	return (error);
3591 } /* ASR_queue_i */
3592 
3593 /*----------------------------------------------------------------------*/
3594 /*			    Function asr_ioctl			       */
3595 /*----------------------------------------------------------------------*/
/* The parameter passed to this function is:				*/
/*     ap : dev_ioctl_args holding					*/
/*          a_head.a_dev : Device					*/
/*          a_cmd        : Ioctl command				*/
/*          a_data       : User argument passed in			*/
3602 /*									*/
3603 /* This function is the user interface into this adapter driver		*/
3604 /*									*/
3605 /* Return : zero if OK, error code if not				*/
3606 /*----------------------------------------------------------------------*/
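/*
 * Illustrative sketch only (not part of the driver): a user-space caller
 * would typically open the control device node and hand the driver a
 * pointer to its own buffer.  The device path, headers and error handling
 * are assumptions; the pointer-to-buffer argument mirrors the
 * *(caddr_t *)data copyout convention used in the cases below.
 *
 *	int	fd = open("/dev/asr0", O_RDWR);	* assumed node name *
 *	int	blink = 0;
 *	caddr_t	arg = (caddr_t)&blink;
 *
 *	if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &arg) == 0)
 *		printf("blink LED code 0x%x\n", blink);
 */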
3607 
3608 static int
3609 asr_ioctl(struct dev_ioctl_args *ap)
3610 {
3611 	cdev_t dev = ap->a_head.a_dev;
3612 	u_long cmd = ap->a_cmd;
3613 	caddr_t data = ap->a_data;
3614 	Asr_softc_t	*sc = dev->si_drv1;
3615 	int		i, error = 0;
3616 #ifdef ASR_IOCTL_COMPAT
3617 	int		j;
3618 #endif /* ASR_IOCTL_COMPAT */
3619 
3620 	if (sc == NULL)
3621 		return (EINVAL);
3622 
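	/*
	 * Most DPT ioctls are accepted both as the full ioctl value and as
	 * a `traditional' form with the upper 16 bits masked off.  Under
	 * ASR_IOCTL_COMPAT the full form copies results straight into the
	 * ioctl data buffer; otherwise data is treated as a pointer to a
	 * user buffer and the results are copied out to it.
	 */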
3623 	switch(cmd) {
3624 	case DPT_SIGNATURE:
3625 #ifdef ASR_IOCTL_COMPAT
3626 #if (dsDescription_size != 50)
3627 	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3628 #endif
3629 		if (cmd & 0xFFFF0000) {
3630 			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3631 			return (0);
3632 		}
3633 	/* Traditional version of the ioctl interface */
3634 	case DPT_SIGNATURE & 0x0000FFFF:
3635 #endif
3636 		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3637 				sizeof(dpt_sig_S)));
3638 
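	/*
	 * DPT_CTRLINFO returns a small controller description record:
	 * driver unit, register base, PCI bus/device, IRQ and the current
	 * blink-LED state.
	 */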
3639 	/* Traditional version of the ioctl interface */
3640 	case DPT_CTRLINFO & 0x0000FFFF:
3641 	case DPT_CTRLINFO: {
3642 		struct {
3643 			u_int16_t length;
3644 			u_int16_t drvrHBAnum;
3645 			u_int32_t baseAddr;
3646 			u_int16_t blinkState;
3647 			u_int8_t  pciBusNum;
3648 			u_int8_t  pciDeviceNum;
3649 			u_int16_t hbaFlags;
3650 			u_int16_t Interrupt;
3651 			u_int32_t reserved1;
3652 			u_int32_t reserved2;
3653 			u_int32_t reserved3;
3654 		} CtlrInfo;
3655 
3656 		bzero(&CtlrInfo, sizeof(CtlrInfo));
3657 		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3658 		CtlrInfo.drvrHBAnum = asr_unit(dev);
3659 		CtlrInfo.baseAddr = sc->ha_Base;
3660 		i = ASR_getBlinkLedCode (sc);
3661 		if (i == -1)
3662 			i = 0;
3663 
3664 		CtlrInfo.blinkState = i;
3665 		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3666 		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3667 #define	FLG_OSD_PCI_VALID 0x0001
3668 #define	FLG_OSD_DMA	  0x0002
3669 #define	FLG_OSD_I2O	  0x0004
3670 		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3671 		CtlrInfo.Interrupt = sc->ha_irq;
3672 #ifdef ASR_IOCTL_COMPAT
3673 		if (cmd & 0xffff0000)
3674 			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3675 		else
3676 #endif /* ASR_IOCTL_COMPAT */
3677 		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3678 	}	return (error);
3679 
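	/*
	 * DPT_SYSINFO reports host details (OS type/version, bus type) and,
	 * when ASR_IOCTL_COMPAT is defined, additionally probes the CMOS
	 * drive types, memory sizes and the BIOS ROM area for an I2O
	 * SmartROM.
	 */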
3680 	/* Traditional version of the ioctl interface */
3681 	case DPT_SYSINFO & 0x0000FFFF:
3682 	case DPT_SYSINFO: {
3683 		sysInfo_S	Info;
3684 #ifdef ASR_IOCTL_COMPAT
3685 		char	      * cp;
3686 		/* Kernel Specific ptok `hack' */
3687 #define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3688 
3689 		bzero(&Info, sizeof(Info));
3690 
3691 		/* Appears I am the only person in the Kernel doing this */
3692 		outb (0x70, 0x12);
3693 		i = inb(0x71);
		j = i >> 4;
		/* Type 0xF means the real type is in extended CMOS reg 0x19 */
		if (j == 0x0f) {
3696 			outb (0x70, 0x19);
3697 			j = inb (0x71);
3698 		}
3699 		Info.drive0CMOS = j;
3700 
		j = i & 0x0f;
		if (j == 0x0f) {
3703 			outb (0x70, 0x1a);
3704 			j = inb (0x71);
3705 		}
3706 		Info.drive1CMOS = j;
3707 
3708 		Info.numDrives = *((char *)ptok(0x475));
3709 #else /* ASR_IOCTL_COMPAT */
3710 		bzero(&Info, sizeof(Info));
3711 #endif /* ASR_IOCTL_COMPAT */
3712 
3713 		Info.processorFamily = ASR_sig.dsProcessorFamily;
3714 		Info.osType = OS_BSDI_UNIX;
3715 		Info.osMajorVersion = osrelease[0] - '0';
3716 		Info.osMinorVersion = osrelease[2] - '0';
3717 		/* Info.osRevision = 0; */
3718 		/* Info.osSubRevision = 0; */
3719 		Info.busType = SI_PCI_BUS;
3720 		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3721 
3722 #ifdef ASR_IOCTL_COMPAT
3723 		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3724 		/* Go Out And Look For I2O SmartROM */
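		/*
		 * Scan the BIOS expansion ROM window (0xC8000-0xDFFFF) in
		 * 2KB steps for the 0xAA55 ROM signature and the "S"/"I20"
		 * identifier words, then pick up the version digits that
		 * follow the " v" marker in the ROM header.
		 */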
3725 		for(j = 0xC8000; j < 0xE0000; j += 2048) {
3726 			int k;
3727 
3728 			cp = ptok(j);
3729 			if (*((unsigned short *)cp) != 0xAA55) {
3730 				continue;
3731 			}
3732 			j += (cp[2] * 512) - 2048;
3733 			if ((*((u_long *)(cp + 6))
3734 			  != ('S' + (' ' * 256) + (' ' * 65536L)))
3735 			 || (*((u_long *)(cp + 10))
3736 			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3737 				continue;
3738 			}
			cp += 0x24;
			/* Scan up to 64 bytes for the " v" version marker */
			for (k = 0; k < 64; ++k, ++cp) {
				if (*((unsigned short *)cp)
				 == (' ' + ('v' * 256))) {
					break;
				}
			}
3746 			if (k < 64) {
3747 				Info.smartROMMajorVersion
3748 				    = *((unsigned char *)(cp += 4)) - '0';
3749 				Info.smartROMMinorVersion
3750 				    = *((unsigned char *)(cp += 2));
3751 				Info.smartROMRevision
3752 				    = *((unsigned char *)(++cp));
3753 				Info.flags |= SI_SmartROMverValid;
3754 				Info.flags &= ~SI_NO_SmartROM;
3755 				break;
3756 			}
3757 		}
3758 		/* Get The Conventional Memory Size From CMOS */
3759 		outb (0x70, 0x16);
3760 		j = inb (0x71);
3761 		j <<= 8;
3762 		outb (0x70, 0x15);
3763 		j |= inb(0x71);
3764 		Info.conventionalMemSize = j;
3765 
3766 		/* Get The Extended Memory Found At Power On From CMOS */
3767 		outb (0x70, 0x31);
3768 		j = inb (0x71);
3769 		j <<= 8;
3770 		outb (0x70, 0x30);
3771 		j |= inb(0x71);
3772 		Info.extendedMemSize = j;
3773 		Info.flags |= SI_MemorySizeValid;
3774 
3775 		/* Copy Out The Info Structure To The User */
3776 		if (cmd & 0xFFFF0000)
3777 			bcopy(&Info, data, sizeof(Info));
3778 		else
3779 #endif /* ASR_IOCTL_COMPAT */
3780 		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3781 		return (error); }
3782 
3783 		/* Get The BlinkLED State */
3784 	case DPT_BLINKLED:
3785 		i = ASR_getBlinkLedCode (sc);
3786 		if (i == -1)
3787 			i = 0;
3788 #ifdef ASR_IOCTL_COMPAT
3789 		if (cmd & 0xffff0000)
3790 			bcopy(&i, data, sizeof(i));
3791 		else
3792 #endif /* ASR_IOCTL_COMPAT */
3793 		error = copyout(&i, *(caddr_t *)data, sizeof(i));
3794 		break;
3795 
3796 		/* Send an I2O command */
3797 	case I2OUSRCMD:
3798 		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3799 
3800 		/* Reset and re-initialize the adapter */
3801 	case I2ORESETCMD:
3802 		return (ASR_reset(sc));
3803 
3804 		/* Rescan the LCT table and resynchronize the information */
3805 	case I2ORESCANCMD:
3806 		return (ASR_rescan(sc));
3807 	}
3808 	return (EINVAL);
3809 } /* asr_ioctl */
3810