xref: /dragonfly/sys/dev/raid/asr/asr.c (revision a4c31683)
1 /*-
2  * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3  * Copyright (c) 2000-2001 Adaptec Corporation
4  * All rights reserved.
5  *
6  * TERMS AND CONDITIONS OF USE
7  *
8  * Redistribution and use in source form, with or without modification, are
9  * permitted provided that redistributions of source code must retain the
10  * above copyright notice, this list of conditions and the following disclaimer.
11  *
12  * This software is provided `as is' by Adaptec and any express or implied
13  * warranties, including, but not limited to, the implied warranties of
14  * merchantability and fitness for a particular purpose, are disclaimed. In no
15  * event shall Adaptec be liable for any direct, indirect, incidental, special,
16  * exemplary or consequential damages (including, but not limited to,
17  * procurement of substitute goods or services; loss of use, data, or profits;
18  * or business interruptions) however caused and on any theory of liability,
19  * whether in contract, strict liability, or tort (including negligence or
20  * otherwise) arising in any way out of the use of this driver software, even
21  * if advised of the possibility of such damage.
22  *
23  * SCSI I2O host adapter driver
24  *
25  *	V1.10 2004/05/05 scottl@freebsd.org
26  *		- Massive cleanup of the driver to remove dead code and
27  *		  non-conformant style.
28  *		- Removed most i386-specific code to make it more portable.
29  *		- Converted to the bus_space API.
30  *	V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31  *		- The 2000S and 2005S do not initialize on some machines,
32  *		  increased timeout to 255ms from 50ms for the StatusGet
33  *		  command.
34  *	V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35  *		- I knew this one was too good to be true. The error return
36  *		  on ioctl commands needs to be compared to CAM_REQ_CMP, not
37  *		  to the bit masked status.
38  *	V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39  *		- The 2005S that was supported is affectionately called the
40  *		  Conjoined BAR Firmware. In order to support RAID-5 in a
41  *		  16MB low-cost configuration, Firmware was forced to go
42  *		  to a Split BAR Firmware. This requires a separate IOP and
43  *		  Messaging base address.
44  *	V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45  *		- Handle support for 2005S Zero Channel RAID solution.
46  *		- System locked up if the Adapter locked up. Do not try
47  *		  to send other commands if the resetIOP command fails. The
48  *		  fail outstanding command discovery loop was flawed as the
49  *		  removal of the command from the list prevented discovering
50  *		  all the commands.
51  *		- Comment changes to clarify driver.
52  *		- SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53  *		- We do not use the AC_FOUND_DEV event because of I2O.
54  *		  Removed asr_async.
55  *	V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56  *			 lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57  *		- Removed support for PM1554, PM2554 and PM2654 in Mode-0
58  *		  mode as this is confused with competitor adapters in run
59  *		  mode.
60  *		- critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61  *		  to prevent operating system panic.
62  *		- moved default major number to 154 from 97.
63  *	V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64  *		- The controller is not actually an ASR (Adaptec SCSI RAID)
65  *		  series that is visible, it's more of an internal code name.
66  *		  remove any visible references within reason for now.
67  *		- bus_ptr->LUN was not correctly zeroed when initially
68  *		  allocated causing a possible panic of the operating system
69  *		  during boot.
70  *	V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71  *		- Code always fails for ASR_getTid affecting performance.
72  *		- initiated a set of changes that resulted from a formal
73  *		  code inspection by Mark_Salyzyn@adaptec.com,
74  *		  George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75  *		  Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76  *		  Their findings were focussed on the LCT & TID handler, and
77  *		  all resulting changes were to improve code readability,
78  *		  consistency or have a positive effect on performance.
79  *	V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80  *		- Passthrough returned an incorrect error.
81  *		- Passthrough did not migrate the intrinsic scsi layer wakeup
82  *		  on command completion.
83  *		- generate control device nodes using make_dev and delete_dev.
84  *		- Performance affected by TID caching reallocing.
85  *		- Made suggested changes by Justin_Gibbs@adaptec.com
86  *			- use splcam instead of splbio.
87  *			- use cam_imask instead of bio_imask.
88  *			- use u_int8_t instead of u_char.
89  *			- use u_int16_t instead of u_short.
90  *			- use u_int32_t instead of u_long where appropriate.
91  *			- use 64 bit context handler instead of 32 bit.
92  *			- create_ccb should only allocate the worst case
93  *			  requirements for the driver since CAM may evolve
94  *			  making union ccb much larger than needed here.
95  *			  renamed create_ccb to asr_alloc_ccb.
96  *			- go nutz justifying all debug prints as macros
97  *			  defined at the top and remove unsightly ifdefs.
98  *			- INLINE STATIC viewed as confusing. Historically
99  *			  utilized to affect code performance and debug
100  *			  issues in OS, Compiler or OEM specific situations.
101  *	V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102  *		- Ported from FreeBSD 2.2.X DPT I2O driver.
103  *			changed struct scsi_xfer to union ccb/struct ccb_hdr
104  *			changed variable name xs to ccb
105  *			changed struct scsi_link to struct cam_path
106  *			changed struct scsibus_data to struct cam_sim
107  *			stopped using fordriver for holding on to the TID
108  *			use proprietary packet creation instead of scsi_inquire
109  *			CAM layer sends synchronize commands.
110  *
111  * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
112  */
113 
114 #include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
115 #include <sys/kernel.h>
116 #include <sys/module.h>
117 #include <sys/systm.h>
118 #include <sys/malloc.h>
119 #include <sys/conf.h>
120 #include <sys/priv.h>
121 #include <sys/proc.h>
122 #include <sys/bus.h>
123 #include <sys/rman.h>
124 #include <sys/stat.h>
125 #include <sys/device.h>
126 #include <sys/thread2.h>
127 #include <sys/bus_dma.h>
128 
129 #include <bus/cam/cam.h>
130 #include <bus/cam/cam_ccb.h>
131 #include <bus/cam/cam_sim.h>
132 #include <bus/cam/cam_xpt_sim.h>
133 
134 #include <bus/cam/scsi/scsi_all.h>
135 #include <bus/cam/scsi/scsi_message.h>
136 
137 #include <vm/vm.h>
138 #include <vm/pmap.h>
139 
140 #include <machine/vmparam.h>
141 
142 #include <bus/pci/pcivar.h>
143 #include <bus/pci/pcireg.h>
144 
145 #define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
146 #define	KVTOPHYS(x) vtophys(x)
147 #include	<dev/raid/asr/dptalign.h>
148 #include	<dev/raid/asr/i2oexec.h>
149 #include	<dev/raid/asr/i2obscsi.h>
150 #include	<dev/raid/asr/i2odpt.h>
151 #include	<dev/raid/asr/i2oadptr.h>
152 
153 #include	<dev/raid/asr/sys_info.h>
154 
155 #define	ASR_VERSION	1
156 #define	ASR_REVISION	'1'
157 #define	ASR_SUBREVISION '0'
158 #define	ASR_MONTH	5
159 #define	ASR_DAY		5
160 #define	ASR_YEAR	(2004 - 1980)
161 
162 /*
163  *	Debug macros to reduce the unsightly ifdefs
164  */
#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
/*
 *	Hex-dump an I2O message frame to the console for debugging.
 *	The length (in 32-bit words) is taken from the frame header;
 *	output is eight words per line.
 */
static __inline void
debug_asr_message(PI2O_MESSAGE_FRAME message)
{
	u_int32_t * pointer = (u_int32_t *)message;
	u_int32_t   length = I2O_MESSAGE_FRAME_getMessageSize(message);
	u_int32_t   counter = 0;

	while (length--) {
		/* newline after every 8th word and after the last one */
		kprintf("%08lx%c", (u_long)*(pointer++),
		  (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
	}
}
#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
179 
180 #ifdef DEBUG_ASR
  /* Breaks on non-STDC-based compilers :-( */
182 #define debug_asr_printf(fmt,args...)	kprintf(fmt, ##args)
183 #define debug_asr_dump_message(message)	debug_asr_message(message)
184 #define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
185 #else /* DEBUG_ASR */
186 #define debug_asr_printf(fmt,args...)
187 #define debug_asr_dump_message(message)
188 #define debug_asr_print_path(ccb)
189 #endif /* DEBUG_ASR */
190 
191 /*
192  *	If DEBUG_ASR_CMD is defined:
193  *		0 - Display incoming SCSI commands
194  *		1 - add in a quick character before queueing.
195  *		2 - add in outgoing message frames.
196  */
197 #if (defined(DEBUG_ASR_CMD))
198 #define debug_asr_cmd_printf(fmt,args...)     kprintf(fmt,##args)
199 static __inline void
200 debug_asr_dump_ccb(union ccb *ccb)
201 {
202 	u_int8_t	*cp = (unsigned char *)&(ccb->csio.cdb_io);
203 	int		len = ccb->csio.cdb_len;
204 
205 	while (len) {
206 		debug_asr_cmd_printf (" %02x", *(cp++));
207 		--len;
208 	}
209 }
210 #if (DEBUG_ASR_CMD > 0)
211 #define debug_asr_cmd1_printf		       debug_asr_cmd_printf
212 #else
213 #define debug_asr_cmd1_printf(fmt,args...)
214 #endif
215 #if (DEBUG_ASR_CMD > 1)
216 #define debug_asr_cmd2_printf			debug_asr_cmd_printf
217 #define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
218 #else
219 #define debug_asr_cmd2_printf(fmt,args...)
220 #define debug_asr_cmd2_dump_message(message)
221 #endif
222 #else /* DEBUG_ASR_CMD */
223 #define debug_asr_cmd_printf(fmt,args...)
224 #define debug_asr_dump_ccb(ccb)
225 #define debug_asr_cmd1_printf(fmt,args...)
226 #define debug_asr_cmd2_printf(fmt,args...)
227 #define debug_asr_cmd2_dump_message(message)
228 #endif /* DEBUG_ASR_CMD */
229 
230 #if (defined(DEBUG_ASR_USR_CMD))
231 #define debug_usr_cmd_printf(fmt,args...)   kprintf(fmt,##args)
232 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
233 #else /* DEBUG_ASR_USR_CMD */
234 #define debug_usr_cmd_printf(fmt,args...)
235 #define debug_usr_cmd_dump_message(message)
236 #endif /* DEBUG_ASR_USR_CMD */
237 
238 #ifdef ASR_IOCTL_COMPAT
239 #define	dsDescription_size 46	/* Snug as a bug in a rug */
240 #endif /* ASR_IOCTL_COMPAT */
241 
242 #include "dev/raid/asr/dptsig.h"
243 
/*
 *	DPT driver signature block, consumed by dptsig-aware management
 *	tools.  The embedded OS version string is patched by asr_attach
 *	at runtime (see the caret comment below).
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
254 
255 /* Configuration Definitions */
256 
257 #define	SG_SIZE		 58	/* Scatter Gather list Size		 */
258 #define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
259 #define	MAX_LUN		 255	/* Maximum LUN Supported		 */
260 #define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
261 #define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
262 #define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
263 #define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
264 #define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
265 				/* Also serves as the minimum map for	 */
266 				/* the 2005S zero channel RAID product	 */
267 
268 /* I2O register set */
269 #define	I2O_REG_STATUS		0x30
270 #define	I2O_REG_MASK		0x34
271 #define	I2O_REG_TOFIFO		0x40
272 #define	I2O_REG_FROMFIFO	0x44
273 
274 #define	Mask_InterruptsDisabled	0x08
275 
276 /*
277  * A MIX of performance and space considerations for TID lookups
278  */
typedef u_int16_t tid_t;	/* I2O target ID (TID) */

/* LUN -> TID map; TID[] is a variable-length tail, allocated oversize. */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];
} lun2tid_t;

/* Target -> per-LUN map; LUN[] is a variable-length tail, allocated oversize. */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
290 
291 /*
292  *	To ensure that we only allocate and use the worst case ccb here, lets
293  *	make our own local ccb union. If asr_alloc_ccb is utilized for another
294  *	ccb type, ensure that you add the additional structures into our local
295  *	ccb union. To ensure strict type checking, we will utilize the local
296  *	ccb definition wherever possible.
297  */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;
	struct ccb_setasync csa;
};

/*
 *	Host memory shared with the IOP for polled replies; the physical
 *	addresses ha_status_phys / ha_rstatus_phys (see Asr_softc) point
 *	at these two fields respectively.
 */
struct Asr_status_mem {
	I2O_EXEC_STATUS_GET_REPLY	status;
	U32				rstatus;
};
308 
309 /**************************************************************************
310 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
311 **  Is Configured Into The System.  The Structure Supplies Configuration **
312 **  Information, Status Info, Queue Info And An Active CCB List Pointer. **
313 ***************************************************************************/
314 
typedef struct Asr_softc {
	device_t		ha_dev;
	u_int16_t		ha_irq;
	u_long			ha_Base;       /* base port for each board */
	bus_size_t		ha_blinkLED;
	bus_space_handle_t	ha_i2o_bhandle;
	bus_space_tag_t		ha_i2o_btag;
	bus_space_handle_t	ha_frame_bhandle;
	bus_space_tag_t		ha_frame_btag;
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;	       /* ccbs in use		   */

	bus_dma_tag_t		ha_parent_dmat;
	bus_dma_tag_t		ha_statusmem_dmat;
	bus_dmamap_t		ha_statusmem_dmamap;
	struct Asr_status_mem * ha_statusmem;
	u_int32_t		ha_rstatus_phys;  /* phys addr of rstatus word */
	u_int32_t		ha_status_phys;	  /* phys addr of status reply */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
	PI2O_LCT		ha_LCT;	       /* Complete list of devices */
/* Shorthand accessors into the IdentityTag bytes of an LCT entry */
#define le_type	  IdentityTag[0]
#define I2O_BSA	    0x20
#define I2O_FCA	    0x40
#define I2O_SCSI    0x00
#define I2O_PORT    0x80
#define I2O_UNKNOWN 0x7F
#define le_bus	  IdentityTag[1]
#define le_target IdentityTag[2]
#define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;   /* adapter state, one of: */
#define HA_OPERATIONAL	    0
#define HA_IN_RESET	    1
#define HA_OFF_LINE	    2
#define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;     /* Maximum bus */
	u_int8_t		ha_MaxId;      /* Maximum target ID */
	u_int8_t		ha_MaxLun;     /* Maximum target LUN */
	u_int8_t		ha_SgSize;     /* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;  /* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;       /* HBA list */
	struct cdev *ha_devt;
} Asr_softc_t;
374 
375 static Asr_softc_t *Asr_softc_list;
376 
377 /*
378  *	Prototypes of the routines we have in this object.
379  */
380 
381 /* I2O HDM interface */
382 static int	asr_probe(device_t dev);
383 static int	asr_attach(device_t dev);
384 
385 static d_ioctl_t asr_ioctl;
386 static d_open_t asr_open;
387 static d_close_t asr_close;
388 static int	asr_intr(Asr_softc_t *sc);
389 static void	asr_timeout(void *arg);
390 static int	ASR_init(Asr_softc_t *sc);
391 static int	ASR_acquireLct(Asr_softc_t *sc);
392 static int	ASR_acquireHrt(Asr_softc_t *sc);
393 static void	asr_action(struct cam_sim *sim, union ccb *ccb);
394 static void	asr_poll(struct cam_sim *sim);
395 static int	ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
396 
397 /*
398  *	Here is the auto-probe structure used to nest our tests appropriately
399  *	during the startup phase of the operating system.
400  */
/* newbus glue: only probe and attach are implemented. */
static device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	DEVMETHOD_END
};

static driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

static devclass_t asr_devclass;
DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, NULL, NULL);
MODULE_VERSION(asr, 1);
MODULE_DEPEND(asr, pci, 1, 1, 1);
MODULE_DEPEND(asr, cam, 1, 1, 1);

/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 */
static struct dev_ops asr_ops = {
	{ "asr", 0, 0 },
	.d_open =	asr_open,
	.d_close =	asr_close,
	.d_ioctl =	asr_ioctl,
};
430 
431 /* I2O support routines */
432 
433 static __inline u_int32_t
434 asr_get_FromFIFO(Asr_softc_t *sc)
435 {
436 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
437 				 I2O_REG_FROMFIFO));
438 }
439 
440 static __inline u_int32_t
441 asr_get_ToFIFO(Asr_softc_t *sc)
442 {
443 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
444 				 I2O_REG_TOFIFO));
445 }
446 
447 static __inline u_int32_t
448 asr_get_intr(Asr_softc_t *sc)
449 {
450 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
451 				 I2O_REG_MASK));
452 }
453 
454 static __inline u_int32_t
455 asr_get_status(Asr_softc_t *sc)
456 {
457 	return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
458 				 I2O_REG_STATUS));
459 }
460 
461 static __inline void
462 asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
463 {
464 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
465 			  val);
466 }
467 
468 static __inline void
469 asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
470 {
471 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
472 			  val);
473 }
474 
475 static __inline void
476 asr_set_intr(Asr_softc_t *sc, u_int32_t val)
477 {
478 	bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
479 			  val);
480 }
481 
482 static __inline void
483 asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
484 {
485 	bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
486 				 offset, (u_int32_t *)frame, len);
487 }
488 
489 /*
490  *	Fill message with default.
491  */
492 static PI2O_MESSAGE_FRAME
493 ASR_fillMessage(void *Message, u_int16_t size)
494 {
495 	PI2O_MESSAGE_FRAME Message_Ptr;
496 
497 	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
498 	bzero(Message_Ptr, size);
499 	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
500 	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
501 	  (size + sizeof(U32) - 1) >> 2);
502 	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
503 	KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
504 	return (Message_Ptr);
505 } /* ASR_fillMessage */
506 
507 #define	EMPTY_QUEUE (0xffffffff)
508 
509 static __inline U32
510 ASR_getMessage(Asr_softc_t *sc)
511 {
512 	U32	MessageOffset;
513 
514 	MessageOffset = asr_get_ToFIFO(sc);
515 	if (MessageOffset == EMPTY_QUEUE)
516 		MessageOffset = asr_get_ToFIFO(sc);
517 
518 	return (MessageOffset);
519 } /* ASR_getMessage */
520 
/*
 * Issue a polled command: copy `Message` into a free adapter frame and
 * post it, leaving adapter interrupts disabled.  Returns the previous
 * interrupt mask (which the caller must restore via asr_set_intr), or
 * 0xffffffff if no free frame appeared within the ~15s poll budget.
 */
static U32
ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32	Mask = 0xffffffff;
	U32	MessageOffset;
	u_int	Delay = 1500;	/* 1500 * 10ms = 15s worst case */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		/*
		 *	Disable the Interrupts
		 */
		Mask = asr_get_intr(sc);
		asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
		asr_set_ToFIFO(sc, MessageOffset);
	}
	return (Mask);
} /* ASR_initiateCp */
550 
/*
 *	Reset the adapter.
 *
 *	Returns the (non-zero) reply status word written by the IOP, or 0
 *	if no free inbound frame could be obtained.  The interrupt mask
 *	saved by ASR_initiateCp is restored before returning.
 */
static U32
ASR_resetIOP(Asr_softc_t *sc)
{
	I2O_EXEC_IOP_RESET_MESSAGE	 Message;
	PI2O_EXEC_IOP_RESET_MESSAGE	 Message_Ptr;
	U32			       * Reply_Ptr;
	U32				 Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 *  Reset the Reply Status
	 */
	Reply_Ptr = &sc->ha_statusmem->rstatus;
	*Reply_Ptr = 0;
	/* The IOP writes its reset status word to this physical address. */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	    sc->ha_rstatus_phys);
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	     0xffffffff) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;	/* 200 * 10ms = 2s poll budget */

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
		return(*Reply_Ptr);
	}
	KASSERT(Old != 0xffffffff, ("Old == -1"));
	return (0);
} /* ASR_resetIOP */
599 
/*
 *	Get the current state of the adapter.
 *
 *	Returns a pointer to the EXEC_STATUS_GET reply in shared status
 *	memory, or NULL if the adapter did not respond in time (or no
 *	inbound frame was available).
 */
static PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus(Asr_softc_t *sc)
{
	I2O_EXEC_STATUS_GET_MESSAGE	Message;
	PI2O_EXEC_STATUS_GET_MESSAGE	Message_Ptr;
	PI2O_EXEC_STATUS_GET_REPLY	buffer;
	U32				Old;

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
	    sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	    I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	    sc->ha_status_phys);
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	    sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *  Reset the Reply Status
	 */
	buffer = &sc->ha_statusmem->status;
	bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		/*
		 *	Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 255ms
		 * (raised from 50ms in V1.08: the 2000S/2005S need longer
		 * on some machines — see changelog above).
		 */
		u_int8_t Delay = 255;	/* 255 * 1ms poll budget */

		/* volatile read: the adapter sets SyncByte when the reply lands */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = NULL;	/* timed out */
				break;
			}
			DELAY (1000);
		}
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		return (buffer);
	}
	return (NULL);
} /* ASR_getStatus */
654 
655 /*
656  *	Check if the device is a SCSI I2O HBA, and add it to the list.
657  */
658 
/*
 * Probe for the ASR controller.  If we find it, we will use it.
 * (Covers both the plug-in adapters and the zero-channel RAID
 * virtual adapters.)
 */
663 static int
664 asr_probe(device_t dev)
665 {
666 	u_int32_t id;
667 
668 	id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
669 	if ((id == 0xA5011044) || (id == 0xA5111044)) {
670 		device_set_desc(dev, "Adaptec Caching SCSI RAID");
671 		return (BUS_PROBE_DEFAULT);
672 	}
673 	return (ENXIO);
674 } /* asr_probe */
675 
676 static __inline union asr_ccb *
677 asr_alloc_ccb(Asr_softc_t *sc)
678 {
679 	union asr_ccb *new_ccb;
680 
681 	if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
682 	  M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
683 		new_ccb->ccb_h.pinfo.priority = 1;
684 		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
685 		new_ccb->ccb_h.spriv_ptr0 = sc;
686 	}
687 	return (new_ccb);
688 } /* asr_alloc_ccb */
689 
690 static __inline void
691 asr_free_ccb(union asr_ccb *free_ccb)
692 {
693 	kfree(free_ccb, M_DEVBUF);
694 } /* asr_free_ccb */
695 
696 /*
697  *	Print inquiry data `carefully'
698  */
699 static void
700 ASR_prstring(u_int8_t *s, int len)
701 {
702 	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
703 		kprintf ("%c", *(s++));
704 	}
705 } /* ASR_prstring */
706 
/*
 *	Send a message synchronously and without Interrupt to a ccb.
 *
 *	Runs inside a critical section with adapter interrupts masked,
 *	then polls asr_intr() until the ccb leaves CAM_REQ_INPROG.  The
 *	previous interrupt mask is restored before returning the final
 *	ccb status.
 */
static int
ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
{
	U32		Mask;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	crit_enter();
	Mask = asr_get_intr(sc);
	asr_set_intr(sc, Mask | Mask_InterruptsDisabled);

	if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
		/* No free inbound frame; have CAM requeue this request. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	asr_set_intr(sc, Mask);
	crit_exit();

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
745 
746 /*
747  *	Send a message synchronously to an Asr_softc_t.
748  */
749 static int
750 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
751 {
752 	union asr_ccb	*ccb;
753 	int		status;
754 
755 	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
756 		return (CAM_REQUEUE_REQ);
757 	}
758 
759 	status = ASR_queue_s (ccb, Message);
760 
761 	asr_free_ccb(ccb);
762 
763 	return (status);
764 } /* ASR_queue_c */
765 
/*
 *	Add the specified ccb to the active queue and arm its timeout
 *	callout.  The list insert and callout_reset must be atomic with
 *	respect to the interrupt path, hence the critical section.
 */
static __inline void
ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flushes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		callout_reset(&ccb->ccb_h.timeout_ch,
		    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */
788 
/*
 *	Remove the specified ccb from the active queue.
 *	The timeout callout is cancelled before unlinking so asr_timeout
 *	cannot fire on a ccb that is no longer on the list.
 */
static __inline void
ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
{
	crit_enter();
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */
800 
/*
 *	Fail all the active commands, so they get re-issued by the operating
 *	system.  Each ccb is removed from the active list, marked
 *	CAM_REQUEUE_REQ with zero data transferred, and either completed
 *	via xpt_done (normal path) or woken directly (pathless internal
 *	command blocked in ASR_queue_s).
 */
static void
ASR_failActiveCommands(Asr_softc_t *sc)
{
	struct ccb_hdr	*ccb;

	crit_enter();
	/*
	 *	We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* internal (pathless) command; unblock its waiter */
			wakeup (ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */
838 
/*
 *	The following command causes the HBA to reset the specific bus:
 *	walk the LCT for the bus-port entry matching `bus` and fire an
 *	asynchronous I2O_HBA_BUS_RESET at its TID.  Silently does nothing
 *	if no matching port entry exists in the LCT.
 */
static void
ASR_resetBus(Asr_softc_t *sc, int bus)
{
	I2O_HBA_BUS_RESET_MESSAGE	Message;
	I2O_HBA_BUS_RESET_MESSAGE	*Message_Ptr;
	PI2O_LCT_ENTRY			Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Scan every LCT entry (table size is in 32-bit words). */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
867 
868 static __inline int
869 ASR_getBlinkLedCode(Asr_softc_t *sc)
870 {
871 	U8	blink;
872 
873 	if (sc == NULL)
874 		return (0);
875 
876 	blink = bus_space_read_1(sc->ha_frame_btag,
877 				 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
878 	if (blink != 0xBC)
879 		return (0);
880 
881 	blink = bus_space_read_1(sc->ha_frame_btag,
882 				 sc->ha_frame_bhandle, sc->ha_blinkLED);
883 	return (blink);
884 } /* ASR_getBlinkCode */
885 
/*
 *	Determine the address of a TID lookup. Must be done at high priority
 *	since the address can be changed by other threads of execution.
 *
 *	Returns NULL pointer if not indexable (but will attempt to generate
 *	an index if `new_entry' flag is set to TRUE).
 *
 *	All addressable entries are to be guaranteed zero if never initialized.
 */
static tid_t *
ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
{
	target2lun_t	*bus_ptr;	/* per-bus table: target -> lun list */
	lun2tid_t	*target_ptr;	/* per-target table: lun -> TID    */
	unsigned	new_size;

	/*
	 *	Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return (NULL);
	}
	/*
	 *	See if there is an associated bus list.
	 *
	 *	for performance, allocate in size of BUS_CHUNK chunks.
	 *	BUS_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define BUS_CHUNK 8
	new_size = roundup2(target, BUS_CHUNK);
	if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 *
		 *	If new_entry is FALSE this is a pure lookup and we
		 * never allocate; absence of a table means "no TID".
		 *
		 *	NOTE(review): kmalloc() with M_WAITOK should not
		 * return NULL on DragonFly, so the NULL branch here looks
		 * purely defensive — confirm before relying on it.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO))
		   == NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return (NULL);
		}
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)kmalloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		    + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		kfree(bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 *	We now have the bus list, lets get to the target list.
	 *	Since most systems have only *one* lun, we do not allocate
	 *	in chunks as above, here we allow one, then in chunk sizes.
	 *	TARGET_CHUNK must be a power of two. This is to reduce
	 *	fragmentation effects on the allocations.
	 */
#define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		/* lun > 0: size the table in whole chunks */
		new_size = roundup2(lun, TARGET_CHUNK);
	}
	if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
		/*
		 *	Allocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return (NULL);
		}
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 *	Reallocate a new structure?
		 *		Since one element in structure, the +1
		 *		needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)kmalloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return (NULL);
		}
		/*
		 *	Copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
		    + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		kfree(target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 *	Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1024 
1025 /*
1026  *	Get a pre-existing TID relationship.
1027  *
1028  *	If the TID was never set, return (tid_t)-1.
1029  *
1030  *	should use mutex rather than spl.
1031  */
1032 static __inline tid_t
1033 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1034 {
1035 	tid_t	*tid_ptr;
1036 	tid_t	retval;
1037 
1038 	crit_enter();
1039 	if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1040 	/* (tid_t)0 or (tid_t)-1 indicate no TID */
1041 	 || (*tid_ptr == (tid_t)0)) {
1042 		crit_exit();
1043 		return ((tid_t)-1);
1044 	}
1045 	retval = *tid_ptr;
1046 	crit_exit();
1047 	return (retval);
1048 } /* ASR_getTid */
1049 
1050 /*
1051  *	Set a TID relationship.
1052  *
1053  *	If the TID was not set, return (tid_t)-1.
1054  *
1055  *	should use mutex rather than spl.
1056  */
1057 static __inline tid_t
1058 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t	TID)
1059 {
1060 	tid_t	*tid_ptr;
1061 
1062 	if (TID != (tid_t)-1) {
1063 		if (TID == 0) {
1064 			return ((tid_t)-1);
1065 		}
1066 		crit_enter();
1067 		if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1068 		 == NULL) {
1069 			crit_exit();
1070 			return ((tid_t)-1);
1071 		}
1072 		*tid_ptr = TID;
1073 		crit_exit();
1074 	}
1075 	return (TID);
1076 } /* ASR_setTid */
1077 
1078 /*-------------------------------------------------------------------------*/
1079 /*		      Function ASR_rescan				   */
1080 /*-------------------------------------------------------------------------*/
1081 /* The Parameters Passed To This Function Are :				   */
1082 /*     Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
1083 /*									   */
1084 /* This Function Will rescan the adapter and resynchronize any data	   */
1085 /*									   */
1086 /* Return : 0 For OK, Error Code Otherwise				   */
1087 /*-------------------------------------------------------------------------*/
1088 
static int
ASR_rescan(Asr_softc_t *sc)
{
	int bus;
	int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;	/* events deferred to end of bus scan */

		/*
		 *	Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t	       TID = (tid_t)-1;
				tid_t	       LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 *
				 * NOTE(review): on the success branch the
				 * created path does not appear to be released
				 * with xpt_free_path() — verify whether this
				 * leaks a path reference per changed device.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						/* No path: fall back to a
						 * bus-wide event, sent below. */
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* Device vanished */
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* Device appeared */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							/* Device replaced */
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 *	We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 *	The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1218 
1219 /*-------------------------------------------------------------------------*/
1220 /*		      Function ASR_reset				   */
1221 /*-------------------------------------------------------------------------*/
1222 /* The Parameters Passed To This Function Are :				   */
1223 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
1224 /*									   */
1225 /* This Function Will reset the adapter and resynchronize any data	   */
1226 /*									   */
1227 /* Return : None							   */
1228 /*-------------------------------------------------------------------------*/
1229 
1230 static int
1231 ASR_reset(Asr_softc_t *sc)
1232 {
1233 	int retVal;
1234 
1235 	crit_enter();
1236 	if ((sc->ha_in_reset == HA_IN_RESET)
1237 	 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1238 		crit_exit();
1239 		return (EBUSY);
1240 	}
1241 	/*
1242 	 *	Promotes HA_OPERATIONAL to HA_IN_RESET,
1243 	 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1244 	 */
1245 	++(sc->ha_in_reset);
1246 	if (ASR_resetIOP(sc) == 0) {
1247 		debug_asr_printf ("ASR_resetIOP failed\n");
1248 		/*
1249 		 *	We really need to take this card off-line, easier said
1250 		 * than make sense. Better to keep retrying for now since if a
1251 		 * UART cable is connected the blinkLEDs the adapter is now in
1252 		 * a hard state requiring action from the monitor commands to
1253 		 * the HBA to continue. For debugging waiting forever is a
1254 		 * good thing. In a production system, however, one may wish
1255 		 * to instead take the card off-line ...
1256 		 */
1257 		/* Wait Forever */
1258 		while (ASR_resetIOP(sc) == 0);
1259 	}
1260 	retVal = ASR_init (sc);
1261 	crit_exit();
1262 	if (retVal != 0) {
1263 		debug_asr_printf ("ASR_init failed\n");
1264 		sc->ha_in_reset = HA_OFF_LINE;
1265 		return (ENXIO);
1266 	}
1267 	if (ASR_rescan (sc) != 0) {
1268 		debug_asr_printf ("ASR_rescan failed\n");
1269 	}
1270 	ASR_failActiveCommands (sc);
1271 	if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1272 		kprintf ("asr%d: Brining adapter back on-line\n",
1273 		  sc->ha_path[0]
1274 		    ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1275 		    : 0);
1276 	}
1277 	sc->ha_in_reset = HA_OPERATIONAL;
1278 	return (0);
1279 } /* ASR_reset */
1280 
1281 /*
1282  *	Device timeout handler.
1283  */
static void
asr_timeout(void *arg)
{
	union asr_ccb	*ccb = (union asr_ccb *)arg;
	Asr_softc_t	*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;	/* Blink-LED code, 0 when adapter healthy */

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 *	Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		return;
	}
	/*
	 *	Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	crit_enter();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		/* Second timeout on this ccb: escalate to adapter reset. */
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Reset failed; rearm the timer to retry later. */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		crit_exit();
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	/* Mark the ccb so the next timeout knows this is the second strike. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
		      asr_timeout, ccb);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	crit_exit();
} /* asr_timeout */
1334 
1335 /*
1336  * send a message asynchronously
1337  */
static int
ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
{
	U32		MessageOffset;
	union asr_ccb	*ccb;

	debug_asr_printf("Host Command Dump:\n");
	debug_asr_dump_message(Message);

	/* The originating ccb (if any) was stashed in the 64-bit
	 * initiator context when the message was built. */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
		/* Copy the frame into the adapter slot, then post it.
		 * The frame must be fully written before the FIFO post. */
		asr_set_frame(sc, Message, MessageOffset,
			      I2O_MESSAGE_FRAME_getMessageSize(Message));
		if (ccb) {
			/* Track the ccb so completion/timeout can find it. */
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		asr_set_ToFIFO(sc, MessageOffset);
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 *	Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset(sc);
		}
	}
	/* EMPTY_QUEUE on failure, otherwise the posted frame offset. */
	return (MessageOffset);
} /* ASR_queue */
1369 
1370 
/*
 * Simple Scatter Gather elements.
 *
 * Fill in simple SG element `Index' of the list at SGL with byte count
 * Size, the given Flags (OR'ed with SIMPLE_ADDRESS_ELEMENT), and the
 * physical address of Buffer (0 when Buffer is NULL).
 *
 * NOTE(review): multi-statement, non-do/while macro; SGL and Buffer are
 * evaluated more than once — callers must pass side-effect-free
 * expressions and brace any conditional use.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
1382 
1383 /*
1384  *	Retrieve Parameter Group.
1385  */
static void *
ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
	      unsigned BufferSize)
{
	/* UtilParamsGet frame followed by room for two simple SG elements
	 * and the operations list, all built on the stack. */
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char
		   F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	}				Message;
	struct Operations		*Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	*Message_Ptr;
	/* Expected layout of the caller's reply buffer. */
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				*Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	/* Operations list lives just past the two SG elements. */
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero(Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation requesting all fields of `Group'. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	Buffer_Ptr = (struct ParamBuffer *)Buffer;
	bzero(Buffer_Ptr, BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 *  Set up the buffers as scatter gather elements.
	 *  Element 0: outbound operations list; element 1: reply buffer.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	/* Issue synchronously; success requires at least one result. */
	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		/* Returns a pointer into the caller-supplied Buffer. */
		return ((void *)(Buffer_Ptr->Info));
	}
	return (NULL);
} /* ASR_getParams */
1449 
1450 /*
1451  *	Acquire the LCT information.
1452  */
static int
ASR_acquireLct(Asr_softc_t *sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;	/* walks the new LCT buffer */
	int				len;	/* bytes of LCT remaining   */
	I2O_LCT				Table, *TableP = &Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 *	sc value assumed valid
	 *
	 *	Phase 1: issue an LCT_NOTIFY into a minimal on-stack table
	 * just to learn the real table size.  Phase 2: allocate the full
	 * table, describe it as an SG list (it may span pages), and issue
	 * the request again.  Finally classify each entry.
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
	    MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	    (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
	    sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	    I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	    I2O_CLASS_MATCH_ANYCLASS);
	/*
	 *	Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, TableP,
	  sizeof(I2O_LCT));
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 *	Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		kfree(sc->ha_LCT, M_TEMP);
	}
	/*
	 *	malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		kfree(Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 *	since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 *	Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: terminate the SG chain. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the frame: copy the part built so far and
			 * re-point sg into the new, larger copy. */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == NULL) {
				kfree(sc->ha_LCT, M_TEMP);
				sc->ha_LCT = NULL;
				kfree(Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy(Message_Ptr, NewMessage_Ptr, span);
			kfree(Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		kfree(Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 *	Classify every LCT entry and fill in its bus/target/lun.
	 * Port-class entries take the ControllerInfo branch and then hit
	 * the default: continue, so only BSA/SCSI/FCA peripherals reach
	 * the DeviceInfo block below.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			} Buffer;
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    &Buffer, sizeof(struct ControllerInfo))) == NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			} Buffer;
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    &Buffer, sizeof(struct DeviceInfo))) == NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number the adapter reports. */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 *	A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1696 
1697 /*
1698  * Initialize a message frame.
1699  * We assume that the CDB has already been set up, so all we do here is
1700  * generate the Scatter Gather list.
1701  */
static PI2O_MESSAGE_FRAME
ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME	Message)
{
	PI2O_MESSAGE_FRAME	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	Asr_softc_t		*sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	vm_size_t		size, len;
	caddr_t			v;
	U32			MessageSize;
	int			next, span, base, rw;
	int			target = ccb->ccb_h.target_id;
	int			lun = ccb->ccb_h.target_lun;
	int			bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
	tid_t			TID;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
	bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
	      sizeof(I2O_SG_ELEMENT)));

	/* Cache miss: resolve the TID from the LCT and remember it. */
	if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
		PI2O_LCT_ENTRY Device;

		TID = 0;
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		    (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
		    ++Device) {
			if ((Device->le_type != I2O_UNKNOWN)
			 && (Device->le_bus == bus)
			 && (Device->le_target == target)
			 && (Device->le_lun == lun)
			 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
				TID = I2O_LCT_ENTRY_getLocalTID(Device);
				ASR_setTid(sc, Device->le_bus,
					   Device->le_target, Device->le_lun,
					   TID);
				break;
			}
		}
	}
	/* No such device in the LCT: caller gets NULL. */
	if (TID == (tid_t)0) {
		return (NULL);
	}
	I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy(&(ccb->csio.cdb_io),
	    ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
	    ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	/* rw != 0 means host-to-device direction. */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :	      (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	/* NOTE(review): if dxfer_len is an unsigned type this assertion is
	 * trivially true — confirm against the ccb_scsiio definition. */
	KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/* One simple SG element per physically-contiguous run, capped at
	 * SG_SIZE elements within the frame. */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
1851 
1852 /*
1853  *	Reset the adapter.
1854  */
static U32
ASR_initOutBound(Asr_softc_t *sc)
{
	/* EXEC_OUTBOUND_INIT frame with one trailing U32 used as the
	 * polled reply/status word. */
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;
	}				Message;
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	Message_Ptr;
	U32				*volatile Reply_Ptr;
	U32				Old;	/* saved interrupt mask */

	/*
	 *  Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 *  Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 *	Send the Message out
	 */
	if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
	    0xffffffff) {
		u_long size, addr;

		/*
		 *	Wait for a response (Poll).
		 *
		 *	NOTE(review): this busy-wait has no timeout; a dead
		 * adapter would spin here forever — confirm that is the
		 * intended (init-time only) behavior.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 *	Re-enable the interrupts.
		 */
		asr_set_intr(sc, Old);
		/*
		 *	Populate the outbound table.
		 */
		if (sc->ha_Msgs == NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 *	contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
				bzero(sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		/* Hand every reply-frame physical address to the adapter. */
		if (sc->ha_Msgs != NULL)
			for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
			    size; --size) {
				asr_set_FromFIFO(sc, addr);
				addr +=
				    sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
			}
		return (*Reply_Ptr);
	}
	/* ASR_initiateCp failed: report status 0. */
	return (0);
} /* ASR_initOutBound */
1931 
1932 /*
1933  *	Set the system table
1934  */
1935 static int
1936 ASR_setSysTab(Asr_softc_t *sc)
1937 {
1938 	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1939 	PI2O_SET_SYSTAB_HEADER	      SystemTable;
1940 	Asr_softc_t		    * ha;
1941 	PI2O_SGE_SIMPLE_ELEMENT	      sg;
1942 	int			      retVal;
1943 
1944 	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
1945 	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1946 		return (ENOMEM);
1947 	}
1948 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1949 		++SystemTable->NumberEntries;
1950 	}
1951 	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
1952 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1953 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1954 	  M_TEMP, M_WAITOK)) == NULL) {
1955 		kfree(SystemTable, M_TEMP);
1956 		return (ENOMEM);
1957 	}
1958 	(void)ASR_fillMessage((void *)Message_Ptr,
1959 	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1960 	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1961 	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1962 	  (I2O_VERSION_11 +
1963 	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1964 			/ sizeof(U32)) << 4)));
1965 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1966 	  I2O_EXEC_SYS_TAB_SET);
1967 	/*
1968 	 *	Call the LCT table to determine the number of device entries
1969 	 * to reserve space for.
1970 	 *	since this code is reused in several systems, code efficiency
1971 	 * is greater by using a shift operation rather than a divide by
1972 	 * sizeof(u_int32_t).
1973 	 */
1974 	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
1975 	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
1976 	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
1977 	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
1978 	++sg;
1979 	for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1980 		SG(sg, 0,
1981 		  ((ha->ha_next)
1982 		    ? (I2O_SGL_FLAGS_DIR)
1983 		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
1984 		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
1985 		++sg;
1986 	}
1987 	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1988 	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
1989 	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1990 	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1991 	kfree(Message_Ptr, M_TEMP);
1992 	kfree(SystemTable, M_TEMP);
1993 	return (retVal);
1994 } /* ASR_setSysTab */
1995 
/*
 *	Fetch the Hardware Resource Table (HRT) from the IOP and use the
 * adapter IDs it reports to assign a bus number to each matching LCT
 * entry, raising sc->ha_MaxBus as new busses are discovered.
 *	Returns 0 on success or ENODEV if the HRT could not be read.
 */
static int
ASR_acquireHrt(Asr_softc_t *sc)
{
	I2O_EXEC_HRT_GET_MESSAGE	Message;
	I2O_EXEC_HRT_GET_MESSAGE	*Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	}				Hrt, *HrtP = &Hrt;
	u_int8_t			NumberOfEntries;
	PI2O_HRT_ENTRY			Entry;

	bzero(&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 *  Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  HrtP, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the number of entries our local buffer can hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Match LCT entries to this HRT entry by TID (low 12 bits). */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number is in the AdapterID's high word. */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2053 
2054 /*
2055  *	Enable the adapter.
2056  */
2057 static int
2058 ASR_enableSys(Asr_softc_t *sc)
2059 {
2060 	I2O_EXEC_SYS_ENABLE_MESSAGE	Message;
2061 	PI2O_EXEC_SYS_ENABLE_MESSAGE	Message_Ptr;
2062 
2063 	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2064 	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2065 	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2066 	  I2O_EXEC_SYS_ENABLE);
2067 	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2068 } /* ASR_enableSys */
2069 
2070 /*
2071  *	Perform the stages necessary to initialize the adapter
2072  */
2073 static int
2074 ASR_init(Asr_softc_t *sc)
2075 {
2076 	return ((ASR_initOutBound(sc) == 0)
2077 	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2078 	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2079 } /* ASR_init */
2080 
2081 /*
2082  *	Send a Synchronize Cache command to the target device.
2083  */
static void
ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 *
	 * NOTE(review): the guard below proceeds only when the ccb list is
	 * non-empty, which reads opposite to the comment above -- confirm
	 * the intended semantics of sc->ha_ccb here.
	 */
	if ((sc != NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;

		/* Build a private SCSI SCB Execute frame on the stack. */
		Message_Ptr = &Message;
		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte SYNCHRONIZE CACHE CDB; SCSI-2 lun in CDB[1] bits 5-7. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		/* Re-set the SCB flags, now also marking the data direction. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		/* Queue synchronously; completion status is ignored. */
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2148 
2149 static void
2150 ASR_synchronize(Asr_softc_t *sc)
2151 {
2152 	int bus, target, lun;
2153 
2154 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2155 		for (target = 0; target <= sc->ha_MaxId; ++target) {
2156 			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2157 				ASR_sync(sc,bus,target,lun);
2158 			}
2159 		}
2160 	}
2161 }
2162 
2163 /*
2164  *	Reset the HBA, targets and BUS.
2165  *		Currently this resets *all* the SCSI busses.
2166  */
2167 static __inline void
2168 asr_hbareset(Asr_softc_t *sc)
2169 {
2170 	ASR_synchronize(sc);
2171 	(void)ASR_reset(sc);
2172 } /* asr_hbareset */
2173 
2174 /*
2175  *	A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2176  * limit and a reduction in error checking (in the pre 4.0 case).
2177  */
static int
asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
{
	int		rid;
	u_int32_t	p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 */
	for (rid = 0; rid < 4; rid++) {
		p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
		if ((p & 1) == 0) {	/* bit 0 clear => memory-space BAR */
			break;
		}
	}
	/*
	 *	Give up?
	 */
	if (rid >= 4) {
		rid = 0;
	}
	rid = PCIR_BAR(rid);
	p = pci_read_config(dev, rid, sizeof(p));
	/*
	 * Standard BAR size probe: write all-ones, read back the address
	 * mask (low 4 flag bits stripped), negate to get the size, then
	 * restore the original BAR value.
	 */
	pci_write_config(dev, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
	pci_write_config(dev, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	p &= ~15;	/* strip BAR flag bits to get the base address */
	sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == NULL) {
		return (0);
	}
	sc->ha_Base = rman_get_start(sc->ha_mem_res);
	sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
	sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);

	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
			return (0);
		}
		/* Size and map the second (messaging) BAR the same way. */
		p = pci_read_config(dev, rid, sizeof(p));
		pci_write_config(dev, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
		pci_write_config(dev, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == NULL) {
			return (0);
		}
		sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
		sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
	} else {
		/* Messaging registers share the first mapping. */
		sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
		sc->ha_frame_btag = sc->ha_i2o_btag;
	}
	return (1);
} /* asr_pci_map_mem */
2259 
2260 /*
2261  *	A simplified copy of the real pci_map_int with additional
2262  * registration requirements.
2263  */
static int
asr_pci_map_int(device_t dev, Asr_softc_t *sc)
{
	int rid = 0;

	/* Allocate a shareable IRQ and hook up the interrupt handler. */
	sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	  RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == NULL) {
		return (0);
	}
	if (bus_setup_intr(dev, sc->ha_irq_res, 0,
	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
		return (0);
	}
	/* Record the routed PCI interrupt line. */
	sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
	return (1);	/* success */
} /* asr_pci_map_int */
2281 
2282 static void
2283 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2284 {
2285 	Asr_softc_t *sc;
2286 
2287 	if (error)
2288 		return;
2289 
2290 	sc = (Asr_softc_t *)arg;
2291 
2292 	/* XXX
2293 	 * The status word can be at a 64-bit address, but the existing
2294 	 * accessor macros simply cannot manipulate 64-bit addresses.
2295 	 */
2296 	sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2297 	    offsetof(struct Asr_status_mem, status);
2298 	sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2299 	    offsetof(struct Asr_status_mem, rstatus);
2300 }
2301 
/*
 *	Create the parent and status-memory DMA tags, allocate the shared
 * status memory, and load the map so the adapter can DMA into it (the
 * physical addresses are captured by asr_status_cb()).
 *	Returns 0 on success or ENOMEM, undoing partial work on failure.
 */
static int
asr_alloc_dma(Asr_softc_t *sc)
{
	device_t dev;

	dev = sc->ha_dev;

	/* Parent tag: any 32-bit addressable memory, no alignment limits. */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
			       BUS_SPACE_UNRESTRICTED,	/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_parent_dmat)) {
		device_printf(dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	/* Status tag: one contiguous segment sized for the status block. */
	if (bus_dma_tag_create(sc->ha_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sizeof(sc->ha_statusmem),/* maxsize */
			       1,			/* nsegments */
			       sizeof(sc->ha_statusmem),/* maxsegsize */
			       0,			/* flags */
			       &sc->ha_statusmem_dmat)) {
		device_printf(dev, "Cannot allocate status DMA tag\n");
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
	    BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
		device_printf(dev, "Cannot allocate status memory\n");
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
		bus_dma_tag_destroy(sc->ha_parent_dmat);
		return (ENOMEM);
	}
	/* Single segment, synchronous callback: load cannot be deferred. */
	(void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
	    sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);

	return (0);
}
2350 
/*
 *	Tear down the DMA resources created by asr_alloc_dma(), in
 * reverse order of allocation.  Each step is guarded so this is safe
 * to call after a partial allocation failure.
 */
static void
asr_release_dma(Asr_softc_t *sc)
{

	/* ha_rstatus_phys is only set once the map has been loaded. */
	if (sc->ha_rstatus_phys != 0)
		bus_dmamap_unload(sc->ha_statusmem_dmat,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem != NULL)
		bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
		    sc->ha_statusmem_dmamap);
	if (sc->ha_statusmem_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_statusmem_dmat);
	if (sc->ha_parent_dmat != NULL)
		bus_dma_tag_destroy(sc->ha_parent_dmat);
}
2366 
2367 /*
2368  *	Attach the devices, and virtual devices to the driver list.
2369  */
2370 static int
2371 asr_attach(device_t dev)
2372 {
2373 	PI2O_EXEC_STATUS_GET_REPLY status;
2374 	PI2O_LCT_ENTRY		 Device;
2375 	Asr_softc_t		 *sc, **ha;
2376 	struct scsi_inquiry_data *iq;
2377 	int			 bus, size, unit;
2378 	int			 error;
2379 
2380 	sc = device_get_softc(dev);
2381 	unit = device_get_unit(dev);
2382 	sc->ha_dev = dev;
2383 
2384 	if (Asr_softc_list == NULL) {
2385 		/*
2386 		 *	Fixup the OS revision as saved in the dptsig for the
2387 		 *	engine (dptioctl.h) to pick up.
2388 		 */
2389 		bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2390 	}
2391 	/*
2392 	 *	Initialize the software structure
2393 	 */
2394 	LIST_INIT(&(sc->ha_ccb));
2395 	/* Link us into the HA list */
2396 	for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next))
2397 		;
2398 	*(ha) = sc;
2399 
2400 	/*
2401 	 *	This is the real McCoy!
2402 	 */
2403 	if (!asr_pci_map_mem(dev, sc)) {
2404 		device_printf(dev, "could not map memory\n");
2405 		return(ENXIO);
2406 	}
2407 	/* Enable if not formerly enabled */
2408 	pci_write_config(dev, PCIR_COMMAND,
2409 	    pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2410 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2411 
2412 	sc->ha_pciBusNum = pci_get_bus(dev);
2413 	sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2414 
2415 	if ((error = asr_alloc_dma(sc)) != 0)
2416 		return (error);
2417 
2418 	/* Check if the device is there? */
2419 	if (ASR_resetIOP(sc) == 0) {
2420 		device_printf(dev, "Cannot reset adapter\n");
2421 		asr_release_dma(sc);
2422 		return (EIO);
2423 	}
2424 	status = &sc->ha_statusmem->status;
2425 	if (ASR_getStatus(sc) == NULL) {
2426 		device_printf(dev, "could not initialize hardware\n");
2427 		asr_release_dma(sc);
2428 		return(ENODEV);
2429 	}
2430 	sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2431 	sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2432 	sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2433 	sc->ha_SystemTable.IopState = status->IopState;
2434 	sc->ha_SystemTable.MessengerType = status->MessengerType;
2435 	sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2436 	sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2437 	    (U32)(sc->ha_Base + I2O_REG_TOFIFO);	/* XXX 64-bit */
2438 
2439 	if (!asr_pci_map_int(dev, (void *)sc)) {
2440 		device_printf(dev, "could not map interrupt\n");
2441 		asr_release_dma(sc);
2442 		return(ENXIO);
2443 	}
2444 
2445 	/* Adjust the maximim inbound count */
2446 	if (((sc->ha_QueueSize =
2447 	    I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2448 	    MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2449 		sc->ha_QueueSize = MAX_INBOUND;
2450 	}
2451 
2452 	/* Adjust the maximum outbound count */
2453 	if (((sc->ha_Msgs_Count =
2454 	    I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2455 	    MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2456 		sc->ha_Msgs_Count = MAX_OUTBOUND;
2457 	}
2458 	if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2459 		sc->ha_Msgs_Count = sc->ha_QueueSize;
2460 	}
2461 
2462 	/* Adjust the maximum SG size to adapter */
2463 	if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2464 	    2)) > MAX_INBOUND_SIZE) {
2465 		size = MAX_INBOUND_SIZE;
2466 	}
2467 	sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2468 	  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2469 
2470 	/*
2471 	 *	Only do a bus/HBA reset on the first time through. On this
2472 	 * first time through, we do not send a flush to the devices.
2473 	 */
2474 	if (ASR_init(sc) == 0) {
2475 		struct BufferInfo {
2476 			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
2477 			I2O_PARAM_READ_OPERATION_RESULT	    Read;
2478 			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
2479 		} Buffer;
2480 		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2481 #define FW_DEBUG_BLED_OFFSET 8
2482 
2483 		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2484 		    ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2485 		    &Buffer, sizeof(struct BufferInfo))) != NULL) {
2486 			sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2487 			    I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2488 		}
2489 		if (ASR_acquireLct(sc) == 0) {
2490 			(void)ASR_acquireHrt(sc);
2491 		}
2492 	} else {
2493 		device_printf(dev, "failed to initialize\n");
2494 		asr_release_dma(sc);
2495 		return(ENXIO);
2496 	}
2497 	/*
2498 	 *	Add in additional probe responses for more channels. We
2499 	 * are reusing the variable `target' for a channel loop counter.
2500 	 * Done here because of we need both the acquireLct and
2501 	 * acquireHrt data.
2502 	 */
2503 	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2504 	    (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2505 		if (Device->le_type == I2O_UNKNOWN) {
2506 			continue;
2507 		}
2508 		if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2509 			if (Device->le_target > sc->ha_MaxId) {
2510 				sc->ha_MaxId = Device->le_target;
2511 			}
2512 			if (Device->le_lun > sc->ha_MaxLun) {
2513 				sc->ha_MaxLun = Device->le_lun;
2514 			}
2515 		}
2516 		if (((Device->le_type & I2O_PORT) != 0)
2517 		 && (Device->le_bus <= MAX_CHANNEL)) {
2518 			/* Do not increase MaxId for efficiency */
2519 			sc->ha_adapter_target[Device->le_bus] =
2520 			    Device->le_target;
2521 		}
2522 	}
2523 
2524 	/*
2525 	 *	Print the HBA model number as inquired from the card.
2526 	 */
2527 
2528 	device_printf(dev, " ");
2529 
2530 	if ((iq = (struct scsi_inquiry_data *)kmalloc(
2531 	    sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2532 	    NULL) {
2533 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message;
2534 		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	Message_Ptr;
2535 		int					posted = 0;
2536 
2537 		Message_Ptr = &Message;
2538 		bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2539 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2540 
2541 		I2O_MESSAGE_FRAME_setVersionOffset(
2542 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2543 		    (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2544 		    - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2545 		I2O_MESSAGE_FRAME_setMessageSize(
2546 		    (PI2O_MESSAGE_FRAME)Message_Ptr,
2547 		    (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2548 		    sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2549 		    sizeof(U32));
2550 		I2O_MESSAGE_FRAME_setInitiatorAddress(
2551 		    (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2552 		I2O_MESSAGE_FRAME_setFunction(
2553 		    (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2554 		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2555 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2556 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2557 		    I2O_SCB_FLAG_ENABLE_DISCONNECT
2558 		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2559 		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2560 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2561 		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2562 		    (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2563 		    DPT_ORGANIZATION_ID);
2564 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2565 		Message_Ptr->CDB[0] = INQUIRY;
2566 		Message_Ptr->CDB[4] =
2567 		    (unsigned char)sizeof(struct scsi_inquiry_data);
2568 		if (Message_Ptr->CDB[4] == 0) {
2569 			Message_Ptr->CDB[4] = 255;
2570 		}
2571 
2572 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2573 		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
2574 		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
2575 		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2576 		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2577 
2578 		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2579 		  Message_Ptr, sizeof(struct scsi_inquiry_data));
2580 		SG(&(Message_Ptr->SGL), 0,
2581 		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2582 		  iq, sizeof(struct scsi_inquiry_data));
2583 		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2584 
2585 		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2586 			kprintf (" ");
2587 			ASR_prstring (iq->vendor, 8);
2588 			++posted;
2589 		}
2590 		if (iq->product[0] && (iq->product[0] != ' ')) {
2591 			kprintf (" ");
2592 			ASR_prstring (iq->product, 16);
2593 			++posted;
2594 		}
2595 		if (iq->revision[0] && (iq->revision[0] != ' ')) {
2596 			kprintf (" FW Rev. ");
2597 			ASR_prstring (iq->revision, 4);
2598 			++posted;
2599 		}
2600 		kfree(iq, M_TEMP);
2601 		if (posted) {
2602 			kprintf (",");
2603 		}
2604 	}
2605 	kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2606 	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2607 
2608 	for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2609 		struct cam_devq	  * devq;
2610 		int		    QueueSize = sc->ha_QueueSize;
2611 
2612 		if (QueueSize > MAX_INBOUND) {
2613 			QueueSize = MAX_INBOUND;
2614 		}
2615 
2616 		/*
2617 		 *	Create the device queue for our SIM(s).
2618 		 */
2619 		if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2620 			continue;
2621 		}
2622 
2623 		/*
2624 		 *	Construct our first channel SIM entry
2625 		 */
2626 		sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2627 						unit, &sim_mplock,
2628 						1, QueueSize, devq);
2629 		if (sc->ha_sim[bus] == NULL) {
2630 			continue;
2631 		}
2632 
2633 		if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
2634 			cam_sim_free(sc->ha_sim[bus]);
2635 			sc->ha_sim[bus] = NULL;
2636 			continue;
2637 		}
2638 
2639 		if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2640 		    cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2641 		    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2642 			xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2643 			cam_sim_free(sc->ha_sim[bus]);
2644 			sc->ha_sim[bus] = NULL;
2645 			continue;
2646 		}
2647 	}
2648 
2649 	/*
2650 	 *	Generate the device node information
2651 	 */
2652 	sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
2653 			       "asr%d", unit);
2654 	if (sc->ha_devt != NULL)
2655 		(void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2656 	sc->ha_devt->si_drv1 = sc;
2657 	return(0);
2658 } /* asr_attach */
2659 
/*
 *	CAM polled-mode entry point: service any pending adapter
 * completions by invoking the interrupt handler directly.
 */
static void
asr_poll(struct cam_sim *sim)
{
	asr_intr(cam_sim_softc(sim));
} /* asr_poll */
2665 
/*
 *	CAM action entry point: dispatch the requested CCB function code.
 * Every path completes the ccb with xpt_done() (directly or via the
 * queued command's completion), except a successfully queued XPT_SCSI_IO.
 */
static void
asr_action(struct cam_sim *sim, union ccb  *ccb)
{
	struct Asr_softc *sc;

	debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
			 ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash our softc in the ccb for the completion path. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		} Message;
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			kprintf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
				     cam_sim_bus(sim), ccb->ccb_h.target_id,
				     ccb->ccb_h.target_lun);
		debug_asr_dump_ccb(ccb);

		/* Translate the CCB into an I2O message and queue it. */
		if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				/* Inbound FIFO full: ask CAM to retry later. */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf(" Q\n");
			break;
		}
		/*
		 *	We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts = &(ccb->cts);
		struct ccb_trans_settings_scsi *scsi =
		    &cts->proto_specific.scsi;
		struct ccb_trans_settings_spi *spi =
		    &cts->xport_specific.spi;

		/* Only fixed user settings are reported; no current state. */
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags = CTS_SPI_FLAGS_DISC_ENB;
			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			spi->sync_period = 6; /* 40MHz */
			spi->sync_offset = 15;
			spi->valid = CTS_SPI_VALID_SYNC_RATE
				   | CTS_SPI_VALID_SYNC_OFFSET
				   | CTS_SPI_VALID_BUS_WIDTH
				   | CTS_SPI_VALID_DISC;
			scsi->valid = CTS_SCSI_VALID_TQ;

			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		/* Synthesize a BIOS-style geometry from the volume size. */
		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
                cpi->transport = XPORT_SPI;
                cpi->transport_version = 2;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
2873 
2874 /*
2875  * Handle processing of current CCB as pointed to by the Status.
2876  */
static int
asr_intr(Asr_softc_t *sc)
{
	int processed;

	/*
	 * Drain the adapter's outbound (reply) FIFO.  `processed' becomes 1
	 * once any iteration runs, telling the caller the interrupt was ours.
	 * NOTE(review): the loop gate tests Mask_InterruptsDisabled in the
	 * status register; presumably this bit doubles as the reply-pending
	 * indication on this hardware -- confirm against the register spec.
	 */
	for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
	    processed = 1) {
		union asr_ccb			   *ccb;
		u_int				    dsc;
		U32				    ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/*
		 * Pop a reply frame offset; the FIFO is read twice and only
		 * two consecutive EMPTY_QUEUE results terminate the scan.
		 */
		if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
		 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
			break;
		}
		/* Translate the bus address of the reply into a KVA pointer. */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		/*
		 * A FAIL-flagged reply means the controller could not process
		 * the original message frame; recover it and neutralize it
		 * with a NOP so the frame can be returned to the free pool.
		 */
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			I2O_UTIL_NOP_MESSAGE	Message;
			PI2O_UTIL_NOP_MESSAGE	Message_Ptr;
			U32			MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 *  Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext =
			    bus_space_read_4(sc->ha_frame_btag,
			    sc->ha_frame_bhandle, MessageOffset +
			    offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
			    TransactionContext));
			/*
			 *	For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 *  Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			    &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#endif
			/*
			 *  Copy the packet out to the Original Message
			 */
			asr_set_frame(sc, Message_Ptr, MessageOffset,
				      sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 *  Issue the NOP
			 */
			asr_set_ToFIFO(sc, MessageOffset);
		}

		/*
		 *	Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			asr_set_FromFIFO(sc, ReplyOffset);
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/*
		 * Map the I2O detailed status code onto a CAM completion
		 * status; the low bits carry the raw SCSI status byte.
		 */
		dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame));
		ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		switch (dsc) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
			    CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			/* Unrecognized status: ask CAM to retry the request. */
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested transfer length minus bytes moved. */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/*
				 * Clamp the copy to the smallest of the CCB
				 * sense buffer, the I2O frame's sense area,
				 * and the caller's requested sense length.
				 */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				if (size < ccb->csio.sense_len) {
					ccb->csio.sense_resid =
					    ccb->csio.sense_len - size;
				} else {
					ccb->csio.sense_resid = 0;
				}
				bzero(&(ccb->csio.sense_data),
				    sizeof(ccb->csio.sense_data));
				bcopy(Reply->SenseData,
				      &(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		asr_set_FromFIFO(sc, ReplyOffset);

		/*
		 * CCBs with a CAM path complete through CAM; path-less CCBs
		 * belong to internal synchronous waiters blocked in tsleep.
		 */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup (ccb);
		}
	}
	return (processed);
} /* asr_intr */
3073 
#undef QueueSize	/* Grrrr */
#undef SG_Size		/* Grrrr */

/*
 *	Meant to be included at the bottom of asr.c !!!
 */

/*
 *	Included here as hard coded. Done because other necessary include
 *	files utilize C++ comment structures which make them a nuisance to
 *	included here just to pick up these three typedefs.
 */
typedef U32   DPT_TAG_T;
typedef U32   DPT_MSG_T;
typedef U32   DPT_RTN_T;

#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
#include	"dev/raid/asr/osd_unix.h"

/* Map a control-device node to its adapter unit number. */
#define	asr_unit(dev)	  minor(dev)

/*
 * Non-zero while the control (ioctl) device is open; asr_open() and
 * asr_close() use it to enforce a single opener at a time across all
 * adapters.
 */
static u_int8_t ASR_ctlr_held;
3096 
3097 static int
3098 asr_open(struct dev_open_args *ap)
3099 {
3100 	cdev_t dev = ap->a_head.a_dev;
3101 	int		 error;
3102 
3103 	if (dev->si_drv1 == NULL) {
3104 		return (ENODEV);
3105 	}
3106 	crit_enter();
3107 	if (ASR_ctlr_held) {
3108 		error = EBUSY;
3109 	} else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
3110 		++ASR_ctlr_held;
3111 	}
3112 	crit_exit();
3113 	return (error);
3114 } /* asr_open */
3115 
3116 static int
3117 asr_close(struct dev_close_args *ap)
3118 {
3119 
3120 	ASR_ctlr_held = 0;
3121 	return (0);
3122 } /* asr_close */
3123 
3124 
3125 /*-------------------------------------------------------------------------*/
3126 /*		      Function ASR_queue_i				   */
3127 /*-------------------------------------------------------------------------*/
3128 /* The Parameters Passed To This Function Are :				   */
3129 /*     Asr_softc_t *	  : HBA miniport driver's adapter data storage.	   */
3130 /*     PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command	   */
3131 /*	I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure	   */
3132 /*									   */
3133 /* This Function Will Take The User Request Packet And Convert It To An	   */
3134 /* I2O MSG And Send It Off To The Adapter.				   */
3135 /*									   */
3136 /* Return : 0 For OK, Error Code Otherwise				   */
3137 /*-------------------------------------------------------------------------*/
static int
ASR_queue_i(Asr_softc_t	*sc, PI2O_MESSAGE_FRAME	Packet)
{
	union asr_ccb				   * ccb;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply;
	PI2O_MESSAGE_FRAME			     Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	     Reply_Ptr;
	int					     MessageSizeInBytes;
	int					     ReplySizeInBytes;
	int					     error;
	int					     s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t			   UserSpace;
		I2O_FLAGS_COUNT		   FlagsCount;
		/* Variable-length tail: allocated as sizeof(*elm) -
		 * sizeof(KernelSpace) + len, pre-C99 flexible array idiom. */
		char			   KernelSpace[sizeof(long)];
	}					   * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	/* Refuse new user commands while the adapter is faulted (BlinkLED). */
	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	kfree(Message_Ptr, M_TEMP);
	/*
	 * Synchronous executive commands are handled inline; the 32-bit
	 * result (or status structure) is copied straight back to the
	 * user's reply area.
	 */
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	PI2O_EXEC_STATUS_GET_REPLY status;

		status = &sc->ha_statusmem->status;
		if (ASR_getStatus(sc) == NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		/*
		 * NOTE(review): sizeof(status) here is the size of the
		 * pointer, not the reply structure -- presumably only the
		 * first few bytes are meaningful to the caller; confirm.
		 */
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-copy the full message now that its true size is known. */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	/* Peek at the user's reply header just to learn its declared size. */
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	kfree(Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		kfree(Message_Ptr, M_TEMP);
		/* `error' is 0 here; the %d is informational only. */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/*
	 * Allocate a kernel reply frame at least as large as the SCSI
	 * error reply so sense/transfer-count fields can always be set.
	 */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == NULL) {
		kfree(Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
	/* Mirror the request's context fields into the reply we will build. */
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	      | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			kfree(Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			kfree(Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 *	since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		/*
		 * Walk every SG element in the message: allocate a kernel
		 * bounce buffer for each, copy the user data in, and rewrite
		 * the element(s) with physical addresses.
		 */
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int	len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			if ((elm = (struct ioctlSgList_S *)kmalloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK)) == NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			/* Remember the original flags and user address for
			 * the copy-out pass after the command completes. */
			elm->FlagsCount = sg->FlagsCount;
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			  != 0) {
				break;
			}
			/*
			 *	If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
					    flags &=
						~(I2O_SGL_FLAGS_END_OF_BUFFER
						 | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/* Grow the frame by one SG element,
					 * duplicating the current element so
					 * the loop can fill in the tail. */
					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    kmalloc (MessageSizeInBytes,
					     M_TEMP, M_WAITOK)) == NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					     - (caddr_t)Message_Ptr;
					bcopy(Message_Ptr,NewMessage_Ptr, span);
					bcopy((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					kfree(Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind: release every bounce buffer and frames. */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			kfree(Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList)) != NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			kfree(elm, M_TEMP);
		}
		kfree(Reply_Ptr, M_TEMP);
		kfree(Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	kfree(Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	crit_enter();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			crit_exit();
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList)) != NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				kfree(elm, M_TEMP);
			}
			kfree(Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		/* There is no PRICAM, but outwardly PRIBIO is functional */
		tsleep(ccb, 0, "asr", hz);
	}
	crit_exit();

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Detailed status: 0 on CAM_REQ_CMP, 1 on any other completion. */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	/* Append auto-sense data when the reply frame has room for it. */
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	 - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		if (size < ccb->csio.sense_len) {
			ccb->csio.sense_resid = ccb->csio.sense_len - size;
		} else {
			ccb->csio.sense_resid = 0;
		}
		/*
		 * NOTE(review): the bzero of csio.sense_data occurs before
		 * copying it into the reply, so the copied sense bytes are
		 * zeros unless filled elsewhere -- verify intended order.
		 */
		bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
		bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		    Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		kfree(elm, M_TEMP);
	}
	if (error == 0) {
	/* Copy reply frame to user space */
		error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
				ReplySizeInBytes);
	}
	kfree(Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */
3584 
3585 /*----------------------------------------------------------------------*/
3586 /*			    Function asr_ioctl			       */
3587 /*----------------------------------------------------------------------*/
3588 /* The parameters passed to this function are :				*/
3589 /*     dev  : Device number.						*/
3590 /*     cmd  : Ioctl Command						*/
3591 /*     data : User Argument Passed In.					*/
3592 /*     flag : Mode Parameter						*/
3593 /*     proc : Process Parameter						*/
3594 /*									*/
3595 /* This function is the user interface into this adapter driver		*/
3596 /*									*/
3597 /* Return : zero if OK, error code if not				*/
3598 /*----------------------------------------------------------------------*/
3599 
static int
asr_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t data = ap->a_data;
	Asr_softc_t	*sc = dev->si_drv1;
	int		i, error = 0;
#ifdef ASR_IOCTL_COMPAT
	int		j;
#endif /* ASR_IOCTL_COMPAT */

	if (sc == NULL)
		return (EINVAL);

	switch(cmd) {
	case DPT_SIGNATURE:
#ifdef ASR_IOCTL_COMPAT
#if (dsDescription_size != 50)
	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
#endif
		/*
		 * New-style callers encode extra information in the high
		 * 16 bits of the command and take the signature inline;
		 * otherwise fall through to the traditional pointer form.
		 */
		if (cmd & 0xFFFF0000) {
			bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
			return (0);
		}
	/* Traditional version of the ioctl interface */
	case DPT_SIGNATURE & 0x0000FFFF:
#endif
		return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
				sizeof(dpt_sig_S)));

	/* Traditional version of the ioctl interface */
	case DPT_CTRLINFO & 0x0000FFFF:
	case DPT_CTRLINFO: {
		/* Fixed wire layout expected by the DPT management tools. */
		struct {
			u_int16_t length;
			u_int16_t drvrHBAnum;
			u_int32_t baseAddr;
			u_int16_t blinkState;
			u_int8_t  pciBusNum;
			u_int8_t  pciDeviceNum;
			u_int16_t hbaFlags;
			u_int16_t Interrupt;
			u_int32_t reserved1;
			u_int32_t reserved2;
			u_int32_t reserved3;
		} CtlrInfo;

		bzero(&CtlrInfo, sizeof(CtlrInfo));
		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
		CtlrInfo.drvrHBAnum = asr_unit(dev);
		CtlrInfo.baseAddr = sc->ha_Base;
		/* -1 (no blink code available) is reported as 0. */
		i = ASR_getBlinkLedCode (sc);
		if (i == -1)
			i = 0;

		CtlrInfo.blinkState = i;
		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
#define	FLG_OSD_PCI_VALID 0x0001
#define	FLG_OSD_DMA	  0x0002
#define	FLG_OSD_I2O	  0x0004
		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
		CtlrInfo.Interrupt = sc->ha_irq;
#ifdef ASR_IOCTL_COMPAT
		if (cmd & 0xffff0000)
			bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
		else
#endif /* ASR_IOCTL_COMPAT */
		error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
	}	return (error);

	/* Traditional version of the ioctl interface */
	case DPT_SYSINFO & 0x0000FFFF:
	case DPT_SYSINFO: {
		sysInfo_S	Info;
#ifdef ASR_IOCTL_COMPAT
		char	      * cp;
		/* Kernel Specific ptok `hack' */
#define		ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)

		bzero(&Info, sizeof(Info));

		/* Appears I am the only person in the Kernel doing this */
		/* Drive type bytes read directly from the RTC/CMOS ports. */
		outb (0x70, 0x12);
		i = inb(0x71);
		j = i >> 4;
		if (i == 0x0f) {
			outb (0x70, 0x19);
			j = inb (0x71);
		}
		Info.drive0CMOS = j;

		j = i & 0x0f;
		if (i == 0x0f) {
			outb (0x70, 0x1a);
			j = inb (0x71);
		}
		Info.drive1CMOS = j;

		/* BIOS data area byte 0x475 holds the fixed-disk count. */
		Info.numDrives = *((char *)ptok(0x475));
#else /* ASR_IOCTL_COMPAT */
		bzero(&Info, sizeof(Info));
#endif /* ASR_IOCTL_COMPAT */

		Info.processorFamily = ASR_sig.dsProcessorFamily;
		Info.osType = OS_BSDI_UNIX;
		/* Derive major.minor from the "X.Y..." osrelease string. */
		Info.osMajorVersion = osrelease[0] - '0';
		Info.osMinorVersion = osrelease[2] - '0';
		/* Info.osRevision = 0; */
		/* Info.osSubRevision = 0; */
		Info.busType = SI_PCI_BUS;
		Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;

#ifdef ASR_IOCTL_COMPAT
		Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
		/* Go Out And Look For I2O SmartROM */
		/* Scan the ISA option-ROM window for an "S  I20" signature. */
		for(j = 0xC8000; j < 0xE0000; j += 2048) {
			int k;

			cp = ptok(j);
			if (*((unsigned short *)cp) != 0xAA55) {
				continue;
			}
			j += (cp[2] * 512) - 2048;
			if ((*((u_long *)(cp + 6))
			  != ('S' + (' ' * 256) + (' ' * 65536L)))
			 || (*((u_long *)(cp + 10))
			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
				continue;
			}
			/* Search the ROM header for a " v" version marker. */
			cp += 0x24;
			for (k = 0; k < 64; ++k) {
				if (*((unsigned short *)cp)
				 == (' ' + ('v' * 256))) {
					break;
				}
			}
			if (k < 64) {
				Info.smartROMMajorVersion
				    = *((unsigned char *)(cp += 4)) - '0';
				Info.smartROMMinorVersion
				    = *((unsigned char *)(cp += 2));
				Info.smartROMRevision
				    = *((unsigned char *)(++cp));
				Info.flags |= SI_SmartROMverValid;
				Info.flags &= ~SI_NO_SmartROM;
				break;
			}
		}
		/* Get The Conventional Memory Size From CMOS */
		outb (0x70, 0x16);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x15);
		j |= inb(0x71);
		Info.conventionalMemSize = j;

		/* Get The Extended Memory Found At Power On From CMOS */
		outb (0x70, 0x31);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x30);
		j |= inb(0x71);
		Info.extendedMemSize = j;
		Info.flags |= SI_MemorySizeValid;

		/* Copy Out The Info Structure To The User */
		if (cmd & 0xFFFF0000)
			bcopy(&Info, data, sizeof(Info));
		else
#endif /* ASR_IOCTL_COMPAT */
		error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
		return (error); }

		/* Get The BlinkLED State */
	case DPT_BLINKLED:
		/* -1 (no blink code available) is reported as 0. */
		i = ASR_getBlinkLedCode (sc);
		if (i == -1)
			i = 0;
#ifdef ASR_IOCTL_COMPAT
		if (cmd & 0xffff0000)
			bcopy(&i, data, sizeof(i));
		else
#endif /* ASR_IOCTL_COMPAT */
		error = copyout(&i, *(caddr_t *)data, sizeof(i));
		break;

		/* Send an I2O command */
	case I2OUSRCMD:
		return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));

		/* Reset and re-initialize the adapter */
	case I2ORESETCMD:
		return (ASR_reset(sc));

		/* Rescan the LCT table and resynchronize the information */
	case I2ORESCANCMD:
		return (ASR_rescan(sc));
	}
	return (EINVAL);
} /* asr_ioctl */
3802