xref: /dragonfly/sys/dev/raid/arcmsr/arcmsr.c (revision 4d0c54c1)
1 /*
2 *****************************************************************************************
3 **        O.S   : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 **                ARCMSR RAID Host adapter
9 **                [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 **        Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 **    notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 **    notice, this list of conditions and the following disclaimer in the
23 **    documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 **    derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 **        REV#         DATE             NAME             DESCRIPTION
41 **     1.00.00.00   03/31/2004      Erich Chen           First release
42 **     1.20.00.02   11/29/2004      Erich Chen           bug fix with arcmsr_bus_reset when PHY error
43 **     1.20.00.03   04/19/2005      Erich Chen           add SATA 24 Ports adapter type support
44 **                                                       clean unused function
45 **     1.20.00.12   09/12/2005      Erich Chen           bug fix with abort command handling,
46 **                                                       firmware version check
47 **                                                       and firmware update notify for hardware bug fix
48 **                                                       handling if none zero high part physical address
49 **                                                       of srb resource
50 **     1.20.00.13   08/18/2006      Erich Chen           remove pending srb and report busy
51 **                                                       add iop message xfer
52 **                                                       with scsi pass-through command
53 **                                                       add new device id of sas raid adapters
54 **                                                       code fit for SPARC64 & PPC
55 **     1.20.00.14   02/05/2007      Erich Chen           bug fix for incorrect ccb_h.status report
56 **                                                       and cause g_vfs_done() read write error
57 **     1.20.00.15   10/10/2007      Erich Chen           support new RAID adapter type ARC120x
58 **     1.20.00.16   10/10/2009      Erich Chen           Bug fix for RAID adapter type ARC120x
59 **                                                       bus_dmamem_alloc() with BUS_DMA_ZERO
60 **     1.20.00.17   07/15/2010      Ching Huang          Added support ARC1880
61 **                                                       report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 **                                                       prevent cam_periph_error removing all LUN devices of one Target id
63 **                                                       for any one LUN device failed
64 **     1.20.00.18   10/14/2010      Ching Huang          Fixed "inquiry data fails comparion at DV1 step"
65 **                  10/25/2010      Ching Huang          Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 **     1.20.00.19   11/11/2010      Ching Huang          Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
67 **     1.20.00.20   12/08/2010      Ching Huang          Avoid calling atomic_set_int function
68 **     1.20.00.21   02/08/2011      Ching Huang          Implement I/O request timeout
69 **                  02/14/2011      Ching Huang          Modified pktRequestCount
70 **     1.20.00.21   03/03/2011      Ching Huang          if a command timeout, then wait its ccb back before free it
71 **     1.20.00.22   07/04/2011      Ching Huang          Fixed multiple MTX panic
72 **     1.20.00.23   10/28/2011      Ching Huang          Added TIMEOUT_DELAY in case of too many HDDs need to start
73 **     1.20.00.23   11/08/2011      Ching Huang          Added report device transfer speed
74 **     1.20.00.23   01/30/2012      Ching Huang          Fixed Request requeued and Retrying command
75 **     1.20.00.24   06/11/2012      Ching Huang          Fixed return sense data condition
76 **     1.20.00.25   08/17/2012      Ching Huang          Fixed hotplug device no function on type A adapter
77 ******************************************************************************************
78 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.43 2012/09/04 05:15:54 delphij Exp $
79 */
80 #if 0
81 #define ARCMSR_DEBUG1			1
82 #endif
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
87 #include <sys/bus.h>
88 #include <sys/queue.h>
89 #include <sys/stat.h>
90 #include <sys/kthread.h>
91 #include <sys/module.h>
92 #include <sys/proc.h>
93 #include <sys/lock.h>
94 #include <sys/sysctl.h>
95 #include <sys/thread2.h>
96 #include <sys/poll.h>
97 #include <sys/device.h>
98 #include <vm/vm.h>
99 #include <vm/vm_param.h>
100 #include <vm/pmap.h>
101 
102 #include <machine/atomic.h>
103 #include <sys/conf.h>
104 #include <sys/rman.h>
105 
106 #include <bus/cam/cam.h>
107 #include <bus/cam/cam_ccb.h>
108 #include <bus/cam/cam_sim.h>
109 #include <bus/cam/cam_periph.h>
110 #include <bus/cam/cam_xpt_periph.h>
111 #include <bus/cam/cam_xpt_sim.h>
112 #include <bus/cam/cam_debug.h>
113 #include <bus/cam/scsi/scsi_all.h>
114 #include <bus/cam/scsi/scsi_message.h>
115 /*
116 **************************************************************************
117 **************************************************************************
118 */
119 #include <sys/endian.h>
120 #include <bus/pci/pcivar.h>
121 #include <bus/pci/pcireg.h>
122 #define ARCMSR_LOCK_INIT(l, s)	lockinit(l, s, 0, LK_CANRECURSE)
123 #define ARCMSR_LOCK_DESTROY(l)	lockuninit(l)
124 #define ARCMSR_LOCK_ACQUIRE(l)	lockmgr(l, LK_EXCLUSIVE)
125 #define ARCMSR_LOCK_RELEASE(l)	lockmgr(l, LK_RELEASE)
126 #define ARCMSR_LOCK_TRY(l)	lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
127 #define arcmsr_htole32(x)	htole32(x)
128 typedef struct lock		arcmsr_lock_t;
129 
130 #if !defined(CAM_NEW_TRAN_CODE)
131 #define	CAM_NEW_TRAN_CODE	1
132 #endif
133 
134 #define arcmsr_callout_init(a)	callout_init_mp(a);
135 
136 #define ARCMSR_DRIVER_VERSION			"Driver Version 1.20.00.25 2012-08-17"
137 #include <dev/raid/arcmsr/arcmsr.h>
138 #define	SRB_SIZE						((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
139 #define ARCMSR_SRBS_POOL_SIZE           (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
140 /*
141 **************************************************************************
142 **************************************************************************
143 */
144 #define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
145 #define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
146 /*
147 **************************************************************************
148 **************************************************************************
149 */
150 static void arcmsr_free_srb(struct CommandControlBlock *srb);
151 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
152 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
153 static int arcmsr_probe(device_t dev);
154 static int arcmsr_attach(device_t dev);
155 static int arcmsr_detach(device_t dev);
156 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
157 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
158 static int arcmsr_shutdown(device_t dev);
159 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
160 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
161 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
162 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
163 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
164 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
165 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
166 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
167 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
168 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
169 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
170 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
171 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
172 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
173 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
174 static int arcmsr_resume(device_t dev);
175 static int arcmsr_suspend(device_t dev);
176 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
177 static void	arcmsr_polling_devmap(void* arg);
178 static void	arcmsr_srb_timeout(void* arg);
179 #ifdef ARCMSR_DEBUG1
180 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
181 #endif
182 /*
183 **************************************************************************
184 **************************************************************************
185 */
186 static void UDELAY(u_int32_t us) { DELAY(us); }
187 /*
188 **************************************************************************
189 **************************************************************************
190 */
191 static bus_dmamap_callback_t arcmsr_map_free_srb;
192 static bus_dmamap_callback_t arcmsr_execute_srb;
193 /*
194 **************************************************************************
195 **************************************************************************
196 */
197 static d_open_t	arcmsr_open;
198 static d_close_t arcmsr_close;
199 static d_ioctl_t arcmsr_ioctl;
200 
201 static device_method_t arcmsr_methods[]={
202 	DEVMETHOD(device_probe,		arcmsr_probe),
203 	DEVMETHOD(device_attach,	arcmsr_attach),
204 	DEVMETHOD(device_detach,	arcmsr_detach),
205 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
206 	DEVMETHOD(device_suspend,	arcmsr_suspend),
207 	DEVMETHOD(device_resume,	arcmsr_resume),
208 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
209 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
210 	{ 0, 0 }
211 };
212 
213 static driver_t arcmsr_driver={
214 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
215 };
216 
217 static devclass_t arcmsr_devclass;
218 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
219 MODULE_VERSION(arcmsr, 1);
220 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
221 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
222 #ifndef BUS_DMA_COHERENT
223 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
224 #endif
225 
226 static struct dev_ops arcmsr_ops = {
227 	{ "arcmsr", 0, 0 },
228 	.d_open =	arcmsr_open,		        /* open     */
229 	.d_close =	arcmsr_close,		        /* close    */
230 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
231 };
232 
233 static int	arcmsr_msi_enable = 1;
234 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
235 
236 
237 /*
238 **************************************************************************
239 **************************************************************************
240 */
241 
242 static int
243 arcmsr_open(struct dev_open_args *ap)
244 {
245 	cdev_t dev = ap->a_head.a_dev;
246 	struct AdapterControlBlock *acb=dev->si_drv1;
247 
248 	if(acb==NULL) {
249 		return ENXIO;
250 	}
251 	return (0);
252 }
253 
254 /*
255 **************************************************************************
256 **************************************************************************
257 */
258 
259 static int
260 arcmsr_close(struct dev_close_args *ap)
261 {
262 	cdev_t dev = ap->a_head.a_dev;
263 	struct AdapterControlBlock *acb=dev->si_drv1;
264 
265 	if(acb==NULL) {
266 		return ENXIO;
267 	}
268 	return 0;
269 }
270 
271 /*
272 **************************************************************************
273 **************************************************************************
274 */
275 
276 static int
277 arcmsr_ioctl(struct dev_ioctl_args *ap)
278 {
279 	cdev_t dev = ap->a_head.a_dev;
280 	u_long ioctl_cmd = ap->a_cmd;
281 	caddr_t arg = ap->a_data;
282 	struct AdapterControlBlock *acb=dev->si_drv1;
283 
284 	if(acb==NULL) {
285 		return ENXIO;
286 	}
287 	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
288 }
289 
290 /*
291 **********************************************************************
292 **********************************************************************
293 */
294 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
295 {
296 	u_int32_t intmask_org=0;
297 
298 	switch (acb->adapter_type) {
299 	case ACB_ADAPTER_TYPE_A: {
300 			/* disable all outbound interrupt */
301 			intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
302 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
303 		}
304 		break;
305 	case ACB_ADAPTER_TYPE_B: {
306 			/* disable all outbound interrupt */
307 			intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
308 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
309 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
310 		}
311 		break;
312 	case ACB_ADAPTER_TYPE_C: {
313 			/* disable all outbound interrupt */
314 			intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask)	; /* disable outbound message0 int */
315 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
316 		}
317 		break;
318 	}
319 	return (intmask_org);
320 }
321 /*
322 **********************************************************************
323 **********************************************************************
324 */
325 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
326 {
327 	u_int32_t mask;
328 
329 	switch (acb->adapter_type) {
330 	case ACB_ADAPTER_TYPE_A: {
331 			/* enable outbound Post Queue, outbound doorbell Interrupt */
332 			mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
333 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
334 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
335 		}
336 		break;
337 	case ACB_ADAPTER_TYPE_B: {
338 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
339 			mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
340 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
341 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
342 		}
343 		break;
344 	case ACB_ADAPTER_TYPE_C: {
345 			/* enable outbound Post Queue, outbound doorbell Interrupt */
346 			mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
347 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
348 			acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
349 		}
350 		break;
351 	}
352 }
353 /*
354 **********************************************************************
355 **********************************************************************
356 */
357 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
358 {
359 	u_int32_t Index;
360 	u_int8_t Retries=0x00;
361 
362 	do {
363 		for(Index=0; Index < 100; Index++) {
364 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
365 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
366 				return TRUE;
367 			}
368 			UDELAY(10000);
369 		}/*max 1 seconds*/
370 	}while(Retries++ < 20);/*max 20 sec*/
371 	return (FALSE);
372 }
373 /*
374 **********************************************************************
375 **********************************************************************
376 */
377 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
378 {
379 	u_int32_t Index;
380 	u_int8_t Retries=0x00;
381 
382 	do {
383 		for(Index=0; Index < 100; Index++) {
384 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
385 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
386 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
387 				return TRUE;
388 			}
389 			UDELAY(10000);
390 		}/*max 1 seconds*/
391 	}while(Retries++ < 20);/*max 20 sec*/
392 	return (FALSE);
393 }
394 /*
395 **********************************************************************
396 **********************************************************************
397 */
398 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
399 {
400 	u_int32_t Index;
401 	u_int8_t Retries=0x00;
402 
403 	do {
404 		for(Index=0; Index < 100; Index++) {
405 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
406 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
407 				return TRUE;
408 			}
409 			UDELAY(10000);
410 		}/*max 1 seconds*/
411 	}while(Retries++ < 20);/*max 20 sec*/
412 	return (FALSE);
413 }
414 /*
415 ************************************************************************
416 ************************************************************************
417 */
418 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
419 {
420 	int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
421 
422 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
423 	do {
424 		if(arcmsr_hba_wait_msgint_ready(acb)) {
425 			break;
426 		} else {
427 			retry_count--;
428 		}
429 	}while(retry_count!=0);
430 }
431 /*
432 ************************************************************************
433 ************************************************************************
434 */
435 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
436 {
437 	int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
438 
439 	CHIP_REG_WRITE32(HBB_DOORBELL,
440 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
441 	do {
442 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
443 			break;
444 		} else {
445 			retry_count--;
446 		}
447 	}while(retry_count!=0);
448 }
449 /*
450 ************************************************************************
451 ************************************************************************
452 */
453 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
454 {
455 	int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
456 
457 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
458 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
459 	do {
460 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
461 			break;
462 		} else {
463 			retry_count--;
464 		}
465 	}while(retry_count!=0);
466 }
467 /*
468 ************************************************************************
469 ************************************************************************
470 */
471 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
472 {
473 	switch (acb->adapter_type) {
474 	case ACB_ADAPTER_TYPE_A: {
475 			arcmsr_flush_hba_cache(acb);
476 		}
477 		break;
478 	case ACB_ADAPTER_TYPE_B: {
479 			arcmsr_flush_hbb_cache(acb);
480 		}
481 		break;
482 	case ACB_ADAPTER_TYPE_C: {
483 			arcmsr_flush_hbc_cache(acb);
484 		}
485 		break;
486 	}
487 }
488 /*
489 *******************************************************************************
490 *******************************************************************************
491 */
492 static int arcmsr_suspend(device_t dev)
493 {
494 	struct AdapterControlBlock	*acb = device_get_softc(dev);
495 
496 	/* flush controller */
497 	arcmsr_iop_parking(acb);
498 	/* disable all outbound interrupt */
499 	arcmsr_disable_allintr(acb);
500 	return(0);
501 }
502 /*
503 *******************************************************************************
504 *******************************************************************************
505 */
506 static int arcmsr_resume(device_t dev)
507 {
508 	struct AdapterControlBlock	*acb = device_get_softc(dev);
509 
510 	arcmsr_iop_init(acb);
511 	return(0);
512 }
513 /*
514 *********************************************************************************
515 *********************************************************************************
516 */
517 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
518 {
519 	struct AdapterControlBlock *acb;
520 	u_int8_t target_id, target_lun;
521 	struct cam_sim * sim;
522 
523 	sim=(struct cam_sim *) cb_arg;
524 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
525 	switch (code) {
526 	case AC_LOST_DEVICE:
527 		target_id=xpt_path_target_id(path);
528 		target_lun=xpt_path_lun_id(path);
529 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
530 			break;
531 		}
532 	//	kprintf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
533 		break;
534 	default:
535 		break;
536 	}
537 }
538 /*
539 **********************************************************************
540 **********************************************************************
541 */
542 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
543 {
544 	struct AdapterControlBlock *acb=srb->acb;
545 	union ccb * pccb=srb->pccb;
546 
547 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
548 		callout_stop(&srb->ccb_callout);
549 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
550 		bus_dmasync_op_t op;
551 
552 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
553 			op = BUS_DMASYNC_POSTREAD;
554 		} else {
555 			op = BUS_DMASYNC_POSTWRITE;
556 		}
557 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
558 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
559 	}
560 	if(stand_flag==1) {
561 		atomic_subtract_int(&acb->srboutstandingcount, 1);
562 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
563 		acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
564 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
565 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
566 		}
567 	}
568 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
569 		arcmsr_free_srb(srb);
570 #ifdef ARCMSR_DEBUG1
571 	acb->pktReturnCount++;
572 #endif
573 	xpt_done(pccb);
574 	return;
575 }
576 /*
577 **********************************************************************
578 **********************************************************************
579 */
580 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
581 {
582 	union ccb * pccb=srb->pccb;
583 
584 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
585 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
586 	if(pccb->csio.sense_len) {
587 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
588 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
589 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
590 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
591 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
592 	}
593 }
594 /*
595 *********************************************************************
596 *********************************************************************
597 */
598 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
599 {
600 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
601 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
602 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
603 	}
604 }
605 /*
606 *********************************************************************
607 *********************************************************************
608 */
609 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
610 {
611 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
612 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
613 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
614 	}
615 }
616 /*
617 *********************************************************************
618 *********************************************************************
619 */
620 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
621 {
622 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
623 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
624 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
625 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
626 	}
627 }
628 /*
629 *********************************************************************
630 *********************************************************************
631 */
632 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
633 {
634 	switch (acb->adapter_type) {
635 	case ACB_ADAPTER_TYPE_A: {
636 			arcmsr_abort_hba_allcmd(acb);
637 		}
638 		break;
639 	case ACB_ADAPTER_TYPE_B: {
640 			arcmsr_abort_hbb_allcmd(acb);
641 		}
642 		break;
643 	case ACB_ADAPTER_TYPE_C: {
644 			arcmsr_abort_hbc_allcmd(acb);
645 		}
646 		break;
647 	}
648 }
649 /*
650 **************************************************************************
651 **************************************************************************
652 */
653 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
654 {
655 	int target, lun;
656 
657 	target=srb->pccb->ccb_h.target_id;
658 	lun=srb->pccb->ccb_h.target_lun;
659 	if(error == FALSE) {
660 		if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
661 			acb->devstate[target][lun]=ARECA_RAID_GOOD;
662 		}
663 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
664 		arcmsr_srb_complete(srb, 1);
665 	} else {
666 		switch(srb->arcmsr_cdb.DeviceStatus) {
667 		case ARCMSR_DEV_SELECT_TIMEOUT: {
668 				if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
669 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
670 				}
671 				acb->devstate[target][lun]=ARECA_RAID_GONE;
672 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
673 				arcmsr_srb_complete(srb, 1);
674 			}
675 			break;
676 		case ARCMSR_DEV_ABORTED:
677 		case ARCMSR_DEV_INIT_FAIL: {
678 				acb->devstate[target][lun]=ARECA_RAID_GONE;
679 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
680 				arcmsr_srb_complete(srb, 1);
681 			}
682 			break;
683 		case SCSISTAT_CHECK_CONDITION: {
684 				acb->devstate[target][lun]=ARECA_RAID_GOOD;
685 				arcmsr_report_sense_info(srb);
686 				arcmsr_srb_complete(srb, 1);
687 			}
688 			break;
689 		default:
690 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x\n"
691 					, acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
692 			acb->devstate[target][lun]=ARECA_RAID_GONE;
693 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
694 			/*unknown error or crc error just for retry*/
695 			arcmsr_srb_complete(srb, 1);
696 			break;
697 		}
698 	}
699 }
700 /*
701 **************************************************************************
702 **************************************************************************
703 */
704 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
705 {
706 	struct CommandControlBlock *srb;
707 
708 	/* check if command done with no error*/
709 	switch (acb->adapter_type) {
710 	case ACB_ADAPTER_TYPE_C:
711 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
712 		break;
713 	case ACB_ADAPTER_TYPE_A:
714 	case ACB_ADAPTER_TYPE_B:
715 	default:
716 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
717 		break;
718 	}
719 	if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
720 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
721 			arcmsr_free_srb(srb);
722 			kprintf("arcmsr%d: srb='%p' return srb has been timeouted\n", acb->pci_unit, srb);
723 			return;
724 		}
725 		kprintf("arcmsr%d: return srb has been completed\n"
726 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
727 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
728 		return;
729 	}
730 	arcmsr_report_srb_state(acb, srb, error);
731 }
732 /*
733 **************************************************************************
734 **************************************************************************
735 */
736 static void	arcmsr_srb_timeout(void* arg)
737 {
738 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
739 	struct AdapterControlBlock *acb;
740 	int target, lun;
741 	u_int8_t cmd;
742 
743 	target=srb->pccb->ccb_h.target_id;
744 	lun=srb->pccb->ccb_h.target_lun;
745 	acb = srb->acb;
746 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
747 	if(srb->srb_state == ARCMSR_SRB_START)
748 	{
749 		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
750 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
751 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
752 		arcmsr_srb_complete(srb, 1);
753 		kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
754 				 acb->pci_unit, target, lun, cmd, srb);
755 	}
756 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
757 #ifdef ARCMSR_DEBUG1
758 	arcmsr_dump_data(acb);
759 #endif
760 }
761 
762 /*
763 **********************************************************************
764 **********************************************************************
765 */
766 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
767 {
768 	int i=0;
769 	u_int32_t flag_srb;
770 	u_int16_t error;
771 
772 	switch (acb->adapter_type) {
773 	case ACB_ADAPTER_TYPE_A: {
774 			u_int32_t outbound_intstatus;
775 
776 			/*clear and abort all outbound posted Q*/
777 			outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
778 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
779 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
780                 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
781 				arcmsr_drain_donequeue(acb, flag_srb, error);
782 			}
783 		}
784 		break;
785 	case ACB_ADAPTER_TYPE_B: {
786 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
787 
788 			/*clear all outbound posted Q*/
789 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
790 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
791 				if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
792 					phbbmu->done_qbuffer[i]=0;
793 			error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
794 					arcmsr_drain_donequeue(acb, flag_srb, error);
795 				}
796 				phbbmu->post_qbuffer[i]=0;
797 			}/*drain reply FIFO*/
798 			phbbmu->doneq_index=0;
799 			phbbmu->postq_index=0;
800 		}
801 		break;
802 	case ACB_ADAPTER_TYPE_C: {
803 
804 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
805 				flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
806                 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
807 				arcmsr_drain_donequeue(acb, flag_srb, error);
808 			}
809 		}
810 		break;
811 	}
812 }
813 /*
814 ****************************************************************************
815 ****************************************************************************
816 */
817 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
818 {
819 	struct CommandControlBlock *srb;
820 	u_int32_t intmask_org;
821 	u_int32_t i=0;
822 
823 	if(acb->srboutstandingcount>0) {
824 		/* disable all outbound interrupt */
825 		intmask_org=arcmsr_disable_allintr(acb);
826 		/*clear and abort all outbound posted Q*/
827 		arcmsr_done4abort_postqueue(acb);
828 		/* talk to iop 331 outstanding command aborted*/
829 		arcmsr_abort_allcmd(acb);
830 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
831 			srb=acb->psrb_pool[i];
832 			if(srb->srb_state==ARCMSR_SRB_START) {
833 				srb->srb_state=ARCMSR_SRB_ABORTED;
834 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
835 				arcmsr_srb_complete(srb, 1);
836 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
837 						, acb->pci_unit, srb->pccb->ccb_h.target_id
838 						, srb->pccb->ccb_h.target_lun, srb);
839 			}
840 		}
841 		/* enable all outbound interrupt */
842 		arcmsr_enable_allintr(acb, intmask_org);
843 	}
844 	acb->srboutstandingcount=0;
845 	acb->workingsrb_doneindex=0;
846 	acb->workingsrb_startindex=0;
847 #ifdef ARCMSR_DEBUG1
848 	acb->pktRequestCount = 0;
849 	acb->pktReturnCount = 0;
850 #endif
851 }
852 /*
853 **********************************************************************
854 **********************************************************************
855 */
856 static void arcmsr_build_srb(struct CommandControlBlock *srb,
857 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
858 {
859 	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
860 	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
861 	u_int32_t address_lo, address_hi;
862 	union ccb * pccb=srb->pccb;
863 	struct ccb_scsiio * pcsio= &pccb->csio;
864 	u_int32_t arccdbsize=0x30;
865 
866 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
867 	arcmsr_cdb->Bus=0;
868 	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
869 	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
870 	arcmsr_cdb->Function=1;
871 	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
872 	arcmsr_cdb->Context=0;
873 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
874 	if(nseg != 0) {
875 		struct AdapterControlBlock *acb=srb->acb;
876 		bus_dmasync_op_t op;
877 		u_int32_t length, i, cdb_sgcount=0;
878 
879 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
880 			op=BUS_DMASYNC_PREREAD;
881 		} else {
882 			op=BUS_DMASYNC_PREWRITE;
883 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
884 			srb->srb_flags|=SRB_FLAG_WRITE;
885 		}
886 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
887 		for(i=0;i<nseg;i++) {
888 			/* Get the physical address of the current data pointer */
889 			length=arcmsr_htole32(dm_segs[i].ds_len);
890 			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
891 			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
892 			if(address_hi==0) {
893 				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
894 				pdma_sg->address=address_lo;
895 				pdma_sg->length=length;
896 				psge += sizeof(struct SG32ENTRY);
897 				arccdbsize += sizeof(struct SG32ENTRY);
898 			} else {
899 				u_int32_t sg64s_size=0, tmplength=length;
900 
901 				while(1) {
902 					u_int64_t span4G, length0;
903 					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
904 
905 					span4G=(u_int64_t)address_lo + tmplength;
906 					pdma_sg->addresshigh=address_hi;
907 					pdma_sg->address=address_lo;
908 					if(span4G > 0x100000000) {
909 						/*see if cross 4G boundary*/
910 						length0=0x100000000-address_lo;
911 						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
912 						address_hi=address_hi+1;
913 						address_lo=0;
914 						tmplength=tmplength-(u_int32_t)length0;
915 						sg64s_size += sizeof(struct SG64ENTRY);
916 						psge += sizeof(struct SG64ENTRY);
917 						cdb_sgcount++;
918 					} else {
919 						pdma_sg->length=tmplength|IS_SG64_ADDR;
920 						sg64s_size += sizeof(struct SG64ENTRY);
921 						psge += sizeof(struct SG64ENTRY);
922 						break;
923 					}
924 				}
925 				arccdbsize += sg64s_size;
926 			}
927 			cdb_sgcount++;
928 		}
929 		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
930 		arcmsr_cdb->DataLength=pcsio->dxfer_len;
931 		if( arccdbsize > 256) {
932 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
933 		}
934 	} else {
935 		arcmsr_cdb->DataLength = 0;
936 	}
937 	srb->arc_cdb_size=arccdbsize;
938 }
939 /*
940 **************************************************************************
941 **************************************************************************
942 */
943 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
944 {
945 	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
946 	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
947 
948 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
949 	atomic_add_int(&acb->srboutstandingcount, 1);
950 	srb->srb_state=ARCMSR_SRB_START;
951 
952 	switch (acb->adapter_type) {
953 	case ACB_ADAPTER_TYPE_A: {
954 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
955 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
956 			} else {
957 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
958 			}
959 		}
960 		break;
961 	case ACB_ADAPTER_TYPE_B: {
962 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
963 			int ending_index, index;
964 
965 			index=phbbmu->postq_index;
966 			ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
967 			phbbmu->post_qbuffer[ending_index]=0;
968 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
969 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
970 			} else {
971 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
972 			}
973 			index++;
974 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
975 			phbbmu->postq_index=index;
976 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
977 		}
978 		break;
979     case ACB_ADAPTER_TYPE_C:
980         {
981             u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
982 
983             arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
984             ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
985 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
986             if(cdb_phyaddr_hi32)
987             {
988 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
989 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
990             }
991             else
992             {
993 			    CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
994             }
995         }
996         break;
997 	}
998 }
999 /*
1000 ************************************************************************
1001 ************************************************************************
1002 */
1003 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
1004 {
1005 	struct QBUFFER *qbuffer=NULL;
1006 
1007 	switch (acb->adapter_type) {
1008 	case ACB_ADAPTER_TYPE_A: {
1009 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1010 
1011 			qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
1012 		}
1013 		break;
1014 	case ACB_ADAPTER_TYPE_B: {
1015 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1016 
1017 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1018 		}
1019 		break;
1020 	case ACB_ADAPTER_TYPE_C: {
1021 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1022 
1023 			qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
1024 		}
1025 		break;
1026 	}
1027 	return(qbuffer);
1028 }
1029 /*
1030 ************************************************************************
1031 ************************************************************************
1032 */
1033 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1034 {
1035 	struct QBUFFER *qbuffer=NULL;
1036 
1037 	switch (acb->adapter_type) {
1038 	case ACB_ADAPTER_TYPE_A: {
1039 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1040 
1041 			qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
1042 		}
1043 		break;
1044 	case ACB_ADAPTER_TYPE_B: {
1045 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1046 
1047 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1048 		}
1049 		break;
1050 	case ACB_ADAPTER_TYPE_C: {
1051 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1052 
1053 			qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1054 		}
1055 		break;
1056 	}
1057 	return(qbuffer);
1058 }
1059 /*
1060 **************************************************************************
1061 **************************************************************************
1062 */
1063 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1064 {
1065 	switch (acb->adapter_type) {
1066 	case ACB_ADAPTER_TYPE_A: {
1067 			/* let IOP know data has been read */
1068 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1069 		}
1070 		break;
1071 	case ACB_ADAPTER_TYPE_B: {
1072 			/* let IOP know data has been read */
1073 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1074 		}
1075 		break;
1076 	case ACB_ADAPTER_TYPE_C: {
1077 			/* let IOP know data has been read */
1078 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1079 		}
1080 	}
1081 }
1082 /*
1083 **************************************************************************
1084 **************************************************************************
1085 */
1086 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1087 {
1088 	switch (acb->adapter_type) {
1089 	case ACB_ADAPTER_TYPE_A: {
1090 			/*
1091 			** push inbound doorbell tell iop, driver data write ok
1092 			** and wait reply on next hwinterrupt for next Qbuffer post
1093 			*/
1094 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1095 		}
1096 		break;
1097 	case ACB_ADAPTER_TYPE_B: {
1098 			/*
1099 			** push inbound doorbell tell iop, driver data write ok
1100 			** and wait reply on next hwinterrupt for next Qbuffer post
1101 			*/
1102 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1103 		}
1104 		break;
1105 	case ACB_ADAPTER_TYPE_C: {
1106 			/*
1107 			** push inbound doorbell tell iop, driver data write ok
1108 			** and wait reply on next hwinterrupt for next Qbuffer post
1109 			*/
1110 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1111 		}
1112 		break;
1113 	}
1114 }
1115 /*
1116 **********************************************************************
1117 **********************************************************************
1118 */
1119 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1120 {
1121 	u_int8_t *pQbuffer;
1122 	struct QBUFFER *pwbuffer;
1123 	u_int8_t * iop_data;
1124 	int32_t allxfer_len=0;
1125 
1126 	pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1127 	iop_data=(u_int8_t *)pwbuffer->data;
1128 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1129 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1130 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1131 			&& (allxfer_len<124)) {
1132 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1133 			memcpy(iop_data, pQbuffer, 1);
1134 			acb->wqbuf_firstindex++;
1135 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1136 			iop_data++;
1137 			allxfer_len++;
1138 		}
1139 		pwbuffer->data_len=allxfer_len;
1140 		/*
1141 		** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1142 		*/
1143 		arcmsr_iop_message_wrote(acb);
1144 	}
1145 }
1146 /*
1147 ************************************************************************
1148 ************************************************************************
1149 */
1150 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1151 {
1152 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1153 	CHIP_REG_WRITE32(HBA_MessageUnit,
1154 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1155 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1156 		kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1157 			, acb->pci_unit);
1158 	}
1159 	return;
1160 }
1161 /*
1162 ************************************************************************
1163 ************************************************************************
1164 */
1165 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1166 {
1167 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1168 	CHIP_REG_WRITE32(HBB_DOORBELL,
1169 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1170 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1171 		kprintf( "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1172 			, acb->pci_unit);
1173 	}
1174 }
1175 /*
1176 ************************************************************************
1177 ************************************************************************
1178 */
1179 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1180 {
1181 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1182 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1183 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1184 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1185 		kprintf("arcmsr%d: wait 'stop adapter background rebulid' timeout \n", acb->pci_unit);
1186 	}
1187 }
1188 /*
1189 ************************************************************************
1190 ************************************************************************
1191 */
1192 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1193 {
1194 	switch (acb->adapter_type) {
1195 	case ACB_ADAPTER_TYPE_A: {
1196 			arcmsr_stop_hba_bgrb(acb);
1197 		}
1198 		break;
1199 	case ACB_ADAPTER_TYPE_B: {
1200 			arcmsr_stop_hbb_bgrb(acb);
1201 		}
1202 		break;
1203 	case ACB_ADAPTER_TYPE_C: {
1204 			arcmsr_stop_hbc_bgrb(acb);
1205 		}
1206 		break;
1207 	}
1208 }
1209 /*
1210 ************************************************************************
1211 ************************************************************************
1212 */
1213 static void arcmsr_poll(struct cam_sim * psim)
1214 {
1215 	struct AdapterControlBlock *acb;
1216 	int	mutex;
1217 
1218 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1219 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1220 	if( mutex == 0 )
1221 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1222 	arcmsr_interrupt(acb);
1223 	if( mutex == 0 )
1224 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1225 }
1226 /*
1227 **************************************************************************
1228 **************************************************************************
1229 */
1230 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1231 {
1232 	struct QBUFFER *prbuffer;
1233 	u_int8_t *pQbuffer;
1234 	u_int8_t *iop_data;
1235 	int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1236 
1237 	/*check this iop data if overflow my rqbuffer*/
1238 	rqbuf_lastindex=acb->rqbuf_lastindex;
1239 	rqbuf_firstindex=acb->rqbuf_firstindex;
1240 	prbuffer=arcmsr_get_iop_rqbuffer(acb);
1241 	iop_data=(u_int8_t *)prbuffer->data;
1242 	iop_len=prbuffer->data_len;
1243 	my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1244 	if(my_empty_len>=iop_len) {
1245 		while(iop_len > 0) {
1246 			pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1247 			memcpy(pQbuffer, iop_data, 1);
1248 			rqbuf_lastindex++;
1249 			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */
1250 			iop_data++;
1251 			iop_len--;
1252 		}
1253 		acb->rqbuf_lastindex=rqbuf_lastindex;
1254 		arcmsr_iop_message_read(acb);
1255 		/*signature, let IOP know data has been read */
1256 	} else {
1257 		acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1258 	}
1259 }
1260 /*
1261 **************************************************************************
1262 **************************************************************************
1263 */
1264 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1265 {
1266 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1267 	/*
1268 	*****************************************************************
1269 	**   check if there are any mail packages from user space program
1270 	**   in my post bag, now is the time to send them into Areca's firmware
1271 	*****************************************************************
1272 	*/
1273 	if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1274 		u_int8_t *pQbuffer;
1275 		struct QBUFFER *pwbuffer;
1276 		u_int8_t *iop_data;
1277 		int allxfer_len=0;
1278 
1279 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1280 		pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1281 		iop_data=(u_int8_t *)pwbuffer->data;
1282 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1283 			&& (allxfer_len<124)) {
1284 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1285 			memcpy(iop_data, pQbuffer, 1);
1286 			acb->wqbuf_firstindex++;
1287 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1288 			iop_data++;
1289 			allxfer_len++;
1290 		}
1291 		pwbuffer->data_len=allxfer_len;
1292 		/*
1293 		** push inbound doorbell tell iop driver data write ok
1294 		** and wait reply on next hwinterrupt for next Qbuffer post
1295 		*/
1296 		arcmsr_iop_message_wrote(acb);
1297 	}
1298 	if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1299 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1300 	}
1301 }
1302 
1303 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1304 {
1305 /*
1306 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1307 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1308 	else
1309 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1310 */
1311 	xpt_free_path(ccb->ccb_h.path);
1312 }
1313 
1314 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1315 {
1316 	struct cam_path     *path;
1317 	union ccb            ccb;
1318 
1319 	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1320 		return;
1321 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1322 	bzero(&ccb, sizeof(union ccb));
1323 	xpt_setup_ccb(&ccb.ccb_h, path, 5);
1324 	ccb.ccb_h.func_code = XPT_SCAN_LUN;
1325 	ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1326 	ccb.crcn.flags = CAM_FLAG_NONE;
1327 	xpt_action(&ccb);
1328 }
1329 
1330 
1331 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1332 {
1333 	struct CommandControlBlock *srb;
1334 	u_int32_t intmask_org;
1335 	int i;
1336 
1337 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1338 	/* disable all outbound interrupts */
1339 	intmask_org = arcmsr_disable_allintr(acb);
1340 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1341 	{
1342 		srb = acb->psrb_pool[i];
1343 		if (srb->srb_state == ARCMSR_SRB_START)
1344 		{
1345 		if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1346             {
1347 			srb->srb_state = ARCMSR_SRB_ABORTED;
1348 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1349 			arcmsr_srb_complete(srb, 1);
1350 				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1351 		}
1352 		}
1353 	}
1354 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1355 	arcmsr_enable_allintr(acb, intmask_org);
1356 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1357 }
1358 
1359 
1360 /*
1361 **************************************************************************
1362 **************************************************************************
1363 */
1364 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1365 	u_int32_t	devicemap;
1366 	u_int32_t	target, lun;
1367     u_int32_t	deviceMapCurrent[4]={0};
1368     u_int8_t	*pDevMap;
1369 
1370 	switch (acb->adapter_type) {
1371 	case ACB_ADAPTER_TYPE_A:
1372 			devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1373 			for (target= 0; target < 4; target++)
1374 			{
1375 		deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
1376 		devicemap += 4;
1377 			}
1378 			break;
1379 
1380 	case ACB_ADAPTER_TYPE_B:
1381 			devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1382 			for (target= 0; target < 4; target++)
1383 			{
1384 		deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1],  devicemap);
1385 		devicemap += 4;
1386 			}
1387 			break;
1388 
1389 	case ACB_ADAPTER_TYPE_C:
1390 			devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1391 			for (target= 0; target < 4; target++)
1392 			{
1393 		deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0],  devicemap);
1394 		devicemap += 4;
1395 			}
1396 			break;
1397 	}
1398 
1399 		if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1400 		{
1401 			acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1402 		}
1403 		/*
1404 		** adapter posted CONFIG message
1405 		** copy the new map, note if there are differences with the current map
1406 		*/
1407 		pDevMap = (u_int8_t	*)&deviceMapCurrent[0];
1408 		for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1409 		{
1410 			if (*pDevMap != acb->device_map[target])
1411 			{
1412                 u_int8_t difference, bit_check;
1413 
1414                 difference= *pDevMap ^ acb->device_map[target];
1415                 for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1416                 {
1417                     bit_check=(1 << lun);						/*check bit from 0....31*/
1418                     if(difference & bit_check)
1419                     {
1420                         if(acb->device_map[target] & bit_check)
1421                         {/* unit departed */
1422 							kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1423 							arcmsr_abort_dr_ccbs(acb, target, lun);
1424 				arcmsr_rescan_lun(acb, target, lun);
1425 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1426                         }
1427                         else
1428                         {/* unit arrived */
1429 							kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
1430 				arcmsr_rescan_lun(acb, target, lun);
1431 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1432                         }
1433                     }
1434                 }
1435 /*				kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1436 				acb->device_map[target]= *pDevMap;
1437 			}
1438 			pDevMap++;
1439 		}
1440 }
1441 /*
1442 **************************************************************************
1443 **************************************************************************
1444 */
1445 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1446 	u_int32_t outbound_message;
1447 
1448 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1449 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1450 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1451 		arcmsr_dr_handle( acb );
1452 }
1453 /*
1454 **************************************************************************
1455 **************************************************************************
1456 */
1457 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1458 	u_int32_t outbound_message;
1459 
1460 	/* clear interrupts */
1461 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1462 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1463 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1464 		arcmsr_dr_handle( acb );
1465 }
1466 /*
1467 **************************************************************************
1468 **************************************************************************
1469 */
1470 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1471 	u_int32_t outbound_message;
1472 
1473 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1474 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1475 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1476 		arcmsr_dr_handle( acb );
1477 }
1478 /*
1479 **************************************************************************
1480 **************************************************************************
1481 */
1482 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1483 {
1484 	u_int32_t outbound_doorbell;
1485 
1486 	/*
1487 	*******************************************************************
1488 	**  Maybe here we need to check wrqbuffer_lock is lock or not
1489 	**  DOORBELL: din! don!
1490 	**  check if there are any mail need to pack from firmware
1491 	*******************************************************************
1492 	*/
1493 	outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit,
1494 	0, outbound_doorbell);
1495 	CHIP_REG_WRITE32(HBA_MessageUnit,
1496 	0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1497 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1498 		arcmsr_iop2drv_data_wrote_handle(acb);
1499 	}
1500 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1501 		arcmsr_iop2drv_data_read_handle(acb);
1502 	}
1503 }
1504 /*
1505 **************************************************************************
1506 **************************************************************************
1507 */
1508 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1509 {
1510 	u_int32_t outbound_doorbell;
1511 
1512 	/*
1513 	*******************************************************************
1514 	**  Maybe here we need to check wrqbuffer_lock is lock or not
1515 	**  DOORBELL: din! don!
1516 	**  check if there are any mail need to pack from firmware
1517 	*******************************************************************
1518 	*/
1519 	outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1520 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1521 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1522 		arcmsr_iop2drv_data_wrote_handle(acb);
1523 	}
1524 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1525 		arcmsr_iop2drv_data_read_handle(acb);
1526 	}
1527 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1528 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1529 	}
1530 }
1531 /*
1532 **************************************************************************
1533 **************************************************************************
1534 */
1535 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1536 {
1537 	u_int32_t flag_srb;
1538 	u_int16_t error;
1539 
1540 	/*
1541 	*****************************************************************************
1542 	**               areca cdb command done
1543 	*****************************************************************************
1544 	*/
1545 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1546 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1547 	while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1548 		0, outbound_queueport)) != 0xFFFFFFFF) {
1549 		/* check if command done with no error*/
1550         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1551 		arcmsr_drain_donequeue(acb, flag_srb, error);
1552 	}	/*drain reply FIFO*/
1553 }
1554 /*
1555 **************************************************************************
1556 **************************************************************************
1557 */
1558 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1559 {
1560 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1561 	u_int32_t flag_srb;
1562 	int index;
1563 	u_int16_t error;
1564 
1565 	/*
1566 	*****************************************************************************
1567 	**               areca cdb command done
1568 	*****************************************************************************
1569 	*/
1570 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1571 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1572 	index=phbbmu->doneq_index;
1573 	while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1574 		phbbmu->done_qbuffer[index]=0;
1575 		index++;
1576 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1577 		phbbmu->doneq_index=index;
1578 		/* check if command done with no error*/
1579         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1580 		arcmsr_drain_donequeue(acb, flag_srb, error);
1581 	}	/*drain reply FIFO*/
1582 }
1583 /*
1584 **************************************************************************
1585 **************************************************************************
1586 */
1587 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1588 {
1589 	u_int32_t flag_srb,throttling=0;
1590 	u_int16_t error;
1591 
1592 	/*
1593 	*****************************************************************************
1594 	**               areca cdb command done
1595 	*****************************************************************************
1596 	*/
1597 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1598 
1599 	while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1600 
1601 		flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1602 		/* check if command done with no error*/
1603         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
1604 		arcmsr_drain_donequeue(acb, flag_srb, error);
1605         if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1606             CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1607             break;
1608         }
1609         throttling++;
1610 	}	/*drain reply FIFO*/
1611 }
1612 /*
1613 **********************************************************************
1614 **********************************************************************
1615 */
1616 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1617 {
1618 	u_int32_t outbound_intStatus;
1619 	/*
1620 	*********************************************
1621 	**   check outbound intstatus
1622 	*********************************************
1623 	*/
1624 	outbound_intStatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1625 	if(!outbound_intStatus) {
1626 		/*it must be share irq*/
1627 		return;
1628 	}
1629 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus);/*clear interrupt*/
1630 	/* MU doorbell interrupts*/
1631 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1632 		arcmsr_hba_doorbell_isr(acb);
1633 	}
1634 	/* MU post queue interrupts*/
1635 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1636 		arcmsr_hba_postqueue_isr(acb);
1637 	}
1638 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1639 		arcmsr_hba_message_isr(acb);
1640 	}
1641 }
1642 /*
1643 **********************************************************************
1644 **********************************************************************
1645 */
1646 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1647 {
1648 	u_int32_t outbound_doorbell;
1649 	/*
1650 	*********************************************
1651 	**   check outbound intstatus
1652 	*********************************************
1653 	*/
1654 	outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1655 	if(!outbound_doorbell) {
1656 		/*it must be share irq*/
1657 		return;
1658 	}
1659 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1660 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1661 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1662 	/* MU ioctl transfer doorbell interrupts*/
1663 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1664 		arcmsr_iop2drv_data_wrote_handle(acb);
1665 	}
1666 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1667 		arcmsr_iop2drv_data_read_handle(acb);
1668 	}
1669 	/* MU post queue interrupts*/
1670 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1671 		arcmsr_hbb_postqueue_isr(acb);
1672 	}
1673 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1674 		arcmsr_hbb_message_isr(acb);
1675 	}
1676 }
1677 /*
1678 **********************************************************************
1679 **********************************************************************
1680 */
1681 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1682 {
1683 	u_int32_t host_interrupt_status;
1684 	/*
1685 	*********************************************
1686 	**   check outbound intstatus
1687 	*********************************************
1688 	*/
1689 	host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1690 	if(!host_interrupt_status) {
1691 		/*it must be share irq*/
1692 		return;
1693 	}
1694 	/* MU doorbell interrupts*/
1695 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1696 		arcmsr_hbc_doorbell_isr(acb);
1697 	}
1698 	/* MU post queue interrupts*/
1699 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1700 		arcmsr_hbc_postqueue_isr(acb);
1701 	}
1702 }
1703 /*
1704 ******************************************************************************
1705 ******************************************************************************
1706 */
1707 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1708 {
1709 	switch (acb->adapter_type) {
1710 	case ACB_ADAPTER_TYPE_A:
1711 		arcmsr_handle_hba_isr(acb);
1712 		break;
1713 	case ACB_ADAPTER_TYPE_B:
1714 		arcmsr_handle_hbb_isr(acb);
1715 		break;
1716 	case ACB_ADAPTER_TYPE_C:
1717 		arcmsr_handle_hbc_isr(acb);
1718 		break;
1719 	default:
1720 		kprintf("arcmsr%d: interrupt service,"
1721 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1722 		break;
1723 	}
1724 }
1725 /*
1726 **********************************************************************
1727 **********************************************************************
1728 */
1729 static void arcmsr_intr_handler(void *arg)
1730 {
1731 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1732 
1733 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1734 	arcmsr_interrupt(acb);
1735 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1736 }
1737 /*
1738 ******************************************************************************
1739 ******************************************************************************
1740 */
1741 static void	arcmsr_polling_devmap(void* arg)
1742 {
1743 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1744 	switch (acb->adapter_type) {
1745 	case ACB_ADAPTER_TYPE_A:
1746 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1747 		break;
1748 
1749 	case ACB_ADAPTER_TYPE_B:
1750 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1751 		break;
1752 
1753 	case ACB_ADAPTER_TYPE_C:
1754 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1755 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1756 		break;
1757 	}
1758 
1759 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1760 	{
1761 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* polling per 5 seconds */
1762 	}
1763 }
1764 
1765 /*
1766 *******************************************************************************
1767 **
1768 *******************************************************************************
1769 */
1770 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1771 {
1772 	u_int32_t intmask_org;
1773 
1774 	if(acb!=NULL) {
1775 		/* stop adapter background rebuild */
1776 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1777 			intmask_org = arcmsr_disable_allintr(acb);
1778 			arcmsr_stop_adapter_bgrb(acb);
1779 			arcmsr_flush_adapter_cache(acb);
1780 			arcmsr_enable_allintr(acb, intmask_org);
1781 		}
1782 	}
1783 }
1784 /*
1785 ***********************************************************************
1786 **
1787 ************************************************************************
1788 */
1789 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1790 {
1791 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1792 	u_int32_t retvalue=EINVAL;
1793 
1794 	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1795 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1796 		return retvalue;
1797 	}
1798 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1799 	switch(ioctl_cmd) {
1800 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1801 			u_int8_t * pQbuffer;
1802 			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1803 			u_int32_t allxfer_len=0;
1804 
1805 			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1806 				&& (allxfer_len<1031)) {
1807 				/*copy READ QBUFFER to srb*/
1808 				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1809 				memcpy(ptmpQbuffer, pQbuffer, 1);
1810 				acb->rqbuf_firstindex++;
1811 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1812 				/*if last index number set it to 0 */
1813 				ptmpQbuffer++;
1814 				allxfer_len++;
1815 			}
1816 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1817 				struct QBUFFER * prbuffer;
1818 				u_int8_t * iop_data;
1819 				u_int32_t iop_len;
1820 
1821 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1822 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
1823 				iop_data=(u_int8_t *)prbuffer->data;
1824 				iop_len=(u_int32_t)prbuffer->data_len;
1825 				/*this iop data does no chance to make me overflow again here, so just do it*/
1826 				while(iop_len>0) {
1827 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1828 					memcpy(pQbuffer, iop_data, 1);
1829 					acb->rqbuf_lastindex++;
1830 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1831 					/*if last index number set it to 0 */
1832 					iop_data++;
1833 					iop_len--;
1834 				}
1835 				arcmsr_iop_message_read(acb);
1836 				/*signature, let IOP know data has been readed */
1837 			}
1838 			pcmdmessagefld->cmdmessage.Length=allxfer_len;
1839 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1840 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1841 		}
1842 		break;
1843 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1844 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1845 			u_int8_t * pQbuffer;
1846 			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1847 
1848 			user_len=pcmdmessagefld->cmdmessage.Length;
1849 			/*check if data xfer length of this request will overflow my array qbuffer */
1850 			wqbuf_lastindex=acb->wqbuf_lastindex;
1851 			wqbuf_firstindex=acb->wqbuf_firstindex;
1852 			if(wqbuf_lastindex!=wqbuf_firstindex) {
1853 				arcmsr_post_ioctldata2iop(acb);
1854 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1855 			} else {
1856 				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1857 				if(my_empty_len>=user_len) {
1858 					while(user_len>0) {
1859 						/*copy srb data to wqbuffer*/
1860 						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1861 						memcpy(pQbuffer, ptmpuserbuffer, 1);
1862 						acb->wqbuf_lastindex++;
1863 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1864 						/*if last index number set it to 0 */
1865 						ptmpuserbuffer++;
1866 						user_len--;
1867 					}
1868 					/*post fist Qbuffer*/
1869 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1870 						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1871 						arcmsr_post_ioctldata2iop(acb);
1872 					}
1873 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1874 				} else {
1875 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1876 				}
1877 			}
1878 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1879 		}
1880 		break;
1881 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1882 			u_int8_t * pQbuffer=acb->rqbuffer;
1883 
1884 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1885 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1886 				arcmsr_iop_message_read(acb);
1887 				/*signature, let IOP know data has been readed */
1888 			}
1889 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1890 			acb->rqbuf_firstindex=0;
1891 			acb->rqbuf_lastindex=0;
1892 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1893 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1894 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1895 		}
1896 		break;
1897 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1898 		{
1899 			u_int8_t * pQbuffer=acb->wqbuffer;
1900 
1901 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1902 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1903                 arcmsr_iop_message_read(acb);
1904 				/*signature, let IOP know data has been readed */
1905 			}
1906 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1907 			acb->wqbuf_firstindex=0;
1908 			acb->wqbuf_lastindex=0;
1909 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1910 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1911 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1912 		}
1913 		break;
1914 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1915 			u_int8_t * pQbuffer;
1916 
1917 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1918 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1919                 arcmsr_iop_message_read(acb);
1920 				/*signature, let IOP know data has been readed */
1921 			}
1922 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1923 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
1924 					|ACB_F_MESSAGE_WQBUFFER_READ);
1925 			acb->rqbuf_firstindex=0;
1926 			acb->rqbuf_lastindex=0;
1927 			acb->wqbuf_firstindex=0;
1928 			acb->wqbuf_lastindex=0;
1929 			pQbuffer=acb->rqbuffer;
1930 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1931 			pQbuffer=acb->wqbuffer;
1932 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1933 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1934 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1935 		}
1936 		break;
1937 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1938 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1939 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1940 		}
1941 		break;
1942 	case ARCMSR_MESSAGE_SAY_HELLO: {
1943 			u_int8_t * hello_string="Hello! I am ARCMSR";
1944 			u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer;
1945 
1946 			if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) {
1947 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1948 				ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1949 				return ENOIOCTL;
1950 			}
1951 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1952 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1953 		}
1954 		break;
1955 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
1956 			arcmsr_iop_parking(acb);
1957 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1958 		}
1959 		break;
1960 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1961 			arcmsr_flush_adapter_cache(acb);
1962 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1963 		}
1964 		break;
1965 	}
1966 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1967 	return (retvalue);
1968 }
1969 /*
1970 **************************************************************************
1971 **************************************************************************
1972 */
1973 static void arcmsr_free_srb(struct CommandControlBlock *srb)
1974 {
1975 	struct AdapterControlBlock	*acb;
1976 	int	mutex;
1977 
1978 	acb = srb->acb;
1979 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1980 	if( mutex == 0 )
1981 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1982 	srb->srb_state=ARCMSR_SRB_DONE;
1983 	srb->srb_flags=0;
1984 	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
1985 	acb->workingsrb_doneindex++;
1986 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
1987 	if( mutex == 0 )
1988 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1989 }
1990 /*
1991 **************************************************************************
1992 **************************************************************************
1993 */
1994 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1995 {
1996 	struct CommandControlBlock *srb=NULL;
1997 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
1998 	int	mutex;
1999 
2000 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
2001 	if( mutex == 0 )
2002 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2003 	workingsrb_doneindex=acb->workingsrb_doneindex;
2004 	workingsrb_startindex=acb->workingsrb_startindex;
2005 	srb=acb->srbworkingQ[workingsrb_startindex];
2006 	workingsrb_startindex++;
2007 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
2008 	if(workingsrb_doneindex!=workingsrb_startindex) {
2009 		acb->workingsrb_startindex=workingsrb_startindex;
2010 	} else {
2011 		srb=NULL;
2012 	}
2013 	if( mutex == 0 )
2014 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2015 	return(srb);
2016 }
2017 /*
2018 **************************************************************************
2019 **************************************************************************
2020 */
2021 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
2022 {
2023 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
2024 	int retvalue = 0, transfer_len = 0;
2025 	char *buffer;
2026 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2027 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2028 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
2029 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2030 					/* 4 bytes: Areca io control code */
2031 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2032 		buffer = pccb->csio.data_ptr;
2033 		transfer_len = pccb->csio.dxfer_len;
2034 	} else {
2035 		retvalue = ARCMSR_MESSAGE_FAIL;
2036 		goto message_out;
2037 	}
2038 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2039 		retvalue = ARCMSR_MESSAGE_FAIL;
2040 		goto message_out;
2041 	}
2042 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2043 	switch(controlcode) {
2044 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2045 			u_int8_t *pQbuffer;
2046 			u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
2047 			int32_t allxfer_len = 0;
2048 
2049 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2050 				&& (allxfer_len < 1031)) {
2051 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2052 				memcpy(ptmpQbuffer, pQbuffer, 1);
2053 				acb->rqbuf_firstindex++;
2054 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2055 				ptmpQbuffer++;
2056 				allxfer_len++;
2057 			}
2058 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2059 				struct QBUFFER  *prbuffer;
2060 				u_int8_t  *iop_data;
2061 				int32_t iop_len;
2062 
2063 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2064 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
2065 				iop_data = (u_int8_t *)prbuffer->data;
2066 				iop_len =(u_int32_t)prbuffer->data_len;
2067 				while (iop_len > 0) {
2068 			        pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
2069 					memcpy(pQbuffer, iop_data, 1);
2070 					acb->rqbuf_lastindex++;
2071 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2072 					iop_data++;
2073 					iop_len--;
2074 				}
2075 				arcmsr_iop_message_read(acb);
2076 			}
2077 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2078 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2079 			retvalue=ARCMSR_MESSAGE_SUCCESS;
2080 		}
2081 		break;
2082 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2083 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2084 			u_int8_t *pQbuffer;
2085 			u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2086 
2087 			user_len = pcmdmessagefld->cmdmessage.Length;
2088 			wqbuf_lastindex = acb->wqbuf_lastindex;
2089 			wqbuf_firstindex = acb->wqbuf_firstindex;
2090 			if (wqbuf_lastindex != wqbuf_firstindex) {
2091 				arcmsr_post_ioctldata2iop(acb);
2092 				/* has error report sensedata */
2093 			    if(pccb->csio.sense_len) {
2094 				((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2095 				/* Valid,ErrorCode */
2096 				((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2097 				/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2098 				((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2099 				/* AdditionalSenseLength */
2100 				((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2101 				/* AdditionalSenseCode */
2102 				}
2103 				retvalue = ARCMSR_MESSAGE_FAIL;
2104 			} else {
2105 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2106 						&(ARCMSR_MAX_QBUFFER - 1);
2107 				if (my_empty_len >= user_len) {
2108 					while (user_len > 0) {
2109 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2110 						memcpy(pQbuffer, ptmpuserbuffer, 1);
2111 						acb->wqbuf_lastindex++;
2112 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2113 						ptmpuserbuffer++;
2114 						user_len--;
2115 					}
2116 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2117 						acb->acb_flags &=
2118 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2119 						arcmsr_post_ioctldata2iop(acb);
2120 					}
2121 				} else {
2122 					/* has error report sensedata */
2123 					if(pccb->csio.sense_len) {
2124 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2125 					/* Valid,ErrorCode */
2126 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2127 					/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2128 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2129 					/* AdditionalSenseLength */
2130 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2131 					/* AdditionalSenseCode */
2132 					}
2133 					retvalue = ARCMSR_MESSAGE_FAIL;
2134 				}
2135 			}
2136 		}
2137 		break;
2138 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2139 			u_int8_t *pQbuffer = acb->rqbuffer;
2140 
2141 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2142 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2143 				arcmsr_iop_message_read(acb);
2144 			}
2145 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2146 			acb->rqbuf_firstindex = 0;
2147 			acb->rqbuf_lastindex = 0;
2148 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2149 			pcmdmessagefld->cmdmessage.ReturnCode =
2150 			ARCMSR_MESSAGE_RETURNCODE_OK;
2151 		}
2152 		break;
2153 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2154 			u_int8_t *pQbuffer = acb->wqbuffer;
2155 
2156 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2157 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2158 				arcmsr_iop_message_read(acb);
2159 			}
2160 			acb->acb_flags |=
2161 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2162 					ACB_F_MESSAGE_WQBUFFER_READ);
2163 			acb->wqbuf_firstindex = 0;
2164 			acb->wqbuf_lastindex = 0;
2165 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2166 			pcmdmessagefld->cmdmessage.ReturnCode =
2167 				ARCMSR_MESSAGE_RETURNCODE_OK;
2168 		}
2169 		break;
2170 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2171 			u_int8_t *pQbuffer;
2172 
2173 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2174 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2175 				arcmsr_iop_message_read(acb);
2176 			}
2177 			acb->acb_flags |=
2178 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2179 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2180 				| ACB_F_MESSAGE_WQBUFFER_READ);
2181 			acb->rqbuf_firstindex = 0;
2182 			acb->rqbuf_lastindex = 0;
2183 			acb->wqbuf_firstindex = 0;
2184 			acb->wqbuf_lastindex = 0;
2185 			pQbuffer = acb->rqbuffer;
2186 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2187 			pQbuffer = acb->wqbuffer;
2188 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2189 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2190 		}
2191 		break;
2192 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2193 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2194 		}
2195 		break;
2196 	case ARCMSR_MESSAGE_SAY_HELLO: {
2197 			int8_t * hello_string = "Hello! I am ARCMSR";
2198 
2199 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2200 				, (int16_t)strlen(hello_string));
2201 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2202 		}
2203 		break;
2204 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2205 		arcmsr_iop_parking(acb);
2206 		break;
2207 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2208 		arcmsr_flush_adapter_cache(acb);
2209 		break;
2210 	default:
2211 		retvalue = ARCMSR_MESSAGE_FAIL;
2212 	}
2213 message_out:
2214 	return (retvalue);
2215 }
2216 /*
2217 *********************************************************************
2218 *********************************************************************
2219 */
2220 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2221 {
2222 	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2223 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2224 	union ccb * pccb;
2225 	int target, lun;
2226 
2227 	pccb=srb->pccb;
2228 	target=pccb->ccb_h.target_id;
2229 	lun=pccb->ccb_h.target_lun;
2230 #ifdef ARCMSR_DEBUG1
2231 	acb->pktRequestCount++;
2232 #endif
2233 	if(error != 0) {
2234 		if(error != EFBIG) {
2235 			kprintf("arcmsr%d: unexpected error %x"
2236 				" returned from 'bus_dmamap_load' \n"
2237 				, acb->pci_unit, error);
2238 		}
2239 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2240 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2241 		}
2242 		arcmsr_srb_complete(srb, 0);
2243 		return;
2244 	}
2245 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2246 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2247 		arcmsr_srb_complete(srb, 0);
2248 		return;
2249 	}
2250 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2251 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2252 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2253 		arcmsr_srb_complete(srb, 0);
2254 		return;
2255 	}
2256 	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2257 		u_int8_t block_cmd, cmd;
2258 
2259 		cmd = pccb->csio.cdb_io.cdb_bytes[0];
2260 		block_cmd= cmd & 0x0f;
2261 		if(block_cmd==0x08 || block_cmd==0x0a) {
2262 			kprintf("arcmsr%d:block 'read/write' command "
2263 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2264 				, acb->pci_unit, cmd, target, lun);
2265 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2266 			arcmsr_srb_complete(srb, 0);
2267 			return;
2268 		}
2269 	}
2270 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2271 		if(nseg != 0) {
2272 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2273 		}
2274 		arcmsr_srb_complete(srb, 0);
2275 		return;
2276 	}
2277 	if(acb->srboutstandingcount > ARCMSR_MAX_OUTSTANDING_CMD) {
2278 		xpt_freeze_simq(acb->psim, 1);
2279 		pccb->ccb_h.status = CAM_REQUEUE_REQ;
2280 		acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2281 		arcmsr_srb_complete(srb, 0);
2282 		return;
2283 	}
2284 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2285 	arcmsr_build_srb(srb, dm_segs, nseg);
2286 	arcmsr_post_srb(acb, srb);
2287 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2288 	{
2289 		arcmsr_callout_init(&srb->ccb_callout);
2290 		callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2291 		srb->srb_flags |= SRB_FLAG_TIMER_START;
2292 	}
2293 }
2294 /*
2295 *****************************************************************************************
2296 *****************************************************************************************
2297 */
2298 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2299 {
2300 	struct CommandControlBlock *srb;
2301 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2302 	u_int32_t intmask_org;
2303 	int i=0;
2304 
2305 	acb->num_aborts++;
2306 	/*
2307 	***************************************************************************
2308 	** It is the upper layer do abort command this lock just prior to calling us.
2309 	** First determine if we currently own this command.
2310 	** Start by searching the device queue. If not found
2311 	** at all, and the system wanted us to just abort the
2312 	** command return success.
2313 	***************************************************************************
2314 	*/
2315 	if(acb->srboutstandingcount!=0) {
2316 		/* disable all outbound interrupt */
2317 		intmask_org=arcmsr_disable_allintr(acb);
2318 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2319 			srb=acb->psrb_pool[i];
2320 			if(srb->srb_state==ARCMSR_SRB_START) {
2321 				if(srb->pccb==abortccb) {
2322 					srb->srb_state=ARCMSR_SRB_ABORTED;
2323 					kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2324 						"outstanding command \n"
2325 						, acb->pci_unit, abortccb->ccb_h.target_id
2326 						, abortccb->ccb_h.target_lun, srb);
2327 					arcmsr_polling_srbdone(acb, srb);
2328 					/* enable outbound Post Queue, outbound doorbell Interrupt */
2329 					arcmsr_enable_allintr(acb, intmask_org);
2330 					return (TRUE);
2331 				}
2332 			}
2333 		}
2334 		/* enable outbound Post Queue, outbound doorbell Interrupt */
2335 		arcmsr_enable_allintr(acb, intmask_org);
2336 	}
2337 	return(FALSE);
2338 }
2339 /*
2340 ****************************************************************************
2341 ****************************************************************************
2342 */
2343 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2344 {
2345 	int retry=0;
2346 
2347 	acb->num_resets++;
2348 	acb->acb_flags |=ACB_F_BUS_RESET;
2349 	while(acb->srboutstandingcount!=0 && retry < 400) {
2350 		arcmsr_interrupt(acb);
2351 		UDELAY(25000);
2352 		retry++;
2353 	}
2354 	arcmsr_iop_reset(acb);
2355 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2356 }
2357 /*
2358 **************************************************************************
2359 **************************************************************************
2360 */
2361 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2362 		union ccb * pccb)
2363 {
2364 	pccb->ccb_h.status |= CAM_REQ_CMP;
2365 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2366 	case INQUIRY: {
2367 		unsigned char inqdata[36];
2368 		char *buffer=pccb->csio.data_ptr;
2369 
2370 		if (pccb->ccb_h.target_lun) {
2371 			pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2372 			xpt_done(pccb);
2373 			return;
2374 		}
2375 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2376 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2377 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2378 		inqdata[3] = 0;
2379 		inqdata[4] = 31;			/* length of additional data */
2380 		inqdata[5] = 0;
2381 		inqdata[6] = 0;
2382 		inqdata[7] = 0;
2383 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2384 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2385 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2386 		memcpy(buffer, inqdata, sizeof(inqdata));
2387 		xpt_done(pccb);
2388 	}
2389 	break;
2390 	case WRITE_BUFFER:
2391 	case READ_BUFFER: {
2392 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2393 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2394 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2395 		}
2396 		xpt_done(pccb);
2397 	}
2398 	break;
2399 	default:
2400 		xpt_done(pccb);
2401 	}
2402 }
2403 /*
2404 *********************************************************************
2405 *********************************************************************
2406 */
2407 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2408 {
2409 	struct AdapterControlBlock *  acb;
2410 
2411 	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2412 	if(acb==NULL) {
2413 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2414 		xpt_done(pccb);
2415 		return;
2416 	}
2417 	switch (pccb->ccb_h.func_code) {
2418 	case XPT_SCSI_IO: {
2419 			struct CommandControlBlock *srb;
2420 			int target=pccb->ccb_h.target_id;
2421 
2422 			if(target == 16) {
2423 				/* virtual device for iop message transfer */
2424 				arcmsr_handle_virtual_command(acb, pccb);
2425 				return;
2426 			}
2427 			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2428 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2429 				xpt_done(pccb);
2430 				return;
2431 			}
2432 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2433 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2434 			srb->pccb=pccb;
2435 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2436 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2437 					/* Single buffer */
2438 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2439 						/* Buffer is virtual */
2440 						u_int32_t error;
2441 
2442 						crit_enter();
2443 						error =	bus_dmamap_load(acb->dm_segs_dmat
2444 							, srb->dm_segs_dmamap
2445 							, pccb->csio.data_ptr
2446 							, pccb->csio.dxfer_len
2447 							, arcmsr_execute_srb, srb, /*flags*/0);
2448 						if(error == EINPROGRESS) {
2449 							xpt_freeze_simq(acb->psim, 1);
2450 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2451 						}
2452 						crit_exit();
2453 					}
2454 					else {		/* Buffer is physical */
2455 						struct bus_dma_segment seg;
2456 
2457 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2458 						seg.ds_len = pccb->csio.dxfer_len;
2459 						arcmsr_execute_srb(srb, &seg, 1, 0);
2460 					}
2461 				} else {
2462 					/* Scatter/gather list */
2463 					struct bus_dma_segment *segs;
2464 
2465 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2466 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2467 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2468 						xpt_done(pccb);
2469 						kfree(srb, M_DEVBUF);
2470 						return;
2471 					}
2472 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2473 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2474 				}
2475 			} else {
2476 				arcmsr_execute_srb(srb, NULL, 0, 0);
2477 			}
2478 			break;
2479 		}
2480 	case XPT_TARGET_IO: {
2481 			/* target mode not yet support vendor specific commands. */
2482 			pccb->ccb_h.status |= CAM_REQ_CMP;
2483 			xpt_done(pccb);
2484 			break;
2485 		}
2486 	case XPT_PATH_INQ: {
2487 			struct ccb_pathinq *cpi= &pccb->cpi;
2488 
2489 			cpi->version_num=1;
2490 			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2491 			cpi->target_sprt=0;
2492 			cpi->hba_misc=0;
2493 			cpi->hba_eng_cnt=0;
2494 			cpi->max_target=ARCMSR_MAX_TARGETID;        /* 0-16 */
2495 			cpi->max_lun=ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2496 			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2497 			cpi->bus_id=cam_sim_bus(psim);
2498 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2499 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2500 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2501 			cpi->unit_number=cam_sim_unit(psim);
2502 		#ifdef	CAM_NEW_TRAN_CODE
2503 			if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2504 				cpi->base_transfer_speed = 600000;
2505 			else
2506 				cpi->base_transfer_speed = 300000;
2507 			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2508 			   (acb->vendor_device_id == PCIDevVenIDARC1680))
2509 			{
2510 				cpi->transport = XPORT_SAS;
2511 				cpi->transport_version = 0;
2512 				cpi->protocol_version = SCSI_REV_SPC2;
2513 			}
2514 			else
2515 			{
2516 				cpi->transport = XPORT_SPI;
2517 				cpi->transport_version = 2;
2518 				cpi->protocol_version = SCSI_REV_2;
2519 			}
2520 			cpi->protocol = PROTO_SCSI;
2521 		#endif
2522 			cpi->ccb_h.status |= CAM_REQ_CMP;
2523 			xpt_done(pccb);
2524 			break;
2525 		}
2526 	case XPT_ABORT: {
2527 			union ccb *pabort_ccb;
2528 
2529 			pabort_ccb=pccb->cab.abort_ccb;
2530 			switch (pabort_ccb->ccb_h.func_code) {
2531 			case XPT_ACCEPT_TARGET_IO:
2532 			case XPT_IMMED_NOTIFY:
2533 			case XPT_CONT_TARGET_IO:
2534 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2535 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2536 					xpt_done(pabort_ccb);
2537 					pccb->ccb_h.status |= CAM_REQ_CMP;
2538 				} else {
2539 					xpt_print_path(pabort_ccb->ccb_h.path);
2540 					kprintf("Not found\n");
2541 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2542 				}
2543 				break;
2544 			case XPT_SCSI_IO:
2545 				pccb->ccb_h.status |= CAM_UA_ABORT;
2546 				break;
2547 			default:
2548 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2549 				break;
2550 			}
2551 			xpt_done(pccb);
2552 			break;
2553 		}
2554 	case XPT_RESET_BUS:
2555 	case XPT_RESET_DEV: {
2556 			u_int32_t     i;
2557 
2558 			arcmsr_bus_reset(acb);
2559 			for (i=0; i < 500; i++) {
2560 				DELAY(1000);
2561 			}
2562 			pccb->ccb_h.status |= CAM_REQ_CMP;
2563 			xpt_done(pccb);
2564 			break;
2565 		}
2566 	case XPT_TERM_IO: {
2567 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2568 			xpt_done(pccb);
2569 			break;
2570 		}
2571 	case XPT_GET_TRAN_SETTINGS: {
2572 			struct ccb_trans_settings *cts;
2573 
2574 			if(pccb->ccb_h.target_id == 16) {
2575 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2576 				xpt_done(pccb);
2577 				break;
2578 			}
2579 			cts= &pccb->cts;
2580 		#ifdef	CAM_NEW_TRAN_CODE
2581 			{
2582 				struct ccb_trans_settings_scsi *scsi;
2583 				struct ccb_trans_settings_spi *spi;
2584 				struct ccb_trans_settings_sas *sas;
2585 
2586 				scsi = &cts->proto_specific.scsi;
2587 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2588 				scsi->valid = CTS_SCSI_VALID_TQ;
2589 				cts->protocol = PROTO_SCSI;
2590 
2591 				if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2592 				   (acb->vendor_device_id == PCIDevVenIDARC1680))
2593 				{
2594 					cts->protocol_version = SCSI_REV_SPC2;
2595 					cts->transport_version = 0;
2596 					cts->transport = XPORT_SAS;
2597 					sas = &cts->xport_specific.sas;
2598 					sas->valid = CTS_SAS_VALID_SPEED;
2599 					if(acb->vendor_device_id == PCIDevVenIDARC1880)
2600 						sas->bitrate = 600000;
2601 					else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2602 						sas->bitrate = 300000;
2603 				}
2604 				else
2605 				{
2606 					cts->protocol_version = SCSI_REV_2;
2607 					cts->transport_version = 2;
2608 					cts->transport = XPORT_SPI;
2609 					spi = &cts->xport_specific.spi;
2610 					spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2611 					spi->sync_period=2;
2612 					spi->sync_offset=32;
2613 					spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2614 					spi->valid = CTS_SPI_VALID_DISC
2615 						| CTS_SPI_VALID_SYNC_RATE
2616 						| CTS_SPI_VALID_SYNC_OFFSET
2617 						| CTS_SPI_VALID_BUS_WIDTH;
2618 				}
2619 			}
2620 		#else
2621 			{
2622 				cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
2623 				cts->sync_period=2;
2624 				cts->sync_offset=32;
2625 				cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2626 				cts->valid=CCB_TRANS_SYNC_RATE_VALID |
2627 				CCB_TRANS_SYNC_OFFSET_VALID |
2628 				CCB_TRANS_BUS_WIDTH_VALID |
2629 				CCB_TRANS_DISC_VALID |
2630 				CCB_TRANS_TQ_VALID;
2631 			}
2632 		#endif
2633 			pccb->ccb_h.status |= CAM_REQ_CMP;
2634 			xpt_done(pccb);
2635 			break;
2636 		}
2637 	case XPT_SET_TRAN_SETTINGS: {
2638 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2639 			xpt_done(pccb);
2640 			break;
2641 		}
2642 	case XPT_CALC_GEOMETRY:
2643 			if(pccb->ccb_h.target_id == 16) {
2644 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2645 				xpt_done(pccb);
2646 				break;
2647 			}
2648 			cam_calc_geometry(&pccb->ccg, 1);
2649 			xpt_done(pccb);
2650 			break;
2651 	default:
2652 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2653 		xpt_done(pccb);
2654 		break;
2655 	}
2656 }
2657 /*
2658 **********************************************************************
2659 **********************************************************************
2660 */
2661 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2662 {
2663 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2664 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2665 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2666 		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2667 	}
2668 }
2669 /*
2670 **********************************************************************
2671 **********************************************************************
2672 */
2673 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2674 {
2675 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2676 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
2677 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2678 		kprintf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2679 	}
2680 }
2681 /*
2682 **********************************************************************
2683 **********************************************************************
2684 */
2685 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2686 {
2687 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2688 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2689 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2690 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2691 		kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2692 	}
2693 }
2694 /*
2695 **********************************************************************
2696 **********************************************************************
2697 */
2698 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2699 {
2700 	switch (acb->adapter_type) {
2701 	case ACB_ADAPTER_TYPE_A:
2702 		arcmsr_start_hba_bgrb(acb);
2703 		break;
2704 	case ACB_ADAPTER_TYPE_B:
2705 		arcmsr_start_hbb_bgrb(acb);
2706 		break;
2707 	case ACB_ADAPTER_TYPE_C:
2708 		arcmsr_start_hbc_bgrb(acb);
2709 		break;
2710 	}
2711 }
2712 /*
2713 **********************************************************************
2714 **
2715 **********************************************************************
2716 */
2717 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2718 {
2719 	struct CommandControlBlock *srb;
2720 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2721 	u_int16_t	error;
2722 
2723 polling_ccb_retry:
2724 	poll_count++;
2725 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2726 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
2727 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2728 	while(1) {
2729 		if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2730 			0, outbound_queueport))==0xFFFFFFFF) {
2731 			if(poll_srb_done) {
2732 				break;/*chip FIFO no ccb for completion already*/
2733 			} else {
2734 				UDELAY(25000);
2735 				if ((poll_count > 100) && (poll_srb != NULL)) {
2736 					break;
2737 				}
2738 				goto polling_ccb_retry;
2739 			}
2740 		}
2741 		/* check if command done with no error*/
2742 		srb=(struct CommandControlBlock *)
2743 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2744         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2745 		poll_srb_done = (srb==poll_srb) ? 1:0;
2746 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2747 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2748 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2749 					"poll command abort successfully \n"
2750 					, acb->pci_unit
2751 					, srb->pccb->ccb_h.target_id
2752 					, srb->pccb->ccb_h.target_lun, srb);
2753 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2754 				arcmsr_srb_complete(srb, 1);
2755 				continue;
2756 			}
2757 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2758 				"srboutstandingcount=%d \n"
2759 				, acb->pci_unit
2760 				, srb, acb->srboutstandingcount);
2761 			continue;
2762 		}
2763 		arcmsr_report_srb_state(acb, srb, error);
2764 	}	/*drain reply FIFO*/
2765 }
2766 /*
2767 **********************************************************************
2768 **
2769 **********************************************************************
2770 */
2771 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2772 {
2773 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2774 	struct CommandControlBlock *srb;
2775 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2776 	int index;
2777 	u_int16_t	error;
2778 
2779 polling_ccb_retry:
2780 	poll_count++;
2781 	CHIP_REG_WRITE32(HBB_DOORBELL,
2782 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2783 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2784 	while(1) {
2785 		index=phbbmu->doneq_index;
2786 		if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2787 			if(poll_srb_done) {
2788 				break;/*chip FIFO no ccb for completion already*/
2789 			} else {
2790 				UDELAY(25000);
2791 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2792 					break;
2793 				}
2794 				goto polling_ccb_retry;
2795 			}
2796 		}
2797 		phbbmu->done_qbuffer[index]=0;
2798 		index++;
2799 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
2800 		phbbmu->doneq_index=index;
2801 		/* check if command done with no error*/
2802 		srb=(struct CommandControlBlock *)
2803 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2804         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2805 		poll_srb_done = (srb==poll_srb) ? 1:0;
2806 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2807 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2808 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2809 					"poll command abort successfully \n"
2810 					, acb->pci_unit
2811 					, srb->pccb->ccb_h.target_id
2812 					, srb->pccb->ccb_h.target_lun, srb);
2813 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2814 				arcmsr_srb_complete(srb, 1);
2815 				continue;
2816 			}
2817 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2818 				"srboutstandingcount=%d \n"
2819 				, acb->pci_unit
2820 				, srb, acb->srboutstandingcount);
2821 			continue;
2822 		}
2823 		arcmsr_report_srb_state(acb, srb, error);
2824 	}	/*drain reply FIFO*/
2825 }
2826 /*
2827 **********************************************************************
2828 **
2829 **********************************************************************
2830 */
2831 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2832 {
2833 	struct CommandControlBlock *srb;
2834 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2835 	u_int16_t	error;
2836 
2837 polling_ccb_retry:
2838 	poll_count++;
2839 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2840 	while(1) {
2841 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2842 			if(poll_srb_done) {
2843 				break;/*chip FIFO no ccb for completion already*/
2844 			} else {
2845 				UDELAY(25000);
2846 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2847 					break;
2848 				}
2849 			    if (acb->srboutstandingcount == 0) {
2850 				    break;
2851 			    }
2852 				goto polling_ccb_retry;
2853 			}
2854 		}
2855 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2856 		/* check if command done with no error*/
2857 		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
2858         error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
2859 		if (poll_srb != NULL)
2860 			poll_srb_done = (srb==poll_srb) ? 1:0;
2861 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2862 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2863 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2864 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2865 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2866 				arcmsr_srb_complete(srb, 1);
2867 				continue;
2868 			}
2869 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2870 					, acb->pci_unit, srb, acb->srboutstandingcount);
2871 			continue;
2872 		}
2873 		arcmsr_report_srb_state(acb, srb, error);
2874 	}	/*drain reply FIFO*/
2875 }
2876 /*
2877 **********************************************************************
2878 **********************************************************************
2879 */
2880 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2881 {
2882 	switch (acb->adapter_type) {
2883 	case ACB_ADAPTER_TYPE_A: {
2884 			arcmsr_polling_hba_srbdone(acb, poll_srb);
2885 		}
2886 		break;
2887 	case ACB_ADAPTER_TYPE_B: {
2888 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
2889 		}
2890 		break;
2891 	case ACB_ADAPTER_TYPE_C: {
2892 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
2893 		}
2894 		break;
2895 	}
2896 }
2897 /*
2898 **********************************************************************
2899 **********************************************************************
2900 */
2901 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2902 {
2903 	char *acb_firm_model=acb->firm_model;
2904 	char *acb_firm_version=acb->firm_version;
2905 	char *acb_device_map = acb->device_map;
2906 	size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2907 	size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2908 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2909 	int i;
2910 
2911 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2912 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2913 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2914 	}
2915 	i=0;
2916 	while(i<8) {
2917 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2918 		/* 8 bytes firm_model, 15, 60-67*/
2919 		acb_firm_model++;
2920 		i++;
2921 	}
2922 	i=0;
2923 	while(i<16) {
2924 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2925 		/* 16 bytes firm_version, 17, 68-83*/
2926 		acb_firm_version++;
2927 		i++;
2928 	}
2929 	i=0;
2930 	while(i<16) {
2931 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2932 		acb_device_map++;
2933 		i++;
2934 	}
2935 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2936 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2937 	acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2938 	acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2939 	acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2940 	acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2941 	acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2942 }
2943 /*
2944 **********************************************************************
2945 **********************************************************************
2946 */
2947 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2948 {
2949 	char *acb_firm_model=acb->firm_model;
2950 	char *acb_firm_version=acb->firm_version;
2951 	char *acb_device_map = acb->device_map;
2952 	size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2953 	size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2954 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2955 	int i;
2956 
2957 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2958 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2959 		kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2960 	}
2961 	i=0;
2962 	while(i<8) {
2963 		*acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2964 		/* 8 bytes firm_model, 15, 60-67*/
2965 		acb_firm_model++;
2966 		i++;
2967 	}
2968 	i=0;
2969 	while(i<16) {
2970 		*acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2971 		/* 16 bytes firm_version, 17, 68-83*/
2972 		acb_firm_version++;
2973 		i++;
2974 	}
2975 	i=0;
2976 	while(i<16) {
2977 		*acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2978 		acb_device_map++;
2979 		i++;
2980 	}
2981 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2982 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2983 	acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2984 	acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2985 	acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2986 	acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2987 	acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2988 }
2989 /*
2990 **********************************************************************
2991 **********************************************************************
2992 */
2993 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2994 {
2995 	char *acb_firm_model=acb->firm_model;
2996 	char *acb_firm_version=acb->firm_version;
2997 	char *acb_device_map = acb->device_map;
2998 	size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
2999 	size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3000 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3001 	int i;
3002 
3003 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3004 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3005 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3006 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3007 	}
3008 	i=0;
3009 	while(i<8) {
3010 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3011 		/* 8 bytes firm_model, 15, 60-67*/
3012 		acb_firm_model++;
3013 		i++;
3014 	}
3015 	i=0;
3016 	while(i<16) {
3017 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3018 		/* 16 bytes firm_version, 17, 68-83*/
3019 		acb_firm_version++;
3020 		i++;
3021 	}
3022 	i=0;
3023 	while(i<16) {
3024 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3025 		acb_device_map++;
3026 		i++;
3027 	}
3028 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3029 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
3030 	acb->firm_request_len	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
3031 	acb->firm_numbers_queue	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
3032 	acb->firm_sdram_size	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
3033 	acb->firm_ide_channels	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
3034 	acb->firm_cfg_version	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
3035 }
3036 /*
3037 **********************************************************************
3038 **********************************************************************
3039 */
3040 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3041 {
3042 	switch (acb->adapter_type) {
3043 	case ACB_ADAPTER_TYPE_A: {
3044 			arcmsr_get_hba_config(acb);
3045 		}
3046 		break;
3047 	case ACB_ADAPTER_TYPE_B: {
3048 			arcmsr_get_hbb_config(acb);
3049 		}
3050 		break;
3051 	case ACB_ADAPTER_TYPE_C: {
3052 			arcmsr_get_hbc_config(acb);
3053 		}
3054 		break;
3055 	}
3056 }
3057 /*
3058 **********************************************************************
3059 **********************************************************************
3060 */
3061 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3062 {
3063 	int	timeout=0;
3064 
3065 	switch (acb->adapter_type) {
3066 	case ACB_ADAPTER_TYPE_A: {
3067 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3068 			{
3069 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3070 				{
3071 					kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit);
3072 					return;
3073 				}
3074 				UDELAY(15000); /* wait 15 milli-seconds */
3075 			}
3076 		}
3077 		break;
3078 	case ACB_ADAPTER_TYPE_B: {
3079 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3080 			{
3081 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3082 				{
3083 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3084 					return;
3085 				}
3086 				UDELAY(15000); /* wait 15 milli-seconds */
3087 			}
3088 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3089 		}
3090 		break;
3091 	case ACB_ADAPTER_TYPE_C: {
3092 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3093 			{
3094 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3095 				{
3096 					kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
3097 					return;
3098 				}
3099 				UDELAY(15000); /* wait 15 milli-seconds */
3100 			}
3101 		}
3102 		break;
3103 	}
3104 }
3105 /*
3106 **********************************************************************
3107 **********************************************************************
3108 */
3109 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3110 {
3111 	u_int32_t outbound_doorbell;
3112 
3113 	switch (acb->adapter_type) {
3114 	case ACB_ADAPTER_TYPE_A: {
3115 			/* empty doorbell Qbuffer if door bell ringed */
3116 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3117 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3118 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3119 
3120 		}
3121 		break;
3122 	case ACB_ADAPTER_TYPE_B: {
3123 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3124 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3125 			/* let IOP know data has been read */
3126 		}
3127 		break;
3128 	case ACB_ADAPTER_TYPE_C: {
3129 			/* empty doorbell Qbuffer if door bell ringed */
3130 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3131 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3132 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3133 
3134 		}
3135 		break;
3136 	}
3137 }
3138 /*
3139 ************************************************************************
3140 ************************************************************************
3141 */
3142 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3143 {
3144 	unsigned long srb_phyaddr;
3145 	u_int32_t srb_phyaddr_hi32;
3146 
3147 	/*
3148 	********************************************************************
3149 	** here we need to tell iop 331 our freesrb.HighPart
3150 	** if freesrb.HighPart is not zero
3151 	********************************************************************
3152 	*/
3153 	srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3154 //	srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3155 	srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3156 	switch (acb->adapter_type) {
3157 	case ACB_ADAPTER_TYPE_A: {
3158 			if(srb_phyaddr_hi32!=0) {
3159 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3160 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3161 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3162 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3163 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3164 					return FALSE;
3165 				}
3166 			}
3167 		}
3168 		break;
3169 		/*
3170 		***********************************************************************
3171 		**    if adapter type B, set window of "post command Q"
3172 		***********************************************************************
3173 		*/
3174 	case ACB_ADAPTER_TYPE_B: {
3175 			u_int32_t post_queue_phyaddr;
3176 			struct HBB_MessageUnit *phbbmu;
3177 
3178 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3179 			phbbmu->postq_index=0;
3180 			phbbmu->doneq_index=0;
3181 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3182 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3183 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3184 				return FALSE;
3185 			}
3186 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3187 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3188 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3189 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
3190 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
3191 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
3192 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
3193 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3194 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3195 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3196 				return FALSE;
3197 			}
3198 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3199 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3200 				kprintf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit);
3201 				return FALSE;
3202 			}
3203 		}
3204 		break;
3205 	case ACB_ADAPTER_TYPE_C: {
3206 			if(srb_phyaddr_hi32!=0) {
3207 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3208 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3209 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3210 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3211 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3212 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3213 					return FALSE;
3214 				}
3215 			}
3216 		}
3217 		break;
3218 	}
3219 	return (TRUE);
3220 }
3221 /*
3222 ************************************************************************
3223 ************************************************************************
3224 */
3225 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3226 {
3227 	switch (acb->adapter_type)
3228 	{
3229 	case ACB_ADAPTER_TYPE_A:
3230 	case ACB_ADAPTER_TYPE_C:
3231 		break;
3232 	case ACB_ADAPTER_TYPE_B: {
3233 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3234 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3235 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3236 
3237 				return;
3238 			}
3239 		}
3240 		break;
3241 	}
3242 }
3243 /*
3244 **********************************************************************
3245 **********************************************************************
3246 */
3247 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3248 {
3249 	u_int32_t intmask_org;
3250 
3251 	/* disable all outbound interrupt */
3252 	intmask_org=arcmsr_disable_allintr(acb);
3253 	arcmsr_wait_firmware_ready(acb);
3254 	arcmsr_iop_confirm(acb);
3255 	arcmsr_get_firmware_spec(acb);
3256 	/*start background rebuild*/
3257 	arcmsr_start_adapter_bgrb(acb);
3258 	/* empty doorbell Qbuffer if door bell ringed */
3259 	arcmsr_clear_doorbell_queue_buffer(acb);
3260 	arcmsr_enable_eoi_mode(acb);
3261 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3262 	arcmsr_enable_allintr(acb, intmask_org);
3263 	acb->acb_flags |=ACB_F_IOP_INITED;
3264 }
3265 /*
3266 **********************************************************************
3267 **********************************************************************
3268 */
3269 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3270 {
3271 	struct AdapterControlBlock *acb=arg;
3272 	struct CommandControlBlock *srb_tmp;
3273 	u_int8_t * dma_memptr;
3274 	u_int32_t i;
3275 	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3276 
3277 	dma_memptr=acb->uncacheptr;
3278 	acb->srb_phyaddr.phyaddr=srb_phyaddr;
3279 	srb_tmp=(struct CommandControlBlock *)dma_memptr;
3280 	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3281 		if(bus_dmamap_create(acb->dm_segs_dmat,
3282 			 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3283 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3284 			kprintf("arcmsr%d:"
3285 			" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3286 			return;
3287 		}
3288 		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3289 		srb_tmp->acb=acb;
3290 		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3291 		srb_phyaddr=srb_phyaddr+SRB_SIZE;
3292 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE);
3293 	}
3294 	acb->vir2phy_offset=(unsigned long)srb_tmp-srb_phyaddr;
3295 }
3296 /*
3297 ************************************************************************
3298 **
3299 **
3300 ************************************************************************
3301 */
3302 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3303 {
3304 	/* remove the control device */
3305 	if(acb->ioctl_dev != NULL) {
3306 		destroy_dev(acb->ioctl_dev);
3307 	}
3308 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3309 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3310 	bus_dma_tag_destroy(acb->srb_dmat);
3311 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3312 	bus_dma_tag_destroy(acb->parent_dmat);
3313 }
3314 /*
3315 ************************************************************************
3316 ************************************************************************
3317 */
3318 static u_int32_t arcmsr_initialize(device_t dev)
3319 {
3320 	struct AdapterControlBlock *acb=device_get_softc(dev);
3321 	u_int16_t pci_command;
3322 	int i, j,max_coherent_size;
3323 	u_int32_t vendor_dev_id;
3324 
3325 	vendor_dev_id = pci_get_devid(dev);
3326 	acb->vendor_device_id = vendor_dev_id;
3327 	switch (vendor_dev_id) {
3328 	case PCIDevVenIDARC1880:
3329 	case PCIDevVenIDARC1882:
3330 	case PCIDevVenIDARC1213:
3331 	case PCIDevVenIDARC1223: {
3332 			acb->adapter_type=ACB_ADAPTER_TYPE_C;
3333 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3334 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3335 		}
3336 		break;
3337 	case PCIDevVenIDARC1200:
3338 	case PCIDevVenIDARC1201: {
3339 			acb->adapter_type=ACB_ADAPTER_TYPE_B;
3340 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3341 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3342 		}
3343 		break;
3344 	case PCIDevVenIDARC1110:
3345 	case PCIDevVenIDARC1120:
3346 	case PCIDevVenIDARC1130:
3347 	case PCIDevVenIDARC1160:
3348 	case PCIDevVenIDARC1170:
3349 	case PCIDevVenIDARC1210:
3350 	case PCIDevVenIDARC1220:
3351 	case PCIDevVenIDARC1230:
3352 	case PCIDevVenIDARC1231:
3353 	case PCIDevVenIDARC1260:
3354 	case PCIDevVenIDARC1261:
3355 	case PCIDevVenIDARC1270:
3356 	case PCIDevVenIDARC1280:
3357 	case PCIDevVenIDARC1212:
3358 	case PCIDevVenIDARC1222:
3359 	case PCIDevVenIDARC1380:
3360 	case PCIDevVenIDARC1381:
3361 	case PCIDevVenIDARC1680:
3362 	case PCIDevVenIDARC1681: {
3363 			acb->adapter_type=ACB_ADAPTER_TYPE_A;
3364 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3365 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3366 		}
3367 		break;
3368 	default: {
3369 			kprintf("arcmsr%d:"
3370 			" unknown RAID adapter type \n", device_get_unit(dev));
3371 			return ENOMEM;
3372 		}
3373 	}
3374 	if(bus_dma_tag_create(  /*parent*/	NULL,
3375 				/*alignemnt*/	1,
3376 				/*boundary*/	0,
3377 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3378 				/*highaddr*/	BUS_SPACE_MAXADDR,
3379 				/*filter*/	NULL,
3380 				/*filterarg*/	NULL,
3381 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3382 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3383 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3384 				/*flags*/	0,
3385 						&acb->parent_dmat) != 0)
3386 	{
3387 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3388 		return ENOMEM;
3389 	}
3390 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3391 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3392 				/*alignment*/	1,
3393 				/*boundary*/	0,
3394 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3395 				/*highaddr*/	BUS_SPACE_MAXADDR,
3396 				/*filter*/	NULL,
3397 				/*filterarg*/	NULL,
3398 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3399 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3400 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3401 				/*flags*/	0,
3402 						&acb->dm_segs_dmat) != 0)
3403 	{
3404 		bus_dma_tag_destroy(acb->parent_dmat);
3405 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3406 		return ENOMEM;
3407 	}
3408 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3409 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3410 				/*alignment*/	0x20,
3411 				/*boundary*/	0,
3412 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3413 				/*highaddr*/	BUS_SPACE_MAXADDR,
3414 				/*filter*/	NULL,
3415 				/*filterarg*/	NULL,
3416 				/*maxsize*/	max_coherent_size,
3417 				/*nsegments*/	1,
3418 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3419 				/*flags*/	0,
3420 						&acb->srb_dmat) != 0)
3421 	{
3422 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3423 		bus_dma_tag_destroy(acb->parent_dmat);
3424 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3425 		return ENXIO;
3426 	}
3427 	/* Allocation for our srbs */
3428 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3429 		bus_dma_tag_destroy(acb->srb_dmat);
3430 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3431 		bus_dma_tag_destroy(acb->parent_dmat);
3432 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3433 		return ENXIO;
3434 	}
3435 	/* And permanently map them */
3436 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3437 		bus_dma_tag_destroy(acb->srb_dmat);
3438 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3439 		bus_dma_tag_destroy(acb->parent_dmat);
3440 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3441 		return ENXIO;
3442 	}
3443 	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3444 	pci_command |= PCIM_CMD_BUSMASTEREN;
3445 	pci_command |= PCIM_CMD_PERRESPEN;
3446 	pci_command |= PCIM_CMD_MWRICEN;
3447 	/* Enable Busmaster/Mem */
3448 	pci_command |= PCIM_CMD_MEMEN;
3449 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
3450 	switch(acb->adapter_type) {
3451 	case ACB_ADAPTER_TYPE_A: {
3452 			u_int32_t rid0=PCIR_BAR(0);
3453 			vm_offset_t	mem_base0;
3454 
3455 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3456 			if(acb->sys_res_arcmsr[0] == NULL) {
3457 				arcmsr_free_resource(acb);
3458 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3459 				return ENOMEM;
3460 			}
3461 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3462 				arcmsr_free_resource(acb);
3463 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3464 				return ENXIO;
3465 			}
3466 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3467 			if(mem_base0==0) {
3468 				arcmsr_free_resource(acb);
3469 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3470 				return ENXIO;
3471 			}
3472 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3473 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3474 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3475 		}
3476 		break;
3477 	case ACB_ADAPTER_TYPE_B: {
3478 			struct HBB_MessageUnit *phbbmu;
3479 			struct CommandControlBlock *freesrb;
3480 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3481 			vm_offset_t	mem_base[]={0,0};
3482 			for(i=0; i<2; i++) {
3483 				if(i==0) {
3484 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3485 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3486 				} else {
3487 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3488 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3489 				}
3490 				if(acb->sys_res_arcmsr[i] == NULL) {
3491 					arcmsr_free_resource(acb);
3492 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3493 					return ENOMEM;
3494 				}
3495 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3496 					arcmsr_free_resource(acb);
3497 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3498 					return ENXIO;
3499 				}
3500 				mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3501 				if(mem_base[i]==0) {
3502 					arcmsr_free_resource(acb);
3503 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3504 					return ENXIO;
3505 				}
3506 				acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3507 				acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
3508 			}
3509 			freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3510 //			acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3511 			acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
3512 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3513 			phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3514 			phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3515 		}
3516 		break;
3517 	case ACB_ADAPTER_TYPE_C: {
3518 			u_int32_t rid0=PCIR_BAR(1);
3519 			vm_offset_t	mem_base0;
3520 
3521 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3522 			if(acb->sys_res_arcmsr[0] == NULL) {
3523 				arcmsr_free_resource(acb);
3524 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3525 				return ENOMEM;
3526 			}
3527 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3528 				arcmsr_free_resource(acb);
3529 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3530 				return ENXIO;
3531 			}
3532 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3533 			if(mem_base0==0) {
3534 				arcmsr_free_resource(acb);
3535 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3536 				return ENXIO;
3537 			}
3538 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3539 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3540 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3541 		}
3542 		break;
3543 	}
3544 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3545 		arcmsr_free_resource(acb);
3546 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3547 		return ENXIO;
3548 	}
3549 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3550 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3551 	/*
3552 	********************************************************************
3553 	** init raid volume state
3554 	********************************************************************
3555 	*/
3556 	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3557 		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3558 			acb->devstate[i][j]=ARECA_RAID_GONE;
3559 		}
3560 	}
3561 	arcmsr_iop_init(acb);
3562 	return(0);
3563 }
3564 /*
3565 ************************************************************************
3566 ************************************************************************
3567 */
3568 static int arcmsr_attach(device_t dev)
3569 {
3570 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3571 	u_int32_t unit=device_get_unit(dev);
3572 	struct ccb_setasync csa;
3573 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
3574 	struct resource	*irqres;
3575 	int	rid;
3576 	u_int irq_flags;
3577 
3578 	if(acb == NULL) {
3579 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
3580 		return (ENOMEM);
3581 	}
3582 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3583 	if(arcmsr_initialize(dev)) {
3584 		kprintf("arcmsr%d: initialize failure!\n", unit);
3585 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3586 		return ENXIO;
3587 	}
3588 	/* After setting up the adapter, map our interrupt */
3589 	rid=0;
3590 	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
3591 	    &irq_flags);
3592 	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
3593 	    irq_flags);
3594 	if(irqres == NULL ||
3595 		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3596 		arcmsr_free_resource(acb);
3597 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3598 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3599 		return ENXIO;
3600 	}
3601 	acb->irqres=irqres;
3602 	acb->pci_dev=dev;
3603 	acb->pci_unit=unit;
3604 	/*
3605 	 * Now let the CAM generic SCSI layer find the SCSI devices on
3606 	 * the bus *  start queue to reset to the idle loop. *
3607 	 * Create device queue of SIM(s) *  (MAX_START_JOB - 1) :
3608 	 * max_sim_transactions
3609 	*/
3610 	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3611 	if(devq == NULL) {
3612 	    arcmsr_free_resource(acb);
3613 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3614 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3615 			pci_release_msi(dev);
3616 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3617 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3618 		return ENXIO;
3619 	}
3620 	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
3621 	cam_simq_release(devq);
3622 	if(acb->psim == NULL) {
3623 		arcmsr_free_resource(acb);
3624 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3625 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3626 			pci_release_msi(dev);
3627 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3628 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3629 		return ENXIO;
3630 	}
3631 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3632 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3633 		arcmsr_free_resource(acb);
3634 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3635 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3636 			pci_release_msi(dev);
3637 		cam_sim_free(acb->psim);
3638 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3639 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3640 		return ENXIO;
3641 	}
3642 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3643 		arcmsr_free_resource(acb);
3644 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3645 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3646 			pci_release_msi(dev);
3647 		xpt_bus_deregister(cam_sim_path(acb->psim));
3648 		cam_sim_free(acb->psim);
3649 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3650 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3651 		return ENXIO;
3652 	}
3653 	/*
3654 	****************************************************
3655 	*/
3656 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3657 	csa.ccb_h.func_code=XPT_SASYNC_CB;
3658 	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3659 	csa.callback=arcmsr_async;
3660 	csa.callback_arg=acb->psim;
3661 	xpt_action((union ccb *)&csa);
3662 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3663 	/* Create the control device.  */
3664 	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3665 
3666 	acb->ioctl_dev->si_drv1=acb;
3667 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
3668 	arcmsr_callout_init(&acb->devmap_callout);
3669 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3670 	return (0);
3671 }
3672 
3673 /*
3674 ************************************************************************
3675 ************************************************************************
3676 */
3677 static int arcmsr_probe(device_t dev)
3678 {
3679 	u_int32_t id;
3680 	static char buf[256];
3681 	char x_type[]={"X-TYPE"};
3682 	char *type;
3683 	int raid6 = 1;
3684 
3685 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3686 		return (ENXIO);
3687 	}
3688 	switch(id=pci_get_devid(dev)) {
3689 	case PCIDevVenIDARC1110:
3690 	case PCIDevVenIDARC1200:
3691 	case PCIDevVenIDARC1201:
3692 	case PCIDevVenIDARC1210:
3693 		raid6 = 0;
3694 		/*FALLTHRU*/
3695 	case PCIDevVenIDARC1120:
3696 	case PCIDevVenIDARC1130:
3697 	case PCIDevVenIDARC1160:
3698 	case PCIDevVenIDARC1170:
3699 	case PCIDevVenIDARC1220:
3700 	case PCIDevVenIDARC1230:
3701 	case PCIDevVenIDARC1231:
3702 	case PCIDevVenIDARC1260:
3703 	case PCIDevVenIDARC1261:
3704 	case PCIDevVenIDARC1270:
3705 	case PCIDevVenIDARC1280:
3706 		type = "SATA";
3707 		break;
3708 	case PCIDevVenIDARC1212:
3709 	case PCIDevVenIDARC1222:
3710 	case PCIDevVenIDARC1380:
3711 	case PCIDevVenIDARC1381:
3712 	case PCIDevVenIDARC1680:
3713 	case PCIDevVenIDARC1681:
3714 		type = "SAS 3G";
3715 		break;
3716 	case PCIDevVenIDARC1880:
3717 	case PCIDevVenIDARC1882:
3718 	case PCIDevVenIDARC1213:
3719 	case PCIDevVenIDARC1223:
3720 		type = "SAS 6G";
3721 		arcmsr_msi_enable = 0;
3722 		break;
3723 	default:
3724 		type = x_type;
3725 		break;
3726 	}
3727 	if(type == x_type)
3728 		return(ENXIO);
3729 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3730 	device_set_desc_copy(dev, buf);
3731 	return (BUS_PROBE_DEFAULT);
3732 }
3733 /*
3734 ************************************************************************
3735 ************************************************************************
3736 */
3737 static int arcmsr_shutdown(device_t dev)
3738 {
3739 	u_int32_t  i;
3740 	u_int32_t intmask_org;
3741 	struct CommandControlBlock *srb;
3742 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3743 
3744 	/* stop adapter background rebuild */
3745 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3746 	/* disable all outbound interrupt */
3747 	intmask_org=arcmsr_disable_allintr(acb);
3748 	arcmsr_stop_adapter_bgrb(acb);
3749 	arcmsr_flush_adapter_cache(acb);
3750 	/* abort all outstanding command */
3751 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3752 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3753 	if(acb->srboutstandingcount!=0) {
3754 		/*clear and abort all outbound posted Q*/
3755 		arcmsr_done4abort_postqueue(acb);
3756 		/* talk to iop 331 outstanding command aborted*/
3757 		arcmsr_abort_allcmd(acb);
3758 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3759 			srb=acb->psrb_pool[i];
3760 			if(srb->srb_state==ARCMSR_SRB_START) {
3761 				srb->srb_state=ARCMSR_SRB_ABORTED;
3762 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3763 				arcmsr_srb_complete(srb, 1);
3764 			}
3765 		}
3766 	}
3767 	acb->srboutstandingcount=0;
3768 	acb->workingsrb_doneindex=0;
3769 	acb->workingsrb_startindex=0;
3770 #ifdef ARCMSR_DEBUG1
3771 	acb->pktRequestCount = 0;
3772 	acb->pktReturnCount = 0;
3773 #endif
3774 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3775 	return (0);
3776 }
3777 /*
3778 ************************************************************************
3779 ************************************************************************
3780 */
3781 static int arcmsr_detach(device_t dev)
3782 {
3783 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3784 	int i;
3785 
3786 	callout_stop(&acb->devmap_callout);
3787 	bus_teardown_intr(dev, acb->irqres, acb->ih);
3788 	arcmsr_shutdown(dev);
3789 	arcmsr_free_resource(acb);
3790 	for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) {
3791 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3792 	}
3793 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3794 	if (acb->irq_type == PCI_INTR_TYPE_MSI)
3795 		pci_release_msi(dev);
3796 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3797 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3798 	xpt_free_path(acb->ppath);
3799 	xpt_bus_deregister(cam_sim_path(acb->psim));
3800 	cam_sim_free(acb->psim);
3801 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3802 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3803 	return (0);
3804 }
3805 
3806 #ifdef ARCMSR_DEBUG1
3807 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
3808 {
3809 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
3810 		return;
3811 	printf("Command Request Count   =0x%x\n",acb->pktRequestCount);
3812 	printf("Command Return Count    =0x%x\n",acb->pktReturnCount);
3813 	printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
3814 	printf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
3815 }
3816 #endif
3817