xref: /dragonfly/sys/dev/raid/arcmsr/arcmsr.c (revision 19380330)
1 /*
2 *****************************************************************************************
3 **        O.S   : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 **                ARCMSR RAID Host adapter
9 **                [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 **        Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 **    notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 **    notice, this list of conditions and the following disclaimer in the
23 **    documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 **    derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 **        REV#         DATE             NAME             DESCRIPTION
41 **     1.00.00.00   03/31/2004      Erich Chen           First release
42 **     1.20.00.02   11/29/2004      Erich Chen           bug fix with arcmsr_bus_reset when PHY error
43 **     1.20.00.03   04/19/2005      Erich Chen           add SATA 24 Ports adapter type support
44 **                                                       clean unused function
45 **     1.20.00.12   09/12/2005      Erich Chen           bug fix with abort command handling,
46 **                                                       firmware version check
47 **                                                       and firmware update notify for hardware bug fix
48 **                                                       handling of non-zero high part physical address
49 **                                                       of srb resource
50 **     1.20.00.13   08/18/2006      Erich Chen           remove pending srb and report busy
51 **                                                       add iop message xfer
52 **                                                       with scsi pass-through command
53 **                                                       add new device id of sas raid adapters
54 **                                                       code fit for SPARC64 & PPC
55 **     1.20.00.14   02/05/2007      Erich Chen           bug fix for incorrect ccb_h.status report
56 **                                                       which caused g_vfs_done() read/write errors
57 **     1.20.00.15   10/10/2007      Erich Chen           support new RAID adapter type ARC120x
58 **     1.20.00.16   10/10/2009      Erich Chen           Bug fix for RAID adapter type ARC120x
59 **                                                       bus_dmamem_alloc() with BUS_DMA_ZERO
60 **     1.20.00.17   07/15/2010      Ching Huang          Added support ARC1880
61 **                                                       report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 **                                                       prevent cam_periph_error removing all LUN devices of one Target id
63 **                                                       when any one LUN device fails
64 **     1.20.00.18   10/14/2010      Ching Huang          Fixed "inquiry data fails comparison at DV1 step"
65 **                  10/25/2010      Ching Huang          Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 **     1.20.00.19   11/11/2010      Ching Huang          Fixed arcmsr driver preventing arcsas support for Areca SAS HBA ARC13x0
67 **     1.20.00.20   12/08/2010      Ching Huang          Avoid calling atomic_set_int function
68 **     1.20.00.21   02/08/2011      Ching Huang          Implement I/O request timeout
69 **                  02/14/2011      Ching Huang          Modified pktRequestCount
70 **     1.20.00.21   03/03/2011      Ching Huang          if a command times out, wait for its ccb to come back before freeing it
71 **     1.20.00.22   07/04/2011      Ching Huang          Fixed multiple MTX panic
72 **     1.20.00.23   10/28/2011      Ching Huang          Added TIMEOUT_DELAY in case too many HDDs need to start
73 **     1.20.00.23   11/08/2011      Ching Huang          Added reporting of device transfer speed
74 **     1.20.00.23   01/30/2012      Ching Huang          Fixed Request requeued and Retrying command
75 **     1.20.00.24   06/11/2012      Ching Huang          Fixed return sense data condition
76 **     1.20.00.25   08/17/2012      Ching Huang          Fixed hotplug device not functioning on type A adapter
77 ******************************************************************************************
78 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.43 2012/09/04 05:15:54 delphij Exp $
79 */
80 #if 0
81 #define ARCMSR_DEBUG1			1
82 #endif
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
87 #include <sys/bus.h>
88 #include <sys/queue.h>
89 #include <sys/stat.h>
90 #include <sys/devicestat.h>
91 #include <sys/kthread.h>
92 #include <sys/module.h>
93 #include <sys/proc.h>
94 #include <sys/lock.h>
95 #include <sys/sysctl.h>
96 #include <sys/thread2.h>
97 #include <sys/poll.h>
98 #include <sys/device.h>
99 #include <vm/vm.h>
100 #include <vm/vm_param.h>
101 #include <vm/pmap.h>
102 
103 #include <machine/atomic.h>
104 #include <sys/conf.h>
105 #include <sys/rman.h>
106 
107 #include <bus/cam/cam.h>
108 #include <bus/cam/cam_ccb.h>
109 #include <bus/cam/cam_sim.h>
110 #include <bus/cam/cam_periph.h>
111 #include <bus/cam/cam_xpt_periph.h>
112 #include <bus/cam/cam_xpt_sim.h>
113 #include <bus/cam/cam_debug.h>
114 #include <bus/cam/scsi/scsi_all.h>
115 #include <bus/cam/scsi/scsi_message.h>
116 /*
117 **************************************************************************
118 **************************************************************************
119 */
120 #include <sys/endian.h>
121 #include <bus/pci/pcivar.h>
122 #include <bus/pci/pcireg.h>
123 #define ARCMSR_LOCK_INIT(l, s)	lockinit(l, s, 0, LK_CANRECURSE)
124 #define ARCMSR_LOCK_DESTROY(l)	lockuninit(l)
125 #define ARCMSR_LOCK_ACQUIRE(l)	lockmgr(l, LK_EXCLUSIVE)
126 #define ARCMSR_LOCK_RELEASE(l)	lockmgr(l, LK_RELEASE)
127 #define ARCMSR_LOCK_TRY(l)	lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT)
128 #define arcmsr_htole32(x)	htole32(x)
129 typedef struct lock		arcmsr_lock_t;
130 
131 #if !defined(CAM_NEW_TRAN_CODE)
132 #define	CAM_NEW_TRAN_CODE	1
133 #endif
134 
135 #define arcmsr_callout_init(a)	callout_init_mp(a);
136 
137 #define ARCMSR_DRIVER_VERSION			"Driver Version 1.20.00.25 2012-08-17"
138 #include <dev/raid/arcmsr/arcmsr.h>
139 #define	SRB_SIZE						((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
140 #define ARCMSR_SRBS_POOL_SIZE           (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
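/*
** Note: SRB_SIZE rounds sizeof(struct CommandControlBlock) up to the next
** 32-byte multiple; the 0xffe0 mask also clears bits 16 and above, so this
** assumes the structure stays well below 64 KiB.  As a sketch with a
** hypothetical size of 0x2c8 bytes: (0x2c8 + 0x1f) & 0xffe0 = 0x2e0.
** The 32-byte alignment keeps the low 5 bits of every SRB physical address
** free, which the post/completion paths rely on (see arcmsr_post_srb() and
** arcmsr_drain_donequeue()).
*/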
141 /*
142 **************************************************************************
143 **************************************************************************
144 */
145 #define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
146 #define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
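/*
** Note: both macros expand against a local variable named 'acb' in the
** calling function and use offsetof() on the given message-unit structure
** to form the register offset.  For example,
**     CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus)
** expands to roughly
**     bus_space_read_4(acb->btag[0], acb->bhandle[0],
**         offsetof(struct HBA_MessageUnit, outbound_intstatus))
*/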
147 /*
148 **************************************************************************
149 **************************************************************************
150 */
151 static void arcmsr_free_srb(struct CommandControlBlock *srb);
152 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
153 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
154 static int arcmsr_probe(device_t dev);
155 static int arcmsr_attach(device_t dev);
156 static int arcmsr_detach(device_t dev);
157 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
158 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
159 static int arcmsr_shutdown(device_t dev);
160 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
161 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
162 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
163 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
164 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
165 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
166 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
167 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
168 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
169 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
170 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
171 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
172 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
173 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
174 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
175 static int arcmsr_resume(device_t dev);
176 static int arcmsr_suspend(device_t dev);
177 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
178 static void	arcmsr_polling_devmap(void* arg);
179 static void	arcmsr_srb_timeout(void* arg);
180 #ifdef ARCMSR_DEBUG1
181 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
182 #endif
183 /*
184 **************************************************************************
185 **************************************************************************
186 */
187 static void UDELAY(u_int32_t us) { DELAY(us); }
188 /*
189 **************************************************************************
190 **************************************************************************
191 */
192 static bus_dmamap_callback_t arcmsr_map_free_srb;
193 static bus_dmamap_callback_t arcmsr_execute_srb;
194 /*
195 **************************************************************************
196 **************************************************************************
197 */
198 static d_open_t	arcmsr_open;
199 static d_close_t arcmsr_close;
200 static d_ioctl_t arcmsr_ioctl;
201 
202 static device_method_t arcmsr_methods[]={
203 	DEVMETHOD(device_probe,		arcmsr_probe),
204 	DEVMETHOD(device_attach,	arcmsr_attach),
205 	DEVMETHOD(device_detach,	arcmsr_detach),
206 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
207 	DEVMETHOD(device_suspend,	arcmsr_suspend),
208 	DEVMETHOD(device_resume,	arcmsr_resume),
209 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
210 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
211 	{ 0, 0 }
212 };
213 
214 static driver_t arcmsr_driver={
215 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
216 };
217 
218 static devclass_t arcmsr_devclass;
219 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
220 MODULE_VERSION(arcmsr, 1);
221 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
222 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
223 #ifndef BUS_DMA_COHERENT
224 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
225 #endif
226 
227 static struct dev_ops arcmsr_ops = {
228 	{ "arcmsr", 0, 0 },
229 	.d_open =	arcmsr_open,		        /* open     */
230 	.d_close =	arcmsr_close,		        /* close    */
231 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
232 };
233 
234 static int	arcmsr_msi_enable = 1;
235 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
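/*
** Note: arcmsr_msi_enable defaults to 1 (prefer MSI).  As a sketch, the
** loader tunable hw.arcmsr.msi.enable can override it at boot, e.g. in
** /boot/loader.conf:
**     hw.arcmsr.msi.enable="0"
*/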
236 
237 
238 /*
239 **************************************************************************
240 **************************************************************************
241 */
242 
243 static int
244 arcmsr_open(struct dev_open_args *ap)
245 {
246 	cdev_t dev = ap->a_head.a_dev;
247 	struct AdapterControlBlock *acb=dev->si_drv1;
248 
249 	if(acb==NULL) {
250 		return ENXIO;
251 	}
252 	return (0);
253 }
254 
255 /*
256 **************************************************************************
257 **************************************************************************
258 */
259 
260 static int
261 arcmsr_close(struct dev_close_args *ap)
262 {
263 	cdev_t dev = ap->a_head.a_dev;
264 	struct AdapterControlBlock *acb=dev->si_drv1;
265 
266 	if(acb==NULL) {
267 		return ENXIO;
268 	}
269 	return 0;
270 }
271 
272 /*
273 **************************************************************************
274 **************************************************************************
275 */
276 
277 static int
278 arcmsr_ioctl(struct dev_ioctl_args *ap)
279 {
280 	cdev_t dev = ap->a_head.a_dev;
281 	u_long ioctl_cmd = ap->a_cmd;
282 	caddr_t arg = ap->a_data;
283 	struct AdapterControlBlock *acb=dev->si_drv1;
284 
285 	if(acb==NULL) {
286 		return ENXIO;
287 	}
288 	return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
289 }
290 
291 /*
292 **********************************************************************
293 **********************************************************************
294 */
295 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
296 {
297 	u_int32_t intmask_org=0;
298 
299 	switch (acb->adapter_type) {
300 	case ACB_ADAPTER_TYPE_A: {
301 			/* disable all outbound interrupt */
302 			intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
303 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
304 		}
305 		break;
306 	case ACB_ADAPTER_TYPE_B: {
307 			/* disable all outbound interrupt */
308 			intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
309 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
310 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
311 		}
312 		break;
313 	case ACB_ADAPTER_TYPE_C: {
314 			/* disable all outbound interrupt */
315 			intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask)	; /* disable outbound message0 int */
316 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
317 		}
318 		break;
319 	}
320 	return (intmask_org);
321 }
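/*
** Note: the returned value is the interrupt mask as it was before masking,
** so callers can bracket a critical section and restore it afterwards,
** e.g. (sketch of the pattern used in arcmsr_iop_reset() and
** arcmsr_abort_dr_ccbs()):
**     intmask_org = arcmsr_disable_allintr(acb);
**     ... manipulate the post queues with interrupts masked ...
**     arcmsr_enable_allintr(acb, intmask_org);
*/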
322 /*
323 **********************************************************************
324 **********************************************************************
325 */
326 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
327 {
328 	u_int32_t mask;
329 
330 	switch (acb->adapter_type) {
331 	case ACB_ADAPTER_TYPE_A: {
332 			/* enable outbound Post Queue, outbound doorbell Interrupt */
333 			mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
334 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
335 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
336 		}
337 		break;
338 	case ACB_ADAPTER_TYPE_B: {
339 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
340 			mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
341 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
342 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
343 		}
344 		break;
345 	case ACB_ADAPTER_TYPE_C: {
346 			/* enable outbound Post Queue, outbound doorbell Interrupt */
347 			mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
348 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
349 			acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
350 		}
351 		break;
352 	}
353 }
354 /*
355 **********************************************************************
356 **********************************************************************
357 */
358 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
359 {
360 	u_int32_t Index;
361 	u_int8_t Retries=0x00;
362 
363 	do {
364 		for(Index=0; Index < 100; Index++) {
365 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
366 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
367 				return TRUE;
368 			}
369 			UDELAY(10000);
370 		}/*max 1 seconds*/
371 	}while(Retries++ < 20);/*max 20 sec*/
372 	return (FALSE);
373 }
374 /*
375 **********************************************************************
376 **********************************************************************
377 */
378 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
379 {
380 	u_int32_t Index;
381 	u_int8_t Retries=0x00;
382 
383 	do {
384 		for(Index=0; Index < 100; Index++) {
385 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
386 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
387 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
388 				return TRUE;
389 			}
390 			UDELAY(10000);
391 		}/*max 1 seconds*/
392 	}while(Retries++ < 20);/*max 20 sec*/
393 	return (FALSE);
394 }
395 /*
396 **********************************************************************
397 **********************************************************************
398 */
399 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
400 {
401 	u_int32_t Index;
402 	u_int8_t Retries=0x00;
403 
404 	do {
405 		for(Index=0; Index < 100; Index++) {
406 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
407 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
408 				return TRUE;
409 			}
410 			UDELAY(10000);
411 		}/*max 1 seconds*/
412 	}while(Retries++ < 20);/*max 20 sec*/
413 	return (FALSE);
414 }
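/*
** Note: the three *_wait_msgint_ready() helpers above poll in 10 ms steps,
** 100 polls per pass (about one second) for up to roughly 20 passes, i.e.
** on the order of 20 seconds worst case before giving up and returning FALSE.
*/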
415 /*
416 ************************************************************************
417 ************************************************************************
418 */
419 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
420 {
421 	int retry_count=30;/* allow up to 10 minutes for the adapter cache flush */
422 
423 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
424 	do {
425 		if(arcmsr_hba_wait_msgint_ready(acb)) {
426 			break;
427 		} else {
428 			retry_count--;
429 		}
430 	}while(retry_count!=0);
431 }
432 /*
433 ************************************************************************
434 ************************************************************************
435 */
436 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
437 {
438 	int retry_count=30;/* allow up to 10 minutes for the adapter cache flush */
439 
440 	CHIP_REG_WRITE32(HBB_DOORBELL,
441 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
442 	do {
443 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
444 			break;
445 		} else {
446 			retry_count--;
447 		}
448 	}while(retry_count!=0);
449 }
450 /*
451 ************************************************************************
452 ************************************************************************
453 */
454 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
455 {
456 	int retry_count=30;/* allow up to 10 minutes for the adapter cache flush */
457 
458 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
459 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
460 	do {
461 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
462 			break;
463 		} else {
464 			retry_count--;
465 		}
466 	}while(retry_count!=0);
467 }
468 /*
469 ************************************************************************
470 ************************************************************************
471 */
472 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
473 {
474 	switch (acb->adapter_type) {
475 	case ACB_ADAPTER_TYPE_A: {
476 			arcmsr_flush_hba_cache(acb);
477 		}
478 		break;
479 	case ACB_ADAPTER_TYPE_B: {
480 			arcmsr_flush_hbb_cache(acb);
481 		}
482 		break;
483 	case ACB_ADAPTER_TYPE_C: {
484 			arcmsr_flush_hbc_cache(acb);
485 		}
486 		break;
487 	}
488 }
489 /*
490 *******************************************************************************
491 *******************************************************************************
492 */
493 static int arcmsr_suspend(device_t dev)
494 {
495 	struct AdapterControlBlock	*acb = device_get_softc(dev);
496 
497 	/* flush controller */
498 	arcmsr_iop_parking(acb);
499 	/* disable all outbound interrupt */
500 	arcmsr_disable_allintr(acb);
501 	return(0);
502 }
503 /*
504 *******************************************************************************
505 *******************************************************************************
506 */
507 static int arcmsr_resume(device_t dev)
508 {
509 	struct AdapterControlBlock	*acb = device_get_softc(dev);
510 
511 	arcmsr_iop_init(acb);
512 	return(0);
513 }
514 /*
515 *********************************************************************************
516 *********************************************************************************
517 */
518 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
519 {
520 	struct AdapterControlBlock *acb;
521 	u_int8_t target_id, target_lun;
522 	struct cam_sim * sim;
523 
524 	sim=(struct cam_sim *) cb_arg;
525 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
526 	switch (code) {
527 	case AC_LOST_DEVICE:
528 		target_id=xpt_path_target_id(path);
529 		target_lun=xpt_path_lun_id(path);
530 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
531 			break;
532 		}
533 	//	kprintf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
534 		break;
535 	default:
536 		break;
537 	}
538 }
539 /*
540 **********************************************************************
541 **********************************************************************
542 */
543 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
544 {
545 	struct AdapterControlBlock *acb=srb->acb;
546 	union ccb * pccb=srb->pccb;
547 
548 	if(srb->srb_flags & SRB_FLAG_TIMER_START)
549 		callout_stop(&srb->ccb_callout);
550 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
551 		bus_dmasync_op_t op;
552 
553 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
554 			op = BUS_DMASYNC_POSTREAD;
555 		} else {
556 			op = BUS_DMASYNC_POSTWRITE;
557 		}
558 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
559 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
560 	}
561 	if(stand_flag==1) {
562 		atomic_subtract_int(&acb->srboutstandingcount, 1);
563 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
564 		acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
565 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
566 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
567 		}
568 	}
569 	if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
570 		arcmsr_free_srb(srb);
571 #ifdef ARCMSR_DEBUG1
572 	acb->pktReturnCount++;
573 #endif
574 	xpt_done(pccb);
575 	return;
576 }
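/*
** Note: arcmsr_srb_complete() is the common completion path: it post-syncs
** and unloads the data DMA map (POSTREAD for CAM_DIR_IN, POSTWRITE
** otherwise), decrements srboutstandingcount when stand_flag is 1, releases
** a frozen SIM queue once the outstanding count drops below
** ARCMSR_RELEASE_SIMQ_LEVEL, and hands the ccb back to CAM via xpt_done().
** A timed-out SRB is deliberately not freed here; it is reclaimed in
** arcmsr_drain_donequeue() when the controller eventually returns it.
*/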
577 /*
578 **********************************************************************
579 **********************************************************************
580 */
581 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
582 {
583 	union ccb * pccb=srb->pccb;
584 
585 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
586 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
587 	if(pccb->csio.sense_len) {
588 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
589 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
590 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
591 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
592 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
593 	}
594 }
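/*
** Note: byte 0 of the sense data is forced to 0x70 with bit 7 set, i.e.
** "valid, current error, fixed format" sense as defined by the SCSI spec,
** before CAM_AUTOSNS_VALID is reported so CAM treats the autosense data
** as already collected.
*/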
595 /*
596 *********************************************************************
597 *********************************************************************
598 */
599 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
600 {
601 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
602 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
603 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
604 	}
605 }
606 /*
607 *********************************************************************
608 *********************************************************************
609 */
610 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
611 {
612 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
613 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
614 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
615 	}
616 }
617 /*
618 *********************************************************************
619 *********************************************************************
620 */
621 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
622 {
623 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
624 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
625 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
626 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
627 	}
628 }
629 /*
630 *********************************************************************
631 *********************************************************************
632 */
633 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
634 {
635 	switch (acb->adapter_type) {
636 	case ACB_ADAPTER_TYPE_A: {
637 			arcmsr_abort_hba_allcmd(acb);
638 		}
639 		break;
640 	case ACB_ADAPTER_TYPE_B: {
641 			arcmsr_abort_hbb_allcmd(acb);
642 		}
643 		break;
644 	case ACB_ADAPTER_TYPE_C: {
645 			arcmsr_abort_hbc_allcmd(acb);
646 		}
647 		break;
648 	}
649 }
650 /*
651 **************************************************************************
652 **************************************************************************
653 */
654 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
655 {
656 	int target, lun;
657 
658 	target=srb->pccb->ccb_h.target_id;
659 	lun=srb->pccb->ccb_h.target_lun;
660 	if(error == FALSE) {
661 		if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
662 			acb->devstate[target][lun]=ARECA_RAID_GOOD;
663 		}
664 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
665 		arcmsr_srb_complete(srb, 1);
666 	} else {
667 		switch(srb->arcmsr_cdb.DeviceStatus) {
668 		case ARCMSR_DEV_SELECT_TIMEOUT: {
669 				if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
670 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
671 				}
672 				acb->devstate[target][lun]=ARECA_RAID_GONE;
673 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
674 				arcmsr_srb_complete(srb, 1);
675 			}
676 			break;
677 		case ARCMSR_DEV_ABORTED:
678 		case ARCMSR_DEV_INIT_FAIL: {
679 				acb->devstate[target][lun]=ARECA_RAID_GONE;
680 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
681 				arcmsr_srb_complete(srb, 1);
682 			}
683 			break;
684 		case SCSISTAT_CHECK_CONDITION: {
685 				acb->devstate[target][lun]=ARECA_RAID_GOOD;
686 				arcmsr_report_sense_info(srb);
687 				arcmsr_srb_complete(srb, 1);
688 			}
689 			break;
690 		default:
691 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n"
692 					, acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
693 			acb->devstate[target][lun]=ARECA_RAID_GONE;
694 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
695 			/* unknown error or crc error, just retry */
696 			arcmsr_srb_complete(srb, 1);
697 			break;
698 		}
699 	}
700 }
701 /*
702 **************************************************************************
703 **************************************************************************
704 */
705 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
706 {
707 	struct CommandControlBlock *srb;
708 
709 	/* check if command done with no error*/
710 	switch (acb->adapter_type) {
711 	case ACB_ADAPTER_TYPE_C:
712 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
713 		break;
714 	case ACB_ADAPTER_TYPE_A:
715 	case ACB_ADAPTER_TYPE_B:
716 	default:
717 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
718 		break;
719 	}
720 	if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
721 		if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
722 			arcmsr_free_srb(srb);
723 			kprintf("arcmsr%d: srb='%p' returned srb had already timed out\n", acb->pci_unit, srb);
724 			return;
725 		}
726 		kprintf("arcmsr%d: returned srb has already been completed\n"
727 			"srb='%p' srb_state=0x%x outstanding srb count=%d \n",
728 			acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
729 		return;
730 	}
731 	arcmsr_report_srb_state(acb, srb, error);
732 }
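/*
** Note: flag_srb is the completion token posted by the firmware.  For type
** A/B adapters it appears to be the SRB bus address shifted right by 5 bits
** (cf. cdb_shifted_phyaddr in arcmsr_post_srb()), hence the '<< 5' above;
** for type C the low 5 bits carry status, hence the '& 0xFFFFFFE0' mask.
** Either way the 32-byte SRB alignment guaranteed by SRB_SIZE makes the low
** bits reusable, acb->vir2phy_offset converts the recovered bus offset back
** into a kernel virtual CommandControlBlock pointer, and the error
** indication travels separately in the ARCMSR_SRBREPLY_FLAG_ERROR_MODE0/1
** bits tested by the callers.
*/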
733 /*
734 **************************************************************************
735 **************************************************************************
736 */
737 static void	arcmsr_srb_timeout(void* arg)
738 {
739 	struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
740 	struct AdapterControlBlock *acb;
741 	int target, lun;
742 	u_int8_t cmd;
743 
744 	target=srb->pccb->ccb_h.target_id;
745 	lun=srb->pccb->ccb_h.target_lun;
746 	acb = srb->acb;
747 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
748 	if(srb->srb_state == ARCMSR_SRB_START)
749 	{
750 		cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
751 		srb->srb_state = ARCMSR_SRB_TIMEOUT;
752 		srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
753 		arcmsr_srb_complete(srb, 1);
754 		kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command timed out!\n",
755 				 acb->pci_unit, target, lun, cmd, srb);
756 	}
757 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
758 #ifdef ARCMSR_DEBUG1
759 	arcmsr_dump_data(acb);
760 #endif
761 }
762 
763 /*
764 **********************************************************************
765 **********************************************************************
766 */
767 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
768 {
769 	int i=0;
770 	u_int32_t flag_srb;
771 	u_int16_t error;
772 
773 	switch (acb->adapter_type) {
774 	case ACB_ADAPTER_TYPE_A: {
775 			u_int32_t outbound_intstatus;
776 
777 			/*clear and abort all outbound posted Q*/
778 			outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
779 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
780 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
781 				error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
782 				arcmsr_drain_donequeue(acb, flag_srb, error);
783 			}
784 		}
785 		break;
786 	case ACB_ADAPTER_TYPE_B: {
787 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
788 
789 			/*clear all outbound posted Q*/
790 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
791 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
792 				if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
793 					phbbmu->done_qbuffer[i]=0;
794 					error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
795 					arcmsr_drain_donequeue(acb, flag_srb, error);
796 				}
797 				phbbmu->post_qbuffer[i]=0;
798 			}/*drain reply FIFO*/
799 			phbbmu->doneq_index=0;
800 			phbbmu->postq_index=0;
801 		}
802 		break;
803 	case ACB_ADAPTER_TYPE_C: {
804 
805 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
806 				flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
807 				error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
808 				arcmsr_drain_donequeue(acb, flag_srb, error);
809 			}
810 		}
811 		break;
812 	}
813 }
814 /*
815 ****************************************************************************
816 ****************************************************************************
817 */
818 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
819 {
820 	struct CommandControlBlock *srb;
821 	u_int32_t intmask_org;
822 	u_int32_t i=0;
823 
824 	if(acb->srboutstandingcount>0) {
825 		/* disable all outbound interrupt */
826 		intmask_org=arcmsr_disable_allintr(acb);
827 		/*clear and abort all outbound posted Q*/
828 		arcmsr_done4abort_postqueue(acb);
829 		/* tell iop 331 that all outstanding commands have been aborted */
830 		arcmsr_abort_allcmd(acb);
831 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
832 			srb=acb->psrb_pool[i];
833 			if(srb->srb_state==ARCMSR_SRB_START) {
834 				srb->srb_state=ARCMSR_SRB_ABORTED;
835 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
836 				arcmsr_srb_complete(srb, 1);
837 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
838 						, acb->pci_unit, srb->pccb->ccb_h.target_id
839 						, srb->pccb->ccb_h.target_lun, srb);
840 			}
841 		}
842 		/* enable all outbound interrupt */
843 		arcmsr_enable_allintr(acb, intmask_org);
844 	}
845 	acb->srboutstandingcount=0;
846 	acb->workingsrb_doneindex=0;
847 	acb->workingsrb_startindex=0;
848 #ifdef ARCMSR_DEBUG1
849 	acb->pktRequestCount = 0;
850 	acb->pktReturnCount = 0;
851 #endif
852 }
853 /*
854 **********************************************************************
855 **********************************************************************
856 */
857 static void arcmsr_build_srb(struct CommandControlBlock *srb,
858 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
859 {
860 	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
861 	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
862 	u_int32_t address_lo, address_hi;
863 	union ccb * pccb=srb->pccb;
864 	struct ccb_scsiio * pcsio= &pccb->csio;
865 	u_int32_t arccdbsize=0x30;
866 
867 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
868 	arcmsr_cdb->Bus=0;
869 	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
870 	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
871 	arcmsr_cdb->Function=1;
872 	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
873 	arcmsr_cdb->Context=0;
874 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
875 	if(nseg != 0) {
876 		struct AdapterControlBlock *acb=srb->acb;
877 		bus_dmasync_op_t op;
878 		u_int32_t length, i, cdb_sgcount=0;
879 
880 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
881 			op=BUS_DMASYNC_PREREAD;
882 		} else {
883 			op=BUS_DMASYNC_PREWRITE;
884 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
885 			srb->srb_flags|=SRB_FLAG_WRITE;
886 		}
887 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
888 		for(i=0;i<nseg;i++) {
889 			/* Get the physical address of the current data pointer */
890 			length=arcmsr_htole32(dm_segs[i].ds_len);
891 			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
892 			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
893 			if(address_hi==0) {
894 				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
895 				pdma_sg->address=address_lo;
896 				pdma_sg->length=length;
897 				psge += sizeof(struct SG32ENTRY);
898 				arccdbsize += sizeof(struct SG32ENTRY);
899 			} else {
900 				u_int32_t sg64s_size=0, tmplength=length;
901 
902 				while(1) {
903 					u_int64_t span4G, length0;
904 					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
905 
906 					span4G=(u_int64_t)address_lo + tmplength;
907 					pdma_sg->addresshigh=address_hi;
908 					pdma_sg->address=address_lo;
909 					if(span4G > 0x100000000) {
910 						/*see if cross 4G boundary*/
911 						length0=0x100000000-address_lo;
912 						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
913 						address_hi=address_hi+1;
914 						address_lo=0;
915 						tmplength=tmplength-(u_int32_t)length0;
916 						sg64s_size += sizeof(struct SG64ENTRY);
917 						psge += sizeof(struct SG64ENTRY);
918 						cdb_sgcount++;
919 					} else {
920 						pdma_sg->length=tmplength|IS_SG64_ADDR;
921 						sg64s_size += sizeof(struct SG64ENTRY);
922 						psge += sizeof(struct SG64ENTRY);
923 						break;
924 					}
925 				}
926 				arccdbsize += sg64s_size;
927 			}
928 			cdb_sgcount++;
929 		}
930 		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
931 		arcmsr_cdb->DataLength=pcsio->dxfer_len;
932 		if( arccdbsize > 256) {
933 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
934 		}
935 	} else {
936 		arcmsr_cdb->DataLength = 0;
937 	}
938 	srb->arc_cdb_size=arccdbsize;
939 }
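/*
** Note: the scatter/gather list built above uses 32-bit SG32ENTRY elements
** while a segment's upper address bits are zero and SG64ENTRY elements
** (length tagged with IS_SG64_ADDR) otherwise; a 64-bit segment that
** crosses a 4 GiB boundary is split in two.  Sketch: a segment at bus
** address 0x1_FFFFF000 of length 0x2000 becomes one SG64 entry of length
** 0x1000 ending at 0x2_00000000 and a second SG64 entry of length 0x1000
** starting there.  Descriptors that grow past 256 bytes set
** ARCMSR_CDB_FLAG_SGL_BSIZE to flag the oversized descriptor to the firmware.
*/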
940 /*
941 **************************************************************************
942 **************************************************************************
943 */
944 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
945 {
946 	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
947 	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
948 
949 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
950 	atomic_add_int(&acb->srboutstandingcount, 1);
951 	srb->srb_state=ARCMSR_SRB_START;
952 
953 	switch (acb->adapter_type) {
954 	case ACB_ADAPTER_TYPE_A: {
955 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
956 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
957 			} else {
958 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
959 			}
960 		}
961 		break;
962 	case ACB_ADAPTER_TYPE_B: {
963 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
964 			int ending_index, index;
965 
966 			index=phbbmu->postq_index;
967 			ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
968 			phbbmu->post_qbuffer[ending_index]=0;
969 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
970 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
971 			} else {
972 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
973 			}
974 			index++;
975 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /* wrap to 0 past the last index */
976 			phbbmu->postq_index=index;
977 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
978 		}
979 		break;
980 	case ACB_ADAPTER_TYPE_C:
981 		{
982 			u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
983 
984 			arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
985 			ccb_post_stamp = (cdb_shifted_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
986 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
987 			if(cdb_phyaddr_hi32)
988 			{
989 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_high, cdb_phyaddr_hi32);
990 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
991 			}
992 			else
993 			{
994 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
995 			}
996 		}
997 		break;
998 	}
999 }
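/*
** Note: what gets written to the controller is the SRB address token
** (cdb_shifted_phyaddr) with low bits reused for flags.  Type A ORs in
** ARCMSR_SRBPOST_FLAG_SGL_BSIZE for large SG lists and writes the token to
** inbound_queueport; type B places it in the software post_qbuffer ring and
** rings the ARCMSR_DRV2IOP_CDB_POSTED doorbell; type C caps arc_cdb_size at
** 0x300 bytes, ORs ((arc_cdb_size - 1) >> 6) | 1 into the token, and writes
** the upper 32 address bits to inbound_queueport_high first whenever
** srb_phyaddr has a non-zero high half.
*/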
1000 /*
1001 ************************************************************************
1002 ************************************************************************
1003 */
1004 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
1005 {
1006 	struct QBUFFER *qbuffer=NULL;
1007 
1008 	switch (acb->adapter_type) {
1009 	case ACB_ADAPTER_TYPE_A: {
1010 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1011 
1012 			qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
1013 		}
1014 		break;
1015 	case ACB_ADAPTER_TYPE_B: {
1016 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1017 
1018 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1019 		}
1020 		break;
1021 	case ACB_ADAPTER_TYPE_C: {
1022 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1023 
1024 			qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
1025 		}
1026 		break;
1027 	}
1028 	return(qbuffer);
1029 }
1030 /*
1031 ************************************************************************
1032 ************************************************************************
1033 */
1034 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1035 {
1036 	struct QBUFFER *qbuffer=NULL;
1037 
1038 	switch (acb->adapter_type) {
1039 	case ACB_ADAPTER_TYPE_A: {
1040 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1041 
1042 			qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
1043 		}
1044 		break;
1045 	case ACB_ADAPTER_TYPE_B: {
1046 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1047 
1048 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1049 		}
1050 		break;
1051 	case ACB_ADAPTER_TYPE_C: {
1052 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1053 
1054 			qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1055 		}
1056 		break;
1057 	}
1058 	return(qbuffer);
1059 }
1060 /*
1061 **************************************************************************
1062 **************************************************************************
1063 */
1064 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1065 {
1066 	switch (acb->adapter_type) {
1067 	case ACB_ADAPTER_TYPE_A: {
1068 			/* let IOP know data has been read */
1069 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1070 		}
1071 		break;
1072 	case ACB_ADAPTER_TYPE_B: {
1073 			/* let IOP know data has been read */
1074 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1075 		}
1076 		break;
1077 	case ACB_ADAPTER_TYPE_C: {
1078 			/* let IOP know data has been read */
1079 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1080 		}
1081 	}
1082 }
1083 /*
1084 **************************************************************************
1085 **************************************************************************
1086 */
1087 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1088 {
1089 	switch (acb->adapter_type) {
1090 	case ACB_ADAPTER_TYPE_A: {
1091 			/*
1092 			** push inbound doorbell tell iop, driver data write ok
1093 			** and wait reply on next hwinterrupt for next Qbuffer post
1094 			*/
1095 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1096 		}
1097 		break;
1098 	case ACB_ADAPTER_TYPE_B: {
1099 			/*
1100 			** push inbound doorbell tell iop, driver data write ok
1101 			** and wait reply on next hwinterrupt for next Qbuffer post
1102 			*/
1103 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1104 		}
1105 		break;
1106 	case ACB_ADAPTER_TYPE_C: {
1107 			/*
1108 			** push inbound doorbell tell iop, driver data write ok
1109 			** and wait reply on next hwinterrupt for next Qbuffer post
1110 			*/
1111 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1112 		}
1113 		break;
1114 	}
1115 }
1116 /*
1117 **********************************************************************
1118 **********************************************************************
1119 */
1120 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1121 {
1122 	u_int8_t *pQbuffer;
1123 	struct QBUFFER *pwbuffer;
1124 	u_int8_t * iop_data;
1125 	int32_t allxfer_len=0;
1126 
1127 	pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1128 	iop_data=(u_int8_t *)pwbuffer->data;
1129 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1130 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1131 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1132 			&& (allxfer_len<124)) {
1133 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1134 			memcpy(iop_data, pQbuffer, 1);
1135 			acb->wqbuf_firstindex++;
1136 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /* wrap to 0 past the last index */
1137 			iop_data++;
1138 			allxfer_len++;
1139 		}
1140 		pwbuffer->data_len=allxfer_len;
1141 		/*
1142 		** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1143 		*/
1144 		arcmsr_iop_message_wrote(acb);
1145 	}
1146 }
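/*
** Note: ioctl pass-through data moves through the small QBUFFER message
** windows one byte at a time, at most 124 bytes per doorbell exchange.
** wqbuf_firstindex/wqbuf_lastindex form a ring over acb->wqbuffer, and the
** IOP acknowledges each chunk with a "data read ok" doorbell before the
** next chunk is posted (see arcmsr_iop2drv_data_read_handle()).
*/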
1147 /*
1148 ************************************************************************
1149 ************************************************************************
1150 */
1151 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1152 {
1153 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1154 	CHIP_REG_WRITE32(HBA_MessageUnit,
1155 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1156 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1157 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1158 			, acb->pci_unit);
1159 	}
1160 	return;
1161 }
1162 /*
1163 ************************************************************************
1164 ************************************************************************
1165 */
1166 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1167 {
1168 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1169 	CHIP_REG_WRITE32(HBB_DOORBELL,
1170 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1171 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1172 		kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1173 			, acb->pci_unit);
1174 	}
1175 }
1176 /*
1177 ************************************************************************
1178 ************************************************************************
1179 */
1180 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1181 {
1182 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1183 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1184 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1185 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1186 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1187 	}
1188 }
1189 /*
1190 ************************************************************************
1191 ************************************************************************
1192 */
1193 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1194 {
1195 	switch (acb->adapter_type) {
1196 	case ACB_ADAPTER_TYPE_A: {
1197 			arcmsr_stop_hba_bgrb(acb);
1198 		}
1199 		break;
1200 	case ACB_ADAPTER_TYPE_B: {
1201 			arcmsr_stop_hbb_bgrb(acb);
1202 		}
1203 		break;
1204 	case ACB_ADAPTER_TYPE_C: {
1205 			arcmsr_stop_hbc_bgrb(acb);
1206 		}
1207 		break;
1208 	}
1209 }
1210 /*
1211 ************************************************************************
1212 ************************************************************************
1213 */
1214 static void arcmsr_poll(struct cam_sim * psim)
1215 {
1216 	struct AdapterControlBlock *acb;
1217 	int	mutex;
1218 
1219 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1220 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1221 	if( mutex == 0 )
1222 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1223 	arcmsr_interrupt(acb);
1224 	if( mutex == 0 )
1225 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1226 }
1227 /*
1228 **************************************************************************
1229 **************************************************************************
1230 */
1231 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1232 {
1233 	struct QBUFFER *prbuffer;
1234 	u_int8_t *pQbuffer;
1235 	u_int8_t *iop_data;
1236 	int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1237 
1238 	/*check this iop data if overflow my rqbuffer*/
1239 	rqbuf_lastindex=acb->rqbuf_lastindex;
1240 	rqbuf_firstindex=acb->rqbuf_firstindex;
1241 	prbuffer=arcmsr_get_iop_rqbuffer(acb);
1242 	iop_data=(u_int8_t *)prbuffer->data;
1243 	iop_len=prbuffer->data_len;
1244 	my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1245 	if(my_empty_len>=iop_len) {
1246 		while(iop_len > 0) {
1247 			pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1248 			memcpy(pQbuffer, iop_data, 1);
1249 			rqbuf_lastindex++;
1250 			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /* wrap to 0 past the last index */
1251 			iop_data++;
1252 			iop_len--;
1253 		}
1254 		acb->rqbuf_lastindex=rqbuf_lastindex;
1255 		arcmsr_iop_message_read(acb);
1256 		/*signature, let IOP know data has been read */
1257 	} else {
1258 		acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1259 	}
1260 }
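/*
** Note: the free space calculation above,
**     (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1),
** is the usual ring-buffer idiom that keeps one slot empty to tell "full"
** from "empty"; it assumes ARCMSR_MAX_QBUFFER is a power of two.  If the
** IOP offers more data than fits, ACB_F_IOPDATA_OVERFLOW is set and the
** IOP data is left to be picked up once the receive ring has room again.
*/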
1261 /*
1262 **************************************************************************
1263 **************************************************************************
1264 */
1265 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1266 {
1267 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1268 	/*
1269 	*****************************************************************
1270 	**   check if there are any mail packages from user space program
1271 	**   in my post bag, now is the time to send them into Areca's firmware
1272 	*****************************************************************
1273 	*/
1274 	if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1275 		u_int8_t *pQbuffer;
1276 		struct QBUFFER *pwbuffer;
1277 		u_int8_t *iop_data;
1278 		int allxfer_len=0;
1279 
1280 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1281 		pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1282 		iop_data=(u_int8_t *)pwbuffer->data;
1283 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1284 			&& (allxfer_len<124)) {
1285 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1286 			memcpy(iop_data, pQbuffer, 1);
1287 			acb->wqbuf_firstindex++;
1288 			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /* wrap to 0 past the last index */
1289 			iop_data++;
1290 			allxfer_len++;
1291 		}
1292 		pwbuffer->data_len=allxfer_len;
1293 		/*
1294 		** push inbound doorbell tell iop driver data write ok
1295 		** and wait reply on next hwinterrupt for next Qbuffer post
1296 		*/
1297 		arcmsr_iop_message_wrote(acb);
1298 	}
1299 	if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1300 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1301 	}
1302 }
1303 
1304 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1305 {
1306 /*
1307 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1308 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1309 	else
1310 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1311 */
1312 	xpt_free_path(ccb->ccb_h.path);
1313 }
1314 
1315 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1316 {
1317 	struct cam_path     *path;
1318 	union ccb            ccb;
1319 
1320 	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1321 		return;
1322 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1323 	bzero(&ccb, sizeof(union ccb));
1324 	xpt_setup_ccb(&ccb.ccb_h, path, 5);
1325 	ccb.ccb_h.func_code = XPT_SCAN_LUN;
1326 	ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1327 	ccb.crcn.flags = CAM_FLAG_NONE;
1328 	xpt_action(&ccb);
1329 }
1330 
1331 
1332 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1333 {
1334 	struct CommandControlBlock *srb;
1335 	u_int32_t intmask_org;
1336 	int i;
1337 
1338 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1339 	/* disable all outbound interrupts */
1340 	intmask_org = arcmsr_disable_allintr(acb);
1341 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1342 	{
1343 		srb = acb->psrb_pool[i];
1344 		if (srb->srb_state == ARCMSR_SRB_START)
1345 		{
1346 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1347 			{
1348 				srb->srb_state = ARCMSR_SRB_ABORTED;
1349 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1350 				arcmsr_srb_complete(srb, 1);
1351 				kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1352 			}
1353 		}
1354 	}
1355 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1356 	arcmsr_enable_allintr(acb, intmask_org);
1357 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1358 }
1359 
1360 
1361 /*
1362 **************************************************************************
1363 **************************************************************************
1364 */
1365 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1366 	u_int32_t	devicemap;
1367 	u_int32_t	target, lun;
1368 	u_int32_t	deviceMapCurrent[4]={0};
1369 	u_int8_t	*pDevMap;
1370 
1371 	switch (acb->adapter_type) {
1372 	case ACB_ADAPTER_TYPE_A:
1373 		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1374 		for (target = 0; target < 4; target++)
1375 		{
1376 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1377 			devicemap += 4;
1378 		}
1379 		break;
1380 
1381 	case ACB_ADAPTER_TYPE_B:
1382 		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1383 		for (target = 0; target < 4; target++)
1384 		{
1385 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1386 			devicemap += 4;
1387 		}
1388 		break;
1389 
1390 	case ACB_ADAPTER_TYPE_C:
1391 		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1392 		for (target = 0; target < 4; target++)
1393 		{
1394 			deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1395 			devicemap += 4;
1396 		}
1397 		break;
1398 	}
1399 
1400 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1401 	{
1402 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1403 	}
1404 	/*
1405 	** adapter posted CONFIG message
1406 	** copy the new map, note if there are differences with the current map
1407 	*/
1408 	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1409 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1410 	{
1411 		if (*pDevMap != acb->device_map[target])
1412 		{
1413 			u_int8_t difference, bit_check;
1414 
1415 			difference = *pDevMap ^ acb->device_map[target];
1416 			for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1417 			{
1418 				bit_check = (1 << lun);		/* check bit from 0....31 */
1419 				if(difference & bit_check)
1420 				{
1421 					if(acb->device_map[target] & bit_check)
1422 					{	/* unit departed */
1423 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n", target, lun);
1424 						arcmsr_abort_dr_ccbs(acb, target, lun);
1425 						arcmsr_rescan_lun(acb, target, lun);
1426 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1427 					}
1428 					else
1429 					{	/* unit arrived */
1430 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n", target, lun);
1431 						arcmsr_rescan_lun(acb, target, lun);
1432 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1433 					}
1434 				}
1435 			}
1436 /*			kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1437 			acb->device_map[target] = *pDevMap;
1438 		}
1439 		pDevMap++;
1440 	}
1441 }
1442 /*
1443 **************************************************************************
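** arcmsr_hba_message_isr: acknowledge the outbound MESSAGE0 interrupt on
** a type A adapter and, when the firmware signals a GET_CONFIG
** completion, run the device-map handler.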
1444 **************************************************************************
1445 */
1446 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1447 	u_int32_t outbound_message;
1448 
1449 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1450 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1451 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1452 		arcmsr_dr_handle( acb );
1453 }
1454 /*
1455 **************************************************************************
1456 **************************************************************************
1457 */
1458 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1459 	u_int32_t outbound_message;
1460 
1461 	/* clear interrupts */
1462 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1463 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1464 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1465 		arcmsr_dr_handle( acb );
1466 }
1467 /*
1468 **************************************************************************
1469 **************************************************************************
1470 */
1471 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1472 	u_int32_t outbound_message;
1473 
1474 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1475 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1476 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1477 		arcmsr_dr_handle( acb );
1478 }
1479 /*
1480 **************************************************************************
1481 **************************************************************************
1482 */
1483 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1484 {
1485 	u_int32_t outbound_doorbell;
1486 
1487 	/*
1488 	*******************************************************************
1489 	**  We may need to check whether wrqbuffer_lock is held here.
1490 	**  DOORBELL: ding! dong!
1491 	**  Check whether there is any mail that needs to be fetched from the firmware.
1492 	*******************************************************************
1493 	*/
1494 	outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
1495 	/* clear doorbell interrupt */
1496 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);
1498 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1499 		arcmsr_iop2drv_data_wrote_handle(acb);
1500 	}
1501 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1502 		arcmsr_iop2drv_data_read_handle(acb);
1503 	}
1504 }
1505 /*
1506 **************************************************************************
1507 **************************************************************************
1508 */
1509 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1510 {
1511 	u_int32_t outbound_doorbell;
1512 
1513 	/*
1514 	*******************************************************************
1515 	**  We may need to check whether wrqbuffer_lock is held here.
1516 	**  DOORBELL: ding! dong!
1517 	**  Check whether there is any mail that needs to be fetched from the firmware.
1518 	*******************************************************************
1519 	*/
1520 	outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1521 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1522 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1523 		arcmsr_iop2drv_data_wrote_handle(acb);
1524 	}
1525 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1526 		arcmsr_iop2drv_data_read_handle(acb);
1527 	}
1528 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1529 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1530 	}
1531 }
1532 /*
1533 **************************************************************************
1534 **************************************************************************
1535 */
1536 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1537 {
1538 	u_int32_t flag_srb;
1539 	u_int16_t error;
1540 
1541 	/*
1542 	*****************************************************************************
1543 	**               areca cdb command done
1544 	*****************************************************************************
1545 	*/
1546 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1547 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1548 	while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1549 		0, outbound_queueport)) != 0xFFFFFFFF) {
1550 		/* check if command done with no error*/
1551 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1552 		arcmsr_drain_donequeue(acb, flag_srb, error);
1553 	}	/*drain reply FIFO*/
1554 }
1555 /*
1556 **************************************************************************
1557 **************************************************************************
1558 */
1559 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1560 {
1561 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1562 	u_int32_t flag_srb;
1563 	int index;
1564 	u_int16_t error;
1565 
1566 	/*
1567 	*****************************************************************************
1568 	**               areca cdb command done
1569 	*****************************************************************************
1570 	*/
1571 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1572 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1573 	index=phbbmu->doneq_index;
1574 	while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1575 		phbbmu->done_qbuffer[index]=0;
1576 		index++;
1577 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
1578 		phbbmu->doneq_index=index;
1579 		/* check if command done with no error*/
1580 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1581 		arcmsr_drain_donequeue(acb, flag_srb, error);
1582 	}	/*drain reply FIFO*/
1583 }
1584 /*
1585 **************************************************************************
1586 **************************************************************************
1587 */
1588 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1589 {
1590 	u_int32_t flag_srb,throttling=0;
1591 	u_int16_t error;
1592 
1593 	/*
1594 	*****************************************************************************
1595 	**               areca cdb command done
1596 	*****************************************************************************
1597 	*/
1598 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1599 
1600 	while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1601 
1602 		flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1603 		/* check if command done with no error*/
1604 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
1605 		arcmsr_drain_donequeue(acb, flag_srb, error);
1606 		if(throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1607 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1608 			break;
1609 		}
1610 		throttling++;
1611 	}	/*drain reply FIFO*/
1612 }
1613 /*
1614 **********************************************************************
1615 **********************************************************************
1616 */
1617 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1618 {
1619 	u_int32_t outbound_intStatus;
1620 	/*
1621 	*********************************************
1622 	**   check outbound intstatus
1623 	*********************************************
1624 	*/
1625 	outbound_intStatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1626 	if(!outbound_intStatus) {
1627 		/* it must be a shared irq */
1628 		return;
1629 	}
1630 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus);/*clear interrupt*/
1631 	/* MU doorbell interrupts*/
1632 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1633 		arcmsr_hba_doorbell_isr(acb);
1634 	}
1635 	/* MU post queue interrupts*/
1636 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1637 		arcmsr_hba_postqueue_isr(acb);
1638 	}
1639 	if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1640 		arcmsr_hba_message_isr(acb);
1641 	}
1642 }
1643 /*
1644 **********************************************************************
1645 **********************************************************************
1646 */
1647 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1648 {
1649 	u_int32_t outbound_doorbell;
1650 	/*
1651 	*********************************************
1652 	**   check outbound intstatus
1653 	*********************************************
1654 	*/
1655 	outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1656 	if(!outbound_doorbell) {
1657 		/* it must be a shared irq */
1658 		return;
1659 	}
1660 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1661 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1662 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1663 	/* MU ioctl transfer doorbell interrupts*/
1664 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1665 		arcmsr_iop2drv_data_wrote_handle(acb);
1666 	}
1667 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1668 		arcmsr_iop2drv_data_read_handle(acb);
1669 	}
1670 	/* MU post queue interrupts*/
1671 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1672 		arcmsr_hbb_postqueue_isr(acb);
1673 	}
1674 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1675 		arcmsr_hbb_message_isr(acb);
1676 	}
1677 }
1678 /*
1679 **********************************************************************
1680 **********************************************************************
1681 */
1682 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1683 {
1684 	u_int32_t host_interrupt_status;
1685 	/*
1686 	*********************************************
1687 	**   check outbound intstatus
1688 	*********************************************
1689 	*/
1690 	host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1691 	if(!host_interrupt_status) {
1692 		/* it must be a shared irq */
1693 		return;
1694 	}
1695 	/* MU doorbell interrupts*/
1696 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1697 		arcmsr_hbc_doorbell_isr(acb);
1698 	}
1699 	/* MU post queue interrupts*/
1700 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1701 		arcmsr_hbc_postqueue_isr(acb);
1702 	}
1703 }
1704 /*
1705 ******************************************************************************
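** arcmsr_interrupt: dispatch to the interrupt handler for the detected
** adapter type (HBA/HBB/HBC message units).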
1706 ******************************************************************************
1707 */
1708 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1709 {
1710 	switch (acb->adapter_type) {
1711 	case ACB_ADAPTER_TYPE_A:
1712 		arcmsr_handle_hba_isr(acb);
1713 		break;
1714 	case ACB_ADAPTER_TYPE_B:
1715 		arcmsr_handle_hbb_isr(acb);
1716 		break;
1717 	case ACB_ADAPTER_TYPE_C:
1718 		arcmsr_handle_hbc_isr(acb);
1719 		break;
1720 	default:
1721 		kprintf("arcmsr%d: interrupt service,"
1722 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1723 		break;
1724 	}
1725 }
1726 /*
1727 **********************************************************************
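** arcmsr_intr_handler: interrupt entry point registered with the bus;
** it serializes interrupt processing against the queue/ioctl paths by
** taking qbuffer_lock around arcmsr_interrupt().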
1728 **********************************************************************
1729 */
1730 static void arcmsr_intr_handler(void *arg)
1731 {
1732 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1733 
1734 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1735 	arcmsr_interrupt(acb);
1736 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1737 }
1738 /*
1739 ******************************************************************************
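** arcmsr_polling_devmap: callout handler that asks the firmware for the
** current device map (GET_CONFIG) and re-arms itself every 5 seconds
** until the adapter is being stopped.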
1740 ******************************************************************************
1741 */
1742 static void	arcmsr_polling_devmap(void* arg)
1743 {
1744 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1745 	switch (acb->adapter_type) {
1746 	case ACB_ADAPTER_TYPE_A:
1747 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1748 		break;
1749 
1750 	case ACB_ADAPTER_TYPE_B:
1751 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1752 		break;
1753 
1754 	case ACB_ADAPTER_TYPE_C:
1755 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1756 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1757 		break;
1758 	}
1759 
1760 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1761 	{
1762 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* poll again in 5 seconds */
1763 	}
1764 }
1765 
1766 /*
1767 *******************************************************************************
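** arcmsr_iop_parking: quiesce the adapter by stopping the firmware's
** background rebuild and flushing the adapter cache, with all outbound
** interrupts masked while the messages are issued.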
1768 **
1769 *******************************************************************************
1770 */
1771 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1772 {
1773 	u_int32_t intmask_org;
1774 
1775 	if(acb!=NULL) {
1776 		/* stop adapter background rebuild */
1777 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1778 			intmask_org = arcmsr_disable_allintr(acb);
1779 			arcmsr_stop_adapter_bgrb(acb);
1780 			arcmsr_flush_adapter_cache(acb);
1781 			arcmsr_enable_allintr(acb, intmask_org);
1782 		}
1783 	}
1784 }
1785 /*
1786 ***********************************************************************
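** arcmsr_iop_ioctlcmd: service Areca pass-through ioctls (requests that
** carry the "ARCMSR" signature); these move message data between the
** driver's ring buffers and the caller, and issue adapter maintenance
** commands.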
1787 **
1788 ************************************************************************
1789 */
1790 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1791 {
1792 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1793 	u_int32_t retvalue=EINVAL;
1794 
1795 	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1796 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1797 		return retvalue;
1798 	}
1799 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1800 	switch(ioctl_cmd) {
1801 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1802 			u_int8_t * pQbuffer;
1803 			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1804 			u_int32_t allxfer_len=0;
1805 
1806 			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1807 				&& (allxfer_len<1031)) {
1808 				/* copy from the driver read qbuffer to the ioctl message buffer */
1809 				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1810 				memcpy(ptmpQbuffer, pQbuffer, 1);
1811 				acb->rqbuf_firstindex++;
1812 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1813 				/*if last index number set it to 0 */
1814 				ptmpQbuffer++;
1815 				allxfer_len++;
1816 			}
1817 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1818 				struct QBUFFER * prbuffer;
1819 				u_int8_t * iop_data;
1820 				u_int32_t iop_len;
1821 
1822 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1823 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
1824 				iop_data=(u_int8_t *)prbuffer->data;
1825 				iop_len=(u_int32_t)prbuffer->data_len;
1826 				/* this IOP data cannot overflow the local qbuffer again here, so just copy it */
1827 				while(iop_len>0) {
1828 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1829 					memcpy(pQbuffer, iop_data, 1);
1830 					acb->rqbuf_lastindex++;
1831 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1832 					/*if last index number set it to 0 */
1833 					iop_data++;
1834 					iop_len--;
1835 				}
1836 				arcmsr_iop_message_read(acb);
1837 				/* signal the IOP that the data has been read */
1838 			}
1839 			pcmdmessagefld->cmdmessage.Length=allxfer_len;
1840 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1841 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1842 		}
1843 		break;
1844 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1845 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1846 			u_int8_t * pQbuffer;
1847 			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1848 
1849 			user_len=pcmdmessagefld->cmdmessage.Length;
1850 			/*check if data xfer length of this request will overflow my array qbuffer */
1851 			wqbuf_lastindex=acb->wqbuf_lastindex;
1852 			wqbuf_firstindex=acb->wqbuf_firstindex;
1853 			if(wqbuf_lastindex!=wqbuf_firstindex) {
1854 				arcmsr_post_ioctldata2iop(acb);
1855 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1856 			} else {
1857 				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1858 				if(my_empty_len>=user_len) {
1859 					while(user_len>0) {
1860 						/*copy srb data to wqbuffer*/
1861 						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1862 						memcpy(pQbuffer, ptmpuserbuffer, 1);
1863 						acb->wqbuf_lastindex++;
1864 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1865 						/*if last index number set it to 0 */
1866 						ptmpuserbuffer++;
1867 						user_len--;
1868 					}
1869 					/*post first Qbuffer*/
1870 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1871 						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1872 						arcmsr_post_ioctldata2iop(acb);
1873 					}
1874 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1875 				} else {
1876 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1877 				}
1878 			}
1879 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1880 		}
1881 		break;
1882 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1883 			u_int8_t * pQbuffer=acb->rqbuffer;
1884 
1885 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1886 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1887 				arcmsr_iop_message_read(acb);
1888 				/* signal the IOP that the data has been read */
1889 			}
1890 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1891 			acb->rqbuf_firstindex=0;
1892 			acb->rqbuf_lastindex=0;
1893 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1894 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1895 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1896 		}
1897 		break;
1898 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1899 		{
1900 			u_int8_t * pQbuffer=acb->wqbuffer;
1901 
1902 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1903 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1904 				arcmsr_iop_message_read(acb);
1905 				/* signal the IOP that the data has been read */
1906 			}
1907 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1908 			acb->wqbuf_firstindex=0;
1909 			acb->wqbuf_lastindex=0;
1910 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1911 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1912 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1913 		}
1914 		break;
1915 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1916 			u_int8_t * pQbuffer;
1917 
1918 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1919 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1920 				arcmsr_iop_message_read(acb);
1921 				/* signal the IOP that the data has been read */
1922 			}
1923 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1924 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
1925 					|ACB_F_MESSAGE_WQBUFFER_READ);
1926 			acb->rqbuf_firstindex=0;
1927 			acb->rqbuf_lastindex=0;
1928 			acb->wqbuf_firstindex=0;
1929 			acb->wqbuf_lastindex=0;
1930 			pQbuffer=acb->rqbuffer;
1931 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1932 			pQbuffer=acb->wqbuffer;
1933 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1934 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1935 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1936 		}
1937 		break;
1938 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1939 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1940 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1941 		}
1942 		break;
1943 	case ARCMSR_MESSAGE_SAY_HELLO: {
1944 			const char *hello_string = "Hello! I am ARCMSR";
1945 			u_int8_t *puserbuffer = (u_int8_t *)pcmdmessagefld->messagedatabuffer;
1946 
1947 			/*
1948 			** memcpy() returns its destination, so the old "if (memcpy(...))"
1949 			** error check always fired and made SAY_HELLO fail; just copy
1950 			** the banner unconditionally.
1951 			*/
1952 			memcpy(puserbuffer, hello_string, strlen(hello_string));
1952 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1953 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1954 		}
1955 		break;
1956 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
1957 			arcmsr_iop_parking(acb);
1958 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1959 		}
1960 		break;
1961 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1962 			arcmsr_flush_adapter_cache(acb);
1963 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1964 		}
1965 		break;
1966 	}
1967 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1968 	return (retvalue);
1969 }
1970 /*
1971 **************************************************************************
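** arcmsr_free_srb: return a completed SRB to the working-queue free
** list, taking qbuffer_lock only if the caller does not already hold it.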
1972 **************************************************************************
1973 */
1974 static void arcmsr_free_srb(struct CommandControlBlock *srb)
1975 {
1976 	struct AdapterControlBlock	*acb;
1977 	int	mutex;
1978 
1979 	acb = srb->acb;
1980 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
1981 	if( mutex == 0 )
1982 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1983 	srb->srb_state=ARCMSR_SRB_DONE;
1984 	srb->srb_flags=0;
1985 	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
1986 	acb->workingsrb_doneindex++;
1987 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
1988 	if( mutex == 0 )
1989 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1990 }
1991 /*
1992 **************************************************************************
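** arcmsr_get_freesrb: pop the next free SRB from the working-queue ring,
** or return NULL when the ring is empty.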
1993 **************************************************************************
1994 */
1995 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1996 {
1997 	struct CommandControlBlock *srb=NULL;
1998 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
1999 	int	mutex;
2000 
2001 	mutex = lockstatus(&acb->qbuffer_lock, curthread);
2002 	if( mutex == 0 )
2003 		ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
2004 	workingsrb_doneindex=acb->workingsrb_doneindex;
2005 	workingsrb_startindex=acb->workingsrb_startindex;
2006 	srb=acb->srbworkingQ[workingsrb_startindex];
2007 	workingsrb_startindex++;
2008 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
2009 	if(workingsrb_doneindex!=workingsrb_startindex) {
2010 		acb->workingsrb_startindex=workingsrb_startindex;
2011 	} else {
2012 		srb=NULL;
2013 	}
2014 	if( mutex == 0 )
2015 		ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2016 	return(srb);
2017 }
2018 /*
2019 **************************************************************************
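** arcmsr_iop_message_xfer: handle the WRITE_BUFFER/READ_BUFFER SCSI
** pass-through path of the virtual device; the Areca control code is
** taken from CDB bytes 5-8 and the message payload lives in the CCB
** data buffer.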
2020 **************************************************************************
2021 */
2022 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
2023 {
2024 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
2025 	int retvalue = 0, transfer_len = 0;
2026 	char *buffer;
2027 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2028 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2029 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
2030 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2031 					/* 4 bytes: Areca io control code */
2032 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2033 		buffer = pccb->csio.data_ptr;
2034 		transfer_len = pccb->csio.dxfer_len;
2035 	} else {
2036 		retvalue = ARCMSR_MESSAGE_FAIL;
2037 		goto message_out;
2038 	}
2039 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2040 		retvalue = ARCMSR_MESSAGE_FAIL;
2041 		goto message_out;
2042 	}
2043 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2044 	switch(controlcode) {
2045 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
2046 			u_int8_t *pQbuffer;
2047 			u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
2048 			int32_t allxfer_len = 0;
2049 
2050 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2051 				&& (allxfer_len < 1031)) {
2052 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2053 				memcpy(ptmpQbuffer, pQbuffer, 1);
2054 				acb->rqbuf_firstindex++;
2055 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
2056 				ptmpQbuffer++;
2057 				allxfer_len++;
2058 			}
2059 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2060 				struct QBUFFER  *prbuffer;
2061 				u_int8_t  *iop_data;
2062 				int32_t iop_len;
2063 
2064 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2065 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
2066 				iop_data = (u_int8_t *)prbuffer->data;
2067 				iop_len =(u_int32_t)prbuffer->data_len;
2068 				while (iop_len > 0) {
2069 					pQbuffer = &acb->rqbuffer[acb->rqbuf_lastindex];
2070 					memcpy(pQbuffer, iop_data, 1);
2071 					acb->rqbuf_lastindex++;
2072 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2073 					iop_data++;
2074 					iop_len--;
2075 				}
2076 				arcmsr_iop_message_read(acb);
2077 			}
2078 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2079 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2080 			retvalue=ARCMSR_MESSAGE_SUCCESS;
2081 		}
2082 		break;
2083 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2084 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2085 			u_int8_t *pQbuffer;
2086 			u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2087 
2088 			user_len = pcmdmessagefld->cmdmessage.Length;
2089 			wqbuf_lastindex = acb->wqbuf_lastindex;
2090 			wqbuf_firstindex = acb->wqbuf_firstindex;
2091 			if (wqbuf_lastindex != wqbuf_firstindex) {
2092 				arcmsr_post_ioctldata2iop(acb);
2093 				/* has error report sensedata */
2094 				if(pccb->csio.sense_len) {
2095 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2096 					/* Valid, ErrorCode */
2097 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2098 					/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
2099 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2100 					/* AdditionalSenseLength */
2101 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2102 					/* AdditionalSenseCode */
2103 				}
2104 				retvalue = ARCMSR_MESSAGE_FAIL;
2105 			} else {
2106 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2107 						&(ARCMSR_MAX_QBUFFER - 1);
2108 				if (my_empty_len >= user_len) {
2109 					while (user_len > 0) {
2110 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2111 						memcpy(pQbuffer, ptmpuserbuffer, 1);
2112 						acb->wqbuf_lastindex++;
2113 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2114 						ptmpuserbuffer++;
2115 						user_len--;
2116 					}
2117 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2118 						acb->acb_flags &=
2119 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2120 						arcmsr_post_ioctldata2iop(acb);
2121 					}
2122 				} else {
2123 					/* has error report sensedata */
2124 					if(pccb->csio.sense_len) {
2125 						((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2126 						/* Valid, ErrorCode */
2127 						((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2128 						/* FileMark, EndOfMedia, IncorrectLength, Reserved, SenseKey */
2129 						((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2130 						/* AdditionalSenseLength */
2131 						((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2132 						/* AdditionalSenseCode */
2133 					}
2134 					retvalue = ARCMSR_MESSAGE_FAIL;
2135 				}
2136 			}
2137 		}
2138 		break;
2139 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2140 			u_int8_t *pQbuffer = acb->rqbuffer;
2141 
2142 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2143 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2144 				arcmsr_iop_message_read(acb);
2145 			}
2146 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2147 			acb->rqbuf_firstindex = 0;
2148 			acb->rqbuf_lastindex = 0;
2149 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2150 			pcmdmessagefld->cmdmessage.ReturnCode =
2151 			ARCMSR_MESSAGE_RETURNCODE_OK;
2152 		}
2153 		break;
2154 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2155 			u_int8_t *pQbuffer = acb->wqbuffer;
2156 
2157 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2158 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2159 				arcmsr_iop_message_read(acb);
2160 			}
2161 			acb->acb_flags |=
2162 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2163 					ACB_F_MESSAGE_WQBUFFER_READ);
2164 			acb->wqbuf_firstindex = 0;
2165 			acb->wqbuf_lastindex = 0;
2166 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2167 			pcmdmessagefld->cmdmessage.ReturnCode =
2168 				ARCMSR_MESSAGE_RETURNCODE_OK;
2169 		}
2170 		break;
2171 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2172 			u_int8_t *pQbuffer;
2173 
2174 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2175 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2176 				arcmsr_iop_message_read(acb);
2177 			}
2178 			acb->acb_flags |=
2179 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2180 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2181 				| ACB_F_MESSAGE_WQBUFFER_READ);
2182 			acb->rqbuf_firstindex = 0;
2183 			acb->rqbuf_lastindex = 0;
2184 			acb->wqbuf_firstindex = 0;
2185 			acb->wqbuf_lastindex = 0;
2186 			pQbuffer = acb->rqbuffer;
2187 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2188 			pQbuffer = acb->wqbuffer;
2189 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2190 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2191 		}
2192 		break;
2193 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2194 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2195 		}
2196 		break;
2197 	case ARCMSR_MESSAGE_SAY_HELLO: {
2198 			const char *hello_string = "Hello! I am ARCMSR";
2199 
2200 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2201 				, (int16_t)strlen(hello_string));
2202 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2203 		}
2204 		break;
2205 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2206 		arcmsr_iop_parking(acb);
2207 		break;
2208 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2209 		arcmsr_flush_adapter_cache(acb);
2210 		break;
2211 	default:
2212 		retvalue = ARCMSR_MESSAGE_FAIL;
2213 	}
2214 message_out:
2215 	return (retvalue);
2216 }
2217 /*
2218 *********************************************************************
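** arcmsr_execute_srb: bus_dmamap_load() callback; validates the request,
** builds the Areca command frame from the DMA segment list, posts it to
** the adapter and arms a timeout callout unless the CCB asked for an
** infinite timeout.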
2219 *********************************************************************
2220 */
2221 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2222 {
2223 	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2224 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2225 	union ccb * pccb;
2226 	int target, lun;
2227 
2228 	pccb=srb->pccb;
2229 	target=pccb->ccb_h.target_id;
2230 	lun=pccb->ccb_h.target_lun;
2231 #ifdef ARCMSR_DEBUG1
2232 	acb->pktRequestCount++;
2233 #endif
2234 	if(error != 0) {
2235 		if(error != EFBIG) {
2236 			kprintf("arcmsr%d: unexpected error %x"
2237 				" returned from 'bus_dmamap_load' \n"
2238 				, acb->pci_unit, error);
2239 		}
2240 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2241 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2242 		}
2243 		arcmsr_srb_complete(srb, 0);
2244 		return;
2245 	}
2246 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2247 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2248 		arcmsr_srb_complete(srb, 0);
2249 		return;
2250 	}
2251 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2252 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2253 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2254 		arcmsr_srb_complete(srb, 0);
2255 		return;
2256 	}
2257 	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2258 		u_int8_t block_cmd, cmd;
2259 
2260 		cmd = pccb->csio.cdb_io.cdb_bytes[0];
2261 		block_cmd= cmd & 0x0f;
2262 		if(block_cmd==0x08 || block_cmd==0x0a) {
2263 			kprintf("arcmsr%d:block 'read/write' command "
2264 				"with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2265 				, acb->pci_unit, cmd, target, lun);
2266 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2267 			arcmsr_srb_complete(srb, 0);
2268 			return;
2269 		}
2270 	}
2271 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2272 		if(nseg != 0) {
2273 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2274 		}
2275 		arcmsr_srb_complete(srb, 0);
2276 		return;
2277 	}
2278 	if(acb->srboutstandingcount > ARCMSR_MAX_OUTSTANDING_CMD) {
2279 		xpt_freeze_simq(acb->psim, 1);
2280 		pccb->ccb_h.status = CAM_REQUEUE_REQ;
2281 		acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2282 		arcmsr_srb_complete(srb, 0);
2283 		return;
2284 	}
2285 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2286 	arcmsr_build_srb(srb, dm_segs, nseg);
2287 	arcmsr_post_srb(acb, srb);
2288 	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2289 	{
2290 		arcmsr_callout_init(&srb->ccb_callout);
2291 		callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2292 		srb->srb_flags |= SRB_FLAG_TIMER_START;
2293 	}
2294 }
2295 /*
2296 *****************************************************************************************
2297 *****************************************************************************************
2298 */
2299 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2300 {
2301 	struct CommandControlBlock *srb;
2302 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2303 	u_int32_t intmask_org;
2304 	int i=0;
2305 
2306 	acb->num_aborts++;
2307 	/*
2308 	***************************************************************************
2309 	** The upper layer, which performs the abort, acquired this lock just
2310 	** prior to calling us. First determine if we currently own this
2311 	** command. Start by searching the device queue. If it is not found at
2312 	** all, and the system wanted us to just abort the command, return
2313 	** success.
2314 	***************************************************************************
2315 	*/
2316 	if(acb->srboutstandingcount!=0) {
2317 		/* disable all outbound interrupt */
2318 		intmask_org=arcmsr_disable_allintr(acb);
2319 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2320 			srb=acb->psrb_pool[i];
2321 			if(srb->srb_state==ARCMSR_SRB_START) {
2322 				if(srb->pccb==abortccb) {
2323 					srb->srb_state=ARCMSR_SRB_ABORTED;
2324 					kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2325 						"outstanding command \n"
2326 						, acb->pci_unit, abortccb->ccb_h.target_id
2327 						, abortccb->ccb_h.target_lun, srb);
2328 					arcmsr_polling_srbdone(acb, srb);
2329 					/* enable outbound Post Queue, outbound doorbell Interrupt */
2330 					arcmsr_enable_allintr(acb, intmask_org);
2331 					return (TRUE);
2332 				}
2333 			}
2334 		}
2335 		/* enable outbound Post Queue, outbound doorbell Interrupt */
2336 		arcmsr_enable_allintr(acb, intmask_org);
2337 	}
2338 	return(FALSE);
2339 }
2340 /*
2341 ****************************************************************************
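** arcmsr_bus_reset: drain outstanding commands by polling the interrupt
** handler (for up to roughly 10 seconds) and then reset the IOP.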
2342 ****************************************************************************
2343 */
2344 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2345 {
2346 	int retry=0;
2347 
2348 	acb->num_resets++;
2349 	acb->acb_flags |=ACB_F_BUS_RESET;
2350 	while(acb->srboutstandingcount!=0 && retry < 400) {
2351 		arcmsr_interrupt(acb);
2352 		UDELAY(25000);
2353 		retry++;
2354 	}
2355 	arcmsr_iop_reset(acb);
2356 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2357 }
2358 /*
2359 **************************************************************************
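** arcmsr_handle_virtual_command: emulate the virtual target used for IOP
** message transfer; INQUIRY is answered locally and
** WRITE_BUFFER/READ_BUFFER are routed through arcmsr_iop_message_xfer().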
2360 **************************************************************************
2361 */
2362 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2363 		union ccb * pccb)
2364 {
2365 	pccb->ccb_h.status |= CAM_REQ_CMP;
2366 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2367 	case INQUIRY: {
2368 		unsigned char inqdata[36];
2369 		char *buffer=pccb->csio.data_ptr;
2370 
2371 		if (pccb->ccb_h.target_lun) {
2372 			pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2373 			xpt_done(pccb);
2374 			return;
2375 		}
2376 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2377 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2378 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2379 		inqdata[3] = 0;
2380 		inqdata[4] = 31;			/* length of additional data */
2381 		inqdata[5] = 0;
2382 		inqdata[6] = 0;
2383 		inqdata[7] = 0;
2384 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2385 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2386 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2387 		memcpy(buffer, inqdata, sizeof(inqdata));
2388 		xpt_done(pccb);
2389 	}
2390 	break;
2391 	case WRITE_BUFFER:
2392 	case READ_BUFFER: {
2393 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2394 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2395 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2396 		}
2397 		xpt_done(pccb);
2398 	}
2399 	break;
2400 	default:
2401 		xpt_done(pccb);
2402 	}
2403 }
2404 /*
2405 *********************************************************************
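** arcmsr_action: CAM SIM action entry point; maps XPT requests (SCSI
** I/O, path inquiry, abort, bus reset, transfer settings, geometry)
** onto the adapter.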
2406 *********************************************************************
2407 */
2408 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2409 {
2410 	struct AdapterControlBlock *  acb;
2411 
2412 	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2413 	if(acb==NULL) {
2414 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2415 		xpt_done(pccb);
2416 		return;
2417 	}
2418 	switch (pccb->ccb_h.func_code) {
2419 	case XPT_SCSI_IO: {
2420 			struct CommandControlBlock *srb;
2421 			int target=pccb->ccb_h.target_id;
2422 
2423 			if(target == 16) {
2424 				/* virtual device for iop message transfer */
2425 				arcmsr_handle_virtual_command(acb, pccb);
2426 				return;
2427 			}
2428 			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2429 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2430 				xpt_done(pccb);
2431 				return;
2432 			}
2433 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2434 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2435 			srb->pccb=pccb;
2436 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2437 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2438 					/* Single buffer */
2439 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2440 						/* Buffer is virtual */
2441 						u_int32_t error;
2442 
2443 						crit_enter();
2444 						error =	bus_dmamap_load(acb->dm_segs_dmat
2445 							, srb->dm_segs_dmamap
2446 							, pccb->csio.data_ptr
2447 							, pccb->csio.dxfer_len
2448 							, arcmsr_execute_srb, srb, /*flags*/0);
2449 						if(error == EINPROGRESS) {
2450 							xpt_freeze_simq(acb->psim, 1);
2451 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2452 						}
2453 						crit_exit();
2454 					}
2455 					else {		/* Buffer is physical */
2456 						struct bus_dma_segment seg;
2457 
2458 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2459 						seg.ds_len = pccb->csio.dxfer_len;
2460 						arcmsr_execute_srb(srb, &seg, 1, 0);
2461 					}
2462 				} else {
2463 					/* Scatter/gather list */
2464 					struct bus_dma_segment *segs;
2465 
2466 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2467 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2468 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2469 						xpt_done(pccb);
2470 						kfree(srb, M_DEVBUF);
2471 						return;
2472 					}
2473 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2474 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2475 				}
2476 			} else {
2477 				arcmsr_execute_srb(srb, NULL, 0, 0);
2478 			}
2479 			break;
2480 		}
2481 	case XPT_TARGET_IO: {
2482 			/* target mode does not yet support vendor specific commands. */
2483 			pccb->ccb_h.status |= CAM_REQ_CMP;
2484 			xpt_done(pccb);
2485 			break;
2486 		}
2487 	case XPT_PATH_INQ: {
2488 			struct ccb_pathinq *cpi= &pccb->cpi;
2489 
2490 			cpi->version_num=1;
2491 			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2492 			cpi->target_sprt=0;
2493 			cpi->hba_misc=0;
2494 			cpi->hba_eng_cnt=0;
2495 			cpi->max_target=ARCMSR_MAX_TARGETID;        /* 0-16 */
2496 			cpi->max_lun=ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2497 			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2498 			cpi->bus_id=cam_sim_bus(psim);
2499 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2500 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2501 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2502 			cpi->unit_number=cam_sim_unit(psim);
2503 		#ifdef	CAM_NEW_TRAN_CODE
2504 			if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2505 				cpi->base_transfer_speed = 600000;
2506 			else
2507 				cpi->base_transfer_speed = 300000;
2508 			if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2509 			   (acb->vendor_device_id == PCIDevVenIDARC1680))
2510 			{
2511 				cpi->transport = XPORT_SAS;
2512 				cpi->transport_version = 0;
2513 				cpi->protocol_version = SCSI_REV_SPC2;
2514 			}
2515 			else
2516 			{
2517 				cpi->transport = XPORT_SPI;
2518 				cpi->transport_version = 2;
2519 				cpi->protocol_version = SCSI_REV_2;
2520 			}
2521 			cpi->protocol = PROTO_SCSI;
2522 		#endif
2523 			cpi->ccb_h.status |= CAM_REQ_CMP;
2524 			xpt_done(pccb);
2525 			break;
2526 		}
2527 	case XPT_ABORT: {
2528 			union ccb *pabort_ccb;
2529 
2530 			pabort_ccb=pccb->cab.abort_ccb;
2531 			switch (pabort_ccb->ccb_h.func_code) {
2532 			case XPT_ACCEPT_TARGET_IO:
2533 			case XPT_IMMED_NOTIFY:
2534 			case XPT_CONT_TARGET_IO:
2535 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2536 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2537 					xpt_done(pabort_ccb);
2538 					pccb->ccb_h.status |= CAM_REQ_CMP;
2539 				} else {
2540 					xpt_print_path(pabort_ccb->ccb_h.path);
2541 					kprintf("Not found\n");
2542 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2543 				}
2544 				break;
2545 			case XPT_SCSI_IO:
2546 				pccb->ccb_h.status |= CAM_UA_ABORT;
2547 				break;
2548 			default:
2549 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2550 				break;
2551 			}
2552 			xpt_done(pccb);
2553 			break;
2554 		}
2555 	case XPT_RESET_BUS:
2556 	case XPT_RESET_DEV: {
2557 			u_int32_t     i;
2558 
2559 			arcmsr_bus_reset(acb);
2560 			for (i=0; i < 500; i++) {
2561 				DELAY(1000);
2562 			}
2563 			pccb->ccb_h.status |= CAM_REQ_CMP;
2564 			xpt_done(pccb);
2565 			break;
2566 		}
2567 	case XPT_TERM_IO: {
2568 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2569 			xpt_done(pccb);
2570 			break;
2571 		}
2572 	case XPT_GET_TRAN_SETTINGS: {
2573 			struct ccb_trans_settings *cts;
2574 
2575 			if(pccb->ccb_h.target_id == 16) {
2576 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2577 				xpt_done(pccb);
2578 				break;
2579 			}
2580 			cts= &pccb->cts;
2581 		#ifdef	CAM_NEW_TRAN_CODE
2582 			{
2583 				struct ccb_trans_settings_scsi *scsi;
2584 				struct ccb_trans_settings_spi *spi;
2585 				struct ccb_trans_settings_sas *sas;
2586 
2587 				scsi = &cts->proto_specific.scsi;
2588 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2589 				scsi->valid = CTS_SCSI_VALID_TQ;
2590 				cts->protocol = PROTO_SCSI;
2591 
2592 				if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2593 				   (acb->vendor_device_id == PCIDevVenIDARC1680))
2594 				{
2595 					cts->protocol_version = SCSI_REV_SPC2;
2596 					cts->transport_version = 0;
2597 					cts->transport = XPORT_SAS;
2598 					sas = &cts->xport_specific.sas;
2599 					sas->valid = CTS_SAS_VALID_SPEED;
2600 					if(acb->vendor_device_id == PCIDevVenIDARC1880)
2601 						sas->bitrate = 600000;
2602 					else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2603 						sas->bitrate = 300000;
2604 				}
2605 				else
2606 				{
2607 					cts->protocol_version = SCSI_REV_2;
2608 					cts->transport_version = 2;
2609 					cts->transport = XPORT_SPI;
2610 					spi = &cts->xport_specific.spi;
2611 					spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2612 					spi->sync_period=2;
2613 					spi->sync_offset=32;
2614 					spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2615 					spi->valid = CTS_SPI_VALID_DISC
2616 						| CTS_SPI_VALID_SYNC_RATE
2617 						| CTS_SPI_VALID_SYNC_OFFSET
2618 						| CTS_SPI_VALID_BUS_WIDTH;
2619 				}
2620 			}
2621 		#else
2622 			{
2623 				cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
2624 				cts->sync_period=2;
2625 				cts->sync_offset=32;
2626 				cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2627 				cts->valid=CCB_TRANS_SYNC_RATE_VALID |
2628 				CCB_TRANS_SYNC_OFFSET_VALID |
2629 				CCB_TRANS_BUS_WIDTH_VALID |
2630 				CCB_TRANS_DISC_VALID |
2631 				CCB_TRANS_TQ_VALID;
2632 			}
2633 		#endif
2634 			pccb->ccb_h.status |= CAM_REQ_CMP;
2635 			xpt_done(pccb);
2636 			break;
2637 		}
2638 	case XPT_SET_TRAN_SETTINGS: {
2639 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2640 			xpt_done(pccb);
2641 			break;
2642 		}
2643 	case XPT_CALC_GEOMETRY:
2644 			if(pccb->ccb_h.target_id == 16) {
2645 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2646 				xpt_done(pccb);
2647 				break;
2648 			}
2649 			cam_calc_geometry(&pccb->ccg, 1);
2650 			xpt_done(pccb);
2651 			break;
2652 	default:
2653 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2654 		xpt_done(pccb);
2655 		break;
2656 	}
2657 }
2658 /*
2659 **********************************************************************
2660 **********************************************************************
2661 */
2662 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2663 {
2664 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2665 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2666 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2667 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2668 	}
2669 }
2670 /*
2671 **********************************************************************
2672 **********************************************************************
2673 */
2674 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2675 {
2676 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2677 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
2678 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2679 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2680 	}
2681 }
2682 /*
2683 **********************************************************************
2684 **********************************************************************
2685 */
2686 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2687 {
2688 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2689 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2690 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2691 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2692 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2693 	}
2694 }
2695 /*
2696 **********************************************************************
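** arcmsr_start_adapter_bgrb: tell the firmware to start its background
** rebuild, using the message interface of the detected adapter type.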
2697 **********************************************************************
2698 */
2699 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2700 {
2701 	switch (acb->adapter_type) {
2702 	case ACB_ADAPTER_TYPE_A:
2703 		arcmsr_start_hba_bgrb(acb);
2704 		break;
2705 	case ACB_ADAPTER_TYPE_B:
2706 		arcmsr_start_hbb_bgrb(acb);
2707 		break;
2708 	case ACB_ADAPTER_TYPE_C:
2709 		arcmsr_start_hbc_bgrb(acb);
2710 		break;
2711 	}
2712 }
2713 /*
2714 **********************************************************************
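** arcmsr_polling_hba_srbdone: poll the type A outbound reply FIFO
** directly, completing returned SRBs until the given SRB is seen or the
** poll limit expires.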
2715 **
2716 **********************************************************************
2717 */
2718 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2719 {
2720 	struct CommandControlBlock *srb;
2721 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2722 	u_int16_t	error;
2723 
2724 polling_ccb_retry:
2725 	poll_count++;
2726 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2727 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
2728 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2729 	while(1) {
2730 		if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2731 			0, outbound_queueport))==0xFFFFFFFF) {
2732 			if(poll_srb_done) {
2733 				break;	/* the chip FIFO has no more completed SRBs */
2734 			} else {
2735 				UDELAY(25000);
2736 				if ((poll_count > 100) && (poll_srb != NULL)) {
2737 					break;
2738 				}
2739 				goto polling_ccb_retry;
2740 			}
2741 		}
2742 		/* check if command done with no error*/
2743 		srb=(struct CommandControlBlock *)
2744 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2745 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
2746 		poll_srb_done = (srb==poll_srb) ? 1:0;
2747 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2748 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2749 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2750 					"poll command abort successfully \n"
2751 					, acb->pci_unit
2752 					, srb->pccb->ccb_h.target_id
2753 					, srb->pccb->ccb_h.target_lun, srb);
2754 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2755 				arcmsr_srb_complete(srb, 1);
2756 				continue;
2757 			}
2758 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2759 				"srboutstandingcount=%d \n"
2760 				, acb->pci_unit
2761 				, srb, acb->srboutstandingcount);
2762 			continue;
2763 		}
2764 		arcmsr_report_srb_state(acb, srb, error);
2765 	}	/*drain reply FIFO*/
2766 }
2767 /*
2768 **********************************************************************
2769 **
2770 **********************************************************************
2771 */
2772 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2773 {
2774 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2775 	struct CommandControlBlock *srb;
2776 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2777 	int index;
2778 	u_int16_t	error;
2779 
2780 polling_ccb_retry:
2781 	poll_count++;
2782 	CHIP_REG_WRITE32(HBB_DOORBELL,
2783 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2784 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2785 	while(1) {
2786 		index=phbbmu->doneq_index;
2787 		if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2788 			if(poll_srb_done) {
2789 				break;	/* the chip FIFO has no more completed SRBs */
2790 			} else {
2791 				UDELAY(25000);
2792 				if ((poll_count > 100) && (poll_srb != NULL)) {
2793 					break;
2794 				}
2795 				goto polling_ccb_retry;
2796 			}
2797 		}
2798 		phbbmu->done_qbuffer[index]=0;
2799 		index++;
2800 		index %= ARCMSR_MAX_HBB_POSTQUEUE;     /*if last index number set it to 0 */
2801 		phbbmu->doneq_index=index;
2802 		/* check if command done with no error*/
2803 		srb=(struct CommandControlBlock *)
2804 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2805 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
2806 		poll_srb_done = (srb==poll_srb) ? 1:0;
2807 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2808 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2809 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2810 					"poll command abort successfully \n"
2811 					, acb->pci_unit
2812 					, srb->pccb->ccb_h.target_id
2813 					, srb->pccb->ccb_h.target_lun, srb);
2814 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2815 				arcmsr_srb_complete(srb, 1);
2816 				continue;
2817 			}
2818 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2819 				"srboutstandingcount=%d \n"
2820 				, acb->pci_unit
2821 				, srb, acb->srboutstandingcount);
2822 			continue;
2823 		}
2824 		arcmsr_report_srb_state(acb, srb, error);
2825 	}	/*drain reply FIFO*/
2826 }
2827 /*
2828 **********************************************************************
2829 **
2830 **********************************************************************
2831 */
2832 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2833 {
2834 	struct CommandControlBlock *srb;
2835 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2836 	u_int16_t	error;
2837 
2838 polling_ccb_retry:
2839 	poll_count++;
2840 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2841 	while(1) {
2842 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2843 			if(poll_srb_done) {
2844 				break;	/* the chip FIFO has no more completed SRBs */
2845 			} else {
2846 				UDELAY(25000);
2847 				if ((poll_count > 100) && (poll_srb != NULL)) {
2848 					break;
2849 				}
2850 				if (acb->srboutstandingcount == 0) {
2851 					break;
2852 				}
2853 				goto polling_ccb_retry;
2854 			}
2855 		}
2856 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2857 		/* check if command done with no error*/
2858 		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
2859 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
2860 		if (poll_srb != NULL)
2861 			poll_srb_done = (srb==poll_srb) ? 1:0;
2862 		if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2863 			if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2864 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2865 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2866 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2867 				arcmsr_srb_complete(srb, 1);
2868 				continue;
2869 			}
2870 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2871 					, acb->pci_unit, srb, acb->srboutstandingcount);
2872 			continue;
2873 		}
2874 		arcmsr_report_srb_state(acb, srb, error);
2875 	}	/*drain reply FIFO*/
2876 }
2877 /*
2878 **********************************************************************
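** arcmsr_polling_srbdone: dispatch to the adapter-type specific polled
** completion routine.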
2879 **********************************************************************
2880 */
2881 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2882 {
2883 	switch (acb->adapter_type) {
2884 	case ACB_ADAPTER_TYPE_A: {
2885 			arcmsr_polling_hba_srbdone(acb, poll_srb);
2886 		}
2887 		break;
2888 	case ACB_ADAPTER_TYPE_B: {
2889 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
2890 		}
2891 		break;
2892 	case ACB_ADAPTER_TYPE_C: {
2893 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
2894 		}
2895 		break;
2896 	}
2897 }
2898 /*
2899 **********************************************************************
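** arcmsr_get_hba_config: issue GET_CONFIG to a type A adapter and copy
** the firmware model, version, device map and queue parameters out of
** the message rwbuffer.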
2900 **********************************************************************
2901 */
2902 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2903 {
2904 	char *acb_firm_model=acb->firm_model;
2905 	char *acb_firm_version=acb->firm_version;
2906 	char *acb_device_map = acb->device_map;
2907 	size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2908 	size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2909 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2910 	int i;
2911 
2912 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2913 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2914 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2915 	}
2916 	i=0;
2917 	while(i<8) {
2918 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2919 		/* 8 bytes firm_model, 15, 60-67*/
2920 		acb_firm_model++;
2921 		i++;
2922 	}
2923 	i=0;
2924 	while(i<16) {
2925 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2926 		/* 16 bytes firm_version, 17, 68-83*/
2927 		acb_firm_version++;
2928 		i++;
2929 	}
2930 	i=0;
2931 	while(i<16) {
2932 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2933 		acb_device_map++;
2934 		i++;
2935 	}
2936 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2937 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2938 	acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2939 	acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2940 	acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2941 	acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2942 	acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2943 }
2944 /*
2945 **********************************************************************
2946 **********************************************************************
2947 */
2948 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2949 {
2950 	char *acb_firm_model=acb->firm_model;
2951 	char *acb_firm_version=acb->firm_version;
2952 	char *acb_device_map = acb->device_map;
2953 	size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2954 	size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2955 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2956 	int i;
2957 
2958 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2959 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2960 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2961 	}
2962 	i=0;
2963 	while(i<8) {
2964 		*acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2965 		/* 8 bytes firm_model, 15, 60-67*/
2966 		acb_firm_model++;
2967 		i++;
2968 	}
2969 	i=0;
2970 	while(i<16) {
2971 		*acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2972 		/* 16 bytes firm_version, 17, 68-83*/
2973 		acb_firm_version++;
2974 		i++;
2975 	}
2976 	i=0;
2977 	while(i<16) {
2978 		*acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2979 		acb_device_map++;
2980 		i++;
2981 	}
2982 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2983 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2984 	acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2985 	acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2986 	acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2987 	acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2988 	acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2989 }
2990 /*
2991 **********************************************************************
2992 **********************************************************************
2993 */
2994 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2995 {
2996 	char *acb_firm_model=acb->firm_model;
2997 	char *acb_firm_version=acb->firm_version;
2998 	char *acb_device_map = acb->device_map;
2999 	size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
3000 	size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
3001 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
3002 	int i;
3003 
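	/* request the firmware configuration block; on type C the message is announced by also ringing the inbound doorbell */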
3004 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
3005 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3006 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3007 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
3008 	}
3009 	i=0;
3010 	while(i<8) {
3011 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
3012 		/* 8 bytes firm_model, 15, 60-67*/
3013 		acb_firm_model++;
3014 		i++;
3015 	}
3016 	i=0;
3017 	while(i<16) {
3018 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3019 		/* 16 bytes firm_version, 17, 68-83*/
3020 		acb_firm_version++;
3021 		i++;
3022 	}
3023 	i=0;
3024 	while(i<16) {
3025 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3026 		acb_device_map++;
3027 		i++;
3028 	}
3029 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3030 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
3031 	acb->firm_request_len	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
3032 	acb->firm_numbers_queue	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
3033 	acb->firm_sdram_size	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
3034 	acb->firm_ide_channels	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
3035 	acb->firm_cfg_version	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
3036 }
3037 /*
3038 **********************************************************************
3039 **********************************************************************
3040 */
3041 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3042 {
3043 	switch (acb->adapter_type) {
3044 	case ACB_ADAPTER_TYPE_A: {
3045 			arcmsr_get_hba_config(acb);
3046 		}
3047 		break;
3048 	case ACB_ADAPTER_TYPE_B: {
3049 			arcmsr_get_hbb_config(acb);
3050 		}
3051 		break;
3052 	case ACB_ADAPTER_TYPE_C: {
3053 			arcmsr_get_hbc_config(acb);
3054 		}
3055 		break;
3056 	}
3057 }
3058 /*
3059 **********************************************************************
3060 **********************************************************************
3061 */
3062 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3063 {
3064 	int	timeout=0;
3065 
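	/* poll every 15 ms, allowing the firmware up to 30 seconds to signal readiness */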
3066 	switch (acb->adapter_type) {
3067 	case ACB_ADAPTER_TYPE_A: {
3068 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3069 			{
3070 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3071 				{
3072 					kprintf("arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3073 					return;
3074 				}
3075 				UDELAY(15000); /* wait 15 milli-seconds */
3076 			}
3077 		}
3078 		break;
3079 	case ACB_ADAPTER_TYPE_B: {
3080 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3081 			{
3082 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3083 				{
3084 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3085 					return;
3086 				}
3087 				UDELAY(15000); /* wait 15 milli-seconds */
3088 			}
3089 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3090 		}
3091 		break;
3092 	case ACB_ADAPTER_TYPE_C: {
3093 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3094 			{
3095 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3096 				{
3097 					kprintf("arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3098 					return;
3099 				}
3100 				UDELAY(15000); /* wait 15 milli-seconds */
3101 			}
3102 		}
3103 		break;
3104 	}
3105 }
3106 /*
3107 **********************************************************************
3108 **********************************************************************
3109 */
3110 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3111 {
3112 	u_int32_t outbound_doorbell;
3113 
3114 	switch (acb->adapter_type) {
3115 	case ACB_ADAPTER_TYPE_A: {
3116 			/* empty doorbell Qbuffer if door bell ringed */
3117 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3118 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3119 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3120 
3121 		}
3122 		break;
3123 	case ACB_ADAPTER_TYPE_B: {
3124 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3125 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3126 			/* let IOP know data has been read */
3127 		}
3128 		break;
3129 	case ACB_ADAPTER_TYPE_C: {
3130 			/* empty doorbell Qbuffer if door bell ringed */
3131 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3132 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3133 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3134 
3135 		}
3136 		break;
3137 	}
3138 }
3139 /*
3140 ************************************************************************
3141 ************************************************************************
3142 */
3143 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3144 {
3145 	unsigned long srb_phyaddr;
3146 	u_int32_t srb_phyaddr_hi32;
3147 
3148 	/*
3149 	********************************************************************
3150 	** here we need to tell iop 331 our freesrb.HighPart
3151 	** if freesrb.HighPart is not zero
3152 	********************************************************************
3153 	*/
3154 	srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3155 //	srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3156 	srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3157 	switch (acb->adapter_type) {
3158 	case ACB_ADAPTER_TYPE_A: {
3159 			if(srb_phyaddr_hi32!=0) {
3160 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3161 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3162 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3163 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3164 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3165 					return FALSE;
3166 				}
3167 			}
3168 		}
3169 		break;
3170 		/*
3171 		***********************************************************************
3172 		**    if adapter type B, set window of "post command Q"
3173 		***********************************************************************
3174 		*/
3175 	case ACB_ADAPTER_TYPE_B: {
3176 			u_int32_t post_queue_phyaddr;
3177 			struct HBB_MessageUnit *phbbmu;
3178 
3179 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3180 			phbbmu->postq_index=0;
3181 			phbbmu->doneq_index=0;
3182 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3183 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3184 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3185 				return FALSE;
3186 			}
3187 			post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3188 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3189 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3190 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normally zero */
3191 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base address */
3192 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ base address, right after the 1056-byte postQ */
3193 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* queue size in bytes: (256+8)*4 = 1056 */
3194 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3195 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3196 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3197 				return FALSE;
3198 			}
3199 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3200 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3201 				kprintf("arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
3202 				return FALSE;
3203 			}
3204 		}
3205 		break;
3206 	case ACB_ADAPTER_TYPE_C: {
3207 			if(srb_phyaddr_hi32!=0) {
3208 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3209 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3210 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3211 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3212 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3213 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3214 					return FALSE;
3215 				}
3216 			}
3217 		}
3218 		break;
3219 	}
3220 	return (TRUE);
3221 }
3222 /*
3223 ************************************************************************
3224 ************************************************************************
3225 */
3226 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3227 {
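	/* only the type B (HBB) interface uses the active end-of-interrupt handshake; types A and C need nothing here */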
3228 	switch (acb->adapter_type)
3229 	{
3230 	case ACB_ADAPTER_TYPE_A:
3231 	case ACB_ADAPTER_TYPE_C:
3232 		break;
3233 	case ACB_ADAPTER_TYPE_B: {
3234 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3235 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3236 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3237 
3238 				return;
3239 			}
3240 		}
3241 		break;
3242 	}
3243 }
3244 /*
3245 **********************************************************************
3246 **********************************************************************
3247 */
3248 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3249 {
3250 	u_int32_t intmask_org;
3251 
3252 	/* disable all outbound interrupt */
3253 	intmask_org=arcmsr_disable_allintr(acb);
3254 	arcmsr_wait_firmware_ready(acb);
3255 	arcmsr_iop_confirm(acb);
3256 	arcmsr_get_firmware_spec(acb);
3257 	/*start background rebuild*/
3258 	arcmsr_start_adapter_bgrb(acb);
3259 	/* empty doorbell Qbuffer if door bell ringed */
3260 	arcmsr_clear_doorbell_queue_buffer(acb);
3261 	arcmsr_enable_eoi_mode(acb);
3262 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3263 	arcmsr_enable_allintr(acb, intmask_org);
3264 	acb->acb_flags |=ACB_F_IOP_INITED;
3265 }
3266 /*
3267 **********************************************************************
3268 **********************************************************************
3269 */
3270 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3271 {
3272 	struct AdapterControlBlock *acb=arg;
3273 	struct CommandControlBlock *srb_tmp;
3274 	u_int8_t * dma_memptr;
3275 	u_int32_t i;
3276 	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3277 
3278 	dma_memptr=acb->uncacheptr;
3279 	acb->srb_phyaddr.phyaddr=srb_phyaddr;
3280 	srb_tmp=(struct CommandControlBlock *)dma_memptr;
3281 	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3282 		if(bus_dmamap_create(acb->dm_segs_dmat,
3283 			 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3284 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3285 			kprintf("arcmsr%d:"
3286 			" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3287 			return;
3288 		}
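		/* type C adapters post the full SRB physical address; older types post it shifted right by 5 (32-byte frames) */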
3289 		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3290 		srb_tmp->acb=acb;
3291 		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3292 		srb_phyaddr=srb_phyaddr+SRB_SIZE;
3293 		srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE);
3294 	}
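	/* delta used to convert SRB physical addresses read back from the reply queue into kernel virtual addresses */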
3295 	acb->vir2phy_offset=(unsigned long)srb_tmp-srb_phyaddr;
3296 }
3297 /*
3298 ************************************************************************
3299 **
3300 **
3301 ************************************************************************
3302 */
3303 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3304 {
3305 	/* remove the control device */
3306 	if(acb->ioctl_dev != NULL) {
3307 		destroy_dev(acb->ioctl_dev);
3308 	}
3309 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3310 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3311 	bus_dma_tag_destroy(acb->srb_dmat);
3312 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3313 	bus_dma_tag_destroy(acb->parent_dmat);
3314 }
3315 /*
3316 ************************************************************************
3317 ************************************************************************
3318 */
3319 static u_int32_t arcmsr_initialize(device_t dev)
3320 {
3321 	struct AdapterControlBlock *acb=device_get_softc(dev);
3322 	u_int16_t pci_command;
3323 	int i, j, max_coherent_size;
3324 	u_int32_t vendor_dev_id;
3325 
3326 	vendor_dev_id = pci_get_devid(dev);
3327 	acb->vendor_device_id = vendor_dev_id;
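	/* classify the adapter by PCI device ID to select the message-unit layout and DMA pool size */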
3328 	switch (vendor_dev_id) {
3329 	case PCIDevVenIDARC1880:
3330 	case PCIDevVenIDARC1882:
3331 	case PCIDevVenIDARC1213:
3332 	case PCIDevVenIDARC1223: {
3333 			acb->adapter_type=ACB_ADAPTER_TYPE_C;
3334 			acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3335 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3336 		}
3337 		break;
3338 	case PCIDevVenIDARC1200:
3339 	case PCIDevVenIDARC1201: {
3340 			acb->adapter_type=ACB_ADAPTER_TYPE_B;
3341 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3342 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3343 		}
3344 		break;
3345 	case PCIDevVenIDARC1110:
3346 	case PCIDevVenIDARC1120:
3347 	case PCIDevVenIDARC1130:
3348 	case PCIDevVenIDARC1160:
3349 	case PCIDevVenIDARC1170:
3350 	case PCIDevVenIDARC1210:
3351 	case PCIDevVenIDARC1220:
3352 	case PCIDevVenIDARC1230:
3353 	case PCIDevVenIDARC1231:
3354 	case PCIDevVenIDARC1260:
3355 	case PCIDevVenIDARC1261:
3356 	case PCIDevVenIDARC1270:
3357 	case PCIDevVenIDARC1280:
3358 	case PCIDevVenIDARC1212:
3359 	case PCIDevVenIDARC1222:
3360 	case PCIDevVenIDARC1380:
3361 	case PCIDevVenIDARC1381:
3362 	case PCIDevVenIDARC1680:
3363 	case PCIDevVenIDARC1681: {
3364 			acb->adapter_type=ACB_ADAPTER_TYPE_A;
3365 			acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3366 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3367 		}
3368 		break;
3369 	default: {
3370 			kprintf("arcmsr%d:"
3371 			" unknown RAID adapter type \n", device_get_unit(dev));
3372 			return ENOMEM;
3373 		}
3374 	}
3375 	if(bus_dma_tag_create(  /*parent*/	NULL,
3376 				/*alignment*/	1,
3377 				/*boundary*/	0,
3378 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3379 				/*highaddr*/	BUS_SPACE_MAXADDR,
3380 				/*filter*/	NULL,
3381 				/*filterarg*/	NULL,
3382 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3383 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3384 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3385 				/*flags*/	0,
3386 						&acb->parent_dmat) != 0)
3387 	{
3388 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3389 		return ENOMEM;
3390 	}
3391 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3392 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3393 				/*alignment*/	1,
3394 				/*boundary*/	0,
3395 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3396 				/*highaddr*/	BUS_SPACE_MAXADDR,
3397 				/*filter*/	NULL,
3398 				/*filterarg*/	NULL,
3399 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3400 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3401 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3402 				/*flags*/	0,
3403 						&acb->dm_segs_dmat) != 0)
3404 	{
3405 		bus_dma_tag_destroy(acb->parent_dmat);
3406 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3407 		return ENOMEM;
3408 	}
3409 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3410 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3411 				/*alignment*/	0x20,
3412 				/*boundary*/	0,
3413 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3414 				/*highaddr*/	BUS_SPACE_MAXADDR,
3415 				/*filter*/	NULL,
3416 				/*filterarg*/	NULL,
3417 				/*maxsize*/	max_coherent_size,
3418 				/*nsegments*/	1,
3419 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3420 				/*flags*/	0,
3421 						&acb->srb_dmat) != 0)
3422 	{
3423 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3424 		bus_dma_tag_destroy(acb->parent_dmat);
3425 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3426 		return ENXIO;
3427 	}
3428 	/* Allocation for our srbs */
3429 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3430 		bus_dma_tag_destroy(acb->srb_dmat);
3431 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3432 		bus_dma_tag_destroy(acb->parent_dmat);
3433 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3434 		return ENXIO;
3435 	}
3436 	/* And permanently map them */
3437 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3438 		bus_dma_tag_destroy(acb->srb_dmat);
3439 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3440 		bus_dma_tag_destroy(acb->parent_dmat);
3441 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3442 		return ENXIO;
3443 	}
3444 	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3445 	/* enable bus mastering, parity error response, memory write-and-invalidate and memory space access */
3446 	pci_command |= PCIM_CMD_BUSMASTEREN;
3447 	pci_command |= PCIM_CMD_PERRESPEN;
3448 	pci_command |= PCIM_CMD_MWRICEN;
3449 	pci_command |= PCIM_CMD_MEMEN;
3450 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
3451 	switch(acb->adapter_type) {
3452 	case ACB_ADAPTER_TYPE_A: {
3453 			u_int32_t rid0=PCIR_BAR(0);
3454 			vm_offset_t	mem_base0;
3455 
3456 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3457 			if(acb->sys_res_arcmsr[0] == NULL) {
3458 				arcmsr_free_resource(acb);
3459 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3460 				return ENOMEM;
3461 			}
3462 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3463 				arcmsr_free_resource(acb);
3464 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3465 				return ENXIO;
3466 			}
3467 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3468 			if(mem_base0==0) {
3469 				arcmsr_free_resource(acb);
3470 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3471 				return ENXIO;
3472 			}
3473 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3474 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3475 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3476 		}
3477 		break;
3478 	case ACB_ADAPTER_TYPE_B: {
3479 			struct HBB_MessageUnit *phbbmu;
3480 			struct CommandControlBlock *freesrb;
3481 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3482 			vm_offset_t	mem_base[]={0,0};
3483 			for(i=0; i<2; i++) {
3484 				if(i==0) {
3485 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3486 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3487 				} else {
3488 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3489 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3490 				}
3491 				if(acb->sys_res_arcmsr[i] == NULL) {
3492 					arcmsr_free_resource(acb);
3493 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3494 					return ENOMEM;
3495 				}
3496 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3497 					arcmsr_free_resource(acb);
3498 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3499 					return ENXIO;
3500 				}
3501 				mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3502 				if(mem_base[i]==0) {
3503 					arcmsr_free_resource(acb);
3504 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3505 					return ENXIO;
3506 				}
3507 				acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3508 				acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
3509 			}
3510 			freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3511 //			acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3512 			acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
3513 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3514 			phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3515 			phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3516 		}
3517 		break;
3518 	case ACB_ADAPTER_TYPE_C: {
3519 			u_int32_t rid0=PCIR_BAR(1);
3520 			vm_offset_t	mem_base0;
3521 
3522 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3523 			if(acb->sys_res_arcmsr[0] == NULL) {
3524 				arcmsr_free_resource(acb);
3525 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3526 				return ENOMEM;
3527 			}
3528 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3529 				arcmsr_free_resource(acb);
3530 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3531 				return ENXIO;
3532 			}
3533 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3534 			if(mem_base0==0) {
3535 				arcmsr_free_resource(acb);
3536 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3537 				return ENXIO;
3538 			}
3539 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3540 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3541 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3542 		}
3543 		break;
3544 	}
3545 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3546 		arcmsr_free_resource(acb);
3547 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3548 		return ENXIO;
3549 	}
3550 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3551 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3552 	/*
3553 	********************************************************************
3554 	** init raid volume state
3555 	********************************************************************
3556 	*/
3557 	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3558 		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3559 			acb->devstate[i][j]=ARECA_RAID_GONE;
3560 		}
3561 	}
3562 	arcmsr_iop_init(acb);
3563 	return(0);
3564 }
3565 /*
3566 ************************************************************************
3567 ************************************************************************
3568 */
3569 static int arcmsr_attach(device_t dev)
3570 {
3571 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3572 	u_int32_t unit=device_get_unit(dev);
3573 	struct ccb_setasync csa;
3574 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
3575 	struct resource	*irqres;
3576 	int	rid;
3577 	u_int irq_flags;
3578 
3579 	if(acb == NULL) {
3580 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
3581 		return (ENOMEM);
3582 	}
3583 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3584 	if(arcmsr_initialize(dev)) {
3585 		kprintf("arcmsr%d: initialize failure!\n", unit);
3586 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3587 		return ENXIO;
3588 	}
3589 	/* After setting up the adapter, map our interrupt */
3590 	rid=0;
3591 	acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
3592 	    &irq_flags);
3593 	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
3594 	    irq_flags);
3595 	if(irqres == NULL ||
3596 		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3597 		arcmsr_free_resource(acb);
3598 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3599 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3600 		return ENXIO;
3601 	}
3602 	acb->irqres=irqres;
3603 	acb->pci_dev=dev;
3604 	acb->pci_unit=unit;
3605 	/*
3606 	 * Now let the CAM generic SCSI layer find the SCSI devices on
3607 	 * the bus and start the queue running.  Create the device queue
3608 	 * for the SIM; (ARCMSR_MAX_START_JOB - 1) is the maximum number of
3609 	 * simultaneous transactions.
3610 	 */
3611 	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3612 	if(devq == NULL) {
3613 		arcmsr_free_resource(acb);
3614 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3615 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3616 			pci_release_msi(dev);
3617 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3618 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3619 		return ENXIO;
3620 	}
3621 	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
3622 	if(acb->psim == NULL) {
3623 		arcmsr_free_resource(acb);
3624 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3625 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3626 			pci_release_msi(dev);
3627 		cam_simq_release(devq);
3628 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3629 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3630 		return ENXIO;
3631 	}
3632 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3633 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3634 		arcmsr_free_resource(acb);
3635 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3636 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3637 			pci_release_msi(dev);
3638 		cam_sim_free(acb->psim);
3639 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3640 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3641 		return ENXIO;
3642 	}
3643 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3644 		arcmsr_free_resource(acb);
3645 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3646 		if (acb->irq_type == PCI_INTR_TYPE_MSI)
3647 			pci_release_msi(dev);
3648 		xpt_bus_deregister(cam_sim_path(acb->psim));
3649 		cam_sim_free(acb->psim);
3650 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3651 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3652 		return ENXIO;
3653 	}
3654 	/*
3655 	****************************************************
3656 	*/
3657 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3658 	csa.ccb_h.func_code=XPT_SASYNC_CB;
3659 	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3660 	csa.callback=arcmsr_async;
3661 	csa.callback_arg=acb->psim;
3662 	xpt_action((union ccb *)&csa);
3663 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3664 	/* Create the control device.  */
3665 	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3666 
3667 	acb->ioctl_dev->si_drv1=acb;
3668 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
3669 	arcmsr_callout_init(&acb->devmap_callout);
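	/* poll the adapter's device map every 60 seconds for volume changes */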
3670 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3671 	return (0);
3672 }
3673 
3674 /*
3675 ************************************************************************
3676 ************************************************************************
3677 */
3678 static int arcmsr_probe(device_t dev)
3679 {
3680 	u_int32_t id;
3681 	static char buf[256];
3682 	char x_type[]={"X-TYPE"};
3683 	char *type;
3684 	int raid6 = 1;
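	/* assume RAID6 capability by default; cleared below for the early models that lack it */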
3685 
3686 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3687 		return (ENXIO);
3688 	}
3689 	switch(id=pci_get_devid(dev)) {
3690 	case PCIDevVenIDARC1110:
3691 	case PCIDevVenIDARC1200:
3692 	case PCIDevVenIDARC1201:
3693 	case PCIDevVenIDARC1210:
3694 		raid6 = 0;
3695 		/*FALLTHRU*/
3696 	case PCIDevVenIDARC1120:
3697 	case PCIDevVenIDARC1130:
3698 	case PCIDevVenIDARC1160:
3699 	case PCIDevVenIDARC1170:
3700 	case PCIDevVenIDARC1220:
3701 	case PCIDevVenIDARC1230:
3702 	case PCIDevVenIDARC1231:
3703 	case PCIDevVenIDARC1260:
3704 	case PCIDevVenIDARC1261:
3705 	case PCIDevVenIDARC1270:
3706 	case PCIDevVenIDARC1280:
3707 		type = "SATA";
3708 		break;
3709 	case PCIDevVenIDARC1212:
3710 	case PCIDevVenIDARC1222:
3711 	case PCIDevVenIDARC1380:
3712 	case PCIDevVenIDARC1381:
3713 	case PCIDevVenIDARC1680:
3714 	case PCIDevVenIDARC1681:
3715 		type = "SAS 3G";
3716 		break;
3717 	case PCIDevVenIDARC1880:
3718 	case PCIDevVenIDARC1882:
3719 	case PCIDevVenIDARC1213:
3720 	case PCIDevVenIDARC1223:
3721 		type = "SAS 6G";
3722 		arcmsr_msi_enable = 0;
3723 		break;
3724 	default:
3725 		type = x_type;
3726 		break;
3727 	}
3728 	if(type == x_type)
3729 		return(ENXIO);
3730 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3731 	device_set_desc_copy(dev, buf);
3732 	return (BUS_PROBE_DEFAULT);
3733 }
3734 /*
3735 ************************************************************************
3736 ************************************************************************
3737 */
3738 static int arcmsr_shutdown(device_t dev)
3739 {
3740 	u_int32_t  i;
3741 	u_int32_t intmask_org;
3742 	struct CommandControlBlock *srb;
3743 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3744 
3745 	/* stop adapter background rebuild */
3746 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3747 	/* disable all outbound interrupt */
3748 	intmask_org=arcmsr_disable_allintr(acb);
3749 	arcmsr_stop_adapter_bgrb(acb);
3750 	arcmsr_flush_adapter_cache(acb);
3751 	/* abort all outstanding command */
3752 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3753 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3754 	if(acb->srboutstandingcount!=0) {
3755 		/*clear and abort all outbound posted Q*/
3756 		arcmsr_done4abort_postqueue(acb);
3757 		/* talk to iop 331 outstanding command aborted*/
3758 		arcmsr_abort_allcmd(acb);
3759 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3760 			srb=acb->psrb_pool[i];
3761 			if(srb->srb_state==ARCMSR_SRB_START) {
3762 				srb->srb_state=ARCMSR_SRB_ABORTED;
3763 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3764 				arcmsr_srb_complete(srb, 1);
3765 			}
3766 		}
3767 	}
3768 	acb->srboutstandingcount=0;
3769 	acb->workingsrb_doneindex=0;
3770 	acb->workingsrb_startindex=0;
3771 #ifdef ARCMSR_DEBUG1
3772 	acb->pktRequestCount = 0;
3773 	acb->pktReturnCount = 0;
3774 #endif
3775 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3776 	return (0);
3777 }
3778 /*
3779 ************************************************************************
3780 ************************************************************************
3781 */
3782 static int arcmsr_detach(device_t dev)
3783 {
3784 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3785 	int i;
3786 
3787 	callout_stop(&acb->devmap_callout);
3788 	bus_teardown_intr(dev, acb->irqres, acb->ih);
3789 	arcmsr_shutdown(dev);
3790 	arcmsr_free_resource(acb);
3791 	for(i=0; (i<2) && (acb->sys_res_arcmsr[i]!=NULL); i++) {
3792 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3793 	}
3794 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3795 	if (acb->irq_type == PCI_INTR_TYPE_MSI)
3796 		pci_release_msi(dev);
3797 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3798 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3799 	xpt_free_path(acb->ppath);
3800 	xpt_bus_deregister(cam_sim_path(acb->psim));
3801 	cam_sim_free(acb->psim);
3802 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3803 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3804 	return (0);
3805 }
3806 
3807 #ifdef ARCMSR_DEBUG1
3808 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
3809 {
3810 	if((acb->pktRequestCount - acb->pktReturnCount) == 0)
3811 		return;
3812 	printf("Command Request Count   =0x%x\n",acb->pktRequestCount);
3813 	printf("Command Return Count    =0x%x\n",acb->pktReturnCount);
3814 	printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
3815 	printf("Queued Command Count    =0x%x\n",acb->srboutstandingcount);
3816 }
3817 #endif
3818