xref: /dragonfly/sys/dev/raid/arcmsr/arcmsr.c (revision 92fc8b5c)
1 /*
2 *****************************************************************************************
3 **        O.S   : FreeBSD
4 **   FILE NAME  : arcmsr.c
5 **        BY    : Erich Chen, Ching Huang
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 **                ARCMSR RAID Host adapter
9 **                [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
12 **
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 **        Erich Chen, Taipei Taiwan All rights reserved.
15 **
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
18 ** are met:
19 ** 1. Redistributions of source code must retain the above copyright
20 **    notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 **    notice, this list of conditions and the following disclaimer in the
23 **    documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 **    derived from this software without specific prior written permission.
26 **
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
38 ** History
39 **
40 **        REV#         DATE	            NAME	         DESCRIPTION
41 **     1.00.00.00    3/31/2004	       Erich Chen	 First release
42 **     1.20.00.02   11/29/2004         Erich Chen        bug fix with arcmsr_bus_reset when PHY error
43 **     1.20.00.03    4/19/2005         Erich Chen        add SATA 24 Ports adapter type support
44 **                                                       clean unused function
45 **     1.20.00.12    9/12/2005         Erich Chen        bug fix with abort command handling,
46 **                                                       firmware version check
47 **                                                       and firmware update notify for hardware bug fix
48 **                                                       handling of non-zero high part physical address
49 **                                                       of srb resource
50 **     1.20.00.13    8/18/2006         Erich Chen        remove pending srb and report busy
51 **                                                       add iop message xfer
52 **                                                       with scsi pass-through command
53 **                                                       add new device id of sas raid adapters
54 **                                                       code fit for SPARC64 & PPC
55 **     1.20.00.14   02/05/2007         Erich Chen        bug fix for incorrect ccb_h.status report
56 **                                                       and cause g_vfs_done() read write error
57 **     1.20.00.15   10/10/2007         Erich Chen        support new RAID adapter type ARC120x
58 **     1.20.00.16   10/10/2009         Erich Chen        Bug fix for RAID adapter type ARC120x
59 **                                                       bus_dmamem_alloc() with BUS_DMA_ZERO
60 **     1.20.00.17   07/15/2010         Ching Huang       Added support ARC1880
61 **							 report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 **							 prevent cam_periph_error from removing all LUN devices of one Target id
63 **							 when any one LUN device fails
64 **     1.20.00.18   10/14/2010         Ching Huang	 Fixed "inquiry data fails comparison at DV1 step"
65 **	   	    10/25/2010         Ching Huang	 Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 **     1.20.00.19   11/11/2010         Ching Huang	 Fixed arcmsr driver preventing arcsas support for Areca SAS HBA ARC13x0
67 ******************************************************************************************
68 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.35 2010/11/13 08:58:36 delphij Exp $
69 */
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/malloc.h>
73 #include <sys/kernel.h>
74 #include <sys/bus.h>
75 #include <sys/queue.h>
76 #include <sys/stat.h>
77 #include <sys/devicestat.h>
78 #include <sys/kthread.h>
79 #include <sys/module.h>
80 #include <sys/proc.h>
81 #include <sys/lock.h>
82 #include <sys/sysctl.h>
83 #include <sys/thread2.h>
84 #include <sys/poll.h>
85 #include <sys/ioccom.h>
86 #include <sys/device.h>
87 #include <vm/vm.h>
88 #include <vm/vm_param.h>
89 #include <vm/pmap.h>
90 
91 #include <machine/atomic.h>
92 #include <sys/conf.h>
93 #include <sys/rman.h>
94 
95 #include <bus/cam/cam.h>
96 #include <bus/cam/cam_ccb.h>
97 #include <bus/cam/cam_sim.h>
98 #include <bus/cam/cam_periph.h>
99 #include <bus/cam/cam_xpt_periph.h>
100 #include <bus/cam/cam_xpt_sim.h>
101 #include <bus/cam/cam_debug.h>
102 #include <bus/cam/scsi/scsi_all.h>
103 #include <bus/cam/scsi/scsi_message.h>
104 /*
105 **************************************************************************
106 **************************************************************************
107 */
108 #include <sys/endian.h>
109 #include <bus/pci/pcivar.h>
110 #include <bus/pci/pcireg.h>
111 #define ARCMSR_LOCK_INIT(l, s)	lockinit(l, s, 0, LK_CANRECURSE)
112 #define ARCMSR_LOCK_DESTROY(l)	lockuninit(l)
113 #define ARCMSR_LOCK_ACQUIRE(l)	lockmgr(l, LK_EXCLUSIVE)
114 #define ARCMSR_LOCK_RELEASE(l)	lockmgr(l, LK_RELEASE)
115 #define ARCMSR_LOCK_TRY(l)	lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
116 #define arcmsr_htole32(x)	htole32(x)
117 typedef struct lock		arcmsr_lock_t;
118 
119 #if !defined(CAM_NEW_TRAN_CODE)
120 #define	CAM_NEW_TRAN_CODE	1
121 #endif
122 
123 #define ARCMSR_DRIVER_VERSION			"Driver Version 1.20.00.19 2010-11-11"
124 #include <dev/raid/arcmsr/arcmsr.h>
125 #define ARCMSR_SRBS_POOL_SIZE           ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM))
126 /*
127 **************************************************************************
128 **************************************************************************
129 */
130 #define CHIP_REG_READ32(s, b, r)	bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
131 #define CHIP_REG_WRITE32(s, b, r, d)	bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
132 /*
133 **************************************************************************
134 **************************************************************************
135 */
136 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
137 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
138 static int arcmsr_probe(device_t dev);
139 static int arcmsr_attach(device_t dev);
140 static int arcmsr_detach(device_t dev);
141 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
142 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
143 static int arcmsr_shutdown(device_t dev);
144 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
145 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
146 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
147 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
148 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
149 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
150 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
151 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
152 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
153 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
154 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
155 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
156 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
157 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
158 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
159 static int arcmsr_resume(device_t dev);
160 static int arcmsr_suspend(device_t dev);
161 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
162 static void	arcmsr_polling_devmap(void* arg);
163 /*
164 **************************************************************************
165 **************************************************************************
166 */
167 static void UDELAY(u_int32_t us) { DELAY(us); }
168 /*
169 **************************************************************************
170 **************************************************************************
171 */
172 static bus_dmamap_callback_t arcmsr_map_free_srb;
173 static bus_dmamap_callback_t arcmsr_execute_srb;
174 /*
175 **************************************************************************
176 **************************************************************************
177 */
178 static d_open_t	arcmsr_open;
179 static d_close_t arcmsr_close;
180 static d_ioctl_t arcmsr_ioctl;
181 
182 static device_method_t arcmsr_methods[]={
183 	DEVMETHOD(device_probe,		arcmsr_probe),
184 	DEVMETHOD(device_attach,	arcmsr_attach),
185 	DEVMETHOD(device_detach,	arcmsr_detach),
186 	DEVMETHOD(device_shutdown,	arcmsr_shutdown),
187 	DEVMETHOD(device_suspend,	arcmsr_suspend),
188 	DEVMETHOD(device_resume,	arcmsr_resume),
189 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
190 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
191 	{ 0, 0 }
192 };
193 
194 static driver_t arcmsr_driver={
195 	"arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
196 };
197 
198 static devclass_t arcmsr_devclass;
199 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, 0, 0);
200 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
201 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
202 #ifndef BUS_DMA_COHERENT
203 	#define	BUS_DMA_COHERENT	0x04	/* hint: map memory in a coherent way */
204 #endif
205 
206 static struct dev_ops arcmsr_ops = {
207 	{ "arcmsr", 0, 0 },
208 	.d_open =	arcmsr_open,		        /* open     */
209 	.d_close =	arcmsr_close,		        /* close    */
210 	.d_ioctl =	arcmsr_ioctl,		        /* ioctl    */
211 };
212 
213 /*
214 **************************************************************************
215 **************************************************************************
216 */
217 
218 static int
219 arcmsr_open(struct dev_open_args *ap)
220 {
221 	cdev_t dev = ap->a_head.a_dev;
222 	struct AdapterControlBlock *acb=dev->si_drv1;
223 
224 	if(acb==NULL) {
225 		return ENXIO;
226 	}
227 	return 0;
228 }
229 
230 /*
231 **************************************************************************
232 **************************************************************************
233 */
234 
235 static int
236 arcmsr_close(struct dev_close_args *ap)
237 {
238 	cdev_t dev = ap->a_head.a_dev;
239 	struct AdapterControlBlock *acb=dev->si_drv1;
240 
241 	if(acb==NULL) {
242 		return ENXIO;
243 	}
244 	return 0;
245 }
246 
247 /*
248 **************************************************************************
249 **************************************************************************
250 */
251 
252 static int
253 arcmsr_ioctl(struct dev_ioctl_args *ap)
254 {
255 	cdev_t dev = ap->a_head.a_dev;
256 	u_long ioctl_cmd = ap->a_cmd;
257 	caddr_t arg = ap->a_data;
258 	struct AdapterControlBlock *acb=dev->si_drv1;
259 
260 	if(acb==NULL) {
261 		return ENXIO;
262 	}
263 	return(arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
264 }
265 
266 /*
267 **********************************************************************
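** arcmsr_disable_allintr: mask every outbound interrupt source on the
** adapter (type A/B/C message units) and return the previous interrupt
** mask so the caller can restore it later with arcmsr_enable_allintr().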
268 **********************************************************************
269 */
270 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
271 {
272 	u_int32_t intmask_org=0;
273 
274 	switch (acb->adapter_type) {
275 	case ACB_ADAPTER_TYPE_A: {
276 			/* disable all outbound interrupt */
277 			intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
278 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
279 		}
280 		break;
281 	case ACB_ADAPTER_TYPE_B: {
282 			/* disable all outbound interrupt */
283 			intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
284 			0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
285 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
286 		}
287 		break;
288 	case ACB_ADAPTER_TYPE_C: {
289 			/* disable all outbound interrupt */
290 			intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask)	; /* disable outbound message0 int */
291 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
292 		}
293 		break;
294 	}
295 	return(intmask_org);
296 }
297 /*
298 **********************************************************************
299 **********************************************************************
300 */
301 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
302 {
303 	u_int32_t mask;
304 
305 	switch (acb->adapter_type) {
306 	case ACB_ADAPTER_TYPE_A: {
307 			/* enable outbound Post Queue, outbound doorbell Interrupt */
308 			mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
309 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
310 			acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
311 		}
312 		break;
313 	case ACB_ADAPTER_TYPE_B: {
314 			/* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
315 			mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
316 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
317 			acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
318 		}
319 		break;
320 	case ACB_ADAPTER_TYPE_C: {
321 			/* enable outbound Post Queue, outbound doorbell Interrupt */
322 			mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
323 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
324 			acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
325 		}
326 		break;
327 	}
328 	return;
329 }
330 /*
331 **********************************************************************
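** arcmsr_hba/hbb/hbc_wait_msgint_ready: poll for the adapter's
** "message command done" indication and clear it when seen.  Each
** variant polls every 10 ms, 100 times per retry (about 1 second),
** for up to 20 retries (about 20 seconds) before returning FALSE.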
332 **********************************************************************
333 */
334 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
335 {
336 	u_int32_t Index;
337 	u_int8_t Retries=0x00;
338 
339 	do {
340 		for(Index=0; Index < 100; Index++) {
341 			if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
342 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
343 				return TRUE;
344 			}
345 			UDELAY(10000);
346 		}/*max 1 second*/
347 	}while(Retries++ < 20);/*max 20 sec*/
348 	return FALSE;
349 }
350 /*
351 **********************************************************************
352 **********************************************************************
353 */
354 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
355 {
356 	u_int32_t Index;
357 	u_int8_t Retries=0x00;
358 
359 	do {
360 		for(Index=0; Index < 100; Index++) {
361 			if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
362 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
363 				CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
364 				return TRUE;
365 			}
366 			UDELAY(10000);
367 		}/*max 1 second*/
368 	}while(Retries++ < 20);/*max 20 sec*/
369 	return FALSE;
370 }
371 /*
372 **********************************************************************
373 **********************************************************************
374 */
375 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
376 {
377 	u_int32_t Index;
378 	u_int8_t Retries=0x00;
379 
380 	do {
381 		for(Index=0; Index < 100; Index++) {
382 			if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
383 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
384 				return TRUE;
385 			}
386 			UDELAY(10000);
387 		}/*max 1 second*/
388 	}while(Retries++ < 20);/*max 20 sec*/
389 	return FALSE;
390 }
391 /*
392 ************************************************************************
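** arcmsr_flush_hba/hbb/hbc_cache: post the FLUSH_CACHE message to the
** adapter and wait for its acknowledge, retrying up to 30 times
** (roughly 10 minutes in total).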
393 ************************************************************************
394 */
395 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
396 {
397 	int retry_count=30;/* wait up to 10 minutes (30 retries x 20 s) for the adapter cache flush */
398 
399 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
400 	do {
401 		if(arcmsr_hba_wait_msgint_ready(acb)) {
402 			break;
403 		} else {
404 			retry_count--;
405 		}
406 	}while(retry_count!=0);
407 	return;
408 }
409 /*
410 ************************************************************************
411 ************************************************************************
412 */
413 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
414 {
415 	int retry_count=30;/* wait up to 10 minutes (30 retries x 20 s) for the adapter cache flush */
416 
417 	CHIP_REG_WRITE32(HBB_DOORBELL,
418 	0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
419 	do {
420 		if(arcmsr_hbb_wait_msgint_ready(acb)) {
421 			break;
422 		} else {
423 			retry_count--;
424 		}
425 	}while(retry_count!=0);
426 	return;
427 }
428 /*
429 ************************************************************************
430 ************************************************************************
431 */
432 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
433 {
434 	int retry_count=30;/* wait up to 10 minutes (30 retries x 20 s) for the adapter cache flush */
435 
436 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
437 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
438 	do {
439 		if(arcmsr_hbc_wait_msgint_ready(acb)) {
440 			break;
441 		} else {
442 			retry_count--;
443 		}
444 	}while(retry_count!=0);
445 	return;
446 }
447 /*
448 ************************************************************************
449 ************************************************************************
450 */
451 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
452 {
453 	switch (acb->adapter_type) {
454 	case ACB_ADAPTER_TYPE_A: {
455 			arcmsr_flush_hba_cache(acb);
456 		}
457 		break;
458 	case ACB_ADAPTER_TYPE_B: {
459 			arcmsr_flush_hbb_cache(acb);
460 		}
461 		break;
462 	case ACB_ADAPTER_TYPE_C: {
463 			arcmsr_flush_hbc_cache(acb);
464 		}
465 		break;
466 	}
467 	return;
468 }
469 /*
470 *******************************************************************************
471 *******************************************************************************
472 */
473 static int arcmsr_suspend(device_t dev)
474 {
475 	struct AdapterControlBlock	*acb = device_get_softc(dev);
476 
477 	/* flush controller */
478 	arcmsr_iop_parking(acb);
479 	/* disable all outbound interrupt */
480 	arcmsr_disable_allintr(acb);
481 	return(0);
482 }
483 /*
484 *******************************************************************************
485 *******************************************************************************
486 */
487 static int arcmsr_resume(device_t dev)
488 {
489 	struct AdapterControlBlock	*acb = device_get_softc(dev);
490 
491 	arcmsr_iop_init(acb);
492 	return(0);
493 }
494 /*
495 *********************************************************************************
496 *********************************************************************************
497 */
498 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
499 {
500 	struct AdapterControlBlock *acb;
501 	u_int8_t target_id, target_lun;
502 	struct cam_sim * sim;
503 
504 	sim=(struct cam_sim *) cb_arg;
505 	acb =(struct AdapterControlBlock *) cam_sim_softc(sim);
506 	switch (code) {
507 	case AC_LOST_DEVICE:
508 		target_id=xpt_path_target_id(path);
509 		target_lun=xpt_path_lun_id(path);
510 		if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
511 			break;
512 		}
513 		kprintf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
514 		break;
515 	default:
516 		break;
517 	}
518 }
519 /*
520 **********************************************************************
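** arcmsr_srb_complete: finish an SRB: sync and unload its data DMA map,
** decrement the outstanding command count when stand_flag is 1, release
** the frozen SIM queue once the count drops below
** ARCMSR_RELEASE_SIMQ_LEVEL, return the SRB to the working queue and
** hand the CCB back to CAM with xpt_done().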
521 **********************************************************************
522 */
523 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
524 {
525 	struct AdapterControlBlock *acb=srb->acb;
526 	union ccb * pccb=srb->pccb;
527 
528 	if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
529 		bus_dmasync_op_t op;
530 
531 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
532 			op = BUS_DMASYNC_POSTREAD;
533 		} else {
534 			op = BUS_DMASYNC_POSTWRITE;
535 		}
536 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
537 		bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
538 	}
539 	if(stand_flag==1) {
540 		atomic_subtract_int(&acb->srboutstandingcount, 1);
541 		if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
542 		acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
543 			acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
544 			pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
545 		}
546 	}
547 	srb->startdone=ARCMSR_SRB_DONE;
548 	srb->srb_flags=0;
549 	acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
550 	acb->workingsrb_doneindex++;
551 	acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
552 	xpt_done(pccb);
553 	return;
554 }
555 /*
556 **********************************************************************
557 **********************************************************************
558 */
559 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
560 {
561 	union ccb * pccb=srb->pccb;
562 
563 	pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
564 	pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
565 	if(&pccb->csio.sense_data) {
566 		memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
567 		memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
568 		get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
569 		((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
570 		pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
571 	}
572 	return;
573 }
574 /*
575 *********************************************************************
576 *********************************************************************
577 */
578 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
579 {
580 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
581 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
582 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
583 	}
584 	return;
585 }
586 /*
587 *********************************************************************
588 *********************************************************************
589 */
590 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
591 {
592 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
593 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
594 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
595 	}
596 	return;
597 }
598 /*
599 *********************************************************************
600 *********************************************************************
601 */
602 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
603 {
604 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
605 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
606 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
607 		kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
608 	}
609 	return;
610 }
611 /*
612 *********************************************************************
613 *********************************************************************
614 */
615 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
616 {
617 	switch (acb->adapter_type) {
618 	case ACB_ADAPTER_TYPE_A: {
619 			arcmsr_abort_hba_allcmd(acb);
620 		}
621 		break;
622 	case ACB_ADAPTER_TYPE_B: {
623 			arcmsr_abort_hbb_allcmd(acb);
624 		}
625 		break;
626 	case ACB_ADAPTER_TYPE_C: {
627 			arcmsr_abort_hbc_allcmd(acb);
628 		}
629 		break;
630 	}
631 	return;
632 }
633 /*
634 **************************************************************************
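** arcmsr_report_srb_state: translate the firmware DeviceStatus of a
** completed request into a CAM status (selection timeout, abort and
** init failures become CAM_DEV_NOT_THERE, check conditions return
** sense data) and keep the per target/LUN devstate table up to date.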
635 **************************************************************************
636 */
637 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
638 {
639 	int target, lun;
640 
641 	target=srb->pccb->ccb_h.target_id;
642 	lun=srb->pccb->ccb_h.target_lun;
643 	if(error == FALSE) {
644 		if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
645 			acb->devstate[target][lun]=ARECA_RAID_GOOD;
646 		}
647 		srb->pccb->ccb_h.status |= CAM_REQ_CMP;
648 		arcmsr_srb_complete(srb, 1);
649 	} else {
650 		switch(srb->arcmsr_cdb.DeviceStatus) {
651 		case ARCMSR_DEV_SELECT_TIMEOUT: {
652 				if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
653 					kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
654 				}
655 				acb->devstate[target][lun]=ARECA_RAID_GONE;
656 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
657 				arcmsr_srb_complete(srb, 1);
658 			}
659 			break;
660 		case ARCMSR_DEV_ABORTED:
661 		case ARCMSR_DEV_INIT_FAIL: {
662 				acb->devstate[target][lun]=ARECA_RAID_GONE;
663 				srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
664 				arcmsr_srb_complete(srb, 1);
665 			}
666 			break;
667 		case SCSISTAT_CHECK_CONDITION: {
668 				acb->devstate[target][lun]=ARECA_RAID_GOOD;
669 				arcmsr_report_sense_info(srb);
670 				arcmsr_srb_complete(srb, 1);
671 			}
672 			break;
673 		default:
674 			kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n"
675 					, acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
676 			acb->devstate[target][lun]=ARECA_RAID_GONE;
677 			srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
678 			/* unknown error or CRC error, just retry */
679 			arcmsr_srb_complete(srb, 1);
680 			break;
681 		}
682 	}
683 	return;
684 }
685 /*
686 **************************************************************************
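** arcmsr_drain_donequeue: convert a completed post queue entry
** (flag_srb) back into its CommandControlBlock.  Type A/B adapters
** report the frame address shifted right by 5, type C reports it with
** the low four bits used as flags, so the physical offset is rebuilt
** accordingly and added to vir2phy_offset before the SRB is validated
** and completed.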
687 **************************************************************************
688 */
689 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
690 {
691 	struct CommandControlBlock *srb;
692 
693 	/* check if command done with no error*/
694 	switch (acb->adapter_type) {
695 	case ACB_ADAPTER_TYPE_C:
696 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0));/*frame must be 32 bytes aligned*/
697 		break;
698 	case ACB_ADAPTER_TYPE_A:
699 	case ACB_ADAPTER_TYPE_B:
700 	default:
701 		srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
702 		break;
703 	}
704 	if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
705 		if(srb->startdone==ARCMSR_SRB_ABORTED) {
706 			kprintf("arcmsr%d: srb='%p' isr got aborted command \n", acb->pci_unit, srb);
707 			srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
708 			arcmsr_srb_complete(srb, 1);
709 			return;
710 		}
711 		kprintf("arcmsr%d: isr got an illegal srb command done "
712 			"acb='%p' srb='%p' srbacb='%p' startdone=0x%x srboutstandingcount=%d \n",
713 			acb->pci_unit, acb, srb, srb->acb, srb->startdone, acb->srboutstandingcount);
714 		return;
715 	}
716 	arcmsr_report_srb_state(acb, srb, error);
717 	return;
718 }
719 /*
720 **********************************************************************
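** arcmsr_done4abort_postqueue: drain whatever is still sitting in the
** adapter's outbound post queue (the register FIFO for type A/C, the
** done_qbuffer ring for type B) so no stale completions remain after
** an abort or reset.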
721 **********************************************************************
722 */
723 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
724 {
725 	int i=0;
726 	u_int32_t flag_srb;
727 	u_int16_t error;
728 
729 	switch (acb->adapter_type) {
730 	case ACB_ADAPTER_TYPE_A: {
731 			u_int32_t outbound_intstatus;
732 
733 			/*clear and abort all outbound posted Q*/
734 			outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
735 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
736 			while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
737 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
738 				arcmsr_drain_donequeue(acb, flag_srb, error);
739 			}
740 		}
741 		break;
742 	case ACB_ADAPTER_TYPE_B: {
743 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
744 
745 			/*clear all outbound posted Q*/
746 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
747 			for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
748 				if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
749 					phbbmu->done_qbuffer[i]=0;
750 					error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
751 					arcmsr_drain_donequeue(acb, flag_srb, error);
752 				}
753 				phbbmu->post_qbuffer[i]=0;
754 			}/*drain reply FIFO*/
755 			phbbmu->doneq_index=0;
756 			phbbmu->postq_index=0;
757 		}
758 		break;
759 	case ACB_ADAPTER_TYPE_C: {
760 
761 			while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
762 				flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
763 				error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
764 				arcmsr_drain_donequeue(acb, flag_srb, error);
765 			}
766 		}
767 		break;
768 	}
769 	return;
770 }
771 /*
772 ****************************************************************************
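** arcmsr_iop_reset: with interrupts masked, drain the outbound post
** queue, ask the IOP to abort all outstanding commands, complete every
** SRB still marked ARCMSR_SRB_START as CAM_REQ_ABORTED, then restore
** the interrupt mask and reset the working queue indexes.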
773 ****************************************************************************
774 */
775 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
776 {
777 	struct CommandControlBlock *srb;
778 	u_int32_t intmask_org;
779 	u_int32_t i=0;
780 
781 	if(acb->srboutstandingcount>0) {
782 		/* disable all outbound interrupt */
783 		intmask_org=arcmsr_disable_allintr(acb);
784 		/*clear and abort all outbound posted Q*/
785 		arcmsr_done4abort_postqueue(acb);
786 		/* talk to iop 331 outstanding command aborted*/
787 		arcmsr_abort_allcmd(acb);
788 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
789 			srb=acb->psrb_pool[i];
790 			if(srb->startdone==ARCMSR_SRB_START) {
791 				srb->startdone=ARCMSR_SRB_ABORTED;
792 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
793 				arcmsr_srb_complete(srb, 1);
794 			}
795 		}
796 		/* enable all outbound interrupt */
797 		arcmsr_enable_allintr(acb, intmask_org);
798 	}
799 	atomic_set_int(&acb->srboutstandingcount, 0);
800 	acb->workingsrb_doneindex=0;
801 	acb->workingsrb_startindex=0;
802 	return;
803 }
804 /*
805 **********************************************************************
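** arcmsr_build_srb: translate a CAM SCSI I/O request into an Areca CDB.
** The SCSI CDB is copied in and a scatter/gather list is built: SG32
** entries for segments below 4 GiB and SG64 entries otherwise, with any
** segment that crosses a 4 GiB boundary split in two.  The
** ARCMSR_CDB_FLAG_SGL_BSIZE flag is set when the CDB grows beyond 256
** bytes.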
806 **********************************************************************
807 */
808 static void arcmsr_build_srb(struct CommandControlBlock *srb,
809 		bus_dma_segment_t *dm_segs, u_int32_t nseg)
810 {
811 	struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
812 	u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
813 	u_int32_t address_lo, address_hi;
814 	union ccb * pccb=srb->pccb;
815 	struct ccb_scsiio * pcsio= &pccb->csio;
816 	u_int32_t arccdbsize=0x30;
817 
818 	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
819 	arcmsr_cdb->Bus=0;
820 	arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
821 	arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
822 	arcmsr_cdb->Function=1;
823 	arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
824 	arcmsr_cdb->Context=0;
825 	bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
826 	if(nseg != 0) {
827 		struct AdapterControlBlock *acb=srb->acb;
828 		bus_dmasync_op_t op;
829 		u_int32_t length, i, cdb_sgcount=0;
830 
831 		if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
832 			op=BUS_DMASYNC_PREREAD;
833 		} else {
834 			op=BUS_DMASYNC_PREWRITE;
835 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
836 			srb->srb_flags|=SRB_FLAG_WRITE;
837 		}
838 		bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
839 		for(i=0;i<nseg;i++) {
840 			/* Get the physical address of the current data pointer */
841 			length=arcmsr_htole32(dm_segs[i].ds_len);
842 			address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
843 			address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
844 			if(address_hi==0) {
845 				struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
846 				pdma_sg->address=address_lo;
847 				pdma_sg->length=length;
848 				psge += sizeof(struct SG32ENTRY);
849 				arccdbsize += sizeof(struct SG32ENTRY);
850 			} else {
851 				u_int32_t sg64s_size=0, tmplength=length;
852 
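				/*
				** Example (hypothetical values): a segment with
				** address_hi=1, address_lo=0xFFFFF000, length=0x2000
				** crosses the next 4 GiB boundary (0x2_0000_0000), so
				** it is split into two SG64 entries of 0x1000 bytes
				** each, one at 0x1_FFFF_F000 and one at 0x2_0000_0000.
				*/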
853 				while(1) {
854 					u_int64_t span4G, length0;
855 					struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
856 
857 					span4G=(u_int64_t)address_lo + tmplength;
858 					pdma_sg->addresshigh=address_hi;
859 					pdma_sg->address=address_lo;
860 					if(span4G > 0x100000000) {
861 						/*see if cross 4G boundary*/
862 						length0=0x100000000-address_lo;
863 						pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
864 						address_hi=address_hi+1;
865 						address_lo=0;
866 						tmplength=tmplength-(u_int32_t)length0;
867 						sg64s_size += sizeof(struct SG64ENTRY);
868 						psge += sizeof(struct SG64ENTRY);
869 						cdb_sgcount++;
870 					} else {
871 						pdma_sg->length=tmplength|IS_SG64_ADDR;
872 						sg64s_size += sizeof(struct SG64ENTRY);
873 						psge += sizeof(struct SG64ENTRY);
874 						break;
875 					}
876 				}
877 				arccdbsize += sg64s_size;
878 			}
879 			cdb_sgcount++;
880 		}
881 		arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
882 		arcmsr_cdb->DataLength=pcsio->dxfer_len;
883 		if( arccdbsize > 256) {
884 			arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
885 		}
886 	} else {
887 		arcmsr_cdb->DataLength = 0;
888 	}
889 	srb->arc_cdb_size=arccdbsize;
890 	return;
891 }
892 /*
893 **************************************************************************
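** arcmsr_post_srb: hand a built SRB to the adapter.  Type A writes the
** shifted CDB address to the inbound queue port, type B stores it in
** the post_qbuffer ring and rings the drv2iop doorbell, and type C
** posts a stamp combining the address with an encoded CDB size through
** the inbound queue port registers (writing the high address register
** first when it is non-zero).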
894 **************************************************************************
895 */
896 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
897 {
898 	u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
899 	struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
900 
901 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
902 	atomic_add_int(&acb->srboutstandingcount, 1);
903 	srb->startdone=ARCMSR_SRB_START;
904 
905 	switch (acb->adapter_type) {
906 	case ACB_ADAPTER_TYPE_A: {
907 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
908 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
909 			} else {
910 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
911 			}
912 		}
913 		break;
914 	case ACB_ADAPTER_TYPE_B: {
915 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
916 			int ending_index, index;
917 
918 			index=phbbmu->postq_index;
919 			ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
920 			phbbmu->post_qbuffer[ending_index]=0;
921 			if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
922 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
923 			} else {
924 				phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
925 			}
926 			index++;
927 			index %= ARCMSR_MAX_HBB_POSTQUEUE;     /* wrap back to 0 at the end of the queue */
928 			phbbmu->postq_index=index;
929 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
930 		}
931 		break;
932 	case ACB_ADAPTER_TYPE_C:
933 		{
934 			u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
935 
936 			arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
937 			ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
938 			cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
939 			if(cdb_phyaddr_hi32)
940 			{
941 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_high, cdb_phyaddr_hi32);
942 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
943 			}
944 			else
945 			{
946 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
947 			}
948 		}
949 		break;
950 	}
951 	return;
952 }
953 /*
954 ************************************************************************
955 ************************************************************************
956 */
957 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
958 {
959 	struct QBUFFER *qbuffer=NULL;
960 
961 	switch (acb->adapter_type) {
962 	case ACB_ADAPTER_TYPE_A: {
963 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
964 
965 			qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
966 		}
967 		break;
968 	case ACB_ADAPTER_TYPE_B: {
969 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
970 
971 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
972 		}
973 		break;
974 	case ACB_ADAPTER_TYPE_C: {
975 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
976 
977 			qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
978 		}
979 		break;
980 	}
981 	return(qbuffer);
982 }
983 /*
984 ************************************************************************
985 ************************************************************************
986 */
987 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
988 {
989 	struct QBUFFER *qbuffer=NULL;
990 
991 	switch (acb->adapter_type) {
992 	case ACB_ADAPTER_TYPE_A: {
993 			struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
994 
995 			qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
996 		}
997 		break;
998 	case ACB_ADAPTER_TYPE_B: {
999 			struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1000 
1001 			qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1002 		}
1003 		break;
1004 	case ACB_ADAPTER_TYPE_C: {
1005 			struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1006 
1007 			qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1008 		}
1009 		break;
1010 	}
1011 	return(qbuffer);
1012 }
1013 /*
1014 **************************************************************************
1015 **************************************************************************
1016 */
1017 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1018 {
1019 	switch (acb->adapter_type) {
1020 	case ACB_ADAPTER_TYPE_A: {
1021 			/* let IOP know data has been read */
1022 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1023 		}
1024 		break;
1025 	case ACB_ADAPTER_TYPE_B: {
1026 			/* let IOP know data has been read */
1027 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1028 		}
1029 		break;
1030 	case ACB_ADAPTER_TYPE_C: {
1031 			/* let IOP know data has been read */
1032 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1033 		}
1034 	}
1035 	return;
1036 }
1037 /*
1038 **************************************************************************
1039 **************************************************************************
1040 */
1041 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1042 {
1043 	switch (acb->adapter_type) {
1044 	case ACB_ADAPTER_TYPE_A: {
1045 			/*
1046 			** push inbound doorbell tell iop, driver data write ok
1047 			** and wait reply on next hwinterrupt for next Qbuffer post
1048 			*/
1049 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1050 		}
1051 		break;
1052 	case ACB_ADAPTER_TYPE_B: {
1053 			/*
1054 			** push inbound doorbell tell iop, driver data write ok
1055 			** and wait reply on next hwinterrupt for next Qbuffer post
1056 			*/
1057 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1058 		}
1059 		break;
1060 	case ACB_ADAPTER_TYPE_C: {
1061 			/*
1062 			** push inbound doorbell tell iop, driver data write ok
1063 			** and wait reply on next hwinterrupt for next Qbuffer post
1064 			*/
1065 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1066 		}
1067 		break;
1068 	}
1069 }
1070 /*
1071 **********************************************************************
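** arcmsr_post_ioctldata2iop: when the IOP is ready for more data
** (ACB_F_MESSAGE_WQBUFFER_READ), copy up to 124 pending bytes from the
** driver's wqbuffer ring into the IOP's write buffer and ring the
** doorbell so the firmware picks them up.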
1072 **********************************************************************
1073 */
1074 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1075 {
1076 	u_int8_t *pQbuffer;
1077 	struct QBUFFER *pwbuffer;
1078 	u_int8_t * iop_data;
1079 	int32_t allxfer_len=0;
1080 
1081 	pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1082 	iop_data=(u_int8_t *)pwbuffer->data;
1083 	if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1084 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1085 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1086 			&& (allxfer_len<124)) {
1087 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1088 			memcpy(iop_data, pQbuffer, 1);
1089 			acb->wqbuf_firstindex++;
1090 		acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /* wrap back to 0 at the end of the buffer */
1091 			iop_data++;
1092 			allxfer_len++;
1093 		}
1094 		pwbuffer->data_len=allxfer_len;
1095 		/*
1096 		** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1097 		*/
1098 		arcmsr_iop_message_wrote(acb);
1099 	}
1100 	return;
1101 }
1102 /*
1103 ************************************************************************
1104 ************************************************************************
1105 */
1106 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1107 {
1108 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1109 	CHIP_REG_WRITE32(HBA_MessageUnit,
1110 	0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1111 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
1112 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1113 			, acb->pci_unit);
1114 	}
1115 	return;
1116 }
1117 /*
1118 ************************************************************************
1119 ************************************************************************
1120 */
1121 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1122 {
1123 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1124 	CHIP_REG_WRITE32(HBB_DOORBELL,
1125 	0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1126 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1127 		kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1128 			, acb->pci_unit);
1129 	}
1130 	return;
1131 }
1132 /*
1133 ************************************************************************
1134 ************************************************************************
1135 */
1136 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1137 {
1138 	acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1139 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1140 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1141 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1142 		kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1143 	}
1144 	return;
1145 }
1146 /*
1147 ************************************************************************
1148 ************************************************************************
1149 */
1150 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1151 {
1152 	switch (acb->adapter_type) {
1153 	case ACB_ADAPTER_TYPE_A: {
1154 			arcmsr_stop_hba_bgrb(acb);
1155 		}
1156 		break;
1157 	case ACB_ADAPTER_TYPE_B: {
1158 			arcmsr_stop_hbb_bgrb(acb);
1159 		}
1160 		break;
1161 	case ACB_ADAPTER_TYPE_C: {
1162 			arcmsr_stop_hbc_bgrb(acb);
1163 		}
1164 		break;
1165 	}
1166 	return;
1167 }
1168 /*
1169 ************************************************************************
1170 ************************************************************************
1171 */
1172 static void arcmsr_poll(struct cam_sim * psim)
1173 {
1174 	struct AdapterControlBlock *acb;
1175 
1176 	acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1177 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1178 	arcmsr_interrupt(acb);
1179 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1180 	return;
1181 }
1182 /*
1183 **************************************************************************
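** arcmsr_iop2drv_data_wrote_handle: the IOP has placed message data in
** its read buffer.  If the driver's rqbuffer ring has room for all of
** it, copy it in and acknowledge the read; otherwise set
** ACB_F_IOPDATA_OVERFLOW and leave the data with the IOP for now.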
1184 **************************************************************************
1185 */
1186 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1187 {
1188 	struct QBUFFER *prbuffer;
1189 	u_int8_t *pQbuffer;
1190 	u_int8_t *iop_data;
1191 	int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1192 
1193 	/* check whether this IOP data would overflow our rqbuffer */
1194 	rqbuf_lastindex=acb->rqbuf_lastindex;
1195 	rqbuf_firstindex=acb->rqbuf_firstindex;
1196 	prbuffer=arcmsr_get_iop_rqbuffer(acb);
1197 	iop_data=(u_int8_t *)prbuffer->data;
1198 	iop_len=prbuffer->data_len;
1199 	my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1200 	if(my_empty_len>=iop_len) {
1201 		while(iop_len > 0) {
1202 			pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1203 			memcpy(pQbuffer, iop_data, 1);
1204 			rqbuf_lastindex++;
1205 			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/* wrap back to 0 at the end of the buffer */
1206 			iop_data++;
1207 			iop_len--;
1208 		}
1209 		acb->rqbuf_lastindex=rqbuf_lastindex;
1210 		arcmsr_iop_message_read(acb);
1211 		/*signature, let IOP know data has been read */
1212 	} else {
1213 		acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1214 	}
1215 	return;
1216 }
1217 /*
1218 **************************************************************************
1219 **************************************************************************
1220 */
1221 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1222 {
1223 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1224 	/*
1225 	*****************************************************************
1226 	**   check if the user space program has queued any messages for us;
1227 	**   if so, now is the time to send them to Areca's firmware
1228 	*****************************************************************
1229 	*/
1230 	if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1231 		u_int8_t *pQbuffer;
1232 		struct QBUFFER *pwbuffer;
1233 		u_int8_t *iop_data;
1234 		int allxfer_len=0;
1235 
1236 		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1237 		pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1238 		iop_data=(u_int8_t *)pwbuffer->data;
1239 		while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1240 			&& (allxfer_len<124)) {
1241 			pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1242 			memcpy(iop_data, pQbuffer, 1);
1243 			acb->wqbuf_firstindex++;
1244 			acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /* wrap back to 0 at the end of the buffer */
1245 			iop_data++;
1246 			allxfer_len++;
1247 		}
1248 		pwbuffer->data_len=allxfer_len;
1249 		/*
1250 		** push inbound doorbell tell iop driver data write ok
1251 		** and wait reply on next hwinterrupt for next Qbuffer post
1252 		*/
1253 		arcmsr_iop_message_wrote(acb);
1254 	}
1255 	if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1256 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1257 	}
1258 	return;
1259 }
1260 
1261 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1262 {
1263 /*
1264 	if (ccb->ccb_h.status != CAM_REQ_CMP)
1265 		kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1266 	else
1267 		kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1268 */
1269 	xpt_free_path(ccb->ccb_h.path);
1270 }
1271 
1272 static void	arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1273 {
1274 	struct cam_path     *path;
1275 	union ccb            ccb;
1276 
1277 	if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1278 		return;
1279 /*	kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1280 	bzero(&ccb, sizeof(union ccb));
1281 	xpt_setup_ccb(&ccb.ccb_h, path, 5);
1282 	ccb.ccb_h.func_code = XPT_SCAN_LUN;
1283 	ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1284 	ccb.crcn.flags = CAM_FLAG_NONE;
1285 	xpt_action(&ccb);
1286 	return;
1287 }
1288 
1289 
1290 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1291 {
1292 	struct CommandControlBlock *srb;
1293 	u_int32_t intmask_org;
1294 	int i;
1295 
1296 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1297 	/* disable all outbound interrupts */
1298 	intmask_org = arcmsr_disable_allintr(acb);
1299 	for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1300 	{
1301 		srb = acb->psrb_pool[i];
1302 		if (srb->startdone == ARCMSR_SRB_START)
1303 		{
1304 			if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1305 			{
1306 				srb->startdone = ARCMSR_SRB_ABORTED;
1307 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1308 				arcmsr_srb_complete(srb, 1);
1309 			}
1310 		}
1311 	}
1312 	/* enable outbound Post Queue, outbound doorbell Interrupt */
1313 	arcmsr_enable_allintr(acb, intmask_org);
1314 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1315 }
1316 
1317 
1318 /*
1319 **************************************************************************
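** arcmsr_dr_handle: dynamic reconfiguration.  Read the firmware's
** current device map from msgcode_rwbuffer, compare it with the cached
** device_map[], and for every changed target/LUN bit either abort the
** outstanding CCBs and mark the unit gone, or mark it good; in both
** cases a CAM rescan of that LUN is scheduled.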
1320 **************************************************************************
1321 */
1322 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1323 	u_int32_t	devicemap;
1324 	u_int32_t	target, lun;
1325 	u_int32_t	deviceMapCurrent[4]={0};
1326 	u_int8_t	*pDevMap;
1327 
1328 	switch (acb->adapter_type) {
1329 	case ACB_ADAPTER_TYPE_A:
1330 		devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1331 		for (target = 0; target < 4; target++)
1332 		{
1333 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1334 			devicemap += 4;
1335 		}
1336 		break;
1337 
1338 	case ACB_ADAPTER_TYPE_B:
1339 		devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1340 		for (target = 0; target < 4; target++)
1341 		{
1342 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1343 			devicemap += 4;
1344 		}
1345 		break;
1346 
1347 	case ACB_ADAPTER_TYPE_C:
1348 		devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1349 		for (target = 0; target < 4; target++)
1350 		{
1351 			deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1352 			devicemap += 4;
1353 		}
1354 		break;
1355 	}
1356 	if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1357 	{
1358 		acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1359 	}
1360 	/*
1361 	** adapter posted CONFIG message
1362 	** copy the new map and note any differences from the current map
1363 	*/
1364 	pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1365 	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1366 	{
1367 		if (*pDevMap != acb->device_map[target])
1368 		{
1369 			u_int8_t difference, bit_check;
1370 
1371 			difference = *pDevMap ^ acb->device_map[target];
1372 			for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1373 			{
1374 				bit_check=(1 << lun);	/* check each LUN bit of this target */
1375 				if(difference & bit_check)
1376 				{
1377 					if(acb->device_map[target] & bit_check)
1378 					{/* unit departed */
1379 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1380 						arcmsr_abort_dr_ccbs(acb, target, lun);
1381 						arcmsr_rescan_lun(acb, target, lun);
1382 						acb->devstate[target][lun] = ARECA_RAID_GONE;
1383 					}
1384 					else
1385 					{/* unit arrived */
1386 						kprintf("arcmsr_dr_handle: Target=%x, lun=%x, ARRIVING!!!\n",target,lun);
1387 						arcmsr_rescan_lun(acb, target, lun);
1388 						acb->devstate[target][lun] = ARECA_RAID_GOOD;
1389 					}
1390 				}
1391 			}
1392 /*			kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1393 			acb->device_map[target]= *pDevMap;
1394 		}
1395 		pDevMap++;
1396 	}
1397 }
1398 /*
1399 **************************************************************************
1400 **************************************************************************
1401 */
1402 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1403 	u_int32_t outbound_message;
1404 
1405 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1406 	outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1407 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1408 		arcmsr_dr_handle( acb );
1409 }
1410 /*
1411 **************************************************************************
1412 **************************************************************************
1413 */
1414 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1415 	u_int32_t outbound_message;
1416 
1417 	/* clear interrupts */
1418 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1419 	outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1420 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1421 		arcmsr_dr_handle( acb );
1422 }
1423 /*
1424 **************************************************************************
1425 **************************************************************************
1426 */
1427 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1428 	u_int32_t outbound_message;
1429 
1430 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1431 	outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1432 	if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1433 		arcmsr_dr_handle( acb );
1434 }
1435 /*
1436 **************************************************************************
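** arcmsr_hba_doorbell_isr: acknowledge the outbound doorbell and
** dispatch it: DATA_WRITE_OK means the IOP wrote message data for the
** driver to read, DATA_READ_OK means it consumed what the driver wrote
** and more can be posted.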
1437 **************************************************************************
1438 */
1439 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1440 {
1441 	u_int32_t outbound_doorbell;
1442 
1443 	/*
1444 	*******************************************************************
1445 	**  We may need to check here whether wrqbuffer_lock is held.
1446 	**  DOORBELL: ding! dong!
1447 	**  Check whether there is any mail that needs to be fetched from the firmware.
1448 	*******************************************************************
1449 	*/
1450 	outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit,
1451 	0, outbound_doorbell);
1452 	CHIP_REG_WRITE32(HBA_MessageUnit,
1453 	0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1454 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1455 		arcmsr_iop2drv_data_wrote_handle(acb);
1456 	}
1457 	if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1458 		arcmsr_iop2drv_data_read_handle(acb);
1459 	}
1460 	return;
1461 }
1462 /*
1463 **************************************************************************
1464 **************************************************************************
1465 */
1466 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1467 {
1468 	u_int32_t outbound_doorbell;
1469 
1470 	/*
1471 	**  We may need to check here whether wrqbuffer_lock is held.
1472 	**  DOORBELL: ding! dong!
1473 	**  Check whether there is any mail that needs to be fetched from the firmware.
1474 	**  check if there are any mail need to pack from firmware
1475 	*******************************************************************
1476 	*/
1477 	outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1478 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1479 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1480 		arcmsr_iop2drv_data_wrote_handle(acb);
1481 	}
1482 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1483 		arcmsr_iop2drv_data_read_handle(acb);
1484 	}
1485 	if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1486 		arcmsr_hbc_message_isr(acb);    /* messenger of "driver to iop commands" */
1487 	}
1488 	return;
1489 }
1490 /*
1491 **************************************************************************
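** arcmsr_hba_postqueue_isr: drain the Type A outbound post queue, completing
** each returned SRB and noting whether the firmware flagged an error.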
1492 **************************************************************************
1493 */
1494 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1495 {
1496 	u_int32_t flag_srb;
1497 	u_int16_t error;
1498 
1499 	/*
1500 	*****************************************************************************
1501 	**               areca cdb command done
1502 	*****************************************************************************
1503 	*/
1504 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1505 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1506 	while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1507 		0, outbound_queueport)) != 0xFFFFFFFF) {
1508 		/* check if command done with no error*/
1509 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1510 		arcmsr_drain_donequeue(acb, flag_srb, error);
1511 	}	/*drain reply FIFO*/
1512 	return;
1513 }
1514 /*
1515 **************************************************************************
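** arcmsr_hbb_postqueue_isr: walk the Type B done_qbuffer ring (wrapping at
** ARCMSR_MAX_HBB_POSTQUEUE) and complete each returned SRB.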
1516 **************************************************************************
1517 */
1518 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1519 {
1520 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1521 	u_int32_t flag_srb;
1522 	int index;
1523 	u_int16_t error;
1524 
1525 	/*
1526 	*****************************************************************************
1527 	**               areca cdb command done
1528 	*****************************************************************************
1529 	*/
1530 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1531 		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1532 	index=phbbmu->doneq_index;
1533 	while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1534 		phbbmu->done_qbuffer[index]=0;
1535 		index++;
1536 		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap index back to 0 past the last entry */
1537 		phbbmu->doneq_index=index;
1538 		/* check if command done with no error*/
1539 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
1540 		arcmsr_drain_donequeue(acb, flag_srb, error);
1541 	}	/*drain reply FIFO*/
1542 	return;
1543 }
1544 /*
1545 **************************************************************************
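** arcmsr_hbc_postqueue_isr: drain the Type C outbound queue port, breaking out
** and signalling the IOP once ARCMSR_HBC_ISR_THROTTLING_LEVEL completions have
** been handled in a single pass.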
1546 **************************************************************************
1547 */
1548 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1549 {
1550 	u_int32_t flag_srb,throttling=0;
1551 	u_int16_t error;
1552 
1553 	/*
1554 	*****************************************************************************
1555 	**               areca cdb command done
1556 	*****************************************************************************
1557 	*/
1558 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1559 
1560 	while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1561 
1562 		flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1563 		/* check if command done with no error*/
1564 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
1565 		arcmsr_drain_donequeue(acb, flag_srb, error);
1566         if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1567             CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1568             break;
1569         }
1570         throttling++;
1571 	}	/*drain reply FIFO*/
1572 	return;
1573 }
1574 /*
1575 **********************************************************************
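** arcmsr_handle_hba_isr: Type A top-level ISR; read and clear the enabled
** outbound interrupt status, then dispatch doorbell, post queue and message
** interrupts to their handlers.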
1576 **********************************************************************
1577 */
1578 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1579 {
1580 	u_int32_t outbound_intstatus;
1581 	/*
1582 	*********************************************
1583 	**   check outbound intstatus
1584 	*********************************************
1585 	*/
1586 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1587 	if(!outbound_intstatus) {
1588 		/* it must be a shared irq */
1589 		return;
1590 	}
1591 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
1592 	/* MU doorbell interrupts*/
1593 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1594 		arcmsr_hba_doorbell_isr(acb);
1595 	}
1596 	/* MU post queue interrupts*/
1597 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1598 		arcmsr_hba_postqueue_isr(acb);
1599 	}
1600 	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1601 		arcmsr_hba_message_isr(acb);
1602 	}
1603 	return;
1604 }
1605 /*
1606 **********************************************************************
1607 **********************************************************************
1608 */
1609 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1610 {
1611 	u_int32_t outbound_doorbell;
1612 	/*
1613 	*********************************************
1614 	**   check outbound intstatus
1615 	*********************************************
1616 	*/
1617 	outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1618 	if(!outbound_doorbell) {
1619 		/* it must be a shared irq */
1620 		return;
1621 	}
1622 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1623 	CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1624 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1625 	/* MU ioctl transfer doorbell interrupts*/
1626 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1627 		arcmsr_iop2drv_data_wrote_handle(acb);
1628 	}
1629 	if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1630 		arcmsr_iop2drv_data_read_handle(acb);
1631 	}
1632 	/* MU post queue interrupts*/
1633 	if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1634 		arcmsr_hbb_postqueue_isr(acb);
1635 	}
1636 	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1637 		arcmsr_hbb_message_isr(acb);
1638 	}
1639 	return;
1640 }
1641 /*
1642 **********************************************************************
1643 **********************************************************************
1644 */
1645 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1646 {
1647 	u_int32_t host_interrupt_status;
1648 	/*
1649 	*********************************************
1650 	**   check outbound intstatus
1651 	*********************************************
1652 	*/
1653 	host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1654 	if(!host_interrupt_status) {
1655 		/* it must be a shared irq */
1656 		return;
1657 	}
1658 	/* MU doorbell interrupts*/
1659 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1660 		arcmsr_hbc_doorbell_isr(acb);
1661 	}
1662 	/* MU post queue interrupts*/
1663 	if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1664 		arcmsr_hbc_postqueue_isr(acb);
1665 	}
1666 	return;
1667 }
1668 /*
1669 ******************************************************************************
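** arcmsr_interrupt: dispatch interrupt servicing to the handler that matches
** the adapter type (A, B or C).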
1670 ******************************************************************************
1671 */
1672 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1673 {
1674 	switch (acb->adapter_type) {
1675 	case ACB_ADAPTER_TYPE_A:
1676 		arcmsr_handle_hba_isr(acb);
1677 		break;
1678 	case ACB_ADAPTER_TYPE_B:
1679 		arcmsr_handle_hbb_isr(acb);
1680 		break;
1681 	case ACB_ADAPTER_TYPE_C:
1682 		arcmsr_handle_hbc_isr(acb);
1683 		break;
1684 	default:
1685 		kprintf("arcmsr%d: interrupt service,"
1686 		" unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1687 		break;
1688 	}
1689 	return;
1690 }
1691 /*
1692 **********************************************************************
1693 **********************************************************************
1694 */
1695 static void arcmsr_intr_handler(void *arg)
1696 {
1697 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1698 
1699 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1700 	arcmsr_interrupt(acb);
1701 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1702 }
1703 /*
1704 ******************************************************************************
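** arcmsr_polling_devmap: periodic callout that asks the firmware for its
** current device map (GET_CONFIG) and re-arms itself every 5 seconds until
** the adapter is stopped.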
1705 ******************************************************************************
1706 */
1707 static void	arcmsr_polling_devmap(void* arg)
1708 {
1709 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1710 	switch (acb->adapter_type) {
1711 	case ACB_ADAPTER_TYPE_A:
1712 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1713 		break;
1714 
1715 	case ACB_ADAPTER_TYPE_B:
1716 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1717 		break;
1718 
1719 	case ACB_ADAPTER_TYPE_C:
1720 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1721 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1722 		break;
1723 	}
1724 
1725 	if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1726 	{
1727 		callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);	/* poll every 5 seconds */
1728 	}
1729 }
1730 
1731 /*
1732 *******************************************************************************
1733 **
1734 *******************************************************************************
1735 */
1736 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1737 {
1738 	u_int32_t intmask_org;
1739 
1740 	if(acb!=NULL) {
1741 		/* stop adapter background rebuild */
1742 		if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1743 			intmask_org = arcmsr_disable_allintr(acb);
1744 			arcmsr_stop_adapter_bgrb(acb);
1745 			arcmsr_flush_adapter_cache(acb);
1746 			arcmsr_enable_allintr(acb, intmask_org);
1747 		}
1748 	}
1749 }
1750 /*
1751 ***********************************************************************
1752 **
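** arcmsr_iop_ioctlcmd: service the ARCMSR ioctl message interface; verify the
** "ARCMSR" signature, then move data between the caller and the driver's
** rqbuffer/wqbuffer rings under qbuffer_lock.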
1753 ************************************************************************
1754 */
1755 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1756 {
1757 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1758 	u_int32_t retvalue=EINVAL;
1759 
1760 	pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1761 	if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1762 		return retvalue;
1763 	}
1764 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1765 	switch(ioctl_cmd) {
1766 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1767 			u_int8_t * pQbuffer;
1768 			u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1769 			u_int32_t allxfer_len=0;
1770 
1771 			while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1772 				&& (allxfer_len<1031)) {
1773 				/*copy READ QBUFFER to srb*/
1774 				pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1775 				memcpy(ptmpQbuffer, pQbuffer, 1);
1776 				acb->rqbuf_firstindex++;
1777 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1778 				/* wrap index back to 0 past the last entry */
1779 				ptmpQbuffer++;
1780 				allxfer_len++;
1781 			}
1782 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1783 				struct QBUFFER * prbuffer;
1784 				u_int8_t * iop_data;
1785 				u_int32_t iop_len;
1786 
1787 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1788 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
1789 				iop_data=(u_int8_t *)prbuffer->data;
1790 				iop_len=(u_int32_t)prbuffer->data_len;
1791 				/* this IOP data cannot overflow the queue buffer again here, so just copy it */
1792 				while(iop_len>0) {
1793 					pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1794 					memcpy(pQbuffer, iop_data, 1);
1795 					acb->rqbuf_lastindex++;
1796 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1797 					/* wrap index back to 0 past the last entry */
1798 					iop_data++;
1799 					iop_len--;
1800 				}
1801 				arcmsr_iop_message_read(acb);
1802 				/* signal the IOP that the data has been read */
1803 			}
1804 			pcmdmessagefld->cmdmessage.Length=allxfer_len;
1805 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1806 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1807 		}
1808 		break;
1809 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1810 			u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1811 			u_int8_t * pQbuffer;
1812 			u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1813 
1814 			user_len=pcmdmessagefld->cmdmessage.Length;
1815 			/*check if data xfer length of this request will overflow my array qbuffer */
1816 			wqbuf_lastindex=acb->wqbuf_lastindex;
1817 			wqbuf_firstindex=acb->wqbuf_firstindex;
1818 			if(wqbuf_lastindex!=wqbuf_firstindex) {
1819 				arcmsr_post_ioctldata2iop(acb);
1820 				pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1821 			} else {
1822 				my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1823 				if(my_empty_len>=user_len) {
1824 					while(user_len>0) {
1825 						/*copy srb data to wqbuffer*/
1826 						pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1827 						memcpy(pQbuffer, ptmpuserbuffer, 1);
1828 						acb->wqbuf_lastindex++;
1829 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1830 						/* wrap index back to 0 past the last entry */
1831 						ptmpuserbuffer++;
1832 						user_len--;
1833 					}
1834 					/* post first Qbuffer */
1835 					if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1836 						acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1837 						arcmsr_post_ioctldata2iop(acb);
1838 					}
1839 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1840 				} else {
1841 					pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1842 				}
1843 			}
1844 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1845 		}
1846 		break;
1847 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1848 			u_int8_t * pQbuffer=acb->rqbuffer;
1849 
1850 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1851 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1852 				arcmsr_iop_message_read(acb);
1853 				/* signal the IOP that the data has been read */
1854 			}
1855 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1856 			acb->rqbuf_firstindex=0;
1857 			acb->rqbuf_lastindex=0;
1858 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1859 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1860 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1861 		}
1862 		break;
1863 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1864 		{
1865 			u_int8_t * pQbuffer=acb->wqbuffer;
1866 
1867 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1868 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1869 				arcmsr_iop_message_read(acb);
1870 				/* signal the IOP that the data has been read */
1871 			}
1872 			acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1873 			acb->wqbuf_firstindex=0;
1874 			acb->wqbuf_lastindex=0;
1875 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1876 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1877 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1878 		}
1879 		break;
1880 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1881 			u_int8_t * pQbuffer;
1882 
1883 			if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1884 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1885 				arcmsr_iop_message_read(acb);
1886 				/* signal the IOP that the data has been read */
1887 			}
1888 			acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1889 					|ACB_F_MESSAGE_RQBUFFER_CLEARED
1890 					|ACB_F_MESSAGE_WQBUFFER_READ);
1891 			acb->rqbuf_firstindex=0;
1892 			acb->rqbuf_lastindex=0;
1893 			acb->wqbuf_firstindex=0;
1894 			acb->wqbuf_lastindex=0;
1895 			pQbuffer=acb->rqbuffer;
1896 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1897 			pQbuffer=acb->wqbuffer;
1898 			memset(pQbuffer, 0, sizeof(struct QBUFFER));
1899 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1900 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1901 		}
1902 		break;
1903 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1904 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1905 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1906 		}
1907 		break;
1908 	case ARCMSR_MESSAGE_SAY_HELLO: {
1909 			u_int8_t * hello_string="Hello! I am ARCMSR";
1910 			u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer;
1911 
1912 			/*
1913 			** memcpy() always returns its destination, so the old test
1914 			** could only take the error path; just copy the greeting.
1915 			*/
1916 			memcpy(puserbuffer, hello_string, strlen(hello_string));
1917 			pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1918 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1919 		}
1920 		break;
1921 	case ARCMSR_MESSAGE_SAY_GOODBYE: {
1922 			arcmsr_iop_parking(acb);
1923 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1924 		}
1925 		break;
1926 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1927 			arcmsr_flush_adapter_cache(acb);
1928 			retvalue=ARCMSR_MESSAGE_SUCCESS;
1929 		}
1930 		break;
1931 	}
1932 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1933 	return retvalue;
1934 }
1935 /*
1936 **************************************************************************
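** arcmsr_get_freesrb: pop the next free SRB from the circular srbworkingQ,
** or return NULL when the queue is exhausted.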
1937 **************************************************************************
1938 */
1939 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1940 {
1941 	struct CommandControlBlock *srb=NULL;
1942 	u_int32_t workingsrb_startindex, workingsrb_doneindex;
1943 
1944 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1945 	workingsrb_doneindex=acb->workingsrb_doneindex;
1946 	workingsrb_startindex=acb->workingsrb_startindex;
1947 	srb=acb->srbworkingQ[workingsrb_startindex];
1948 	workingsrb_startindex++;
1949 	workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
1950 	if(workingsrb_doneindex!=workingsrb_startindex) {
1951 		acb->workingsrb_startindex=workingsrb_startindex;
1952 	} else {
1953 		srb=NULL;
1954 	}
1955 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1956 	return(srb);
1957 }
1958 /*
1959 **************************************************************************
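** arcmsr_iop_message_xfer: handle the READ/WRITE BUFFER pass-through path;
** decode the Areca control code from CDB bytes 5-8 and service it against
** the driver's message queue buffers.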
1960 **************************************************************************
1961 */
1962 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
1963 {
1964 	struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1965 	int retvalue = 0, transfer_len = 0;
1966 	char *buffer;
1967 	u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
1968 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
1969 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8  |
1970 				(u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
1971 					/* 4 bytes: Areca io control code */
1972 	if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1973 		buffer = pccb->csio.data_ptr;
1974 		transfer_len = pccb->csio.dxfer_len;
1975 	} else {
1976 		retvalue = ARCMSR_MESSAGE_FAIL;
1977 		goto message_out;
1978 	}
1979 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
1980 		retvalue = ARCMSR_MESSAGE_FAIL;
1981 		goto message_out;
1982 	}
1983 	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
1984 	switch(controlcode) {
1985 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
1986 			u_int8_t *pQbuffer;
1987 			u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1988 			int32_t allxfer_len = 0;
1989 
1990 			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1991 				&& (allxfer_len < 1031)) {
1992 				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1993 				memcpy(ptmpQbuffer, pQbuffer, 1);
1994 				acb->rqbuf_firstindex++;
1995 				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1996 				ptmpQbuffer++;
1997 				allxfer_len++;
1998 			}
1999 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2000 				struct QBUFFER  *prbuffer;
2001 				u_int8_t  *iop_data;
2002 				int32_t iop_len;
2003 
2004 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2005 				prbuffer=arcmsr_get_iop_rqbuffer(acb);
2006 				iop_data = (u_int8_t *)prbuffer->data;
2007 				iop_len =(u_int32_t)prbuffer->data_len;
2008 				while (iop_len > 0) {
2009 			        pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
2010 					memcpy(pQbuffer, iop_data, 1);
2011 					acb->rqbuf_lastindex++;
2012 					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2013 					iop_data++;
2014 					iop_len--;
2015 				}
2016 				arcmsr_iop_message_read(acb);
2017 			}
2018 			pcmdmessagefld->cmdmessage.Length = allxfer_len;
2019 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2020 			retvalue=ARCMSR_MESSAGE_SUCCESS;
2021 		}
2022 		break;
2023 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2024 			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2025 			u_int8_t *pQbuffer;
2026 			u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2027 
2028 			user_len = pcmdmessagefld->cmdmessage.Length;
2029 			wqbuf_lastindex = acb->wqbuf_lastindex;
2030 			wqbuf_firstindex = acb->wqbuf_firstindex;
2031 			if (wqbuf_lastindex != wqbuf_firstindex) {
2032 				arcmsr_post_ioctldata2iop(acb);
2033 				/* on error, report sense data */
2034 			    if(&pccb->csio.sense_data) {
2035 				((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2036 				/* Valid,ErrorCode */
2037 				((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2038 				/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2039 				((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2040 				/* AdditionalSenseLength */
2041 				((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2042 				/* AdditionalSenseCode */
2043 				}
2044 				retvalue = ARCMSR_MESSAGE_FAIL;
2045 			} else {
2046 				my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2047 						&(ARCMSR_MAX_QBUFFER - 1);
2048 				if (my_empty_len >= user_len) {
2049 					while (user_len > 0) {
2050 						pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2051 						memcpy(pQbuffer, ptmpuserbuffer, 1);
2052 						acb->wqbuf_lastindex++;
2053 						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2054 						ptmpuserbuffer++;
2055 						user_len--;
2056 					}
2057 					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2058 						acb->acb_flags &=
2059 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2060 						arcmsr_post_ioctldata2iop(acb);
2061 					}
2062 				} else {
2063 					/* on error, report sense data */
2064 					if(&pccb->csio.sense_data) {
2065 					((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2066 					/* Valid,ErrorCode */
2067 					((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2068 					/* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2069 					((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2070 					/* AdditionalSenseLength */
2071 					((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2072 					/* AdditionalSenseCode */
2073 					}
2074 					retvalue = ARCMSR_MESSAGE_FAIL;
2075 				}
2076 			}
2077 		}
2078 		break;
2079 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2080 			u_int8_t *pQbuffer = acb->rqbuffer;
2081 
2082 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2083 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2084 				arcmsr_iop_message_read(acb);
2085 			}
2086 			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2087 			acb->rqbuf_firstindex = 0;
2088 			acb->rqbuf_lastindex = 0;
2089 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2090 			pcmdmessagefld->cmdmessage.ReturnCode =
2091 			ARCMSR_MESSAGE_RETURNCODE_OK;
2092 		}
2093 		break;
2094 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2095 			u_int8_t *pQbuffer = acb->wqbuffer;
2096 
2097 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2098 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2099 				arcmsr_iop_message_read(acb);
2100 			}
2101 			acb->acb_flags |=
2102 				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
2103 					ACB_F_MESSAGE_WQBUFFER_READ);
2104 			acb->wqbuf_firstindex = 0;
2105 			acb->wqbuf_lastindex = 0;
2106 			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2107 			pcmdmessagefld->cmdmessage.ReturnCode =
2108 				ARCMSR_MESSAGE_RETURNCODE_OK;
2109 		}
2110 		break;
2111 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2112 			u_int8_t *pQbuffer;
2113 
2114 			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2115 				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2116 				arcmsr_iop_message_read(acb);
2117 			}
2118 			acb->acb_flags |=
2119 				(ACB_F_MESSAGE_WQBUFFER_CLEARED
2120 				| ACB_F_MESSAGE_RQBUFFER_CLEARED
2121 				| ACB_F_MESSAGE_WQBUFFER_READ);
2122 			acb->rqbuf_firstindex = 0;
2123 			acb->rqbuf_lastindex = 0;
2124 			acb->wqbuf_firstindex = 0;
2125 			acb->wqbuf_lastindex = 0;
2126 			pQbuffer = acb->rqbuffer;
2127 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2128 			pQbuffer = acb->wqbuffer;
2129 			memset(pQbuffer, 0, sizeof (struct QBUFFER));
2130 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2131 		}
2132 		break;
2133 	case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2134 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2135 		}
2136 		break;
2137 	case ARCMSR_MESSAGE_SAY_HELLO: {
2138 			char *hello_string = "Hello! I am ARCMSR";
2139 
2140 			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2141 				, (int16_t)strlen(hello_string));
2142 			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2143 		}
2144 		break;
2145 	case ARCMSR_MESSAGE_SAY_GOODBYE:
2146 		arcmsr_iop_parking(acb);
2147 		break;
2148 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2149 		arcmsr_flush_adapter_cache(acb);
2150 		break;
2151 	default:
2152 		retvalue = ARCMSR_MESSAGE_FAIL;
2153 	}
2154 message_out:
2155 	return retvalue;
2156 }
2157 /*
2158 *********************************************************************
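** arcmsr_execute_srb: bus_dmamap_load callback; validate the mapping and the
** adapter/device state, then build the SRB from the DMA segments and post it
** to the IOP.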
2159 *********************************************************************
2160 */
2161 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2162 {
2163 	struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2164 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2165 	union ccb * pccb;
2166 	int target, lun;
2167 
2168 	pccb=srb->pccb;
2169 	target=pccb->ccb_h.target_id;
2170 	lun=pccb->ccb_h.target_lun;
2171 	if(error != 0) {
2172 		if(error != EFBIG) {
2173 			kprintf("arcmsr%d: unexpected error %x"
2174 				" returned from 'bus_dmamap_load' \n"
2175 				, acb->pci_unit, error);
2176 		}
2177 		if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2178 			pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2179 		}
2180 		arcmsr_srb_complete(srb, 0);
2181 		return;
2182 	}
2183 	if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2184 		pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2185 		arcmsr_srb_complete(srb, 0);
2186 		return;
2187 	}
2188 	if(acb->acb_flags & ACB_F_BUS_RESET) {
2189 		kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2190 		pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2191 		arcmsr_srb_complete(srb, 0);
2192 		return;
2193 	}
2194 	if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2195 		u_int8_t block_cmd;
2196 
2197 		block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f;
2198 		if(block_cmd==0x08 || block_cmd==0x0a) {
2199 			kprintf("arcmsr%d: blocking 'read/write' command "
2200 				"to a gone raid volume, Cmd=%2x, TargetId=%d, Lun=%d \n"
2201 				, acb->pci_unit, block_cmd, target, lun);
2202 			pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2203 			arcmsr_srb_complete(srb, 0);
2204 			return;
2205 		}
2206 	}
2207 	if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2208 		if(nseg != 0) {
2209 			bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2210 		}
2211 		arcmsr_srb_complete(srb, 0);
2212 		return;
2213 	}
2214 	if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
2215 		xpt_freeze_simq(acb->psim, 1);
2216 		pccb->ccb_h.status = CAM_REQUEUE_REQ;
2217 		acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2218 		arcmsr_srb_complete(srb, 0);
2219 		return;
2220 	}
2221 	pccb->ccb_h.status |= CAM_SIM_QUEUED;
2222 	arcmsr_build_srb(srb, dm_segs, nseg);
2223 /*	if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2224 		callout_reset(&srb->ccb_callout, (pccb->ccb_h.timeout * hz) / 1000, arcmsr_srb_timeout, srb);
2225 */
2226 	arcmsr_post_srb(acb, srb);
2227 	return;
2228 }
2229 /*
2230 *****************************************************************************************
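** arcmsr_seek_cmd2abort: search the outstanding SRB pool for the CCB to be
** aborted; if found, mark it aborted and poll it to completion with all
** outbound interrupts disabled.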
2231 *****************************************************************************************
2232 */
2233 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2234 {
2235 	struct CommandControlBlock *srb;
2236 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2237 	u_int32_t intmask_org;
2238 	int i=0;
2239 
2240 	acb->num_aborts++;
2241 	/*
2242 	***************************************************************************
2243 	** The upper layer acquires this lock just prior to calling us to
2244 	** abort a command.  First determine whether we currently own the
2245 	** command by searching the outstanding SRB pool.  If it is not
2246 	** found at all, and the system only wanted the command aborted,
2247 	** return success.
2248 	***************************************************************************
2249 	*/
2250 	if(acb->srboutstandingcount!=0) {
2251 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2252 			srb=acb->psrb_pool[i];
2253 			if(srb->startdone==ARCMSR_SRB_START) {
2254 				if(srb->pccb==abortccb) {
2255 					srb->startdone=ARCMSR_SRB_ABORTED;
2256 					kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2257 						"outstanding command \n"
2258 						, acb->pci_unit, abortccb->ccb_h.target_id
2259 						, abortccb->ccb_h.target_lun, srb);
2260 					goto abort_outstanding_cmd;
2261 				}
2262 			}
2263 		}
2264 	}
2265 	return(FALSE);
2266 abort_outstanding_cmd:
2267 	/* disable all outbound interrupt */
2268 	intmask_org=arcmsr_disable_allintr(acb);
2269 	arcmsr_polling_srbdone(acb, srb);
2270 	/* enable outbound Post Queue, outbound doorbell Interrupt */
2271 	arcmsr_enable_allintr(acb, intmask_org);
2272 	return (TRUE);
2273 }
2274 /*
2275 ****************************************************************************
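** arcmsr_bus_reset: wait (up to roughly 10 seconds) for outstanding SRBs to
** drain, then reset the IOP.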
2276 ****************************************************************************
2277 */
2278 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2279 {
2280 	int retry=0;
2281 
2282 	acb->num_resets++;
2283 	acb->acb_flags |=ACB_F_BUS_RESET;
2284 	while(acb->srboutstandingcount!=0 && retry < 400) {
2285 		arcmsr_interrupt(acb);
2286 		UDELAY(25000);
2287 		retry++;
2288 	}
2289 	arcmsr_iop_reset(acb);
2290 	acb->acb_flags &= ~ACB_F_BUS_RESET;
2291 	return;
2292 }
2293 /*
2294 **************************************************************************
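** arcmsr_handle_virtual_command: emulate the virtual device used for IOP
** message transfer; answer INQUIRY locally and route READ/WRITE BUFFER to
** arcmsr_iop_message_xfer.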
2295 **************************************************************************
2296 */
2297 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2298 		union ccb * pccb)
2299 {
2300 	pccb->ccb_h.status |= CAM_REQ_CMP;
2301 	switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2302 	case INQUIRY: {
2303 		unsigned char inqdata[36];
2304 		char *buffer=pccb->csio.data_ptr;
2305 
2306 		if (pccb->ccb_h.target_lun) {
2307 			pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2308 			xpt_done(pccb);
2309 			return;
2310 		}
2311 		inqdata[0] = T_PROCESSOR;	/* Periph Qualifier & Periph Dev Type */
2312 		inqdata[1] = 0;				/* rem media bit & Dev Type Modifier */
2313 		inqdata[2] = 0;				/* ISO, ECMA, & ANSI versions */
2314 		inqdata[3] = 0;
2315 		inqdata[4] = 31;			/* length of additional data */
2316 		inqdata[5] = 0;
2317 		inqdata[6] = 0;
2318 		inqdata[7] = 0;
2319 		strncpy(&inqdata[8], "Areca   ", 8);	/* Vendor Identification */
2320 		strncpy(&inqdata[16], "RAID controller ", 16);	/* Product Identification */
2321 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2322 		memcpy(buffer, inqdata, sizeof(inqdata));
2323 		xpt_done(pccb);
2324 	}
2325 	break;
2326 	case WRITE_BUFFER:
2327 	case READ_BUFFER: {
2328 		if (arcmsr_iop_message_xfer(acb, pccb)) {
2329 			pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2330 			pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2331 		}
2332 		xpt_done(pccb);
2333 	}
2334 	break;
2335 	default:
2336 		xpt_done(pccb);
2337 	}
2338 }
2339 /*
2340 *********************************************************************
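** arcmsr_action: CAM SIM action entry point; dispatch XPT requests such as
** SCSI I/O, path inquiry, abort, bus reset and transfer-setting queries.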
2341 *********************************************************************
2342 */
2343 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2344 {
2345 	struct AdapterControlBlock *  acb;
2346 
2347 	acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2348 	if(acb==NULL) {
2349 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2350 		xpt_done(pccb);
2351 		return;
2352 	}
2353 	switch (pccb->ccb_h.func_code) {
2354 	case XPT_SCSI_IO: {
2355 			struct CommandControlBlock *srb;
2356 			int target=pccb->ccb_h.target_id;
2357 
2358 			if(target == 16) {
2359 				/* virtual device for iop message transfer */
2360 				arcmsr_handle_virtual_command(acb, pccb);
2361 				return;
2362 			}
2363 			if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2364 				pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
2365 				xpt_done(pccb);
2366 				return;
2367 			}
2368 			pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2369 			pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2370 			srb->pccb=pccb;
2371 			if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2372 				if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2373 					/* Single buffer */
2374 					if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2375 						/* Buffer is virtual */
2376 						u_int32_t error;
2377 
2378 						crit_enter();
2379 						error =	bus_dmamap_load(acb->dm_segs_dmat
2380 							, srb->dm_segs_dmamap
2381 							, pccb->csio.data_ptr
2382 							, pccb->csio.dxfer_len
2383 							, arcmsr_execute_srb, srb, /*flags*/0);
2384 						if(error == EINPROGRESS) {
2385 							xpt_freeze_simq(acb->psim, 1);
2386 							pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2387 						}
2388 						crit_exit();
2389 					}
2390 					else {		/* Buffer is physical */
2391 						struct bus_dma_segment seg;
2392 
2393 						seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2394 						seg.ds_len = pccb->csio.dxfer_len;
2395 						arcmsr_execute_srb(srb, &seg, 1, 0);
2396 					}
2397 				} else {
2398 					/* Scatter/gather list */
2399 					struct bus_dma_segment *segs;
2400 
2401 					if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2402 					|| (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2403 						pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2404 						xpt_done(pccb);
2405 						kfree(srb, M_DEVBUF);
2406 						return;
2407 					}
2408 					segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2409 					arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2410 				}
2411 			} else {
2412 				arcmsr_execute_srb(srb, NULL, 0, 0);
2413 			}
2414 			break;
2415 		}
2416 	case XPT_TARGET_IO: {
2417 			/* target mode does not yet support vendor-specific commands. */
2418 			pccb->ccb_h.status |= CAM_REQ_CMP;
2419 			xpt_done(pccb);
2420 			break;
2421 		}
2422 	case XPT_PATH_INQ: {
2423 			struct ccb_pathinq *cpi= &pccb->cpi;
2424 
2425 			cpi->version_num=1;
2426 			cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2427 			cpi->target_sprt=0;
2428 			cpi->hba_misc=0;
2429 			cpi->hba_eng_cnt=0;
2430 			cpi->max_target=ARCMSR_MAX_TARGETID;        /* 0-16 */
2431 			cpi->max_lun=ARCMSR_MAX_TARGETLUN;	    /* 0-7 */
2432 			cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2433 			cpi->bus_id=cam_sim_bus(psim);
2434 			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2435 			strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2436 			strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2437 			cpi->unit_number=cam_sim_unit(psim);
2438 		#ifdef	CAM_NEW_TRAN_CODE
2439 			cpi->transport = XPORT_SPI;
2440 			cpi->transport_version = 2;
2441 			cpi->protocol = PROTO_SCSI;
2442 			cpi->protocol_version = SCSI_REV_2;
2443 		#endif
2444 			cpi->ccb_h.status |= CAM_REQ_CMP;
2445 			xpt_done(pccb);
2446 			break;
2447 		}
2448 	case XPT_ABORT: {
2449 			union ccb *pabort_ccb;
2450 
2451 			pabort_ccb=pccb->cab.abort_ccb;
2452 			switch (pabort_ccb->ccb_h.func_code) {
2453 			case XPT_ACCEPT_TARGET_IO:
2454 			case XPT_IMMED_NOTIFY:
2455 			case XPT_CONT_TARGET_IO:
2456 				if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2457 					pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2458 					xpt_done(pabort_ccb);
2459 					pccb->ccb_h.status |= CAM_REQ_CMP;
2460 				} else {
2461 					xpt_print_path(pabort_ccb->ccb_h.path);
2462 					kprintf("Not found\n");
2463 					pccb->ccb_h.status |= CAM_PATH_INVALID;
2464 				}
2465 				break;
2466 			case XPT_SCSI_IO:
2467 				pccb->ccb_h.status |= CAM_UA_ABORT;
2468 				break;
2469 			default:
2470 				pccb->ccb_h.status |= CAM_REQ_INVALID;
2471 				break;
2472 			}
2473 			xpt_done(pccb);
2474 			break;
2475 		}
2476 	case XPT_RESET_BUS:
2477 	case XPT_RESET_DEV: {
2478 			u_int32_t     i;
2479 
2480 			arcmsr_bus_reset(acb);
2481 			for (i=0; i < 500; i++) {
2482 				DELAY(1000);
2483 			}
2484 			pccb->ccb_h.status |= CAM_REQ_CMP;
2485 			xpt_done(pccb);
2486 			break;
2487 		}
2488 	case XPT_TERM_IO: {
2489 			pccb->ccb_h.status |= CAM_REQ_INVALID;
2490 			xpt_done(pccb);
2491 			break;
2492 		}
2493 	case XPT_GET_TRAN_SETTINGS: {
2494 			struct ccb_trans_settings *cts;
2495 
2496 			if(pccb->ccb_h.target_id == 16) {
2497 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2498 				xpt_done(pccb);
2499 				break;
2500 			}
2501 			cts= &pccb->cts;
2502 		#ifdef	CAM_NEW_TRAN_CODE
2503 			{
2504 				struct ccb_trans_settings_scsi *scsi;
2505 				struct ccb_trans_settings_spi *spi;
2506 
2507 				scsi = &cts->proto_specific.scsi;
2508 				spi = &cts->xport_specific.spi;
2509 				cts->protocol = PROTO_SCSI;
2510 				cts->protocol_version = SCSI_REV_2;
2511 				cts->transport = XPORT_SPI;
2512 				cts->transport_version = 2;
2513 				spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2514 				spi->sync_period=3;
2515 				spi->sync_offset=32;
2516 				spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2517 				scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2518 				spi->valid = CTS_SPI_VALID_DISC
2519 					| CTS_SPI_VALID_SYNC_RATE
2520 					| CTS_SPI_VALID_SYNC_OFFSET
2521 					| CTS_SPI_VALID_BUS_WIDTH;
2522 				scsi->valid = CTS_SCSI_VALID_TQ;
2523 			}
2524 		#else
2525 			{
2526 				cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
2527 				cts->sync_period=3;
2528 				cts->sync_offset=32;
2529 				cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2530 				cts->valid=CCB_TRANS_SYNC_RATE_VALID |
2531 				CCB_TRANS_SYNC_OFFSET_VALID |
2532 				CCB_TRANS_BUS_WIDTH_VALID |
2533 				CCB_TRANS_DISC_VALID |
2534 				CCB_TRANS_TQ_VALID;
2535 			}
2536 		#endif
2537 			pccb->ccb_h.status |= CAM_REQ_CMP;
2538 			xpt_done(pccb);
2539 			break;
2540 		}
2541 	case XPT_SET_TRAN_SETTINGS: {
2542 			pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2543 			xpt_done(pccb);
2544 			break;
2545 		}
2546 	case XPT_CALC_GEOMETRY: {
2547 			struct ccb_calc_geometry *ccg;
2548 			u_int32_t size_mb;
2549 			u_int32_t secs_per_cylinder;
2550 
2551 			if(pccb->ccb_h.target_id == 16) {
2552 				pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2553 				xpt_done(pccb);
2554 				break;
2555 			}
2556 			ccg= &pccb->ccg;
2557 			if (ccg->block_size == 0) {
2558 				pccb->ccb_h.status = CAM_REQ_INVALID;
2559 				xpt_done(pccb);
2560 				break;
2561 			}
2562 			if(((1024L * 1024L)/ccg->block_size) < 0) {
2563 				pccb->ccb_h.status = CAM_REQ_INVALID;
2564 				xpt_done(pccb);
2565 				break;
2566 			}
2567 			size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size);
2568 			if(size_mb > 1024 ) {
2569 				ccg->heads=255;
2570 				ccg->secs_per_track=63;
2571 			} else {
2572 				ccg->heads=64;
2573 				ccg->secs_per_track=32;
2574 			}
2575 			secs_per_cylinder=ccg->heads * ccg->secs_per_track;
2576 			ccg->cylinders=ccg->volume_size / secs_per_cylinder;
2577 			pccb->ccb_h.status |= CAM_REQ_CMP;
2578 			xpt_done(pccb);
2579 			break;
2580 		}
2581 	default:
2582 		pccb->ccb_h.status |= CAM_REQ_INVALID;
2583 		xpt_done(pccb);
2584 		break;
2585 	}
2586 	return;
2587 }
2588 /*
2589 **********************************************************************
2590 **********************************************************************
2591 */
2592 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2593 {
2594 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2595 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2596 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2597 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2598 	}
2599 	return;
2600 }
2601 /*
2602 **********************************************************************
2603 **********************************************************************
2604 */
2605 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2606 {
2607 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2608 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,  ARCMSR_MESSAGE_START_BGRB);
2609 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2610 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2611 	}
2612 	return;
2613 }
2614 /*
2615 **********************************************************************
2616 **********************************************************************
2617 */
2618 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2619 {
2620 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2621 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2622 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2623 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2624 		kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2625 	}
2626 	return;
2627 }
2628 /*
2629 **********************************************************************
2630 **********************************************************************
2631 */
2632 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2633 {
2634 	switch (acb->adapter_type) {
2635 	case ACB_ADAPTER_TYPE_A:
2636 		arcmsr_start_hba_bgrb(acb);
2637 		break;
2638 	case ACB_ADAPTER_TYPE_B:
2639 		arcmsr_start_hbb_bgrb(acb);
2640 		break;
2641 	case ACB_ADAPTER_TYPE_C:
2642 		arcmsr_start_hbc_bgrb(acb);
2643 		break;
2644 	}
2645 	return;
2646 }
2647 /*
2648 **********************************************************************
2649 **
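** arcmsr_polling_hba_srbdone: poll the Type A outbound queue port for
** completed SRBs (used while interrupts are disabled, e.g. during abort)
** and complete each one found.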
2650 **********************************************************************
2651 */
2652 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2653 {
2654 	struct CommandControlBlock *srb;
2655 	u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2656 	u_int16_t	error;
2657 
2658 polling_ccb_retry:
2659 	poll_count++;
2660 	outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2661 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);	/*clear interrupt*/
2662 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2663 	while(1) {
2664 		if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2665 			0, outbound_queueport))==0xFFFFFFFF) {
2666 			if(poll_srb_done) {
2667 				break;	/* no more completed SRBs in the chip FIFO */
2668 			} else {
2669 				UDELAY(25000);
2670 				if ((poll_count > 100) && (poll_srb != NULL)) {
2671 					break;
2672 				}
2673 				goto polling_ccb_retry;
2674 			}
2675 		}
2676 		/* check if command done with no error*/
2677 		srb=(struct CommandControlBlock *)
2678 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2679 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
2680 		poll_srb_done = (srb==poll_srb) ? 1:0;
2681 		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
2682 			if(srb->startdone==ARCMSR_SRB_ABORTED) {
2683 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2684 					"poll command abort successfully \n"
2685 					, acb->pci_unit
2686 					, srb->pccb->ccb_h.target_id
2687 					, srb->pccb->ccb_h.target_lun, srb);
2688 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2689 				arcmsr_srb_complete(srb, 1);
2690 				continue;
2691 			}
2692 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2693 				"srboutstandingcount=%d \n"
2694 				, acb->pci_unit
2695 				, srb, acb->srboutstandingcount);
2696 			continue;
2697 		}
2698 		arcmsr_report_srb_state(acb, srb, error);
2699 	}	/*drain reply FIFO*/
2700 	return;
2701 }
2702 /*
2703 **********************************************************************
2704 **
2705 **********************************************************************
2706 */
2707 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2708 {
2709 	struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2710 	struct CommandControlBlock *srb;
2711 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2712 	int index;
2713 	u_int16_t	error;
2714 
2715 polling_ccb_retry:
2716 	poll_count++;
2717 	CHIP_REG_WRITE32(HBB_DOORBELL,
2718 	0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2719 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2720 	while(1) {
2721 		index=phbbmu->doneq_index;
2722 		if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2723 			if(poll_srb_done) {
2724 				break;	/* no more completed SRBs in the chip FIFO */
2725 			} else {
2726 				UDELAY(25000);
2727 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2728 					break;
2729 				}
2730 				goto polling_ccb_retry;
2731 			}
2732 		}
2733 		phbbmu->done_qbuffer[index]=0;
2734 		index++;
2735 		index %= ARCMSR_MAX_HBB_POSTQUEUE;	/* wrap index back to 0 past the last entry */
2736 		phbbmu->doneq_index=index;
2737 		/* check if command done with no error*/
2738 		srb=(struct CommandControlBlock *)
2739 			(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2740 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
2741 		poll_srb_done = (srb==poll_srb) ? 1:0;
2742 		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
2743 			if(srb->startdone==ARCMSR_SRB_ABORTED) {
2744 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2745 					"poll command abort successfully \n"
2746 					, acb->pci_unit
2747 					, srb->pccb->ccb_h.target_id
2748 					, srb->pccb->ccb_h.target_lun, srb);
2749 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2750 				arcmsr_srb_complete(srb, 1);
2751 				continue;
2752 			}
2753 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2754 				"srboutstandingcount=%d \n"
2755 				, acb->pci_unit
2756 				, srb, acb->srboutstandingcount);
2757 			continue;
2758 		}
2759 		arcmsr_report_srb_state(acb, srb, error);
2760 	}	/*drain reply FIFO*/
2761 	return;
2762 }
2763 /*
2764 **********************************************************************
2765 **
2766 **********************************************************************
2767 */
2768 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2769 {
2770 	struct CommandControlBlock *srb;
2771 	u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2772 	u_int16_t	error;
2773 
2774 polling_ccb_retry:
2775 	poll_count++;
2776 	bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2777 	while(1) {
2778 		if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2779 			if(poll_srb_done) {
2780 				break;	/* no more completed SRBs in the chip FIFO */
2781 			} else {
2782 				UDELAY(25000);
2783 			    if ((poll_count > 100) && (poll_srb != NULL)) {
2784 					break;
2785 				}
2786 			    if (acb->srboutstandingcount == 0) {
2787 				    break;
2788 			    }
2789 				goto polling_ccb_retry;
2790 			}
2791 		}
2792 		flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2793 		/* check if command done with no error*/
2794 		srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0));/*frame must be 32 bytes aligned*/
2795 		error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
2796 		if (poll_srb != NULL)
2797 			poll_srb_done = (srb==poll_srb) ? 1:0;
2798 		if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) {
2799 			if(srb->startdone==ARCMSR_SRB_ABORTED) {
2800 				kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2801 						, acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2802 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2803 				arcmsr_srb_complete(srb, 1);
2804 				continue;
2805 			}
2806 			kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2807 					, acb->pci_unit, srb, acb->srboutstandingcount);
2808 			continue;
2809 		}
2810 		arcmsr_report_srb_state(acb, srb, error);
2811 	}	/*drain reply FIFO*/
2812 	return;
2813 }
2814 /*
2815 **********************************************************************
2816 **********************************************************************
2817 */
2818 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2819 {
2820 	switch (acb->adapter_type) {
2821 	case ACB_ADAPTER_TYPE_A: {
2822 			arcmsr_polling_hba_srbdone(acb, poll_srb);
2823 		}
2824 		break;
2825 	case ACB_ADAPTER_TYPE_B: {
2826 			arcmsr_polling_hbb_srbdone(acb, poll_srb);
2827 		}
2828 		break;
2829 	case ACB_ADAPTER_TYPE_C: {
2830 			arcmsr_polling_hbc_srbdone(acb, poll_srb);
2831 		}
2832 		break;
2833 	}
2834 }
2835 /*
2836 **********************************************************************
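** arcmsr_get_hba_config: issue GET_CONFIG to a Type A adapter and copy the
** firmware model, version, device map and queue parameters out of the
** message rwbuffer.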
2837 **********************************************************************
2838 */
2839 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2840 {
2841 	char *acb_firm_model=acb->firm_model;
2842 	char *acb_firm_version=acb->firm_version;
2843 	char *acb_device_map = acb->device_map;
2844 	size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2845 	size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2846 	size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2847 	int i;
2848 
2849 	CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2850 	if(!arcmsr_hba_wait_msgint_ready(acb)) {
2851 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2852 	}
2853 	i=0;
2854 	while(i<8) {
2855 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2856 		/* 8 bytes firm_model, 15, 60-67*/
2857 		acb_firm_model++;
2858 		i++;
2859 	}
2860 	i=0;
2861 	while(i<16) {
2862 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2863 		/* 16 bytes firm_version, 17, 68-83*/
2864 		acb_firm_version++;
2865 		i++;
2866 	}
2867 	i=0;
2868 	while(i<16) {
2869 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2870 		acb_device_map++;
2871 		i++;
2872 	}
2873 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2874 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2875 	acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2876 	acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2877 	acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2878 	acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2879 	acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2880 	return;
2881 }
2882 /*
2883 **********************************************************************
2884 **********************************************************************
2885 */
2886 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2887 {
2888 	char *acb_firm_model=acb->firm_model;
2889 	char *acb_firm_version=acb->firm_version;
2890 	char *acb_device_map = acb->device_map;
2891 	size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);	/*firm_model,15,60-67*/
2892 	size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);	/*firm_version,17,68-83*/
2893 	size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2894 	int i;
2895 
2896 	CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2897 	if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2898 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2899 	}
2900 	i=0;
2901 	while(i<8) {
2902 		*acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2903 		/* 8 bytes firm_model, 15, 60-67*/
2904 		acb_firm_model++;
2905 		i++;
2906 	}
2907 	i=0;
2908 	while(i<16) {
2909 		*acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2910 		/* 16 bytes firm_version, 17, 68-83*/
2911 		acb_firm_version++;
2912 		i++;
2913 	}
2914 	i=0;
2915 	while(i<16) {
2916 		*acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2917 		acb_device_map++;
2918 		i++;
2919 	}
2920 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2921 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2922 	acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]);   /*firm_request_len, 1, 04-07*/
2923 	acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2924 	acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]);    /*firm_sdram_size, 3, 12-15*/
2925 	acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]);  /*firm_ide_channels, 4, 16-19*/
2926 	acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2927 	return;
2928 }
2929 /*
2930 **********************************************************************
2931 **********************************************************************
2932 */
2933 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2934 {
2935 	char *acb_firm_model=acb->firm_model;
2936 	char *acb_firm_version=acb->firm_version;
2937 	char *acb_device_map = acb->device_map;
2938 	size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);   /*firm_model,15,60-67*/
2939 	size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2940 	size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2941 	int i;
2942 
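	/*
	 * Type C (ARC1880) adapters take the GET_CONFIG request through
	 * inbound_msgaddr0 and need an explicit doorbell write to tell the IOP
	 * a message is pending; completion is polled by arcmsr_hbc_wait_msgint_ready().
	 */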
2943 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2944 	CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2945 	if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2946 		kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2947 	}
2948 	i=0;
2949 	while(i<8) {
2950 		*acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2951 		/* 8 bytes firm_model, 15, 60-67*/
2952 		acb_firm_model++;
2953 		i++;
2954 	}
2955 	i=0;
2956 	while(i<16) {
2957 		*acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2958 		/* 16 bytes firm_version, 17, 68-83*/
2959 		acb_firm_version++;
2960 		i++;
2961 	}
2962 	i=0;
2963 	while(i<16) {
2964 		*acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2965 		acb_device_map++;
2966 		i++;
2967 	}
2968 	kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2969 	kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2970 	acb->firm_request_len	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]);	/*firm_request_len,   1, 04-07*/
2971 	acb->firm_numbers_queue	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]);	/*firm_numbers_queue, 2, 08-11*/
2972 	acb->firm_sdram_size	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]);	/*firm_sdram_size,    3, 12-15*/
2973 	acb->firm_ide_channels	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]);	/*firm_ide_channels,  4, 16-19*/
2974 	acb->firm_cfg_version	=CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]);	/*firm_cfg_version,  25,	  */
2975 	return;
2976 }
2977 /*
2978 **********************************************************************
2979 **********************************************************************
2980 */
2981 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
2982 {
2983 	switch (acb->adapter_type) {
2984 	case ACB_ADAPTER_TYPE_A: {
2985 			arcmsr_get_hba_config(acb);
2986 		}
2987 		break;
2988 	case ACB_ADAPTER_TYPE_B: {
2989 			arcmsr_get_hbb_config(acb);
2990 		}
2991 		break;
2992 	case ACB_ADAPTER_TYPE_C: {
2993 			arcmsr_get_hbc_config(acb);
2994 		}
2995 		break;
2996 	}
2997 	return;
2998 }
2999 /*
3000 **********************************************************************
3001 **********************************************************************
3002 */
3003 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3004 {
3005 	int	timeout=0;
3006 
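	/*
	 * Poll the adapter's firmware-ready flag, giving up after roughly
	 * 30 seconds (2000 polls x 15 ms) so a wedged board does not stall
	 * attach indefinitely.
	 */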
3007 	switch (acb->adapter_type) {
3008 	case ACB_ADAPTER_TYPE_A: {
3009 			while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3010 			{
3011 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3012 				{
3013 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3014 					return;
3015 				}
3016 				UDELAY(15000); /* wait 15 milli-seconds */
3017 			}
3018 		}
3019 		break;
3020 	case ACB_ADAPTER_TYPE_B: {
3021 			while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3022 			{
3023 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3024 				{
3025 					kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3026 					return;
3027 				}
3028 				UDELAY(15000); /* wait 15 milli-seconds */
3029 			}
3030 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3031 		}
3032 		break;
3033 	case ACB_ADAPTER_TYPE_C: {
3034 			while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3035 			{
3036 				if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3037 				{
3038 					kprintf( "arcmsr%d: timed out waiting for firmware ready\n", acb->pci_unit);
3039 					return;
3040 				}
3041 				UDELAY(15000); /* wait 15 milli-seconds */
3042 			}
3043 		}
3044 		break;
3045 	}
3046 	return;
3047 }
3048 /*
3049 **********************************************************************
3050 **********************************************************************
3051 */
3052 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3053 {
3054 	u_int32_t outbound_doorbell;
3055 
3056 	switch (acb->adapter_type) {
3057 	case ACB_ADAPTER_TYPE_A: {
3058 			/* empty doorbell Qbuffer if door bell ringed */
3059 			outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3060 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell);	/*clear doorbell interrupt */
3061 			CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3062 
3063 		}
3064 		break;
3065 	case ACB_ADAPTER_TYPE_B: {
3066 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3067 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3068 			/* let IOP know data has been read */
3069 		}
3070 		break;
3071 	case ACB_ADAPTER_TYPE_C: {
3072 			/* empty doorbell Qbuffer if door bell ringed */
3073 			outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3074 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell);	/*clear doorbell interrupt */
3075 			CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3076 
3077 		}
3078 		break;
3079 	}
3080 	return;
3081 }
3082 /*
3083 ************************************************************************
3084 ************************************************************************
3085 */
3086 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3087 {
3088 	unsigned long srb_phyaddr;
3089 	u_int32_t srb_phyaddr_hi32;
3090 
3091 	/*
3092 	********************************************************************
3093 	** here we need to tell iop 331 our freesrb.HighPart
3094 	** if freesrb.HighPart is not zero
3095 	********************************************************************
3096 	*/
3097 	srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3098 //	srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3099 	srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3100 	switch (acb->adapter_type) {
3101 	case ACB_ADAPTER_TYPE_A: {
3102 			if(srb_phyaddr_hi32!=0) {
3103 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3104 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3105 				CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3106 				if(!arcmsr_hba_wait_msgint_ready(acb)) {
3107 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3108 					return FALSE;
3109 				}
3110 			}
3111 		}
3112 		break;
3113 		/*
3114 		***********************************************************************
3115 		**    if adapter type B, set window of "post command Q"
3116 		***********************************************************************
3117 		*/
3118 	case ACB_ADAPTER_TYPE_B: {
3119 			u_int32_t post_queue_phyaddr;
3120 			struct HBB_MessageUnit *phbbmu;
3121 
3122 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3123 			phbbmu->postq_index=0;
3124 			phbbmu->doneq_index=0;
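			/*
			 * Layout of the coherent host buffer handed to a type B IOP
			 * (a sketch inferred from the offsets used below):
			 *
			 *   srb_phyaddr -> SRB pool (ARCMSR_MAX_FREESRB_NUM entries)
			 *                  struct HBB_MessageUnit
			 *                      post_qbuffer  (256+8)*4 = 1056 bytes
			 *                      done_qbuffer  (256+8)*4 = 1056 bytes
			 *
			 * post_queue_phyaddr computed below is the physical address
			 * of post_qbuffer inside that buffer.
			 */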
3125 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3126 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3127 				kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
3128 				return FALSE;
3129 			}
3130 			post_queue_phyaddr = srb_phyaddr + ARCMSR_MAX_FREESRB_NUM*sizeof(struct CommandControlBlock)
3131 			+ offsetof(struct HBB_MessageUnit, post_qbuffer);
3132 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3133 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
3134 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* post Q base address; Q size is (256+8)*4 = 1056 bytes */
3135 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* done Q base address = post Q base + 1056 */
3136 			CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size in bytes: (256+8)*4 = 1056 */
3137 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3138 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3139 				kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3140 				return FALSE;
3141 			}
3142 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3143 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3144 				kprintf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
3145 				return FALSE;
3146 			}
3147 		}
3148 		break;
3149 	case ACB_ADAPTER_TYPE_C: {
3150 			if(srb_phyaddr_hi32!=0) {
3151 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3152 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3153 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3154 				CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3155 				if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3156 					kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3157 					return FALSE;
3158 				}
3159 			}
3160 		}
3161 		break;
3162 	}
3163 	return TRUE;
3164 }
3165 /*
3166 ************************************************************************
3167 ************************************************************************
3168 */
3169 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3170 {
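	/*
	 * Only type B (ARC120x) adapters use the explicit active-EOI handshake;
	 * type A and type C IOPs need no extra setup here.
	 */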
3171 	switch (acb->adapter_type)
3172 	{
3173 	case ACB_ADAPTER_TYPE_A:
3174 	case ACB_ADAPTER_TYPE_C:
3175 		break;
3176 	case ACB_ADAPTER_TYPE_B: {
3177 			CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3178 			if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3179 				kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3180 
3181 				return;
3182 			}
3183 		}
3184 		break;
3185 	}
3186 	return;
3187 }
3188 /*
3189 **********************************************************************
3190 **********************************************************************
3191 */
3192 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3193 {
3194 	u_int32_t intmask_org;
3195 
3196 	/* disable all outbound interrupt */
3197 	intmask_org=arcmsr_disable_allintr(acb);
3198 	arcmsr_wait_firmware_ready(acb);
3199 	arcmsr_iop_confirm(acb);
3200 	arcmsr_get_firmware_spec(acb);
3201 	/*start background rebuild*/
3202 	arcmsr_start_adapter_bgrb(acb);
3203 	/* empty doorbell Qbuffer if door bell ringed */
3204 	arcmsr_clear_doorbell_queue_buffer(acb);
3205 	arcmsr_enable_eoi_mode(acb);
3206 	/* enable outbound Post Queue, outbound doorbell Interrupt */
3207 	arcmsr_enable_allintr(acb, intmask_org);
3208 	acb->acb_flags |=ACB_F_IOP_INITED;
3209 	return;
3210 }
3211 /*
3212 **********************************************************************
3213 **********************************************************************
3214 */
3215 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3216 {
3217 	struct AdapterControlBlock *acb=arg;
3218 	struct CommandControlBlock *srb_tmp;
3219 	u_int8_t * dma_memptr;
3220 	u_int32_t i;
3221 	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3222 
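	/*
	 * bus_dmamap_load() callback for the coherent SRB pool: carve the pool
	 * into CommandControlBlocks, create a per-SRB S/G dmamap and record each
	 * SRB's physical address.  Type C firmware takes the full address; for
	 * type A/B the value is stored pre-shifted right by 5 (the pool is
	 * 32-byte aligned), which is the form later posted to the IOP.
	 */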
3223 	dma_memptr=acb->uncacheptr;
3224 	acb->srb_phyaddr.phyaddr=srb_phyaddr;
3225 	srb_tmp=(struct CommandControlBlock *)dma_memptr;
3226 	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3227 		if(bus_dmamap_create(acb->dm_segs_dmat,
3228 			 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3229 			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3230 			kprintf("arcmsr%d: srb dmamap bus_dmamap_create error\n", acb->pci_unit);
3232 			return;
3233 		}
3234 		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3235 		srb_tmp->acb=acb;
3236 		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3237 		srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock);
3238 		srb_tmp++;
3239 	}
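	/*
	 * vir2phy_offset is the constant (virtual - physical) delta of the SRB
	 * pool; the completion path adds it to the physical SRB address reported
	 * by the adapter to recover the kernel virtual SRB pointer.
	 */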
3240 	acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr;
3241 	return;
3242 }
3243 /*
3244 ************************************************************************
3245 **
3246 **
3247 ************************************************************************
3248 */
3249 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3250 {
3251 	/* remove the control device */
3252 	if(acb->ioctl_dev != NULL) {
3253 		destroy_dev(acb->ioctl_dev);
3254 	}
3255 	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3256 	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3257 	bus_dma_tag_destroy(acb->srb_dmat);
3258 	bus_dma_tag_destroy(acb->dm_segs_dmat);
3259 	bus_dma_tag_destroy(acb->parent_dmat);
3260 	return;
3261 }
3262 /*
3263 ************************************************************************
3264 ************************************************************************
3265 */
3266 static u_int32_t arcmsr_initialize(device_t dev)
3267 {
3268 	struct AdapterControlBlock *acb=device_get_softc(dev);
3269 	u_int16_t pci_command;
3270 	int i, j,max_coherent_size;
3271 
3272 	switch (pci_get_devid(dev)) {
3273 	case PCIDevVenIDARC1880: {
3274 			acb->adapter_type=ACB_ADAPTER_TYPE_C;
3275 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3276 		}
3277 		break;
3278 	case PCIDevVenIDARC1200:
3279 	case PCIDevVenIDARC1201: {
3280 			acb->adapter_type=ACB_ADAPTER_TYPE_B;
3281 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3282 		}
3283 		break;
3284 	case PCIDevVenIDARC1110:
3285 	case PCIDevVenIDARC1120:
3286 	case PCIDevVenIDARC1130:
3287 	case PCIDevVenIDARC1160:
3288 	case PCIDevVenIDARC1170:
3289 	case PCIDevVenIDARC1210:
3290 	case PCIDevVenIDARC1220:
3291 	case PCIDevVenIDARC1230:
3292 	case PCIDevVenIDARC1231:
3293 	case PCIDevVenIDARC1260:
3294 	case PCIDevVenIDARC1261:
3295 	case PCIDevVenIDARC1270:
3296 	case PCIDevVenIDARC1280:
3297 	case PCIDevVenIDARC1212:
3298 	case PCIDevVenIDARC1222:
3299 	case PCIDevVenIDARC1380:
3300 	case PCIDevVenIDARC1381:
3301 	case PCIDevVenIDARC1680:
3302 	case PCIDevVenIDARC1681: {
3303 			acb->adapter_type=ACB_ADAPTER_TYPE_A;
3304 			max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3305 		}
3306 		break;
3307 	default: {
3308 			kprintf("arcmsr%d: unknown RAID adapter type \n", device_get_unit(dev));
3310 			return ENOMEM;
3311 		}
3312 	}
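	/*
	 * Three DMA tags: a permissive parent, a tag for per-I/O scatter/gather
	 * maps (dm_segs_dmat), and a tag for the single coherent SRB pool
	 * (srb_dmat, 32-byte aligned, allocated below BUS_SPACE_MAXADDR_32BIT).
	 */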
3313 	if(bus_dma_tag_create(  /*parent*/	NULL,
3314 				/*alignment*/	1,
3315 				/*boundary*/	0,
3316 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3317 				/*highaddr*/	BUS_SPACE_MAXADDR,
3318 				/*filter*/	NULL,
3319 				/*filterarg*/	NULL,
3320 				/*maxsize*/	BUS_SPACE_MAXSIZE_32BIT,
3321 				/*nsegments*/	BUS_SPACE_UNRESTRICTED,
3322 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3323 				/*flags*/	0,
3324 						&acb->parent_dmat) != 0)
3325 	{
3326 		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3327 		return ENOMEM;
3328 	}
3329 	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3330 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3331 				/*alignment*/	1,
3332 				/*boundary*/	0,
3333 				/*lowaddr*/	BUS_SPACE_MAXADDR,
3334 				/*highaddr*/	BUS_SPACE_MAXADDR,
3335 				/*filter*/	NULL,
3336 				/*filterarg*/	NULL,
3337 				/*maxsize*/	ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3338 				/*nsegments*/	ARCMSR_MAX_SG_ENTRIES,
3339 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3340 				/*flags*/	0,
3341 						&acb->dm_segs_dmat) != 0)
3342 	{
3343 		bus_dma_tag_destroy(acb->parent_dmat);
3344 		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3345 		return ENOMEM;
3346 	}
3347 	/* DMA tag for our srb structures.... Allocate the freesrb memory */
3348 	if(bus_dma_tag_create(  /*parent_dmat*/	acb->parent_dmat,
3349 				/*alignment*/	0x20,
3350 				/*boundary*/	0,
3351 				/*lowaddr*/	BUS_SPACE_MAXADDR_32BIT,
3352 				/*highaddr*/	BUS_SPACE_MAXADDR,
3353 				/*filter*/	NULL,
3354 				/*filterarg*/	NULL,
3355 				/*maxsize*/	max_coherent_size,
3356 				/*nsegments*/	1,
3357 				/*maxsegsz*/	BUS_SPACE_MAXSIZE_32BIT,
3358 				/*flags*/	0,
3359 						&acb->srb_dmat) != 0)
3360 	{
3361 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3362 		bus_dma_tag_destroy(acb->parent_dmat);
3363 		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3364 		return ENXIO;
3365 	}
3366 	/* Allocation for our srbs */
3367 	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3368 		bus_dma_tag_destroy(acb->srb_dmat);
3369 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3370 		bus_dma_tag_destroy(acb->parent_dmat);
3371 		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3372 		return ENXIO;
3373 	}
3374 	/* And permanently map them */
3375 	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3376 		bus_dma_tag_destroy(acb->srb_dmat);
3377 		bus_dma_tag_destroy(acb->dm_segs_dmat);
3378 		bus_dma_tag_destroy(acb->parent_dmat);
3379 		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3380 		return ENXIO;
3381 	}
3382 	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3383 	pci_command |= PCIM_CMD_BUSMASTEREN;
3384 	pci_command |= PCIM_CMD_PERRESPEN;
3385 	pci_command |= PCIM_CMD_MWRICEN;
3386 	/* Enable Busmaster/Mem */
3387 	pci_command |= PCIM_CMD_MEMEN;
3388 	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
3389 	switch(acb->adapter_type) {
3390 	case ACB_ADAPTER_TYPE_A: {
3391 			u_int32_t rid0=PCIR_BAR(0);
3392 			vm_offset_t	mem_base0;
3393 
3394 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3395 			if(acb->sys_res_arcmsr[0] == NULL) {
3396 				arcmsr_free_resource(acb);
3397 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3398 				return ENOMEM;
3399 			}
3400 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3401 				arcmsr_free_resource(acb);
3402 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3403 				return ENXIO;
3404 			}
3405 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3406 			if(mem_base0==0) {
3407 				arcmsr_free_resource(acb);
3408 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3409 				return ENXIO;
3410 			}
3411 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3412 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3413 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3414 		}
3415 		break;
3416 	case ACB_ADAPTER_TYPE_B: {
3417 			struct HBB_MessageUnit *phbbmu;
3418 			struct CommandControlBlock *freesrb;
3419 			u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3420 			vm_offset_t	mem_base[]={0,0};
3421 			for(i=0; i<2; i++) {
3422 				if(i==0) {
3423 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3424 											0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3425 				} else {
3426 					acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3427 											0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3428 				}
3429 				if(acb->sys_res_arcmsr[i] == NULL) {
3430 					arcmsr_free_resource(acb);
3431 					kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3432 					return ENOMEM;
3433 				}
3434 				if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3435 					arcmsr_free_resource(acb);
3436 					kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3437 					return ENXIO;
3438 				}
3439 				mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3440 				if(mem_base[i]==0) {
3441 					arcmsr_free_resource(acb);
3442 					kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3443 					return ENXIO;
3444 				}
3445 				acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3446 				acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
3447 			}
3448 			freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3449 			acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3450 			phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3451 			phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3452 			phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3453 		}
3454 		break;
3455 	case ACB_ADAPTER_TYPE_C: {
3456 			u_int32_t rid0=PCIR_BAR(1);
3457 			vm_offset_t	mem_base0;
3458 
3459 			acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3460 			if(acb->sys_res_arcmsr[0] == NULL) {
3461 				arcmsr_free_resource(acb);
3462 				kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3463 				return ENOMEM;
3464 			}
3465 			if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3466 				arcmsr_free_resource(acb);
3467 				kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3468 				return ENXIO;
3469 			}
3470 			mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3471 			if(mem_base0==0) {
3472 				arcmsr_free_resource(acb);
3473 				kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3474 				return ENXIO;
3475 			}
3476 			acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3477 			acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3478 			acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3479 		}
3480 		break;
3481 	}
3482 	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3483 		arcmsr_free_resource(acb);
3484 		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3485 		return ENXIO;
3486 	}
3487 	acb->acb_flags  |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3488 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3489 	/*
3490 	********************************************************************
3491 	** init raid volume state
3492 	********************************************************************
3493 	*/
3494 	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3495 		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3496 			acb->devstate[i][j]=ARECA_RAID_GONE;
3497 		}
3498 	}
3499 	arcmsr_iop_init(acb);
3500 	return(0);
3501 }
3502 /*
3503 ************************************************************************
3504 ************************************************************************
3505 */
3506 static int arcmsr_attach(device_t dev)
3507 {
3508 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3509 	u_int32_t unit=device_get_unit(dev);
3510 	struct ccb_setasync csa;
3511 	struct cam_devq	*devq;	/* Device Queue to use for this SIM */
3512 	struct resource	*irqres;
3513 	int	rid;
3514 
3515 	if(acb == NULL) {
3516 		kprintf("arcmsr%d: cannot allocate softc\n", unit);
3517 		return (ENOMEM);
3518 	}
3519 	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3520 	if(arcmsr_initialize(dev)) {
3521 		kprintf("arcmsr%d: initialize failure!\n", unit);
3522 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3523 		return ENXIO;
3524 	}
3525 	/* After setting up the adapter, map our interrupt */
3526 	rid=0;
3527 	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
3528 	if(irqres == NULL ||
3529 		bus_setup_intr(dev, irqres, 0, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3530 		arcmsr_free_resource(acb);
3531 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3532 		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3533 		return ENXIO;
3534 	}
3535 	acb->irqres=irqres;
3536 	acb->pci_dev=dev;
3537 	acb->pci_unit=unit;
3538 	/*
3539 	 * Now let the CAM generic SCSI layer find the SCSI devices on
3540 	 * the bus and start the queue running from the idle loop.
3541 	 * Create the device queue for our SIM; (ARCMSR_MAX_START_JOB - 1)
3542 	 * is the maximum number of simultaneous SIM transactions.
3543 	 */
3544 	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3545 	if(devq == NULL) {
3546 		arcmsr_free_resource(acb);
3547 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3548 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3549 		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3550 		return ENXIO;
3551 	}
3552 	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
3553 	if(acb->psim == NULL) {
3554 		arcmsr_free_resource(acb);
3555 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3556 		cam_simq_release(devq);
3557 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3558 		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3559 		return ENXIO;
3560 	}
3561 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3562 	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3563 		arcmsr_free_resource(acb);
3564 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3565 		cam_sim_free(acb->psim);
3566 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3567 		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3568 		return ENXIO;
3569 	}
3570 	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3571 		arcmsr_free_resource(acb);
3572 		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3573 		xpt_bus_deregister(cam_sim_path(acb->psim));
3574 		cam_sim_free(acb->psim);
3575 		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3576 		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3577 		return ENXIO;
3578 	}
3579 	/*
3580 	****************************************************
3581 	*/
3582 	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3583 	csa.ccb_h.func_code=XPT_SASYNC_CB;
3584 	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3585 	csa.callback=arcmsr_async;
3586 	csa.callback_arg=acb->psim;
3587 	xpt_action((union ccb *)&csa);
3588 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3589 	/* Create the control device.  */
3590 	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3591 
3592 	acb->ioctl_dev->si_drv1=acb;
3593 	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
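	/* Start the one-minute device-map poll so volumes added or removed behind the controller are noticed. */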
3594 	callout_init(&acb->devmap_callout);
3595 	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3596 	return 0;
3597 }
3598 /*
3599 ************************************************************************
3600 ************************************************************************
3601 */
3602 static int arcmsr_probe(device_t dev)
3603 {
3604 	u_int32_t id;
3605 	static char buf[256];
3606 	char x_type[]={"X-TYPE"};
3607 	char *type;
3608 	int raid6 = 1;
3609 
3610 	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3611 		return (ENXIO);
3612 	}
3613 	switch(id=pci_get_devid(dev)) {
3614 	case PCIDevVenIDARC1110:
3615 	case PCIDevVenIDARC1200:
3616 	case PCIDevVenIDARC1201:
3617 	case PCIDevVenIDARC1210:
3618 		raid6 = 0;
3619 		/*FALLTHRU*/
3620 	case PCIDevVenIDARC1120:
3621 	case PCIDevVenIDARC1130:
3622 	case PCIDevVenIDARC1160:
3623 	case PCIDevVenIDARC1170:
3624 	case PCIDevVenIDARC1220:
3625 	case PCIDevVenIDARC1230:
3626 	case PCIDevVenIDARC1231:
3627 	case PCIDevVenIDARC1260:
3628 	case PCIDevVenIDARC1261:
3629 	case PCIDevVenIDARC1270:
3630 	case PCIDevVenIDARC1280:
3631 		type = "SATA";
3632 		break;
3633 	case PCIDevVenIDARC1212:
3634 	case PCIDevVenIDARC1222:
3635 	case PCIDevVenIDARC1380:
3636 	case PCIDevVenIDARC1381:
3637 	case PCIDevVenIDARC1680:
3638 	case PCIDevVenIDARC1681:
3639 		type = "SAS 3G";
3640 		break;
3641 	case PCIDevVenIDARC1880:
3642 		type = "SAS 6G";
3643 		break;
3644 	default:
3645 		type = x_type;
3646 		break;
3647 	}
3648 	if(type == x_type)
3649 		return(ENXIO);
3650 	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3651 	device_set_desc_copy(dev, buf);
3652 	return 0;
3653 }
3654 /*
3655 ************************************************************************
3656 ************************************************************************
3657 */
3658 static int arcmsr_shutdown(device_t dev)
3659 {
3660 	u_int32_t  i;
3661 	u_int32_t intmask_org;
3662 	struct CommandControlBlock *srb;
3663 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3664 
3665 	/* stop adapter background rebuild */
3666 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3667 	/* disable all outbound interrupt */
3668 	intmask_org=arcmsr_disable_allintr(acb);
3669 	arcmsr_stop_adapter_bgrb(acb);
3670 	arcmsr_flush_adapter_cache(acb);
3671 	/* abort all outstanding command */
3672 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3673 	acb->acb_flags &= ~ACB_F_IOP_INITED;
3674 	if(acb->srboutstandingcount!=0) {
3675 		/*clear and abort all outbound posted Q*/
3676 		arcmsr_done4abort_postqueue(acb);
3677 		/* talk to iop 331 outstanding command aborted*/
3678 		arcmsr_abort_allcmd(acb);
3679 		for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3680 			srb=acb->psrb_pool[i];
3681 			if(srb->startdone==ARCMSR_SRB_START) {
3682 				srb->startdone=ARCMSR_SRB_ABORTED;
3683 				srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3684 				arcmsr_srb_complete(srb, 1);
3685 			}
3686 		}
3687 	}
3688 	atomic_set_int(&acb->srboutstandingcount, 0);
3689 	acb->workingsrb_doneindex=0;
3690 	acb->workingsrb_startindex=0;
3691 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3692 	return (0);
3693 }
3694 /*
3695 ************************************************************************
3696 ************************************************************************
3697 */
3698 static int arcmsr_detach(device_t dev)
3699 {
3700 	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3701 	int i;
3702 
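	/*
	 * Teardown mirrors attach: stop the devmap poll, tear down the interrupt,
	 * quiesce the IOP via arcmsr_shutdown(), free DMA resources, release the
	 * BAR and IRQ resources, then detach from CAM.
	 */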
3703 	callout_stop(&acb->devmap_callout);
3704 	bus_teardown_intr(dev, acb->irqres, acb->ih);
3705 	arcmsr_shutdown(dev);
3706 	arcmsr_free_resource(acb);
3707 	for(i=0; (i<2) && (acb->sys_res_arcmsr[i]!=NULL); i++) {	/* check the index before touching the array */
3708 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3709 	}
3710 	bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3711 	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3712 	xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3713 	xpt_free_path(acb->ppath);
3714 	xpt_bus_deregister(cam_sim_path(acb->psim));
3715 	cam_sim_free(acb->psim);
3716 	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3717 	ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3718 	return (0);
3719 }
3720